Diffstat (limited to 'drivers/gpio/gpio-aggregator.c')
-rw-r--r--   drivers/gpio/gpio-aggregator.c | 1104
1 file changed, 939 insertions(+), 165 deletions(-)
diff --git a/drivers/gpio/gpio-aggregator.c b/drivers/gpio/gpio-aggregator.c index d232ea865356..6f941db02c04 100644 --- a/drivers/gpio/gpio-aggregator.c +++ b/drivers/gpio/gpio-aggregator.c @@ -9,10 +9,13 @@ #include <linux/bitmap.h> #include <linux/bitops.h> +#include <linux/configfs.h> #include <linux/ctype.h> #include <linux/delay.h> #include <linux/idr.h> #include <linux/kernel.h> +#include <linux/list.h> +#include <linux/lockdep.h> #include <linux/mod_devicetable.h> #include <linux/module.h> #include <linux/mutex.h> @@ -27,226 +30,200 @@ #include <linux/gpio/driver.h> #include <linux/gpio/machine.h> +#include "dev-sync-probe.h" + #define AGGREGATOR_MAX_GPIOS 512 +#define AGGREGATOR_LEGACY_PREFIX "_sysfs" /* * GPIO Aggregator sysfs interface */ struct gpio_aggregator { + struct dev_sync_probe_data probe_data; + struct config_group group; struct gpiod_lookup_table *lookups; - struct platform_device *pdev; + struct mutex lock; + int id; + + /* List of gpio_aggregator_line. Always added in order */ + struct list_head list_head; + + /* used by legacy sysfs interface only */ + bool init_via_sysfs; char args[]; }; +struct gpio_aggregator_line { + struct config_group group; + struct gpio_aggregator *parent; + struct list_head entry; + + /* Line index within the aggregator device */ + unsigned int idx; + + /* Custom name for the virtual line */ + const char *name; + /* GPIO chip label or line name */ + const char *key; + /* Can be negative to indicate lookup by line name */ + int offset; + + enum gpio_lookup_flags flags; +}; + +struct gpio_aggregator_pdev_meta { + bool init_via_sysfs; +}; + static DEFINE_MUTEX(gpio_aggregator_lock); /* protects idr */ static DEFINE_IDR(gpio_aggregator_idr); -static int aggr_add_gpio(struct gpio_aggregator *aggr, const char *key, - int hwnum, unsigned int *n) +static int gpio_aggregator_alloc(struct gpio_aggregator **aggr, size_t arg_size) { - struct gpiod_lookup_table *lookups; + int ret; - lookups = krealloc(aggr->lookups, struct_size(lookups, table, *n + 2), - GFP_KERNEL); - if (!lookups) + struct gpio_aggregator *new __free(kfree) = kzalloc( + sizeof(*new) + arg_size, GFP_KERNEL); + if (!new) return -ENOMEM; - lookups->table[*n] = GPIO_LOOKUP_IDX(key, hwnum, NULL, *n, 0); + scoped_guard(mutex, &gpio_aggregator_lock) + ret = idr_alloc(&gpio_aggregator_idr, new, 0, 0, GFP_KERNEL); - (*n)++; - memset(&lookups->table[*n], 0, sizeof(lookups->table[*n])); + if (ret < 0) + return ret; - aggr->lookups = lookups; + new->id = ret; + INIT_LIST_HEAD(&new->list_head); + mutex_init(&new->lock); + *aggr = no_free_ptr(new); return 0; } -static int aggr_parse(struct gpio_aggregator *aggr) +static void gpio_aggregator_free(struct gpio_aggregator *aggr) { - char *args = skip_spaces(aggr->args); - char *name, *offsets, *p; - unsigned int i, n = 0; - int error = 0; - - unsigned long *bitmap __free(bitmap) = - bitmap_alloc(AGGREGATOR_MAX_GPIOS, GFP_KERNEL); - if (!bitmap) - return -ENOMEM; - - args = next_arg(args, &name, &p); - while (*args) { - args = next_arg(args, &offsets, &p); - - p = get_options(offsets, 0, &error); - if (error == 0 || *p) { - /* Named GPIO line */ - error = aggr_add_gpio(aggr, name, U16_MAX, &n); - if (error) - return error; + scoped_guard(mutex, &gpio_aggregator_lock) + idr_remove(&gpio_aggregator_idr, aggr->id); - name = offsets; - continue; - } + mutex_destroy(&aggr->lock); + kfree(aggr); +} - /* GPIO chip + offset(s) */ - error = bitmap_parselist(offsets, bitmap, AGGREGATOR_MAX_GPIOS); - if (error) { - pr_err("Cannot parse %s: %d\n", offsets, 
error); - return error; - } +static int gpio_aggregator_add_gpio(struct gpio_aggregator *aggr, + const char *key, int hwnum, unsigned int *n) +{ + struct gpiod_lookup_table *lookups; - for_each_set_bit(i, bitmap, AGGREGATOR_MAX_GPIOS) { - error = aggr_add_gpio(aggr, name, i, &n); - if (error) - return error; - } + lookups = krealloc(aggr->lookups, struct_size(lookups, table, *n + 2), + GFP_KERNEL); + if (!lookups) + return -ENOMEM; - args = next_arg(args, &name, &p); - } + lookups->table[*n] = GPIO_LOOKUP_IDX(key, hwnum, NULL, *n, 0); - if (!n) { - pr_err("No GPIOs specified\n"); - return -EINVAL; - } + (*n)++; + memset(&lookups->table[*n], 0, sizeof(lookups->table[*n])); + aggr->lookups = lookups; return 0; } -static ssize_t new_device_store(struct device_driver *driver, const char *buf, - size_t count) +static bool gpio_aggregator_is_active(struct gpio_aggregator *aggr) { - struct gpio_aggregator *aggr; - struct platform_device *pdev; - int res, id; + lockdep_assert_held(&aggr->lock); - if (!try_module_get(THIS_MODULE)) - return -ENOENT; - - /* kernfs guarantees string termination, so count + 1 is safe */ - aggr = kzalloc(sizeof(*aggr) + count + 1, GFP_KERNEL); - if (!aggr) { - res = -ENOMEM; - goto put_module; - } - - memcpy(aggr->args, buf, count + 1); + return aggr->probe_data.pdev && platform_get_drvdata(aggr->probe_data.pdev); +} - aggr->lookups = kzalloc(struct_size(aggr->lookups, table, 1), - GFP_KERNEL); - if (!aggr->lookups) { - res = -ENOMEM; - goto free_ga; - } +/* Only aggregators created via legacy sysfs can be "activating". */ +static bool gpio_aggregator_is_activating(struct gpio_aggregator *aggr) +{ + lockdep_assert_held(&aggr->lock); - mutex_lock(&gpio_aggregator_lock); - id = idr_alloc(&gpio_aggregator_idr, aggr, 0, 0, GFP_KERNEL); - mutex_unlock(&gpio_aggregator_lock); + return aggr->probe_data.pdev && !platform_get_drvdata(aggr->probe_data.pdev); +} - if (id < 0) { - res = id; - goto free_table; - } +static size_t gpio_aggregator_count_lines(struct gpio_aggregator *aggr) +{ + lockdep_assert_held(&aggr->lock); - aggr->lookups->dev_id = kasprintf(GFP_KERNEL, "%s.%d", DRV_NAME, id); - if (!aggr->lookups->dev_id) { - res = -ENOMEM; - goto remove_idr; - } + return list_count_nodes(&aggr->list_head); +} - res = aggr_parse(aggr); - if (res) - goto free_dev_id; +static struct gpio_aggregator_line * +gpio_aggregator_line_alloc(struct gpio_aggregator *parent, unsigned int idx, + char *key, int offset) +{ + struct gpio_aggregator_line *line; - gpiod_add_lookup_table(aggr->lookups); + line = kzalloc(sizeof(*line), GFP_KERNEL); + if (!line) + return ERR_PTR(-ENOMEM); - pdev = platform_device_register_simple(DRV_NAME, id, NULL, 0); - if (IS_ERR(pdev)) { - res = PTR_ERR(pdev); - goto remove_table; + if (key) { + line->key = kstrdup(key, GFP_KERNEL); + if (!line->key) { + kfree(line); + return ERR_PTR(-ENOMEM); + } } - aggr->pdev = pdev; - module_put(THIS_MODULE); - return count; + line->flags = GPIO_LOOKUP_FLAGS_DEFAULT; + line->parent = parent; + line->idx = idx; + line->offset = offset; + INIT_LIST_HEAD(&line->entry); -remove_table: - gpiod_remove_lookup_table(aggr->lookups); -free_dev_id: - kfree(aggr->lookups->dev_id); -remove_idr: - mutex_lock(&gpio_aggregator_lock); - idr_remove(&gpio_aggregator_idr, id); - mutex_unlock(&gpio_aggregator_lock); -free_table: - kfree(aggr->lookups); -free_ga: - kfree(aggr); -put_module: - module_put(THIS_MODULE); - return res; -} - -static DRIVER_ATTR_WO(new_device); - -static void gpio_aggregator_free(struct gpio_aggregator *aggr) -{ - 
platform_device_unregister(aggr->pdev); - gpiod_remove_lookup_table(aggr->lookups); - kfree(aggr->lookups->dev_id); - kfree(aggr->lookups); - kfree(aggr); + return line; } -static ssize_t delete_device_store(struct device_driver *driver, - const char *buf, size_t count) +static void gpio_aggregator_line_add(struct gpio_aggregator *aggr, + struct gpio_aggregator_line *line) { - struct gpio_aggregator *aggr; - unsigned int id; - int error; + struct gpio_aggregator_line *tmp; - if (!str_has_prefix(buf, DRV_NAME ".")) - return -EINVAL; + lockdep_assert_held(&aggr->lock); - error = kstrtouint(buf + strlen(DRV_NAME "."), 10, &id); - if (error) - return error; - - if (!try_module_get(THIS_MODULE)) - return -ENOENT; - - mutex_lock(&gpio_aggregator_lock); - aggr = idr_remove(&gpio_aggregator_idr, id); - mutex_unlock(&gpio_aggregator_lock); - if (!aggr) { - module_put(THIS_MODULE); - return -ENOENT; + list_for_each_entry(tmp, &aggr->list_head, entry) { + if (tmp->idx > line->idx) { + list_add_tail(&line->entry, &tmp->entry); + return; + } } - - gpio_aggregator_free(aggr); - module_put(THIS_MODULE); - return count; + list_add_tail(&line->entry, &aggr->list_head); } -static DRIVER_ATTR_WO(delete_device); - -static struct attribute *gpio_aggregator_attrs[] = { - &driver_attr_new_device.attr, - &driver_attr_delete_device.attr, - NULL -}; -ATTRIBUTE_GROUPS(gpio_aggregator); -static int __exit gpio_aggregator_idr_remove(int id, void *p, void *data) +static void gpio_aggregator_line_del(struct gpio_aggregator *aggr, + struct gpio_aggregator_line *line) { - gpio_aggregator_free(p); - return 0; + lockdep_assert_held(&aggr->lock); + + list_del(&line->entry); } -static void __exit gpio_aggregator_remove_all(void) +static void gpio_aggregator_free_lines(struct gpio_aggregator *aggr) { - mutex_lock(&gpio_aggregator_lock); - idr_for_each(&gpio_aggregator_idr, gpio_aggregator_idr_remove, NULL); - idr_destroy(&gpio_aggregator_idr); - mutex_unlock(&gpio_aggregator_lock); + struct gpio_aggregator_line *line, *tmp; + + list_for_each_entry_safe(line, tmp, &aggr->list_head, entry) { + configfs_unregister_group(&line->group); + /* + * Normally, we acquire aggr->lock within the configfs + * callback. However, in the legacy sysfs interface case, + * calling configfs_(un)register_group while holding + * aggr->lock could cause a deadlock. Fortunately, this is + * unnecessary because the new_device/delete_device path + * and the module unload path are mutually exclusive, + * thanks to an explicit try_module_get. That's why this + * minimal scoped_guard suffices. 
+ */ + scoped_guard(mutex, &aggr->lock) + gpio_aggregator_line_del(aggr, line); + kfree(line->key); + kfree(line->name); + kfree(line); + } } @@ -582,6 +559,728 @@ static struct gpiochip_fwd *gpiochip_fwd_create(struct device *dev, return fwd; } +/* + * Configfs interface + */ + +static struct gpio_aggregator * +to_gpio_aggregator(struct config_item *item) +{ + struct config_group *group = to_config_group(item); + + return container_of(group, struct gpio_aggregator, group); +} + +static struct gpio_aggregator_line * +to_gpio_aggregator_line(struct config_item *item) +{ + struct config_group *group = to_config_group(item); + + return container_of(group, struct gpio_aggregator_line, group); +} + +static struct fwnode_handle * +gpio_aggregator_make_device_sw_node(struct gpio_aggregator *aggr) +{ + struct property_entry properties[2]; + struct gpio_aggregator_line *line; + size_t num_lines; + int n = 0; + + memset(properties, 0, sizeof(properties)); + + num_lines = gpio_aggregator_count_lines(aggr); + if (num_lines == 0) + return NULL; + + const char **line_names __free(kfree) = kcalloc( + num_lines, sizeof(*line_names), GFP_KERNEL); + if (!line_names) + return ERR_PTR(-ENOMEM); + + /* The list is always sorted as new elements are inserted in order. */ + list_for_each_entry(line, &aggr->list_head, entry) + line_names[n++] = line->name ?: ""; + + properties[0] = PROPERTY_ENTRY_STRING_ARRAY_LEN( + "gpio-line-names", + line_names, num_lines); + + return fwnode_create_software_node(properties, NULL); +} + +static int gpio_aggregator_activate(struct gpio_aggregator *aggr) +{ + struct platform_device_info pdevinfo; + struct gpio_aggregator_line *line; + struct fwnode_handle *swnode; + unsigned int n = 0; + int ret = 0; + + if (gpio_aggregator_count_lines(aggr) == 0) + return -EINVAL; + + aggr->lookups = kzalloc(struct_size(aggr->lookups, table, 1), + GFP_KERNEL); + if (!aggr->lookups) + return -ENOMEM; + + swnode = gpio_aggregator_make_device_sw_node(aggr); + if (IS_ERR(swnode)) { + ret = PTR_ERR(swnode); + goto err_remove_lookups; + } + + memset(&pdevinfo, 0, sizeof(pdevinfo)); + pdevinfo.name = DRV_NAME; + pdevinfo.id = aggr->id; + pdevinfo.fwnode = swnode; + + /* The list is always sorted as new elements are inserted in order. */ + list_for_each_entry(line, &aggr->list_head, entry) { + /* + * - Either GPIO chip label or line name must be configured + * (i.e. line->key must be non-NULL) + * - Line directories must be named with sequential numeric + * suffixes starting from 0. (i.e. ./line0, ./line1, ...) 
+ */ + if (!line->key || line->idx != n) { + ret = -EINVAL; + goto err_remove_swnode; + } + + if (line->offset < 0) + ret = gpio_aggregator_add_gpio(aggr, line->key, + U16_MAX, &n); + else + ret = gpio_aggregator_add_gpio(aggr, line->key, + line->offset, &n); + if (ret) + goto err_remove_swnode; + } + + aggr->lookups->dev_id = kasprintf(GFP_KERNEL, "%s.%d", DRV_NAME, aggr->id); + if (!aggr->lookups->dev_id) { + ret = -ENOMEM; + goto err_remove_swnode; + } + + gpiod_add_lookup_table(aggr->lookups); + + ret = dev_sync_probe_register(&aggr->probe_data, &pdevinfo); + if (ret) + goto err_remove_lookup_table; + + return 0; + +err_remove_lookup_table: + kfree(aggr->lookups->dev_id); + gpiod_remove_lookup_table(aggr->lookups); +err_remove_swnode: + fwnode_remove_software_node(swnode); +err_remove_lookups: + kfree(aggr->lookups); + + return ret; +} + +static void gpio_aggregator_deactivate(struct gpio_aggregator *aggr) +{ + dev_sync_probe_unregister(&aggr->probe_data); + gpiod_remove_lookup_table(aggr->lookups); + kfree(aggr->lookups->dev_id); + kfree(aggr->lookups); +} + +static void gpio_aggregator_lockup_configfs(struct gpio_aggregator *aggr, + bool lock) +{ + struct configfs_subsystem *subsys = aggr->group.cg_subsys; + struct gpio_aggregator_line *line; + + /* + * The device only needs to depend on leaf lines. This is + * sufficient to lock up all the configfs entries that the + * instantiated, alive device depends on. + */ + list_for_each_entry(line, &aggr->list_head, entry) { + if (lock) + configfs_depend_item_unlocked( + subsys, &line->group.cg_item); + else + configfs_undepend_item_unlocked( + &line->group.cg_item); + } +} + +static ssize_t +gpio_aggregator_line_key_show(struct config_item *item, char *page) +{ + struct gpio_aggregator_line *line = to_gpio_aggregator_line(item); + struct gpio_aggregator *aggr = line->parent; + + guard(mutex)(&aggr->lock); + + return sysfs_emit(page, "%s\n", line->key ?: ""); +} + +static ssize_t +gpio_aggregator_line_key_store(struct config_item *item, const char *page, + size_t count) +{ + struct gpio_aggregator_line *line = to_gpio_aggregator_line(item); + struct gpio_aggregator *aggr = line->parent; + + char *key __free(kfree) = kstrndup(skip_spaces(page), count, + GFP_KERNEL); + if (!key) + return -ENOMEM; + + strim(key); + + guard(mutex)(&aggr->lock); + + if (gpio_aggregator_is_activating(aggr) || + gpio_aggregator_is_active(aggr)) + return -EBUSY; + + kfree(line->key); + line->key = no_free_ptr(key); + + return count; +} +CONFIGFS_ATTR(gpio_aggregator_line_, key); + +static ssize_t +gpio_aggregator_line_name_show(struct config_item *item, char *page) +{ + struct gpio_aggregator_line *line = to_gpio_aggregator_line(item); + struct gpio_aggregator *aggr = line->parent; + + guard(mutex)(&aggr->lock); + + return sysfs_emit(page, "%s\n", line->name ?: ""); +} + +static ssize_t +gpio_aggregator_line_name_store(struct config_item *item, const char *page, + size_t count) +{ + struct gpio_aggregator_line *line = to_gpio_aggregator_line(item); + struct gpio_aggregator *aggr = line->parent; + + char *name __free(kfree) = kstrndup(skip_spaces(page), count, + GFP_KERNEL); + if (!name) + return -ENOMEM; + + strim(name); + + guard(mutex)(&aggr->lock); + + if (gpio_aggregator_is_activating(aggr) || + gpio_aggregator_is_active(aggr)) + return -EBUSY; + + kfree(line->name); + line->name = no_free_ptr(name); + + return count; +} +CONFIGFS_ATTR(gpio_aggregator_line_, name); + +static ssize_t +gpio_aggregator_line_offset_show(struct config_item *item, char *page) +{ + 
struct gpio_aggregator_line *line = to_gpio_aggregator_line(item); + struct gpio_aggregator *aggr = line->parent; + + guard(mutex)(&aggr->lock); + + return sysfs_emit(page, "%d\n", line->offset); +} + +static ssize_t +gpio_aggregator_line_offset_store(struct config_item *item, const char *page, + size_t count) +{ + struct gpio_aggregator_line *line = to_gpio_aggregator_line(item); + struct gpio_aggregator *aggr = line->parent; + int offset, ret; + + ret = kstrtoint(page, 0, &offset); + if (ret) + return ret; + + /* + * When offset == -1, 'key' represents a line name to lookup. + * When 0 <= offset < 65535, 'key' represents the label of the chip with + * the 'offset' value representing the line within that chip. + * + * GPIOLIB uses the U16_MAX value to indicate lookup by line name so + * the greatest offset we can accept is (U16_MAX - 1). + */ + if (offset > (U16_MAX - 1) || offset < -1) + return -EINVAL; + + guard(mutex)(&aggr->lock); + + if (gpio_aggregator_is_activating(aggr) || + gpio_aggregator_is_active(aggr)) + return -EBUSY; + + line->offset = offset; + + return count; +} +CONFIGFS_ATTR(gpio_aggregator_line_, offset); + +static struct configfs_attribute *gpio_aggregator_line_attrs[] = { + &gpio_aggregator_line_attr_key, + &gpio_aggregator_line_attr_name, + &gpio_aggregator_line_attr_offset, + NULL +}; + +static ssize_t +gpio_aggregator_device_dev_name_show(struct config_item *item, char *page) +{ + struct gpio_aggregator *aggr = to_gpio_aggregator(item); + struct platform_device *pdev; + + guard(mutex)(&aggr->lock); + + pdev = aggr->probe_data.pdev; + if (pdev) + return sysfs_emit(page, "%s\n", dev_name(&pdev->dev)); + + return sysfs_emit(page, "%s.%d\n", DRV_NAME, aggr->id); +} +CONFIGFS_ATTR_RO(gpio_aggregator_device_, dev_name); + +static ssize_t +gpio_aggregator_device_live_show(struct config_item *item, char *page) +{ + struct gpio_aggregator *aggr = to_gpio_aggregator(item); + + guard(mutex)(&aggr->lock); + + return sysfs_emit(page, "%c\n", + gpio_aggregator_is_active(aggr) ? '1' : '0'); +} + +static ssize_t +gpio_aggregator_device_live_store(struct config_item *item, const char *page, + size_t count) +{ + struct gpio_aggregator *aggr = to_gpio_aggregator(item); + int ret = 0; + bool live; + + ret = kstrtobool(page, &live); + if (ret) + return ret; + + if (!try_module_get(THIS_MODULE)) + return -ENOENT; + + if (live && !aggr->init_via_sysfs) + gpio_aggregator_lockup_configfs(aggr, true); + + scoped_guard(mutex, &aggr->lock) { + if (gpio_aggregator_is_activating(aggr) || + (live == gpio_aggregator_is_active(aggr))) + ret = -EPERM; + else if (live) + ret = gpio_aggregator_activate(aggr); + else + gpio_aggregator_deactivate(aggr); + } + + /* + * Undepend is required only if device disablement (live == 0) + * succeeds or if device enablement (live == 1) fails. 
+ */ + if (live == !!ret && !aggr->init_via_sysfs) + gpio_aggregator_lockup_configfs(aggr, false); + + module_put(THIS_MODULE); + + return ret ?: count; +} +CONFIGFS_ATTR(gpio_aggregator_device_, live); + +static struct configfs_attribute *gpio_aggregator_device_attrs[] = { + &gpio_aggregator_device_attr_dev_name, + &gpio_aggregator_device_attr_live, + NULL +}; + +static void +gpio_aggregator_line_release(struct config_item *item) +{ + struct gpio_aggregator_line *line = to_gpio_aggregator_line(item); + struct gpio_aggregator *aggr = line->parent; + + guard(mutex)(&aggr->lock); + + gpio_aggregator_line_del(aggr, line); + kfree(line->key); + kfree(line->name); + kfree(line); +} + +static struct configfs_item_operations gpio_aggregator_line_item_ops = { + .release = gpio_aggregator_line_release, +}; + +static const struct config_item_type gpio_aggregator_line_type = { + .ct_item_ops = &gpio_aggregator_line_item_ops, + .ct_attrs = gpio_aggregator_line_attrs, + .ct_owner = THIS_MODULE, +}; + +static void gpio_aggregator_device_release(struct config_item *item) +{ + struct gpio_aggregator *aggr = to_gpio_aggregator(item); + + /* + * At this point, aggr is neither active nor activating, + * so calling gpio_aggregator_deactivate() is always unnecessary. + */ + gpio_aggregator_free(aggr); +} + +static struct configfs_item_operations gpio_aggregator_device_item_ops = { + .release = gpio_aggregator_device_release, +}; + +static struct config_group * +gpio_aggregator_device_make_group(struct config_group *group, const char *name) +{ + struct gpio_aggregator *aggr = to_gpio_aggregator(&group->cg_item); + struct gpio_aggregator_line *line; + unsigned int idx; + int ret, nchar; + + ret = sscanf(name, "line%u%n", &idx, &nchar); + if (ret != 1 || nchar != strlen(name)) + return ERR_PTR(-EINVAL); + + if (aggr->init_via_sysfs) + /* + * Aggregators created via legacy sysfs interface are exposed as + * default groups, which means rmdir(2) is prohibited for them. + * For simplicity, and to avoid confusion, we also prohibit + * mkdir(2). + */ + return ERR_PTR(-EPERM); + + guard(mutex)(&aggr->lock); + + if (gpio_aggregator_is_active(aggr)) + return ERR_PTR(-EBUSY); + + list_for_each_entry(line, &aggr->list_head, entry) + if (line->idx == idx) + return ERR_PTR(-EINVAL); + + line = gpio_aggregator_line_alloc(aggr, idx, NULL, -1); + if (IS_ERR(line)) + return ERR_CAST(line); + + config_group_init_type_name(&line->group, name, &gpio_aggregator_line_type); + + gpio_aggregator_line_add(aggr, line); + + return &line->group; +} + +static struct configfs_group_operations gpio_aggregator_device_group_ops = { + .make_group = gpio_aggregator_device_make_group, +}; + +static const struct config_item_type gpio_aggregator_device_type = { + .ct_group_ops = &gpio_aggregator_device_group_ops, + .ct_item_ops = &gpio_aggregator_device_item_ops, + .ct_attrs = gpio_aggregator_device_attrs, + .ct_owner = THIS_MODULE, +}; + +static struct config_group * +gpio_aggregator_make_group(struct config_group *group, const char *name) +{ + struct gpio_aggregator *aggr; + int ret; + + /* + * "_sysfs" prefix is reserved for auto-generated config group + * for devices create via legacy sysfs interface. 
+ */ + if (strncmp(name, AGGREGATOR_LEGACY_PREFIX, + sizeof(AGGREGATOR_LEGACY_PREFIX) - 1) == 0) + return ERR_PTR(-EINVAL); + + /* arg space is unneeded */ + ret = gpio_aggregator_alloc(&aggr, 0); + if (ret) + return ERR_PTR(ret); + + config_group_init_type_name(&aggr->group, name, &gpio_aggregator_device_type); + dev_sync_probe_init(&aggr->probe_data); + + return &aggr->group; +} + +static struct configfs_group_operations gpio_aggregator_group_ops = { + .make_group = gpio_aggregator_make_group, +}; + +static const struct config_item_type gpio_aggregator_type = { + .ct_group_ops = &gpio_aggregator_group_ops, + .ct_owner = THIS_MODULE, +}; + +static struct configfs_subsystem gpio_aggregator_subsys = { + .su_group = { + .cg_item = { + .ci_namebuf = DRV_NAME, + .ci_type = &gpio_aggregator_type, + }, + }, +}; + +/* + * Sysfs interface + */ +static int gpio_aggregator_parse(struct gpio_aggregator *aggr) +{ + char *args = skip_spaces(aggr->args); + struct gpio_aggregator_line *line; + char name[CONFIGFS_ITEM_NAME_LEN]; + char *key, *offsets, *p; + unsigned int i, n = 0; + int error = 0; + + unsigned long *bitmap __free(bitmap) = + bitmap_alloc(AGGREGATOR_MAX_GPIOS, GFP_KERNEL); + if (!bitmap) + return -ENOMEM; + + args = next_arg(args, &key, &p); + while (*args) { + args = next_arg(args, &offsets, &p); + + p = get_options(offsets, 0, &error); + if (error == 0 || *p) { + /* Named GPIO line */ + scnprintf(name, sizeof(name), "line%u", n); + line = gpio_aggregator_line_alloc(aggr, n, key, -1); + if (IS_ERR(line)) { + error = PTR_ERR(line); + goto err; + } + config_group_init_type_name(&line->group, name, + &gpio_aggregator_line_type); + error = configfs_register_group(&aggr->group, + &line->group); + if (error) + goto err; + scoped_guard(mutex, &aggr->lock) + gpio_aggregator_line_add(aggr, line); + + error = gpio_aggregator_add_gpio(aggr, key, U16_MAX, &n); + if (error) + goto err; + + key = offsets; + continue; + } + + /* GPIO chip + offset(s) */ + error = bitmap_parselist(offsets, bitmap, AGGREGATOR_MAX_GPIOS); + if (error) { + pr_err("Cannot parse %s: %d\n", offsets, error); + goto err; + } + + for_each_set_bit(i, bitmap, AGGREGATOR_MAX_GPIOS) { + scnprintf(name, sizeof(name), "line%u", n); + line = gpio_aggregator_line_alloc(aggr, n, key, i); + if (IS_ERR(line)) { + error = PTR_ERR(line); + goto err; + } + config_group_init_type_name(&line->group, name, + &gpio_aggregator_line_type); + error = configfs_register_group(&aggr->group, + &line->group); + if (error) + goto err; + scoped_guard(mutex, &aggr->lock) + gpio_aggregator_line_add(aggr, line); + + error = gpio_aggregator_add_gpio(aggr, key, i, &n); + if (error) + goto err; + } + + args = next_arg(args, &key, &p); + } + + if (!n) { + pr_err("No GPIOs specified\n"); + error = -EINVAL; + goto err; + } + + return 0; + +err: + gpio_aggregator_free_lines(aggr); + return error; +} + +static ssize_t gpio_aggregator_new_device_store(struct device_driver *driver, + const char *buf, size_t count) +{ + struct gpio_aggregator_pdev_meta meta = { .init_via_sysfs = true }; + char name[CONFIGFS_ITEM_NAME_LEN]; + struct gpio_aggregator *aggr; + struct platform_device *pdev; + int res; + + if (!try_module_get(THIS_MODULE)) + return -ENOENT; + + /* kernfs guarantees string termination, so count + 1 is safe */ + res = gpio_aggregator_alloc(&aggr, count + 1); + if (res) + goto put_module; + + memcpy(aggr->args, buf, count + 1); + + aggr->init_via_sysfs = true; + aggr->lookups = kzalloc(struct_size(aggr->lookups, table, 1), + GFP_KERNEL); + if (!aggr->lookups) { + 
res = -ENOMEM; + goto free_ga; + } + + aggr->lookups->dev_id = kasprintf(GFP_KERNEL, "%s.%d", DRV_NAME, aggr->id); + if (!aggr->lookups->dev_id) { + res = -ENOMEM; + goto free_table; + } + + scnprintf(name, sizeof(name), "%s.%d", AGGREGATOR_LEGACY_PREFIX, aggr->id); + config_group_init_type_name(&aggr->group, name, &gpio_aggregator_device_type); + + /* + * Since the device created by sysfs might be toggled via configfs + * 'live' attribute later, this initialization is needed. + */ + dev_sync_probe_init(&aggr->probe_data); + + /* Expose to configfs */ + res = configfs_register_group(&gpio_aggregator_subsys.su_group, + &aggr->group); + if (res) + goto free_dev_id; + + res = gpio_aggregator_parse(aggr); + if (res) + goto unregister_group; + + gpiod_add_lookup_table(aggr->lookups); + + pdev = platform_device_register_data(NULL, DRV_NAME, aggr->id, &meta, sizeof(meta)); + if (IS_ERR(pdev)) { + res = PTR_ERR(pdev); + goto remove_table; + } + + aggr->probe_data.pdev = pdev; + module_put(THIS_MODULE); + return count; + +remove_table: + gpiod_remove_lookup_table(aggr->lookups); +unregister_group: + configfs_unregister_group(&aggr->group); +free_dev_id: + kfree(aggr->lookups->dev_id); +free_table: + kfree(aggr->lookups); +free_ga: + gpio_aggregator_free(aggr); +put_module: + module_put(THIS_MODULE); + return res; +} + +static struct driver_attribute driver_attr_gpio_aggregator_new_device = + __ATTR(new_device, 0200, NULL, gpio_aggregator_new_device_store); + +static void gpio_aggregator_destroy(struct gpio_aggregator *aggr) +{ + scoped_guard(mutex, &aggr->lock) { + if (gpio_aggregator_is_activating(aggr) || + gpio_aggregator_is_active(aggr)) + gpio_aggregator_deactivate(aggr); + } + gpio_aggregator_free_lines(aggr); + configfs_unregister_group(&aggr->group); + kfree(aggr); +} + +static ssize_t gpio_aggregator_delete_device_store(struct device_driver *driver, + const char *buf, size_t count) +{ + struct gpio_aggregator *aggr; + unsigned int id; + int error; + + if (!str_has_prefix(buf, DRV_NAME ".")) + return -EINVAL; + + error = kstrtouint(buf + strlen(DRV_NAME "."), 10, &id); + if (error) + return error; + + if (!try_module_get(THIS_MODULE)) + return -ENOENT; + + mutex_lock(&gpio_aggregator_lock); + aggr = idr_find(&gpio_aggregator_idr, id); + /* + * For simplicity, devices created via configfs cannot be deleted + * via sysfs. 
+ */ + if (aggr && aggr->init_via_sysfs) + idr_remove(&gpio_aggregator_idr, id); + else { + mutex_unlock(&gpio_aggregator_lock); + module_put(THIS_MODULE); + return -ENOENT; + } + mutex_unlock(&gpio_aggregator_lock); + + gpio_aggregator_destroy(aggr); + module_put(THIS_MODULE); + return count; +} + +static struct driver_attribute driver_attr_gpio_aggregator_delete_device = + __ATTR(delete_device, 0200, NULL, gpio_aggregator_delete_device_store); + +static struct attribute *gpio_aggregator_attrs[] = { + &driver_attr_gpio_aggregator_new_device.attr, + &driver_attr_gpio_aggregator_delete_device.attr, + NULL +}; +ATTRIBUTE_GROUPS(gpio_aggregator); /* * GPIO Aggregator platform device @@ -589,7 +1288,9 @@ static struct gpiochip_fwd *gpiochip_fwd_create(struct device *dev, static int gpio_aggregator_probe(struct platform_device *pdev) { + struct gpio_aggregator_pdev_meta *meta; struct device *dev = &pdev->dev; + bool init_via_sysfs = false; struct gpio_desc **descs; struct gpiochip_fwd *fwd; unsigned long features; @@ -603,10 +1304,28 @@ static int gpio_aggregator_probe(struct platform_device *pdev) if (!descs) return -ENOMEM; + meta = dev_get_platdata(&pdev->dev); + if (meta && meta->init_via_sysfs) + init_via_sysfs = true; + for (i = 0; i < n; i++) { descs[i] = devm_gpiod_get_index(dev, NULL, i, GPIOD_ASIS); - if (IS_ERR(descs[i])) + if (IS_ERR(descs[i])) { + /* + * Deferred probing is not suitable when the aggregator + * is created via configfs. They should just retry later + * whenever they like. For device creation via sysfs, + * error is propagated without overriding for backward + * compatibility. .prevent_deferred_probe is kept unset + * for other cases. + */ + if (!init_via_sysfs && !dev_of_node(dev) && + descs[i] == ERR_PTR(-EPROBE_DEFER)) { + pr_warn("Deferred probe canceled for creation via configfs.\n"); + return -ENODEV; + } return PTR_ERR(descs[i]); + } } features = (uintptr_t)device_get_match_data(dev); @@ -640,9 +1359,63 @@ static struct platform_driver gpio_aggregator_driver = { }, }; +static int __exit gpio_aggregator_idr_remove(int id, void *p, void *data) +{ + /* + * There should be no aggregator created via configfs, as their + * presence would prevent module unloading. + */ + gpio_aggregator_destroy(p); + return 0; +} + +static void __exit gpio_aggregator_remove_all(void) +{ + /* + * Configfs callbacks acquire gpio_aggregator_lock when accessing + * gpio_aggregator_idr, so to prevent lock inversion deadlock, we + * cannot protect idr_for_each invocation here with + * gpio_aggregator_lock, as gpio_aggregator_idr_remove() accesses + * configfs groups. Fortunately, the new_device/delete_device path + * and the module unload path are mutually exclusive, thanks to an + * explicit try_module_get inside of those driver attr handlers. + * Also, when we reach here, no configfs entries present or being + * created. Therefore, no need to protect with gpio_aggregator_lock + * below. 
+ */ + idr_for_each(&gpio_aggregator_idr, gpio_aggregator_idr_remove, NULL); + idr_destroy(&gpio_aggregator_idr); +} + static int __init gpio_aggregator_init(void) { - return platform_driver_register(&gpio_aggregator_driver); + int ret = 0; + + config_group_init(&gpio_aggregator_subsys.su_group); + mutex_init(&gpio_aggregator_subsys.su_mutex); + ret = configfs_register_subsystem(&gpio_aggregator_subsys); + if (ret) { + pr_err("Failed to register the '%s' configfs subsystem: %d\n", + gpio_aggregator_subsys.su_group.cg_item.ci_namebuf, ret); + mutex_destroy(&gpio_aggregator_subsys.su_mutex); + return ret; + } + + /* + * CAVEAT: This must occur after configfs registration. Otherwise, + * a race condition could arise: driver attribute groups might be + * exposed and accessed by users before configfs registration + * completes. new_device_store() does not expect a partially + * initialized configfs state. + */ + ret = platform_driver_register(&gpio_aggregator_driver); + if (ret) { + pr_err("Failed to register the platform driver: %d\n", ret); + mutex_destroy(&gpio_aggregator_subsys.su_mutex); + configfs_unregister_subsystem(&gpio_aggregator_subsys); + } + + return ret; } module_init(gpio_aggregator_init); @@ -650,6 +1423,7 @@ static void __exit gpio_aggregator_exit(void) { gpio_aggregator_remove_all(); platform_driver_unregister(&gpio_aggregator_driver); + configfs_unregister_subsystem(&gpio_aggregator_subsys); } module_exit(gpio_aggregator_exit); |
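
The bulk of this patch adds a configfs front end: gpio_aggregator_make_group() creates a device group, per-line "lineN" groups expose key/name/offset attributes, and the device group exposes dev_name and live. As a rough illustration of how that interface is exercised from userspace, here is a minimal C sketch. It is an assumption-laden example, not part of the patch: it assumes configfs is mounted at /sys/kernel/config, that the subsystem directory is named after DRV_NAME ("gpio-aggregator", defined outside this hunk), and it uses the placeholder chip label "gpiochip0" with offset 0.

```c
/*
 * Minimal sketch of driving the configfs interface added by this patch.
 * Assumptions: configfs mounted at /sys/kernel/config; subsystem directory
 * named "gpio-aggregator" (DRV_NAME, defined outside this hunk);
 * "gpiochip0"/offset 0 are placeholder values for an existing GPIO chip.
 */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>

static int write_attr(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);

	if (fd < 0 || write(fd, val, strlen(val)) < 0) {
		perror(path);
		if (fd >= 0)
			close(fd);
		return -1;
	}
	return close(fd);
}

int main(void)
{
	const char *dev = "/sys/kernel/config/gpio-aggregator/agg0";
	char path[256];

	/* mkdir(2) on the subsystem creates an aggregator device group */
	if (mkdir(dev, 0755) && errno != EEXIST)
		perror(dev);

	/* "lineN" groups must be numbered sequentially starting from 0 */
	snprintf(path, sizeof(path), "%s/line0", dev);
	mkdir(path, 0755);

	/* key = GPIO chip label (offset >= 0) or line name (offset == -1) */
	snprintf(path, sizeof(path), "%s/line0/key", dev);
	write_attr(path, "gpiochip0");
	snprintf(path, sizeof(path), "%s/line0/offset", dev);
	write_attr(path, "0");

	/* writing 1 to "live" activates the aggregator platform device */
	snprintf(path, sizeof(path), "%s/live", dev);
	write_attr(path, "1");

	return 0;
}
```

The sequence above mirrors gpio_aggregator_device_make_group(), the gpio_aggregator_line_*_store() handlers and gpio_aggregator_device_live_store() in the diff; writing 0 to live deactivates the device again, and rmdir(2) on the line and device directories tears the configuration down.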
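For comparison, the legacy sysfs path retained by gpio_aggregator_new_device_store() and gpio_aggregator_delete_device_store() can be driven in the same spirit. The sketch below is likewise hedged: the driver-attribute directory /sys/bus/platform/drivers/gpio-aggregator/ and the argument string "gpiochip0 0,1" are assumptions, chosen to match the "<chip-label-or-line-name> [offsets] ..." form that gpio_aggregator_parse() accepts.

```c
/*
 * Sketch of the legacy sysfs path kept by this patch.  Assumptions: driver
 * attributes under /sys/bus/platform/drivers/gpio-aggregator/ (DRV_NAME is
 * defined outside this hunk); "gpiochip0 0,1" is a placeholder argument
 * string in the form parsed by gpio_aggregator_parse().
 */
#include <stdio.h>

int main(void)
{
	const char *drv = "/sys/bus/platform/drivers/gpio-aggregator";
	char path[128];
	FILE *f;

	snprintf(path, sizeof(path), "%s/new_device", drv);
	f = fopen(path, "w");
	if (!f) {
		perror(path);
		return 1;
	}
	/* aggregate lines 0 and 1 of the chip labelled "gpiochip0" */
	fputs("gpiochip0 0,1", f);
	fclose(f);

	/*
	 * The resulting device is later removed by writing its name, e.g.
	 * "gpio-aggregator.0", to the delete_device attribute.
	 */
	return 0;
}
```

Per the diff, an aggregator created this way also appears in configfs under the reserved "_sysfs.<id>" group name (AGGREGATOR_LEGACY_PREFIX), where its lines are exposed as default groups: rmdir(2) and mkdir(2) on them are refused, and the device must be removed through delete_device instead.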