Diffstat (limited to 'drivers')
516 files changed, 28519 insertions, 6308 deletions
diff --git a/drivers/accel/amdxdna/aie2_pci.c b/drivers/accel/amdxdna/aie2_pci.c index 5a058e565b01..c6cf7068d23c 100644 --- a/drivers/accel/amdxdna/aie2_pci.c +++ b/drivers/accel/amdxdna/aie2_pci.c @@ -512,12 +512,6 @@ static int aie2_init(struct amdxdna_dev *xdna) goto release_fw; } - ret = iommu_dev_enable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA); - if (ret) { - XDNA_ERR(xdna, "Enable PASID failed, ret %d", ret); - goto free_irq; - } - psp_conf.fw_size = fw->size; psp_conf.fw_buf = fw->data; for (i = 0; i < PSP_MAX_REGS; i++) @@ -526,14 +520,14 @@ static int aie2_init(struct amdxdna_dev *xdna) if (!ndev->psp_hdl) { XDNA_ERR(xdna, "failed to create psp"); ret = -ENOMEM; - goto disable_sva; + goto free_irq; } xdna->dev_handle = ndev; ret = aie2_hw_start(xdna); if (ret) { XDNA_ERR(xdna, "start npu failed, ret %d", ret); - goto disable_sva; + goto free_irq; } ret = aie2_mgmt_fw_query(ndev); @@ -584,8 +578,6 @@ async_event_free: aie2_error_async_events_free(ndev); stop_hw: aie2_hw_stop(xdna); -disable_sva: - iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA); free_irq: pci_free_irq_vectors(pdev); release_fw: @@ -601,7 +593,6 @@ static void aie2_fini(struct amdxdna_dev *xdna) aie2_hw_stop(xdna); aie2_error_async_events_free(ndev); - iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA); pci_free_irq_vectors(pdev); } diff --git a/drivers/acpi/acpi_mrrm.c b/drivers/acpi/acpi_mrrm.c index 26c1a4e6b6ec..47ea3ccc2142 100644 --- a/drivers/acpi/acpi_mrrm.c +++ b/drivers/acpi/acpi_mrrm.c @@ -157,8 +157,10 @@ static __init int add_boot_memory_ranges(void) for (int i = 0; i < mrrm_mem_entry_num; i++) { name = kasprintf(GFP_KERNEL, "range%d", i); - if (!name) + if (!name) { + ret = -ENOMEM; break; + } kobj = kobject_create_and_add(name, pkobj); diff --git a/drivers/acpi/acpica/utnonansi.c b/drivers/acpi/acpica/utnonansi.c index 803e3e893825..ff0802ace19b 100644 --- a/drivers/acpi/acpica/utnonansi.c +++ b/drivers/acpi/acpica/utnonansi.c @@ -168,7 +168,7 @@ void acpi_ut_safe_strncpy(char *dest, char *source, acpi_size dest_size) { /* Always terminate destination string */ - memcpy(dest, source, dest_size); + strncpy(dest, source, dest_size); dest[dest_size - 1] = 0; } diff --git a/drivers/acpi/apei/einj-core.c b/drivers/acpi/apei/einj-core.c index ca3484dac5c4..fea11a35eea3 100644 --- a/drivers/acpi/apei/einj-core.c +++ b/drivers/acpi/apei/einj-core.c @@ -766,7 +766,7 @@ static int __init einj_probe(struct faux_device *fdev) rc = einj_get_available_error_type(&available_error_type); if (rc) - return rc; + goto err_put_table; rc = -ENOMEM; einj_debug_dir = debugfs_create_dir("einj", apei_get_debugfs_dir()); diff --git a/drivers/acpi/numa/srat.c b/drivers/acpi/numa/srat.c index 0a725e46d017..53816dfab645 100644 --- a/drivers/acpi/numa/srat.c +++ b/drivers/acpi/numa/srat.c @@ -14,6 +14,7 @@ #include <linux/errno.h> #include <linux/acpi.h> #include <linux/memblock.h> +#include <linux/memory.h> #include <linux/numa.h> #include <linux/nodemask.h> #include <linux/topology.h> @@ -429,13 +430,23 @@ static int __init acpi_parse_cfmws(union acpi_subtable_headers *header, { struct acpi_cedt_cfmws *cfmws; int *fake_pxm = arg; - u64 start, end; + u64 start, end, align; int node; + int err; cfmws = (struct acpi_cedt_cfmws *)header; start = cfmws->base_hpa; end = cfmws->base_hpa + cfmws->window_size; + /* Align memblock size to CFMW regions if possible */ + align = 1UL << __ffs(start | end); + if (align >= SZ_256M) { + err = memory_block_advise_max_size(align); + if (err) + pr_warn("CFMWS: memblock size advise 
failed (%d)\n", err); + } else + pr_err("CFMWS: [BIOS BUG] base/size alignment violates spec\n"); + /* * The SRAT may have already described NUMA details for all, * or a portion of, this CFMWS HPA range. Extend the memblks @@ -453,7 +464,7 @@ static int __init acpi_parse_cfmws(union acpi_subtable_headers *header, return -EINVAL; } - if (numa_add_memblk(node, start, end) < 0) { + if (numa_add_reserved_memblk(node, start, end) < 0) { /* CXL driver must handle the NUMA_NO_NODE case */ pr_warn("ACPI NUMA: Failed to add memblk for CFMWS node %d [mem %#llx-%#llx]\n", node, start, end); diff --git a/drivers/base/memory.c b/drivers/base/memory.c index 19469e7f88c2..ed3e69dc785c 100644 --- a/drivers/base/memory.c +++ b/drivers/base/memory.c @@ -110,6 +110,57 @@ static void memory_block_release(struct device *dev) kfree(mem); } + +/* Max block size to be set by memory_block_advise_max_size */ +static unsigned long memory_block_advised_size; +static bool memory_block_advised_size_queried; + +/** + * memory_block_advise_max_size() - advise memory hotplug on the max suggested + * block size, usually for alignment. + * @size: suggestion for maximum block size. must be aligned on power of 2. + * + * Early boot software (pre-allocator init) may advise archs on the max block + * size. This value can only decrease after initialization, as the intent is + * to identify the largest supported alignment for all sources. + * + * Use of this value is arch-defined, as is min/max block size. + * + * Return: 0 on success + * -EINVAL if size is 0 or not pow2 aligned + * -EBUSY if value has already been probed + */ +int __init memory_block_advise_max_size(unsigned long size) +{ + if (!size || !is_power_of_2(size)) + return -EINVAL; + + if (memory_block_advised_size_queried) + return -EBUSY; + + if (memory_block_advised_size) + memory_block_advised_size = min(memory_block_advised_size, size); + else + memory_block_advised_size = size; + + return 0; +} + +/** + * memory_block_advised_max_size() - query advised max hotplug block size. + * + * After the first call, the value can never change. Callers looking for the + * actual block size should use memory_block_size_bytes. This interface is + * intended for use by arch-init when initializing the hotplug block size. + * + * Return: advised size in bytes, or 0 if never set. 
+ */ +unsigned long memory_block_advised_max_size(void) +{ + memory_block_advised_size_queried = true; + return memory_block_advised_size; +} + unsigned long __weak memory_block_size_bytes(void) { return MIN_MEMORY_BLOCK_SIZE; diff --git a/drivers/base/node.c b/drivers/base/node.c index 618712071a1e..c19094481630 100644 --- a/drivers/base/node.c +++ b/drivers/base/node.c @@ -7,6 +7,7 @@ #include <linux/init.h> #include <linux/mm.h> #include <linux/memory.h> +#include <linux/mempolicy.h> #include <linux/vmstat.h> #include <linux/notifier.h> #include <linux/node.h> @@ -214,6 +215,14 @@ void node_set_perf_attrs(unsigned int nid, struct access_coordinate *coord, break; } } + + /* When setting CPU access coordinates, update mempolicy */ + if (access == ACCESS_COORDINATE_CPU) { + if (mempolicy_set_node_perf(nid, coord)) { + pr_info("failed to set mempolicy attrs for node %d\n", + nid); + } + } } EXPORT_SYMBOL_GPL(node_set_perf_attrs); diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index fda7d8624889..94e6e9b80bf0 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -734,114 +734,19 @@ static void read_from_bdev_async(struct zram *zram, struct page *page, submit_bio(bio); } -#define PAGE_WB_SIG "page_index=" - -#define PAGE_WRITEBACK 0 -#define HUGE_WRITEBACK (1<<0) -#define IDLE_WRITEBACK (1<<1) -#define INCOMPRESSIBLE_WRITEBACK (1<<2) - -static int scan_slots_for_writeback(struct zram *zram, u32 mode, - unsigned long nr_pages, - unsigned long index, - struct zram_pp_ctl *ctl) +static int zram_writeback_slots(struct zram *zram, struct zram_pp_ctl *ctl) { - for (; nr_pages != 0; index++, nr_pages--) { - bool ok = true; - - zram_slot_lock(zram, index); - if (!zram_allocated(zram, index)) - goto next; - - if (zram_test_flag(zram, index, ZRAM_WB) || - zram_test_flag(zram, index, ZRAM_SAME)) - goto next; - - if (mode & IDLE_WRITEBACK && - !zram_test_flag(zram, index, ZRAM_IDLE)) - goto next; - if (mode & HUGE_WRITEBACK && - !zram_test_flag(zram, index, ZRAM_HUGE)) - goto next; - if (mode & INCOMPRESSIBLE_WRITEBACK && - !zram_test_flag(zram, index, ZRAM_INCOMPRESSIBLE)) - goto next; - - ok = place_pp_slot(zram, ctl, index); -next: - zram_slot_unlock(zram, index); - if (!ok) - break; - } - - return 0; -} - -static ssize_t writeback_store(struct device *dev, - struct device_attribute *attr, const char *buf, size_t len) -{ - struct zram *zram = dev_to_zram(dev); - unsigned long nr_pages = zram->disksize >> PAGE_SHIFT; - struct zram_pp_ctl *ctl = NULL; + unsigned long blk_idx = 0; + struct page *page = NULL; struct zram_pp_slot *pps; - unsigned long index = 0; - struct bio bio; struct bio_vec bio_vec; - struct page *page = NULL; - ssize_t ret = len; - int mode, err; - unsigned long blk_idx = 0; - - if (sysfs_streq(buf, "idle")) - mode = IDLE_WRITEBACK; - else if (sysfs_streq(buf, "huge")) - mode = HUGE_WRITEBACK; - else if (sysfs_streq(buf, "huge_idle")) - mode = IDLE_WRITEBACK | HUGE_WRITEBACK; - else if (sysfs_streq(buf, "incompressible")) - mode = INCOMPRESSIBLE_WRITEBACK; - else { - if (strncmp(buf, PAGE_WB_SIG, sizeof(PAGE_WB_SIG) - 1)) - return -EINVAL; - - if (kstrtol(buf + sizeof(PAGE_WB_SIG) - 1, 10, &index) || - index >= nr_pages) - return -EINVAL; - - nr_pages = 1; - mode = PAGE_WRITEBACK; - } - - down_read(&zram->init_lock); - if (!init_done(zram)) { - ret = -EINVAL; - goto release_init_lock; - } - - /* Do not permit concurrent post-processing actions. 
*/ - if (atomic_xchg(&zram->pp_in_progress, 1)) { - up_read(&zram->init_lock); - return -EAGAIN; - } - - if (!zram->backing_dev) { - ret = -ENODEV; - goto release_init_lock; - } + struct bio bio; + int ret = 0, err; + u32 index; page = alloc_page(GFP_KERNEL); - if (!page) { - ret = -ENOMEM; - goto release_init_lock; - } - - ctl = init_pp_ctl(); - if (!ctl) { - ret = -ENOMEM; - goto release_init_lock; - } - - scan_slots_for_writeback(zram, mode, nr_pages, index, ctl); + if (!page) + return -ENOMEM; while ((pps = select_pp_slot(ctl))) { spin_lock(&zram->wb_limit_lock); @@ -929,10 +834,215 @@ next: if (blk_idx) free_block_bdev(zram, blk_idx); - -release_init_lock: if (page) __free_page(page); + + return ret; +} + +#define PAGE_WRITEBACK 0 +#define HUGE_WRITEBACK (1 << 0) +#define IDLE_WRITEBACK (1 << 1) +#define INCOMPRESSIBLE_WRITEBACK (1 << 2) + +static int parse_page_index(char *val, unsigned long nr_pages, + unsigned long *lo, unsigned long *hi) +{ + int ret; + + ret = kstrtoul(val, 10, lo); + if (ret) + return ret; + if (*lo >= nr_pages) + return -ERANGE; + *hi = *lo + 1; + return 0; +} + +static int parse_page_indexes(char *val, unsigned long nr_pages, + unsigned long *lo, unsigned long *hi) +{ + char *delim; + int ret; + + delim = strchr(val, '-'); + if (!delim) + return -EINVAL; + + *delim = 0x00; + ret = kstrtoul(val, 10, lo); + if (ret) + return ret; + if (*lo >= nr_pages) + return -ERANGE; + + ret = kstrtoul(delim + 1, 10, hi); + if (ret) + return ret; + if (*hi >= nr_pages || *lo > *hi) + return -ERANGE; + *hi += 1; + return 0; +} + +static int parse_mode(char *val, u32 *mode) +{ + *mode = 0; + + if (!strcmp(val, "idle")) + *mode = IDLE_WRITEBACK; + if (!strcmp(val, "huge")) + *mode = HUGE_WRITEBACK; + if (!strcmp(val, "huge_idle")) + *mode = IDLE_WRITEBACK | HUGE_WRITEBACK; + if (!strcmp(val, "incompressible")) + *mode = INCOMPRESSIBLE_WRITEBACK; + + if (*mode == 0) + return -EINVAL; + return 0; +} + +static int scan_slots_for_writeback(struct zram *zram, u32 mode, + unsigned long lo, unsigned long hi, + struct zram_pp_ctl *ctl) +{ + u32 index = lo; + + while (index < hi) { + bool ok = true; + + zram_slot_lock(zram, index); + if (!zram_allocated(zram, index)) + goto next; + + if (zram_test_flag(zram, index, ZRAM_WB) || + zram_test_flag(zram, index, ZRAM_SAME)) + goto next; + + if (mode & IDLE_WRITEBACK && + !zram_test_flag(zram, index, ZRAM_IDLE)) + goto next; + if (mode & HUGE_WRITEBACK && + !zram_test_flag(zram, index, ZRAM_HUGE)) + goto next; + if (mode & INCOMPRESSIBLE_WRITEBACK && + !zram_test_flag(zram, index, ZRAM_INCOMPRESSIBLE)) + goto next; + + ok = place_pp_slot(zram, ctl, index); +next: + zram_slot_unlock(zram, index); + if (!ok) + break; + index++; + } + + return 0; +} + +static ssize_t writeback_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t len) +{ + struct zram *zram = dev_to_zram(dev); + u64 nr_pages = zram->disksize >> PAGE_SHIFT; + unsigned long lo = 0, hi = nr_pages; + struct zram_pp_ctl *ctl = NULL; + char *args, *param, *val; + ssize_t ret = len; + int err, mode = 0; + + down_read(&zram->init_lock); + if (!init_done(zram)) { + up_read(&zram->init_lock); + return -EINVAL; + } + + /* Do not permit concurrent post-processing actions. 
*/ + if (atomic_xchg(&zram->pp_in_progress, 1)) { + up_read(&zram->init_lock); + return -EAGAIN; + } + + if (!zram->backing_dev) { + ret = -ENODEV; + goto release_init_lock; + } + + ctl = init_pp_ctl(); + if (!ctl) { + ret = -ENOMEM; + goto release_init_lock; + } + + args = skip_spaces(buf); + while (*args) { + args = next_arg(args, ¶m, &val); + + /* + * Workaround to support the old writeback interface. + * + * The old writeback interface has a minor inconsistency and + * requires key=value only for page_index parameter, while the + * writeback mode is a valueless parameter. + * + * This is not the case anymore and now all parameters are + * required to have values, however, we need to support the + * legacy writeback interface format so we check if we can + * recognize a valueless parameter as the (legacy) writeback + * mode. + */ + if (!val || !*val) { + err = parse_mode(param, &mode); + if (err) { + ret = err; + goto release_init_lock; + } + + scan_slots_for_writeback(zram, mode, lo, hi, ctl); + break; + } + + if (!strcmp(param, "type")) { + err = parse_mode(val, &mode); + if (err) { + ret = err; + goto release_init_lock; + } + + scan_slots_for_writeback(zram, mode, lo, hi, ctl); + break; + } + + if (!strcmp(param, "page_index")) { + err = parse_page_index(val, nr_pages, &lo, &hi); + if (err) { + ret = err; + goto release_init_lock; + } + + scan_slots_for_writeback(zram, mode, lo, hi, ctl); + continue; + } + + if (!strcmp(param, "page_indexes")) { + err = parse_page_indexes(val, nr_pages, &lo, &hi); + if (err) { + ret = err; + goto release_init_lock; + } + + scan_slots_for_writeback(zram, mode, lo, hi, ctl); + continue; + } + } + + err = zram_writeback_slots(zram, ctl); + if (err) + ret = err; + +release_init_lock: release_pp_ctl(zram, ctl); atomic_set(&zram->pp_in_progress, 0); up_read(&zram->init_lock); @@ -1694,7 +1804,7 @@ static int write_incompressible_page(struct zram *zram, struct page *page, */ handle = zs_malloc(zram->mem_pool, PAGE_SIZE, GFP_NOIO | __GFP_NOWARN | - __GFP_HIGHMEM | __GFP_MOVABLE); + __GFP_HIGHMEM | __GFP_MOVABLE, page_to_nid(page)); if (IS_ERR_VALUE(handle)) return PTR_ERR((void *)handle); @@ -1761,7 +1871,7 @@ static int zram_write_page(struct zram *zram, struct page *page, u32 index) handle = zs_malloc(zram->mem_pool, comp_len, GFP_NOIO | __GFP_NOWARN | - __GFP_HIGHMEM | __GFP_MOVABLE); + __GFP_HIGHMEM | __GFP_MOVABLE, page_to_nid(page)); if (IS_ERR_VALUE(handle)) { zcomp_stream_put(zstrm); return PTR_ERR((void *)handle); @@ -1981,10 +2091,15 @@ static int recompress_slot(struct zram *zram, u32 index, struct page *page, * We are holding per-CPU stream mutex and entry lock so better * avoid direct reclaim. Allocation error is not fatal since * we still have the old object in the mem_pool. + * + * XXX: technically, the node we really want here is the node that holds + * the original compressed data. But that would require us to modify + * zsmalloc API to return this information. For now, we will make do with + * the node of the page allocated for recompression. 
*/ handle_new = zs_malloc(zram->mem_pool, comp_len_new, GFP_NOIO | __GFP_NOWARN | - __GFP_HIGHMEM | __GFP_MOVABLE); + __GFP_HIGHMEM | __GFP_MOVABLE, page_to_nid(page)); if (IS_ERR_VALUE(handle_new)) { zcomp_stream_put(zstrm); return PTR_ERR((void *)handle_new); diff --git a/drivers/bus/brcmstb_gisb.c b/drivers/bus/brcmstb_gisb.c index ee29162da4ee..91ef99c42344 100644 --- a/drivers/bus/brcmstb_gisb.c +++ b/drivers/bus/brcmstb_gisb.c @@ -395,10 +395,7 @@ static struct attribute *gisb_arb_sysfs_attrs[] = { &dev_attr_gisb_arb_timeout.attr, NULL, }; - -static struct attribute_group gisb_arb_sysfs_attr_group = { - .attrs = gisb_arb_sysfs_attrs, -}; +ATTRIBUTE_GROUPS(gisb_arb_sysfs); static const struct of_device_id brcmstb_gisb_arb_of_match[] = { { .compatible = "brcm,gisb-arb", .data = gisb_offsets_bcm7445 }, @@ -490,10 +487,6 @@ static int __init brcmstb_gisb_arb_probe(struct platform_device *pdev) } } - err = sysfs_create_group(&pdev->dev.kobj, &gisb_arb_sysfs_attr_group); - if (err) - return err; - platform_set_drvdata(pdev, gdev); list_add_tail(&gdev->next, &brcmstb_gisb_arb_device_list); @@ -550,6 +543,7 @@ static struct platform_driver brcmstb_gisb_arb_driver = { .name = "brcm-gisb-arb", .of_match_table = brcmstb_gisb_arb_of_match, .pm = &brcmstb_gisb_arb_pm_ops, + .dev_groups = gisb_arb_sysfs_groups, }, }; diff --git a/drivers/bus/fsl-mc/dprc-driver.c b/drivers/bus/fsl-mc/dprc-driver.c index 52053f7c6d9a..c63a7e688db6 100644 --- a/drivers/bus/fsl-mc/dprc-driver.c +++ b/drivers/bus/fsl-mc/dprc-driver.c @@ -806,8 +806,6 @@ int dprc_cleanup(struct fsl_mc_device *mc_dev) dev_set_msi_domain(&mc_dev->dev, NULL); } - fsl_mc_cleanup_all_resource_pools(mc_dev); - /* if this step fails we cannot go further with cleanup as there is no way of * communicating with the firmware */ diff --git a/drivers/bus/fsl-mc/dprc.c b/drivers/bus/fsl-mc/dprc.c index dd1b5c0fb7e2..38d40c09b719 100644 --- a/drivers/bus/fsl-mc/dprc.c +++ b/drivers/bus/fsl-mc/dprc.c @@ -489,7 +489,7 @@ int dprc_set_obj_irq(struct fsl_mc_io *mc_io, cmd_params->irq_addr = cpu_to_le64(irq_cfg->paddr); cmd_params->irq_num = cpu_to_le32(irq_cfg->irq_num); cmd_params->obj_id = cpu_to_le32(obj_id); - strscpy_pad(cmd_params->obj_type, obj_type, 16); + strscpy(cmd_params->obj_type, obj_type); /* send command to mc*/ return mc_send_command(mc_io, &cmd); @@ -561,7 +561,7 @@ int dprc_get_obj_region(struct fsl_mc_io *mc_io, cmd_params = (struct dprc_cmd_get_obj_region *)cmd.params; cmd_params->obj_id = cpu_to_le32(obj_id); cmd_params->region_index = region_index; - strscpy_pad(cmd_params->obj_type, obj_type, 16); + strscpy(cmd_params->obj_type, obj_type); /* send command to mc*/ err = mc_send_command(mc_io, &cmd); diff --git a/drivers/bus/fsl-mc/fsl-mc-allocator.c b/drivers/bus/fsl-mc/fsl-mc-allocator.c index 6c3beb82dd1b..d2ea59471323 100644 --- a/drivers/bus/fsl-mc/fsl-mc-allocator.c +++ b/drivers/bus/fsl-mc/fsl-mc-allocator.c @@ -555,27 +555,6 @@ void fsl_mc_init_all_resource_pools(struct fsl_mc_device *mc_bus_dev) } } -static void fsl_mc_cleanup_resource_pool(struct fsl_mc_device *mc_bus_dev, - enum fsl_mc_pool_type pool_type) -{ - struct fsl_mc_resource *resource; - struct fsl_mc_resource *next; - struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev); - struct fsl_mc_resource_pool *res_pool = - &mc_bus->resource_pools[pool_type]; - - list_for_each_entry_safe(resource, next, &res_pool->free_list, node) - devm_kfree(&mc_bus_dev->dev, resource); -} - -void fsl_mc_cleanup_all_resource_pools(struct fsl_mc_device *mc_bus_dev) -{ - int pool_type; - 
- for (pool_type = 0; pool_type < FSL_MC_NUM_POOL_TYPES; pool_type++) - fsl_mc_cleanup_resource_pool(mc_bus_dev, pool_type); -} - /* * fsl_mc_allocator_probe - callback invoked when an allocatable device is * being added to the system diff --git a/drivers/bus/fsl-mc/fsl-mc-bus.c b/drivers/bus/fsl-mc/fsl-mc-bus.c index a8be8cf246fb..7671bd158545 100644 --- a/drivers/bus/fsl-mc/fsl-mc-bus.c +++ b/drivers/bus/fsl-mc/fsl-mc-bus.c @@ -139,9 +139,9 @@ static int fsl_mc_bus_uevent(const struct device *dev, struct kobj_uevent_env *e static int fsl_mc_dma_configure(struct device *dev) { + const struct device_driver *drv = READ_ONCE(dev->driver); struct device *dma_dev = dev; struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev); - struct fsl_mc_driver *mc_drv = to_fsl_mc_driver(dev->driver); u32 input_id = mc_dev->icid; int ret; @@ -153,8 +153,8 @@ static int fsl_mc_dma_configure(struct device *dev) else ret = acpi_dma_configure_id(dev, DEV_DMA_COHERENT, &input_id); - /* @mc_drv may not be valid when we're called from the IOMMU layer */ - if (!ret && dev->driver && !mc_drv->driver_managed_dma) { + /* @drv may not be valid when we're called from the IOMMU layer */ + if (!ret && drv && !to_fsl_mc_driver(drv)->driver_managed_dma) { ret = iommu_device_use_default_domain(dev); if (ret) arch_teardown_dma_ops(dev); @@ -906,8 +906,10 @@ int fsl_mc_device_add(struct fsl_mc_obj_desc *obj_desc, error_cleanup_dev: kfree(mc_dev->regions); - kfree(mc_bus); - kfree(mc_dev); + if (mc_bus) + kfree(mc_bus); + else + kfree(mc_dev); return error; } diff --git a/drivers/bus/fsl-mc/fsl-mc-private.h b/drivers/bus/fsl-mc/fsl-mc-private.h index e1b7ec3ed1a7..beed4c53533d 100644 --- a/drivers/bus/fsl-mc/fsl-mc-private.h +++ b/drivers/bus/fsl-mc/fsl-mc-private.h @@ -629,8 +629,6 @@ int __init fsl_mc_allocator_driver_init(void); void fsl_mc_init_all_resource_pools(struct fsl_mc_device *mc_bus_dev); -void fsl_mc_cleanup_all_resource_pools(struct fsl_mc_device *mc_bus_dev); - int __must_check fsl_mc_resource_allocate(struct fsl_mc_bus *mc_bus, enum fsl_mc_pool_type pool_type, struct fsl_mc_resource diff --git a/drivers/bus/fsl-mc/fsl-mc-uapi.c b/drivers/bus/fsl-mc/fsl-mc-uapi.c index 9c4c1395fcdb..823969e4159c 100644 --- a/drivers/bus/fsl-mc/fsl-mc-uapi.c +++ b/drivers/bus/fsl-mc/fsl-mc-uapi.c @@ -48,6 +48,7 @@ enum fsl_mc_cmd_index { DPRC_GET_POOL, DPRC_GET_POOL_COUNT, DPRC_GET_CONNECTION, + DPRC_GET_MEM, DPCI_GET_LINK_STATE, DPCI_GET_PEER_ATTR, DPAIOP_GET_SL_VERSION, @@ -194,6 +195,12 @@ static struct fsl_mc_cmd_desc fsl_mc_accepted_cmds[] = { .token = true, .size = 32, }, + [DPRC_GET_MEM] = { + .cmdid_value = 0x16D0, + .cmdid_mask = 0xFFF0, + .token = true, + .size = 12, + }, [DPCI_GET_LINK_STATE] = { .cmdid_value = 0x0E10, @@ -275,13 +282,13 @@ static struct fsl_mc_cmd_desc fsl_mc_accepted_cmds[] = { .size = 8, }, [DPSW_GET_TAILDROP] = { - .cmdid_value = 0x0A80, + .cmdid_value = 0x0A90, .cmdid_mask = 0xFFF0, .token = true, .size = 14, }, [DPSW_SET_TAILDROP] = { - .cmdid_value = 0x0A90, + .cmdid_value = 0x0A80, .cmdid_mask = 0xFFF0, .token = true, .size = 24, diff --git a/drivers/bus/fsl-mc/mc-io.c b/drivers/bus/fsl-mc/mc-io.c index a0ad7866cbfc..cd8754763f40 100644 --- a/drivers/bus/fsl-mc/mc-io.c +++ b/drivers/bus/fsl-mc/mc-io.c @@ -214,12 +214,19 @@ int __must_check fsl_mc_portal_allocate(struct fsl_mc_device *mc_dev, if (error < 0) goto error_cleanup_resource; - dpmcp_dev->consumer_link = device_link_add(&mc_dev->dev, - &dpmcp_dev->dev, - DL_FLAG_AUTOREMOVE_CONSUMER); - if (!dpmcp_dev->consumer_link) { - error = -EINVAL; 
- goto error_cleanup_mc_io; + /* If the DPRC device itself tries to allocate a portal (usually for + * UAPI interaction), don't add a device link between them since the + * DPMCP device is an actual child device of the DPRC and a reverse + * dependency is not allowed. + */ + if (mc_dev != mc_bus_dev) { + dpmcp_dev->consumer_link = device_link_add(&mc_dev->dev, + &dpmcp_dev->dev, + DL_FLAG_AUTOREMOVE_CONSUMER); + if (!dpmcp_dev->consumer_link) { + error = -EINVAL; + goto error_cleanup_mc_io; + } } *new_mc_io = mc_io; diff --git a/drivers/bus/fsl-mc/mc-sys.c b/drivers/bus/fsl-mc/mc-sys.c index f2052cd0a051..b22c59d57c8f 100644 --- a/drivers/bus/fsl-mc/mc-sys.c +++ b/drivers/bus/fsl-mc/mc-sys.c @@ -19,7 +19,7 @@ /* * Timeout in milliseconds to wait for the completion of an MC command */ -#define MC_CMD_COMPLETION_TIMEOUT_MS 500 +#define MC_CMD_COMPLETION_TIMEOUT_MS 15000 /* * usleep_range() min and max values used to throttle down polling diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c index f67b927ae4ca..9f624e5da991 100644 --- a/drivers/bus/ti-sysc.c +++ b/drivers/bus/ti-sysc.c @@ -677,51 +677,6 @@ static int sysc_parse_and_check_child_range(struct sysc *ddata) return 0; } -/* Interconnect instances to probe before l4_per instances */ -static struct resource early_bus_ranges[] = { - /* am3/4 l4_wkup */ - { .start = 0x44c00000, .end = 0x44c00000 + 0x300000, }, - /* omap4/5 and dra7 l4_cfg */ - { .start = 0x4a000000, .end = 0x4a000000 + 0x300000, }, - /* omap4 l4_wkup */ - { .start = 0x4a300000, .end = 0x4a300000 + 0x30000, }, - /* omap5 and dra7 l4_wkup without dra7 dcan segment */ - { .start = 0x4ae00000, .end = 0x4ae00000 + 0x30000, }, -}; - -static atomic_t sysc_defer = ATOMIC_INIT(10); - -/** - * sysc_defer_non_critical - defer non_critical interconnect probing - * @ddata: device driver data - * - * We want to probe l4_cfg and l4_wkup interconnect instances before any - * l4_per instances as l4_per instances depend on resources on l4_cfg and - * l4_wkup interconnects. - */ -static int sysc_defer_non_critical(struct sysc *ddata) -{ - struct resource *res; - int i; - - if (!atomic_read(&sysc_defer)) - return 0; - - for (i = 0; i < ARRAY_SIZE(early_bus_ranges); i++) { - res = &early_bus_ranges[i]; - if (ddata->module_pa >= res->start && - ddata->module_pa <= res->end) { - atomic_set(&sysc_defer, 0); - - return 0; - } - } - - atomic_dec_if_positive(&sysc_defer); - - return -EPROBE_DEFER; -} - static struct device_node *stdout_path; static void sysc_init_stdout_path(struct sysc *ddata) @@ -947,10 +902,6 @@ static int sysc_map_and_check_registers(struct sysc *ddata) if (error) return error; - error = sysc_defer_non_critical(ddata); - if (error) - return error; - sysc_check_children(ddata); if (!of_property_present(np, "reg")) @@ -2036,6 +1987,21 @@ static void sysc_module_disable_quirk_pruss(struct sysc *ddata) sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg); } +static void sysc_module_enable_quirk_pruss(struct sysc *ddata) +{ + u32 reg; + + reg = sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]); + + /* + * Clearing the SYSC_PRUSS_STANDBY_INIT bit - Updates OCP master + * port configuration to enable memory access outside of the + * PRU-ICSS subsystem. 
+ */ + reg &= (~SYSC_PRUSS_STANDBY_INIT); + sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg); +} + static void sysc_init_module_quirks(struct sysc *ddata) { if (ddata->legacy_mode || !ddata->name) @@ -2088,8 +2054,10 @@ static void sysc_init_module_quirks(struct sysc *ddata) ddata->module_disable_quirk = sysc_reset_done_quirk_wdt; } - if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_PRUSS) + if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_PRUSS) { + ddata->module_enable_quirk = sysc_module_enable_quirk_pruss; ddata->module_disable_quirk = sysc_module_disable_quirk_pruss; + } } static int sysc_clockdomain_init(struct sysc *ddata) diff --git a/drivers/cache/sifive_ccache.c b/drivers/cache/sifive_ccache.c index 6874b72ec59d..e1a283805ea7 100644 --- a/drivers/cache/sifive_ccache.c +++ b/drivers/cache/sifive_ccache.c @@ -118,6 +118,8 @@ static void ccache_config_read(void) } static const struct of_device_id sifive_ccache_ids[] = { + { .compatible = "eswin,eic7700-l3-cache", + .data = (void *)(QUIRK_NONSTANDARD_CACHE_OPS) }, { .compatible = "sifive,fu540-c000-ccache" }, { .compatible = "sifive,fu740-c000-ccache" }, { .compatible = "starfive,jh7100-ccache", diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig index 713573b6c86c..19c1ed280fd7 100644 --- a/drivers/clk/Kconfig +++ b/drivers/clk/Kconfig @@ -517,6 +517,7 @@ source "drivers/clk/samsung/Kconfig" source "drivers/clk/sifive/Kconfig" source "drivers/clk/socfpga/Kconfig" source "drivers/clk/sophgo/Kconfig" +source "drivers/clk/spacemit/Kconfig" source "drivers/clk/sprd/Kconfig" source "drivers/clk/starfive/Kconfig" source "drivers/clk/sunxi/Kconfig" diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile index bf4bd45adc3a..42867cd37c33 100644 --- a/drivers/clk/Makefile +++ b/drivers/clk/Makefile @@ -145,6 +145,7 @@ obj-$(CONFIG_COMMON_CLK_SAMSUNG) += samsung/ obj-$(CONFIG_CLK_SIFIVE) += sifive/ obj-y += socfpga/ obj-y += sophgo/ +obj-y += spacemit/ obj-$(CONFIG_PLAT_SPEAR) += spear/ obj-y += sprd/ obj-$(CONFIG_ARCH_STI) += st/ diff --git a/drivers/clk/bcm/clk-kona.c b/drivers/clk/bcm/clk-kona.c index 2b0ea882f1e4..0171e6b2bfca 100644 --- a/drivers/clk/bcm/clk-kona.c +++ b/drivers/clk/bcm/clk-kona.c @@ -53,24 +53,6 @@ static inline u64 scaled_div_value(struct bcm_clk_div *div, u32 reg_div) return (u64)reg_div + ((u64)1 << div->u.s.frac_width); } -/* - * Build a scaled divider value as close as possible to the - * given whole part (div_value) and fractional part (expressed - * in billionths). 
- */ -u64 scaled_div_build(struct bcm_clk_div *div, u32 div_value, u32 billionths) -{ - u64 combined; - - BUG_ON(!div_value); - BUG_ON(billionths >= BILLION); - - combined = (u64)div_value * BILLION + billionths; - combined <<= div->u.s.frac_width; - - return DIV_ROUND_CLOSEST_ULL(combined, BILLION); -} - /* The scaled minimum divisor representable by a divider */ static inline u64 scaled_div_min(struct bcm_clk_div *div) diff --git a/drivers/clk/bcm/clk-kona.h b/drivers/clk/bcm/clk-kona.h index e09655024ac2..348a3454ce40 100644 --- a/drivers/clk/bcm/clk-kona.h +++ b/drivers/clk/bcm/clk-kona.h @@ -492,8 +492,6 @@ extern struct clk_ops kona_peri_clk_ops; /* Externally visible functions */ extern u64 scaled_div_max(struct bcm_clk_div *div); -extern u64 scaled_div_build(struct bcm_clk_div *div, u32 div_value, - u32 billionths); extern void __init kona_dt_ccu_setup(struct ccu_data *ccu, struct device_node *node); diff --git a/drivers/clk/bcm/clk-raspberrypi.c b/drivers/clk/bcm/clk-raspberrypi.c index 0e1fe3759530..8e4fde03ed23 100644 --- a/drivers/clk/bcm/clk-raspberrypi.c +++ b/drivers/clk/bcm/clk-raspberrypi.c @@ -286,6 +286,8 @@ static struct clk_hw *raspberrypi_clk_register(struct raspberrypi_clk *rpi, init.name = devm_kasprintf(rpi->dev, GFP_KERNEL, "fw-clk-%s", rpi_firmware_clk_names[id]); + if (!init.name) + return ERR_PTR(-ENOMEM); init.ops = &raspberrypi_firmware_clk_ops; init.flags = CLK_GET_RATE_NOCACHE; @@ -480,4 +482,3 @@ module_platform_driver(raspberrypi_clk_driver); MODULE_AUTHOR("Nicolas Saenz Julienne <nsaenzjulienne@suse.de>"); MODULE_DESCRIPTION("Raspberry Pi firmware clock driver"); MODULE_LICENSE("GPL"); -MODULE_ALIAS("platform:raspberrypi-clk"); diff --git a/drivers/clk/davinci/pll.c b/drivers/clk/davinci/pll.c index 6807a2efa93b..bfb6bbdc036c 100644 --- a/drivers/clk/davinci/pll.c +++ b/drivers/clk/davinci/pll.c @@ -763,13 +763,14 @@ int of_davinci_pll_init(struct device *dev, struct device_node *node, return PTR_ERR(clk); } - child = of_get_child_by_name(node, "pllout"); - if (of_device_is_available(child)) + child = of_get_available_child_by_name(node, "pllout"); + if (child) { of_clk_add_provider(child, of_clk_src_simple_get, clk); - of_node_put(child); + of_node_put(child); + } - child = of_get_child_by_name(node, "sysclk"); - if (of_device_is_available(child)) { + child = of_get_available_child_by_name(node, "sysclk"); + if (child) { struct clk_onecell_data *clk_data; struct clk **clks; int n_clks = max_sysclk_id + 1; @@ -803,11 +804,11 @@ int of_davinci_pll_init(struct device *dev, struct device_node *node, clks[(*div_info)->id] = clk; } of_clk_add_provider(child, of_clk_src_onecell_get, clk_data); + of_node_put(child); } - of_node_put(child); - child = of_get_child_by_name(node, "auxclk"); - if (of_device_is_available(child)) { + child = of_get_available_child_by_name(node, "auxclk"); + if (child) { char child_name[MAX_NAME_SIZE]; snprintf(child_name, MAX_NAME_SIZE, "%s_auxclk", info->name); @@ -818,11 +819,12 @@ int of_davinci_pll_init(struct device *dev, struct device_node *node, child_name, PTR_ERR(clk)); else of_clk_add_provider(child, of_clk_src_simple_get, clk); + + of_node_put(child); } - of_node_put(child); - child = of_get_child_by_name(node, "obsclk"); - if (of_device_is_available(child)) { + child = of_get_available_child_by_name(node, "obsclk"); + if (child) { if (obsclk_info) clk = davinci_pll_obsclk_register(dev, obsclk_info, base); else @@ -833,8 +835,8 @@ int of_davinci_pll_init(struct device *dev, struct device_node *node, PTR_ERR(clk)); else 
of_clk_add_provider(child, of_clk_src_simple_get, clk); + of_node_put(child); } - of_node_put(child); return 0; } diff --git a/drivers/clk/meson/Kconfig b/drivers/clk/meson/Kconfig index be2e3a5f8336..ff003dc5ab20 100644 --- a/drivers/clk/meson/Kconfig +++ b/drivers/clk/meson/Kconfig @@ -55,7 +55,7 @@ config COMMON_CLK_MESON_CPU_DYNDIV config COMMON_CLK_MESON8B bool "Meson8 SoC Clock controller support" depends on ARM - default y + default ARCH_MESON select COMMON_CLK_MESON_REGMAP select COMMON_CLK_MESON_CLKC_UTILS select COMMON_CLK_MESON_MPLL @@ -70,7 +70,7 @@ config COMMON_CLK_MESON8B config COMMON_CLK_GXBB tristate "GXBB and GXL SoC clock controllers support" depends on ARM64 - default y + default ARCH_MESON select COMMON_CLK_MESON_REGMAP select COMMON_CLK_MESON_DUALDIV select COMMON_CLK_MESON_VID_PLL_DIV @@ -86,7 +86,7 @@ config COMMON_CLK_GXBB config COMMON_CLK_AXG tristate "AXG SoC clock controllers support" depends on ARM64 - default y + default ARCH_MESON select COMMON_CLK_MESON_REGMAP select COMMON_CLK_MESON_DUALDIV select COMMON_CLK_MESON_MPLL @@ -136,7 +136,7 @@ config COMMON_CLK_A1_PERIPHERALS config COMMON_CLK_C3_PLL tristate "Amlogic C3 PLL clock controller" depends on ARM64 - default y + default ARCH_MESON select COMMON_CLK_MESON_REGMAP select COMMON_CLK_MESON_PLL select COMMON_CLK_MESON_CLKC_UTILS @@ -149,7 +149,7 @@ config COMMON_CLK_C3_PLL config COMMON_CLK_C3_PERIPHERALS tristate "Amlogic C3 peripherals clock controller" depends on ARM64 - default y + default ARCH_MESON select COMMON_CLK_MESON_REGMAP select COMMON_CLK_MESON_DUALDIV select COMMON_CLK_MESON_CLKC_UTILS @@ -163,7 +163,7 @@ config COMMON_CLK_C3_PERIPHERALS config COMMON_CLK_G12A tristate "G12 and SM1 SoC clock controllers support" depends on ARM64 - default y + default ARCH_MESON select COMMON_CLK_MESON_REGMAP select COMMON_CLK_MESON_DUALDIV select COMMON_CLK_MESON_MPLL @@ -181,7 +181,7 @@ config COMMON_CLK_G12A config COMMON_CLK_S4_PLL tristate "S4 SoC PLL clock controllers support" depends on ARM64 - default y + default ARCH_MESON select COMMON_CLK_MESON_CLKC_UTILS select COMMON_CLK_MESON_MPLL select COMMON_CLK_MESON_PLL @@ -194,7 +194,7 @@ config COMMON_CLK_S4_PLL config COMMON_CLK_S4_PERIPHERALS tristate "S4 SoC peripherals clock controllers support" depends on ARM64 - default y + default ARCH_MESON select COMMON_CLK_MESON_CLKC_UTILS select COMMON_CLK_MESON_REGMAP select COMMON_CLK_MESON_DUALDIV diff --git a/drivers/clk/meson/g12a.c b/drivers/clk/meson/g12a.c index ceabebb1863d..d9e546e006d7 100644 --- a/drivers/clk/meson/g12a.c +++ b/drivers/clk/meson/g12a.c @@ -4093,6 +4093,7 @@ static const struct clk_parent_data spicc_sclk_parent_data[] = { { .hw = &g12a_clk81.hw }, { .hw = &g12a_fclk_div4.hw }, { .hw = &g12a_fclk_div3.hw }, + { .hw = &g12a_fclk_div2.hw }, { .hw = &g12a_fclk_div5.hw }, { .hw = &g12a_fclk_div7.hw }, }; diff --git a/drivers/clk/qcom/apcs-sdx55.c b/drivers/clk/qcom/apcs-sdx55.c index 76ece6c4a969..3ba01622d8f0 100644 --- a/drivers/clk/qcom/apcs-sdx55.c +++ b/drivers/clk/qcom/apcs-sdx55.c @@ -111,7 +111,11 @@ static int qcom_apcs_sdx55_clk_probe(struct platform_device *pdev) * driver, there seems to be no better place to do this. So do it here! 
*/ cpu_dev = get_cpu_device(0); - dev_pm_domain_attach(cpu_dev, true); + ret = dev_pm_domain_attach(cpu_dev, true); + if (ret) { + dev_err_probe(dev, ret, "can't get PM domain: %d\n", ret); + goto err; + } return 0; diff --git a/drivers/clk/qcom/camcc-sa8775p.c b/drivers/clk/qcom/camcc-sa8775p.c index 11bd2e234811..50e5a131261b 100644 --- a/drivers/clk/qcom/camcc-sa8775p.c +++ b/drivers/clk/qcom/camcc-sa8775p.c @@ -10,7 +10,7 @@ #include <linux/pm_runtime.h> #include <linux/regmap.h> -#include <dt-bindings/clock/qcom,sa8775p-camcc.h> +#include <dt-bindings/clock/qcom,qcs8300-camcc.h> #include "clk-alpha-pll.h" #include "clk-branch.h" @@ -1681,6 +1681,24 @@ static struct clk_branch cam_cc_sm_obs_clk = { }, }; +static struct clk_branch cam_cc_titan_top_accu_shift_clk = { + .halt_reg = 0x131f0, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x131f0, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_titan_top_accu_shift_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_xo_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + static struct gdsc cam_cc_titan_top_gdsc = { .gdscr = 0x131bc, .en_rest_wait_val = 0x2, @@ -1775,6 +1793,7 @@ static struct clk_regmap *cam_cc_sa8775p_clocks[] = { [CAM_CC_SLEEP_CLK_SRC] = &cam_cc_sleep_clk_src.clkr, [CAM_CC_SLOW_AHB_CLK_SRC] = &cam_cc_slow_ahb_clk_src.clkr, [CAM_CC_SM_OBS_CLK] = &cam_cc_sm_obs_clk.clkr, + [CAM_CC_TITAN_TOP_ACCU_SHIFT_CLK] = NULL, [CAM_CC_XO_CLK_SRC] = &cam_cc_xo_clk_src.clkr, [CAM_CC_QDSS_DEBUG_XO_CLK] = &cam_cc_qdss_debug_xo_clk.clkr, }; @@ -1811,6 +1830,7 @@ static const struct qcom_cc_desc cam_cc_sa8775p_desc = { }; static const struct of_device_id cam_cc_sa8775p_match_table[] = { + { .compatible = "qcom,qcs8300-camcc" }, { .compatible = "qcom,sa8775p-camcc" }, { } }; @@ -1841,10 +1861,83 @@ static int cam_cc_sa8775p_probe(struct platform_device *pdev) clk_lucid_evo_pll_configure(&cam_cc_pll4, regmap, &cam_cc_pll4_config); clk_lucid_evo_pll_configure(&cam_cc_pll5, regmap, &cam_cc_pll5_config); - /* Keep some clocks always enabled */ - qcom_branch_set_clk_en(regmap, 0x13194); /* CAM_CC_CAMNOC_XO_CLK */ - qcom_branch_set_clk_en(regmap, 0x131ec); /* CAM_CC_GDSC_CLK */ - qcom_branch_set_clk_en(regmap, 0x13208); /* CAM_CC_SLEEP_CLK */ + if (device_is_compatible(&pdev->dev, "qcom,qcs8300-camcc")) { + cam_cc_camnoc_axi_clk_src.cmd_rcgr = 0x13154; + cam_cc_camnoc_axi_clk.halt_reg = 0x1316c; + cam_cc_camnoc_axi_clk.clkr.enable_reg = 0x1316c; + cam_cc_camnoc_dcd_xo_clk.halt_reg = 0x13174; + cam_cc_camnoc_dcd_xo_clk.clkr.enable_reg = 0x13174; + + cam_cc_csi0phytimer_clk_src.cmd_rcgr = 0x15054; + cam_cc_csi1phytimer_clk_src.cmd_rcgr = 0x15078; + cam_cc_csi2phytimer_clk_src.cmd_rcgr = 0x15098; + cam_cc_csid_clk_src.cmd_rcgr = 0x13134; + + cam_cc_mclk0_clk_src.cmd_rcgr = 0x15000; + cam_cc_mclk1_clk_src.cmd_rcgr = 0x1501c; + cam_cc_mclk2_clk_src.cmd_rcgr = 0x15038; + + cam_cc_fast_ahb_clk_src.cmd_rcgr = 0x13104; + cam_cc_slow_ahb_clk_src.cmd_rcgr = 0x1311c; + cam_cc_xo_clk_src.cmd_rcgr = 0x131b8; + cam_cc_sleep_clk_src.cmd_rcgr = 0x131d4; + + cam_cc_core_ahb_clk.halt_reg = 0x131b4; + cam_cc_core_ahb_clk.clkr.enable_reg = 0x131b4; + + cam_cc_cpas_ahb_clk.halt_reg = 0x130f4; + cam_cc_cpas_ahb_clk.clkr.enable_reg = 0x130f4; + cam_cc_cpas_fast_ahb_clk.halt_reg = 0x130fc; + cam_cc_cpas_fast_ahb_clk.clkr.enable_reg = 0x130fc; + + cam_cc_csi0phytimer_clk.halt_reg = 0x1506c; + cam_cc_csi0phytimer_clk.clkr.enable_reg = 0x1506c; + 
cam_cc_csi1phytimer_clk.halt_reg = 0x15090; + cam_cc_csi1phytimer_clk.clkr.enable_reg = 0x15090; + cam_cc_csi2phytimer_clk.halt_reg = 0x150b0; + cam_cc_csi2phytimer_clk.clkr.enable_reg = 0x150b0; + cam_cc_csid_clk.halt_reg = 0x1314c; + cam_cc_csid_clk.clkr.enable_reg = 0x1314c; + cam_cc_csid_csiphy_rx_clk.halt_reg = 0x15074; + cam_cc_csid_csiphy_rx_clk.clkr.enable_reg = 0x15074; + cam_cc_csiphy0_clk.halt_reg = 0x15070; + cam_cc_csiphy0_clk.clkr.enable_reg = 0x15070; + cam_cc_csiphy1_clk.halt_reg = 0x15094; + cam_cc_csiphy1_clk.clkr.enable_reg = 0x15094; + cam_cc_csiphy2_clk.halt_reg = 0x150b4; + cam_cc_csiphy2_clk.clkr.enable_reg = 0x150b4; + + cam_cc_mclk0_clk.halt_reg = 0x15018; + cam_cc_mclk0_clk.clkr.enable_reg = 0x15018; + cam_cc_mclk1_clk.halt_reg = 0x15034; + cam_cc_mclk1_clk.clkr.enable_reg = 0x15034; + cam_cc_mclk2_clk.halt_reg = 0x15050; + cam_cc_mclk2_clk.clkr.enable_reg = 0x15050; + cam_cc_qdss_debug_xo_clk.halt_reg = 0x1319c; + cam_cc_qdss_debug_xo_clk.clkr.enable_reg = 0x1319c; + + cam_cc_titan_top_gdsc.gdscr = 0x131a0; + + cam_cc_sa8775p_clocks[CAM_CC_CCI_3_CLK] = NULL; + cam_cc_sa8775p_clocks[CAM_CC_CCI_3_CLK_SRC] = NULL; + cam_cc_sa8775p_clocks[CAM_CC_CSI3PHYTIMER_CLK] = NULL; + cam_cc_sa8775p_clocks[CAM_CC_CSI3PHYTIMER_CLK_SRC] = NULL; + cam_cc_sa8775p_clocks[CAM_CC_CSIPHY3_CLK] = NULL; + cam_cc_sa8775p_clocks[CAM_CC_MCLK3_CLK] = NULL; + cam_cc_sa8775p_clocks[CAM_CC_MCLK3_CLK_SRC] = NULL; + cam_cc_sa8775p_clocks[CAM_CC_TITAN_TOP_ACCU_SHIFT_CLK] = + &cam_cc_titan_top_accu_shift_clk.clkr; + + /* Keep some clocks always enabled */ + qcom_branch_set_clk_en(regmap, 0x13178); /* CAM_CC_CAMNOC_XO_CLK */ + qcom_branch_set_clk_en(regmap, 0x131d0); /* CAM_CC_GDSC_CLK */ + qcom_branch_set_clk_en(regmap, 0x131ec); /* CAM_CC_SLEEP_CLK */ + } else { + /* Keep some clocks always enabled */ + qcom_branch_set_clk_en(regmap, 0x13194); /* CAM_CC_CAMNOC_XO_CLK */ + qcom_branch_set_clk_en(regmap, 0x131ec); /* CAM_CC_GDSC_CLK */ + qcom_branch_set_clk_en(regmap, 0x13208); /* CAM_CC_SLEEP_CLK */ + } ret = qcom_cc_really_probe(&pdev->dev, &cam_cc_sa8775p_desc, regmap); diff --git a/drivers/clk/qcom/camcc-sm6350.c b/drivers/clk/qcom/camcc-sm6350.c index 1871970fb046..8aac97d29ce3 100644 --- a/drivers/clk/qcom/camcc-sm6350.c +++ b/drivers/clk/qcom/camcc-sm6350.c @@ -1695,6 +1695,9 @@ static struct clk_branch camcc_sys_tmr_clk = { static struct gdsc bps_gdsc = { .gdscr = 0x6004, + .en_rest_wait_val = 0x2, + .en_few_wait_val = 0x2, + .clk_dis_wait_val = 0xf, .pd = { .name = "bps_gdsc", }, @@ -1704,6 +1707,9 @@ static struct gdsc bps_gdsc = { static struct gdsc ipe_0_gdsc = { .gdscr = 0x7004, + .en_rest_wait_val = 0x2, + .en_few_wait_val = 0x2, + .clk_dis_wait_val = 0xf, .pd = { .name = "ipe_0_gdsc", }, @@ -1713,6 +1719,9 @@ static struct gdsc ipe_0_gdsc = { static struct gdsc ife_0_gdsc = { .gdscr = 0x9004, + .en_rest_wait_val = 0x2, + .en_few_wait_val = 0x2, + .clk_dis_wait_val = 0xf, .pd = { .name = "ife_0_gdsc", }, @@ -1721,6 +1730,9 @@ static struct gdsc ife_0_gdsc = { static struct gdsc ife_1_gdsc = { .gdscr = 0xa004, + .en_rest_wait_val = 0x2, + .en_few_wait_val = 0x2, + .clk_dis_wait_val = 0xf, .pd = { .name = "ife_1_gdsc", }, @@ -1729,6 +1741,9 @@ static struct gdsc ife_1_gdsc = { static struct gdsc ife_2_gdsc = { .gdscr = 0xb004, + .en_rest_wait_val = 0x2, + .en_few_wait_val = 0x2, + .clk_dis_wait_val = 0xf, .pd = { .name = "ife_2_gdsc", }, @@ -1737,6 +1752,9 @@ static struct gdsc ife_2_gdsc = { static struct gdsc titan_top_gdsc = { .gdscr = 0x14004, + .en_rest_wait_val = 0x2, + 
.en_few_wait_val = 0x2, + .clk_dis_wait_val = 0xf, .pd = { .name = "titan_top_gdsc", }, diff --git a/drivers/clk/qcom/clk-rpmh.c b/drivers/clk/qcom/clk-rpmh.c index c7675930fde1..00fb3e53a388 100644 --- a/drivers/clk/qcom/clk-rpmh.c +++ b/drivers/clk/qcom/clk-rpmh.c @@ -66,6 +66,8 @@ struct clk_rpmh { struct clk_rpmh_desc { struct clk_hw **clks; size_t num_clks; + /* RPMh clock clkaN are optional for this platform */ + bool clka_optional; }; static DEFINE_MUTEX(rpmh_clk_lock); @@ -648,6 +650,7 @@ static struct clk_hw *sm8550_rpmh_clocks[] = { static const struct clk_rpmh_desc clk_rpmh_sm8550 = { .clks = sm8550_rpmh_clocks, .num_clks = ARRAY_SIZE(sm8550_rpmh_clocks), + .clka_optional = true, }; static struct clk_hw *sm8650_rpmh_clocks[] = { @@ -679,6 +682,7 @@ static struct clk_hw *sm8650_rpmh_clocks[] = { static const struct clk_rpmh_desc clk_rpmh_sm8650 = { .clks = sm8650_rpmh_clocks, .num_clks = ARRAY_SIZE(sm8650_rpmh_clocks), + .clka_optional = true, }; static struct clk_hw *sc7280_rpmh_clocks[] = { @@ -847,6 +851,7 @@ static struct clk_hw *sm8750_rpmh_clocks[] = { static const struct clk_rpmh_desc clk_rpmh_sm8750 = { .clks = sm8750_rpmh_clocks, .num_clks = ARRAY_SIZE(sm8750_rpmh_clocks), + .clka_optional = true, }; static struct clk_hw *of_clk_rpmh_hw_get(struct of_phandle_args *clkspec, @@ -890,6 +895,12 @@ static int clk_rpmh_probe(struct platform_device *pdev) rpmh_clk = to_clk_rpmh(hw_clks[i]); res_addr = cmd_db_read_addr(rpmh_clk->res_name); if (!res_addr) { + hw_clks[i] = NULL; + + if (desc->clka_optional && + !strncmp(rpmh_clk->res_name, "clka", sizeof("clka") - 1)) + continue; + dev_err(&pdev->dev, "missing RPMh resource address for %s\n", rpmh_clk->res_name); return -ENODEV; diff --git a/drivers/clk/qcom/dispcc-sm6350.c b/drivers/clk/qcom/dispcc-sm6350.c index e703ecf00e44..b0bd163a449c 100644 --- a/drivers/clk/qcom/dispcc-sm6350.c +++ b/drivers/clk/qcom/dispcc-sm6350.c @@ -681,6 +681,9 @@ static struct clk_branch disp_cc_xo_clk = { static struct gdsc mdss_gdsc = { .gdscr = 0x1004, + .en_rest_wait_val = 0x2, + .en_few_wait_val = 0x2, + .clk_dis_wait_val = 0xf, .pd = { .name = "mdss_gdsc", }, diff --git a/drivers/clk/qcom/gcc-msm8939.c b/drivers/clk/qcom/gcc-msm8939.c index 7431c9a65044..45193b3d714b 100644 --- a/drivers/clk/qcom/gcc-msm8939.c +++ b/drivers/clk/qcom/gcc-msm8939.c @@ -432,7 +432,7 @@ static const struct parent_map gcc_xo_gpll0_gpll1a_gpll6_sleep_map[] = { { P_XO, 0 }, { P_GPLL0, 1 }, { P_GPLL1_AUX, 2 }, - { P_GPLL6, 2 }, + { P_GPLL6, 3 }, { P_SLEEP_CLK, 6 }, }; @@ -1113,7 +1113,7 @@ static struct clk_rcg2 jpeg0_clk_src = { }; static const struct freq_tbl ftbl_gcc_camss_mclk0_1_clk[] = { - F(24000000, P_GPLL0, 1, 1, 45), + F(24000000, P_GPLL6, 1, 1, 45), F(66670000, P_GPLL0, 12, 0, 0), { } }; diff --git a/drivers/clk/qcom/gcc-sm6350.c b/drivers/clk/qcom/gcc-sm6350.c index 74346dc02606..a4d6dff9d0f7 100644 --- a/drivers/clk/qcom/gcc-sm6350.c +++ b/drivers/clk/qcom/gcc-sm6350.c @@ -2320,6 +2320,9 @@ static struct clk_branch gcc_video_xo_clk = { static struct gdsc usb30_prim_gdsc = { .gdscr = 0x1a004, + .en_rest_wait_val = 0x2, + .en_few_wait_val = 0x2, + .clk_dis_wait_val = 0xf, .pd = { .name = "usb30_prim_gdsc", }, @@ -2328,6 +2331,9 @@ static struct gdsc usb30_prim_gdsc = { static struct gdsc ufs_phy_gdsc = { .gdscr = 0x3a004, + .en_rest_wait_val = 0x2, + .en_few_wait_val = 0x2, + .clk_dis_wait_val = 0xf, .pd = { .name = "ufs_phy_gdsc", }, diff --git a/drivers/clk/qcom/gcc-sm8650.c b/drivers/clk/qcom/gcc-sm8650.c index fa1672c4e7d8..24f98062b9dd 100644 --- 
a/drivers/clk/qcom/gcc-sm8650.c +++ b/drivers/clk/qcom/gcc-sm8650.c @@ -3817,7 +3817,9 @@ static int gcc_sm8650_probe(struct platform_device *pdev) qcom_branch_set_clk_en(regmap, 0x32004); /* GCC_VIDEO_AHB_CLK */ qcom_branch_set_clk_en(regmap, 0x32030); /* GCC_VIDEO_XO_CLK */ + /* FORCE_MEM_CORE_ON for ufs phy ice core and gcc ufs phy axi clocks */ qcom_branch_set_force_mem_core(regmap, gcc_ufs_phy_ice_core_clk, true); + qcom_branch_set_force_mem_core(regmap, gcc_ufs_phy_axi_clk, true); /* Clear GDSC_SLEEP_ENA_VOTE to stop votes being auto-removed in sleep. */ regmap_write(regmap, 0x52150, 0x0); diff --git a/drivers/clk/qcom/gcc-sm8750.c b/drivers/clk/qcom/gcc-sm8750.c index b36d70976095..8092dd6b37b5 100644 --- a/drivers/clk/qcom/gcc-sm8750.c +++ b/drivers/clk/qcom/gcc-sm8750.c @@ -3244,8 +3244,9 @@ static int gcc_sm8750_probe(struct platform_device *pdev) regmap_update_bits(regmap, 0x52010, BIT(20), BIT(20)); regmap_update_bits(regmap, 0x52010, BIT(21), BIT(21)); - /* FORCE_MEM_CORE_ON for ufs phy ice core clocks */ + /* FORCE_MEM_CORE_ON for ufs phy ice core and gcc ufs phy axi clocks */ qcom_branch_set_force_mem_core(regmap, gcc_ufs_phy_ice_core_clk, true); + qcom_branch_set_force_mem_core(regmap, gcc_ufs_phy_axi_clk, true); return qcom_cc_really_probe(&pdev->dev, &gcc_sm8750_desc, regmap); } diff --git a/drivers/clk/qcom/gcc-x1e80100.c b/drivers/clk/qcom/gcc-x1e80100.c index 009f39139b64..3e44757e25d3 100644 --- a/drivers/clk/qcom/gcc-x1e80100.c +++ b/drivers/clk/qcom/gcc-x1e80100.c @@ -6753,6 +6753,10 @@ static int gcc_x1e80100_probe(struct platform_device *pdev) /* Clear GDSC_SLEEP_ENA_VOTE to stop votes being auto-removed in sleep. */ regmap_write(regmap, 0x52224, 0x0); + /* FORCE_MEM_CORE_ON for ufs phy ice core and gcc ufs phy axi clocks */ + qcom_branch_set_force_mem_core(regmap, gcc_ufs_phy_ice_core_clk, true); + qcom_branch_set_force_mem_core(regmap, gcc_ufs_phy_axi_clk, true); + return qcom_cc_really_probe(&pdev->dev, &gcc_x1e80100_desc, regmap); } diff --git a/drivers/clk/qcom/gpucc-sm6350.c b/drivers/clk/qcom/gpucc-sm6350.c index 35ed0500bc59..ee89c42413f8 100644 --- a/drivers/clk/qcom/gpucc-sm6350.c +++ b/drivers/clk/qcom/gpucc-sm6350.c @@ -413,6 +413,9 @@ static struct clk_branch gpu_cc_gx_vsense_clk = { static struct gdsc gpu_cx_gdsc = { .gdscr = 0x106c, .gds_hw_ctrl = 0x1540, + .en_rest_wait_val = 0x2, + .en_few_wait_val = 0x2, + .clk_dis_wait_val = 0x8, .pd = { .name = "gpu_cx_gdsc", }, @@ -423,6 +426,9 @@ static struct gdsc gpu_cx_gdsc = { static struct gdsc gpu_gx_gdsc = { .gdscr = 0x100c, .clamp_io_ctrl = 0x1508, + .en_rest_wait_val = 0x2, + .en_few_wait_val = 0x2, + .clk_dis_wait_val = 0x2, .pd = { .name = "gpu_gx_gdsc", .power_on = gdsc_gx_do_nothing_enable, diff --git a/drivers/clk/renesas/Kconfig b/drivers/clk/renesas/Kconfig index 5a4bc3f94d49..50c20119d12a 100644 --- a/drivers/clk/renesas/Kconfig +++ b/drivers/clk/renesas/Kconfig @@ -41,6 +41,7 @@ config CLK_RENESAS select CLK_R9A08G045 if ARCH_R9A08G045 select CLK_R9A09G011 if ARCH_R9A09G011 select CLK_R9A09G047 if ARCH_R9A09G047 + select CLK_R9A09G056 if ARCH_R9A09G056 select CLK_R9A09G057 if ARCH_R9A09G057 select CLK_SH73A0 if ARCH_SH73A0 @@ -199,6 +200,10 @@ config CLK_R9A09G047 bool "RZ/G3E clock support" if COMPILE_TEST select CLK_RZV2H +config CLK_R9A09G056 + bool "RZ/V2N clock support" if COMPILE_TEST + select CLK_RZV2H + config CLK_R9A09G057 bool "RZ/V2H(P) clock support" if COMPILE_TEST select CLK_RZV2H diff --git a/drivers/clk/renesas/Makefile b/drivers/clk/renesas/Makefile index 
2d6e746939c4..f9075bca6e95 100644 --- a/drivers/clk/renesas/Makefile +++ b/drivers/clk/renesas/Makefile @@ -38,6 +38,7 @@ obj-$(CONFIG_CLK_R9A07G054) += r9a07g044-cpg.o obj-$(CONFIG_CLK_R9A08G045) += r9a08g045-cpg.o obj-$(CONFIG_CLK_R9A09G011) += r9a09g011-cpg.o obj-$(CONFIG_CLK_R9A09G047) += r9a09g047-cpg.o +obj-$(CONFIG_CLK_R9A09G056) += r9a09g056-cpg.o obj-$(CONFIG_CLK_R9A09G057) += r9a09g057-cpg.o obj-$(CONFIG_CLK_SH73A0) += clk-sh73a0.o diff --git a/drivers/clk/renesas/r9a09g047-cpg.c b/drivers/clk/renesas/r9a09g047-cpg.c index e9cf4342d0cf..21699999cedd 100644 --- a/drivers/clk/renesas/r9a09g047-cpg.c +++ b/drivers/clk/renesas/r9a09g047-cpg.c @@ -16,7 +16,7 @@ enum clk_ids { /* Core Clock Outputs exported to DT */ - LAST_DT_CORE_CLK = R9A09G047_IOTOP_0_SHCLK, + LAST_DT_CORE_CLK = R9A09G047_GBETH_1_CLK_PTP_REF_I, /* External Input Clocks */ CLK_AUDIO_EXTAL, @@ -31,7 +31,14 @@ enum clk_ids { CLK_PLLVDO, /* Internal Core Clocks */ + CLK_PLLCM33_DIV3, + CLK_PLLCM33_DIV4, + CLK_PLLCM33_DIV5, CLK_PLLCM33_DIV16, + CLK_PLLCM33_GEAR, + CLK_SMUX2_XSPI_CLK0, + CLK_SMUX2_XSPI_CLK1, + CLK_PLLCM33_XSPI, CLK_PLLCLN_DIV2, CLK_PLLCLN_DIV8, CLK_PLLCLN_DIV16, @@ -41,6 +48,7 @@ enum clk_ids { CLK_PLLDTY_ACPU_DIV4, CLK_PLLDTY_DIV16, CLK_PLLVDO_CRU0, + CLK_PLLVDO_GPU, /* Module Clocks */ MOD_CLK_BASE, @@ -60,6 +68,14 @@ static const struct clk_div_table dtable_2_4[] = { {0, 0}, }; +static const struct clk_div_table dtable_2_16[] = { + {0, 2}, + {1, 4}, + {2, 8}, + {3, 16}, + {0, 0}, +}; + static const struct clk_div_table dtable_2_64[] = { {0, 2}, {1, 4}, @@ -69,6 +85,10 @@ static const struct clk_div_table dtable_2_64[] = { {0, 0}, }; +/* Mux clock tables */ +static const char * const smux2_xspi_clk0[] = { ".pllcm33_div3", ".pllcm33_div4" }; +static const char * const smux2_xspi_clk1[] = { ".smux2_xspi_clk0", ".pllcm33_div5" }; + static const struct cpg_core_clk r9a09g047_core_clks[] __initconst = { /* External Clock Inputs */ DEF_INPUT("audio_extal", CLK_AUDIO_EXTAL), @@ -79,12 +99,21 @@ static const struct cpg_core_clk r9a09g047_core_clks[] __initconst = { DEF_FIXED(".pllcm33", CLK_PLLCM33, CLK_QEXTAL, 200, 3), DEF_FIXED(".pllcln", CLK_PLLCLN, CLK_QEXTAL, 200, 3), DEF_FIXED(".plldty", CLK_PLLDTY, CLK_QEXTAL, 200, 3), - DEF_PLL(".pllca55", CLK_PLLCA55, CLK_QEXTAL, PLL_CONF(0x64)), + DEF_PLL(".pllca55", CLK_PLLCA55, CLK_QEXTAL, PLLCA55), DEF_FIXED(".pllvdo", CLK_PLLVDO, CLK_QEXTAL, 105, 2), /* Internal Core Clocks */ + DEF_FIXED(".pllcm33_div3", CLK_PLLCM33_DIV3, CLK_PLLCM33, 1, 3), + DEF_FIXED(".pllcm33_div4", CLK_PLLCM33_DIV4, CLK_PLLCM33, 1, 4), + DEF_FIXED(".pllcm33_div5", CLK_PLLCM33_DIV5, CLK_PLLCM33, 1, 5), DEF_FIXED(".pllcm33_div16", CLK_PLLCM33_DIV16, CLK_PLLCM33, 1, 16), + DEF_DDIV(".pllcm33_gear", CLK_PLLCM33_GEAR, CLK_PLLCM33_DIV4, CDDIV0_DIVCTL1, dtable_2_64), + + DEF_SMUX(".smux2_xspi_clk0", CLK_SMUX2_XSPI_CLK0, SSEL1_SELCTL2, smux2_xspi_clk0), + DEF_SMUX(".smux2_xspi_clk1", CLK_SMUX2_XSPI_CLK1, SSEL1_SELCTL3, smux2_xspi_clk1), + DEF_CSDIV(".pllcm33_xspi", CLK_PLLCM33_XSPI, CLK_SMUX2_XSPI_CLK1, CSDIV0_DIVCTL3, + dtable_2_16), DEF_FIXED(".pllcln_div2", CLK_PLLCLN_DIV2, CLK_PLLCLN, 1, 2), DEF_FIXED(".pllcln_div8", CLK_PLLCLN_DIV8, CLK_PLLCLN, 1, 8), DEF_FIXED(".pllcln_div16", CLK_PLLCLN_DIV16, CLK_PLLCLN, 1, 16), @@ -96,6 +125,7 @@ static const struct cpg_core_clk r9a09g047_core_clks[] __initconst = { DEF_FIXED(".plldty_div16", CLK_PLLDTY_DIV16, CLK_PLLDTY, 1, 16), DEF_DDIV(".pllvdo_cru0", CLK_PLLVDO_CRU0, CLK_PLLVDO, CDDIV3_DIVCTL3, dtable_2_4), + DEF_DDIV(".pllvdo_gpu", CLK_PLLVDO_GPU, 
CLK_PLLVDO, CDDIV3_DIVCTL1, dtable_2_64), /* Core Clocks */ DEF_FIXED("sys_0_pclk", R9A09G047_SYS_0_PCLK, CLK_QEXTAL, 1, 1), @@ -108,6 +138,7 @@ static const struct cpg_core_clk r9a09g047_core_clks[] __initconst = { DEF_DDIV("ca55_0_coreclk3", R9A09G047_CA55_0_CORECLK3, CLK_PLLCA55, CDDIV1_DIVCTL3, dtable_1_8), DEF_FIXED("iotop_0_shclk", R9A09G047_IOTOP_0_SHCLK, CLK_PLLCM33_DIV16, 1, 1), + DEF_FIXED("spi_clk_spi", R9A09G047_SPI_CLK_SPI, CLK_PLLCM33_XSPI, 1, 2), }; static const struct rzv2h_mod_clk r9a09g047_mod_clks[] __initconst = { @@ -153,6 +184,12 @@ static const struct rzv2h_mod_clk r9a09g047_mod_clks[] __initconst = { BUS_MSTOP(10, BIT(14))), DEF_MOD("canfd_0_clkc", CLK_PLLCLN_DIV20, 9, 14, 4, 30, BUS_MSTOP(10, BIT(14))), + DEF_MOD("spi_hclk", CLK_PLLCM33_GEAR, 9, 15, 4, 31, + BUS_MSTOP(4, BIT(5))), + DEF_MOD("spi_aclk", CLK_PLLCM33_GEAR, 10, 0, 5, 0, + BUS_MSTOP(4, BIT(5))), + DEF_MOD_NO_PM("spi_clk_spix2", CLK_PLLCM33_XSPI, 10, 1, 5, 2, + BUS_MSTOP(4, BIT(5))), DEF_MOD("sdhi_0_imclk", CLK_PLLCLN_DIV8, 10, 3, 5, 3, BUS_MSTOP(8, BIT(2))), DEF_MOD("sdhi_0_imclk2", CLK_PLLCLN_DIV8, 10, 4, 5, 4, @@ -183,6 +220,12 @@ static const struct rzv2h_mod_clk r9a09g047_mod_clks[] __initconst = { BUS_MSTOP(9, BIT(4))), DEF_MOD("cru_0_pclk", CLK_PLLDTY_DIV16, 13, 4, 6, 20, BUS_MSTOP(9, BIT(4))), + DEF_MOD("ge3d_clk", CLK_PLLVDO_GPU, 15, 0, 7, 16, + BUS_MSTOP(3, BIT(4))), + DEF_MOD("ge3d_axi_clk", CLK_PLLDTY_ACPU_DIV2, 15, 1, 7, 17, + BUS_MSTOP(3, BIT(4))), + DEF_MOD("ge3d_ace_clk", CLK_PLLDTY_ACPU_DIV2, 15, 2, 7, 18, + BUS_MSTOP(3, BIT(4))), DEF_MOD("tsu_1_pclk", CLK_QEXTAL, 16, 10, 8, 10, BUS_MSTOP(2, BIT(15))), }; @@ -207,12 +250,17 @@ static const struct rzv2h_reset r9a09g047_resets[] __initconst = { DEF_RST(10, 0, 4, 17), /* RIIC_8_MRST */ DEF_RST(10, 1, 4, 18), /* CANFD_0_RSTP_N */ DEF_RST(10, 2, 4, 19), /* CANFD_0_RSTC_N */ + DEF_RST(10, 3, 4, 20), /* SPI_HRESETN */ + DEF_RST(10, 4, 4, 21), /* SPI_ARESETN */ DEF_RST(10, 7, 4, 24), /* SDHI_0_IXRST */ DEF_RST(10, 8, 4, 25), /* SDHI_1_IXRST */ DEF_RST(10, 9, 4, 26), /* SDHI_2_IXRST */ DEF_RST(12, 5, 5, 22), /* CRU_0_PRESETN */ DEF_RST(12, 6, 5, 23), /* CRU_0_ARESETN */ DEF_RST(12, 7, 5, 24), /* CRU_0_S_RESETN */ + DEF_RST(13, 13, 6, 14), /* GE3D_RESETN */ + DEF_RST(13, 14, 6, 15), /* GE3D_AXI_RESETN */ + DEF_RST(13, 15, 6, 16), /* GE3D_ACE_RESETN */ DEF_RST(15, 8, 7, 9), /* TSU_1_PRESETN */ }; diff --git a/drivers/clk/renesas/r9a09g056-cpg.c b/drivers/clk/renesas/r9a09g056-cpg.c new file mode 100644 index 000000000000..e2712a25c43a --- /dev/null +++ b/drivers/clk/renesas/r9a09g056-cpg.c @@ -0,0 +1,152 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Renesas RZ/V2N CPG driver + * + * Copyright (C) 2025 Renesas Electronics Corp. 
+ */ + +#include <linux/clk-provider.h> +#include <linux/device.h> +#include <linux/init.h> +#include <linux/kernel.h> + +#include <dt-bindings/clock/renesas,r9a09g056-cpg.h> + +#include "rzv2h-cpg.h" + +enum clk_ids { + /* Core Clock Outputs exported to DT */ + LAST_DT_CORE_CLK = R9A09G056_GBETH_1_CLK_PTP_REF_I, + + /* External Input Clocks */ + CLK_AUDIO_EXTAL, + CLK_RTXIN, + CLK_QEXTAL, + + /* PLL Clocks */ + CLK_PLLCM33, + CLK_PLLCLN, + CLK_PLLDTY, + CLK_PLLCA55, + + /* Internal Core Clocks */ + CLK_PLLCM33_DIV16, + CLK_PLLCLN_DIV2, + CLK_PLLCLN_DIV8, + CLK_PLLDTY_ACPU, + CLK_PLLDTY_ACPU_DIV4, + + /* Module Clocks */ + MOD_CLK_BASE, +}; + +static const struct clk_div_table dtable_1_8[] = { + {0, 1}, + {1, 2}, + {2, 4}, + {3, 8}, + {0, 0}, +}; + +static const struct clk_div_table dtable_2_64[] = { + {0, 2}, + {1, 4}, + {2, 8}, + {3, 16}, + {4, 64}, + {0, 0}, +}; + +static const struct cpg_core_clk r9a09g056_core_clks[] __initconst = { + /* External Clock Inputs */ + DEF_INPUT("audio_extal", CLK_AUDIO_EXTAL), + DEF_INPUT("rtxin", CLK_RTXIN), + DEF_INPUT("qextal", CLK_QEXTAL), + + /* PLL Clocks */ + DEF_FIXED(".pllcm33", CLK_PLLCM33, CLK_QEXTAL, 200, 3), + DEF_FIXED(".pllcln", CLK_PLLCLN, CLK_QEXTAL, 200, 3), + DEF_FIXED(".plldty", CLK_PLLDTY, CLK_QEXTAL, 200, 3), + DEF_PLL(".pllca55", CLK_PLLCA55, CLK_QEXTAL, PLLCA55), + + /* Internal Core Clocks */ + DEF_FIXED(".pllcm33_div16", CLK_PLLCM33_DIV16, CLK_PLLCM33, 1, 16), + + DEF_FIXED(".pllcln_div2", CLK_PLLCLN_DIV2, CLK_PLLCLN, 1, 2), + DEF_FIXED(".pllcln_div8", CLK_PLLCLN_DIV8, CLK_PLLCLN, 1, 8), + + DEF_DDIV(".plldty_acpu", CLK_PLLDTY_ACPU, CLK_PLLDTY, CDDIV0_DIVCTL2, dtable_2_64), + DEF_FIXED(".plldty_acpu_div4", CLK_PLLDTY_ACPU_DIV4, CLK_PLLDTY_ACPU, 1, 4), + + /* Core Clocks */ + DEF_FIXED("sys_0_pclk", R9A09G056_SYS_0_PCLK, CLK_QEXTAL, 1, 1), + DEF_DDIV("ca55_0_coreclk0", R9A09G056_CA55_0_CORE_CLK0, CLK_PLLCA55, + CDDIV1_DIVCTL0, dtable_1_8), + DEF_DDIV("ca55_0_coreclk1", R9A09G056_CA55_0_CORE_CLK1, CLK_PLLCA55, + CDDIV1_DIVCTL1, dtable_1_8), + DEF_DDIV("ca55_0_coreclk2", R9A09G056_CA55_0_CORE_CLK2, CLK_PLLCA55, + CDDIV1_DIVCTL2, dtable_1_8), + DEF_DDIV("ca55_0_coreclk3", R9A09G056_CA55_0_CORE_CLK3, CLK_PLLCA55, + CDDIV1_DIVCTL3, dtable_1_8), + DEF_FIXED("iotop_0_shclk", R9A09G056_IOTOP_0_SHCLK, CLK_PLLCM33_DIV16, 1, 1), +}; + +static const struct rzv2h_mod_clk r9a09g056_mod_clks[] __initconst = { + DEF_MOD_CRITICAL("gic_0_gicclk", CLK_PLLDTY_ACPU_DIV4, 1, 3, 0, 19, + BUS_MSTOP(3, BIT(5))), + DEF_MOD("scif_0_clk_pck", CLK_PLLCM33_DIV16, 8, 15, 4, 15, + BUS_MSTOP(3, BIT(14))), + DEF_MOD("sdhi_0_imclk", CLK_PLLCLN_DIV8, 10, 3, 5, 3, + BUS_MSTOP(8, BIT(2))), + DEF_MOD("sdhi_0_imclk2", CLK_PLLCLN_DIV8, 10, 4, 5, 4, + BUS_MSTOP(8, BIT(2))), + DEF_MOD("sdhi_0_clk_hs", CLK_PLLCLN_DIV2, 10, 5, 5, 5, + BUS_MSTOP(8, BIT(2))), + DEF_MOD("sdhi_0_aclk", CLK_PLLDTY_ACPU_DIV4, 10, 6, 5, 6, + BUS_MSTOP(8, BIT(2))), + DEF_MOD("sdhi_1_imclk", CLK_PLLCLN_DIV8, 10, 7, 5, 7, + BUS_MSTOP(8, BIT(3))), + DEF_MOD("sdhi_1_imclk2", CLK_PLLCLN_DIV8, 10, 8, 5, 8, + BUS_MSTOP(8, BIT(3))), + DEF_MOD("sdhi_1_clk_hs", CLK_PLLCLN_DIV2, 10, 9, 5, 9, + BUS_MSTOP(8, BIT(3))), + DEF_MOD("sdhi_1_aclk", CLK_PLLDTY_ACPU_DIV4, 10, 10, 5, 10, + BUS_MSTOP(8, BIT(3))), + DEF_MOD("sdhi_2_imclk", CLK_PLLCLN_DIV8, 10, 11, 5, 11, + BUS_MSTOP(8, BIT(4))), + DEF_MOD("sdhi_2_imclk2", CLK_PLLCLN_DIV8, 10, 12, 5, 12, + BUS_MSTOP(8, BIT(4))), + DEF_MOD("sdhi_2_clk_hs", CLK_PLLCLN_DIV2, 10, 13, 5, 13, + BUS_MSTOP(8, BIT(4))), + DEF_MOD("sdhi_2_aclk", CLK_PLLDTY_ACPU_DIV4, 10, 14, 5, 14, + 
BUS_MSTOP(8, BIT(4))), +}; + +static const struct rzv2h_reset r9a09g056_resets[] __initconst = { + DEF_RST(3, 0, 1, 1), /* SYS_0_PRESETN */ + DEF_RST(3, 8, 1, 9), /* GIC_0_GICRESET_N */ + DEF_RST(3, 9, 1, 10), /* GIC_0_DBG_GICRESET_N */ + DEF_RST(9, 5, 4, 6), /* SCIF_0_RST_SYSTEM_N */ + DEF_RST(10, 7, 4, 24), /* SDHI_0_IXRST */ + DEF_RST(10, 8, 4, 25), /* SDHI_1_IXRST */ + DEF_RST(10, 9, 4, 26), /* SDHI_2_IXRST */ +}; + +const struct rzv2h_cpg_info r9a09g056_cpg_info __initconst = { + /* Core Clocks */ + .core_clks = r9a09g056_core_clks, + .num_core_clks = ARRAY_SIZE(r9a09g056_core_clks), + .last_dt_core_clk = LAST_DT_CORE_CLK, + .num_total_core_clks = MOD_CLK_BASE, + + /* Module Clocks */ + .mod_clks = r9a09g056_mod_clks, + .num_mod_clks = ARRAY_SIZE(r9a09g056_mod_clks), + .num_hw_mod_clks = 25 * 16, + + /* Resets */ + .resets = r9a09g056_resets, + .num_resets = ARRAY_SIZE(r9a09g056_resets), + + .num_mstop_bits = 192, +}; diff --git a/drivers/clk/renesas/r9a09g057-cpg.c b/drivers/clk/renesas/r9a09g057-cpg.c index d63eafbca780..3c40e36259fe 100644 --- a/drivers/clk/renesas/r9a09g057-cpg.c +++ b/drivers/clk/renesas/r9a09g057-cpg.c @@ -16,7 +16,7 @@ enum clk_ids { /* Core Clock Outputs exported to DT */ - LAST_DT_CORE_CLK = R9A09G057_IOTOP_0_SHCLK, + LAST_DT_CORE_CLK = R9A09G057_GBETH_1_CLK_PTP_REF_I, /* External Input Clocks */ CLK_AUDIO_EXTAL, @@ -29,6 +29,7 @@ enum clk_ids { CLK_PLLDTY, CLK_PLLCA55, CLK_PLLVDO, + CLK_PLLGPU, /* Internal Core Clocks */ CLK_PLLCM33_DIV4, @@ -40,6 +41,7 @@ enum clk_ids { CLK_PLLDTY_ACPU, CLK_PLLDTY_ACPU_DIV2, CLK_PLLDTY_ACPU_DIV4, + CLK_PLLDTY_DIV8, CLK_PLLDTY_DIV16, CLK_PLLDTY_RCPU, CLK_PLLDTY_RCPU_DIV4, @@ -47,6 +49,7 @@ enum clk_ids { CLK_PLLVDO_CRU1, CLK_PLLVDO_CRU2, CLK_PLLVDO_CRU3, + CLK_PLLGPU_GEAR, /* Module Clocks */ MOD_CLK_BASE, @@ -85,8 +88,9 @@ static const struct cpg_core_clk r9a09g057_core_clks[] __initconst = { DEF_FIXED(".pllcm33", CLK_PLLCM33, CLK_QEXTAL, 200, 3), DEF_FIXED(".pllcln", CLK_PLLCLN, CLK_QEXTAL, 200, 3), DEF_FIXED(".plldty", CLK_PLLDTY, CLK_QEXTAL, 200, 3), - DEF_PLL(".pllca55", CLK_PLLCA55, CLK_QEXTAL, PLL_CONF(0x64)), + DEF_PLL(".pllca55", CLK_PLLCA55, CLK_QEXTAL, PLLCA55), DEF_FIXED(".pllvdo", CLK_PLLVDO, CLK_QEXTAL, 105, 2), + DEF_PLL(".pllgpu", CLK_PLLGPU, CLK_QEXTAL, PLLGPU), /* Internal Core Clocks */ DEF_FIXED(".pllcm33_div4", CLK_PLLCM33_DIV4, CLK_PLLCM33, 1, 4), @@ -101,6 +105,7 @@ static const struct cpg_core_clk r9a09g057_core_clks[] __initconst = { DEF_DDIV(".plldty_acpu", CLK_PLLDTY_ACPU, CLK_PLLDTY, CDDIV0_DIVCTL2, dtable_2_64), DEF_FIXED(".plldty_acpu_div2", CLK_PLLDTY_ACPU_DIV2, CLK_PLLDTY_ACPU, 1, 2), DEF_FIXED(".plldty_acpu_div4", CLK_PLLDTY_ACPU_DIV4, CLK_PLLDTY_ACPU, 1, 4), + DEF_FIXED(".plldty_div8", CLK_PLLDTY_DIV8, CLK_PLLDTY, 1, 8), DEF_FIXED(".plldty_div16", CLK_PLLDTY_DIV16, CLK_PLLDTY, 1, 16), DEF_DDIV(".plldty_rcpu", CLK_PLLDTY_RCPU, CLK_PLLDTY, CDDIV3_DIVCTL2, dtable_2_64), DEF_FIXED(".plldty_rcpu_div4", CLK_PLLDTY_RCPU_DIV4, CLK_PLLDTY_RCPU, 1, 4), @@ -110,6 +115,8 @@ static const struct cpg_core_clk r9a09g057_core_clks[] __initconst = { DEF_DDIV(".pllvdo_cru2", CLK_PLLVDO_CRU2, CLK_PLLVDO, CDDIV4_DIVCTL1, dtable_2_4), DEF_DDIV(".pllvdo_cru3", CLK_PLLVDO_CRU3, CLK_PLLVDO, CDDIV4_DIVCTL2, dtable_2_4), + DEF_DDIV(".pllgpu_gear", CLK_PLLGPU_GEAR, CLK_PLLGPU, CDDIV3_DIVCTL1, dtable_2_64), + /* Core Clocks */ DEF_FIXED("sys_0_pclk", R9A09G057_SYS_0_PCLK, CLK_QEXTAL, 1, 1), DEF_DDIV("ca55_0_coreclk0", R9A09G057_CA55_0_CORE_CLK0, CLK_PLLCA55, @@ -121,6 +128,8 @@ static const struct cpg_core_clk 
r9a09g057_core_clks[] __initconst = { DEF_DDIV("ca55_0_coreclk3", R9A09G057_CA55_0_CORE_CLK3, CLK_PLLCA55, CDDIV1_DIVCTL3, dtable_1_8), DEF_FIXED("iotop_0_shclk", R9A09G057_IOTOP_0_SHCLK, CLK_PLLCM33_DIV16, 1, 1), + DEF_FIXED("usb2_0_clk_core0", R9A09G057_USB2_0_CLK_CORE0, CLK_QEXTAL, 1, 1), + DEF_FIXED("usb2_0_clk_core1", R9A09G057_USB2_0_CLK_CORE1, CLK_QEXTAL, 1, 1), }; static const struct rzv2h_mod_clk r9a09g057_mod_clks[] __initconst = { @@ -214,6 +223,16 @@ static const struct rzv2h_mod_clk r9a09g057_mod_clks[] __initconst = { BUS_MSTOP(8, BIT(4))), DEF_MOD("sdhi_2_aclk", CLK_PLLDTY_ACPU_DIV4, 10, 14, 5, 14, BUS_MSTOP(8, BIT(4))), + DEF_MOD("usb2_0_u2h0_hclk", CLK_PLLDTY_DIV8, 11, 3, 5, 19, + BUS_MSTOP(7, BIT(7))), + DEF_MOD("usb2_0_u2h1_hclk", CLK_PLLDTY_DIV8, 11, 4, 5, 20, + BUS_MSTOP(7, BIT(8))), + DEF_MOD("usb2_0_u2p_exr_cpuclk", CLK_PLLDTY_ACPU_DIV4, 11, 5, 5, 21, + BUS_MSTOP(7, BIT(9))), + DEF_MOD("usb2_0_pclk_usbtst0", CLK_PLLDTY_ACPU_DIV4, 11, 6, 5, 22, + BUS_MSTOP(7, BIT(10))), + DEF_MOD("usb2_0_pclk_usbtst1", CLK_PLLDTY_ACPU_DIV4, 11, 7, 5, 23, + BUS_MSTOP(7, BIT(11))), DEF_MOD("cru_0_aclk", CLK_PLLDTY_ACPU_DIV2, 13, 2, 6, 18, BUS_MSTOP(9, BIT(4))), DEF_MOD_NO_PM("cru_0_vclk", CLK_PLLVDO_CRU0, 13, 3, 6, 19, @@ -238,6 +257,12 @@ static const struct rzv2h_mod_clk r9a09g057_mod_clks[] __initconst = { BUS_MSTOP(9, BIT(7))), DEF_MOD("cru_3_pclk", CLK_PLLDTY_DIV16, 13, 13, 6, 29, BUS_MSTOP(9, BIT(7))), + DEF_MOD("gpu_0_clk", CLK_PLLGPU_GEAR, 15, 0, 7, 16, + BUS_MSTOP(3, BIT(4))), + DEF_MOD("gpu_0_axi_clk", CLK_PLLDTY_ACPU_DIV2, 15, 1, 7, 17, + BUS_MSTOP(3, BIT(4))), + DEF_MOD("gpu_0_ace_clk", CLK_PLLDTY_ACPU_DIV2, 15, 2, 7, 18, + BUS_MSTOP(3, BIT(4))), }; static const struct rzv2h_reset r9a09g057_resets[] __initconst = { @@ -275,6 +300,10 @@ static const struct rzv2h_reset r9a09g057_resets[] __initconst = { DEF_RST(10, 7, 4, 24), /* SDHI_0_IXRST */ DEF_RST(10, 8, 4, 25), /* SDHI_1_IXRST */ DEF_RST(10, 9, 4, 26), /* SDHI_2_IXRST */ + DEF_RST(10, 12, 4, 29), /* USB2_0_U2H0_HRESETN */ + DEF_RST(10, 13, 4, 30), /* USB2_0_U2H1_HRESETN */ + DEF_RST(10, 14, 4, 31), /* USB2_0_U2P_EXL_SYSRST */ + DEF_RST(10, 15, 5, 0), /* USB2_0_PRESETN */ DEF_RST(12, 5, 5, 22), /* CRU_0_PRESETN */ DEF_RST(12, 6, 5, 23), /* CRU_0_ARESETN */ DEF_RST(12, 7, 5, 24), /* CRU_0_S_RESETN */ @@ -287,6 +316,9 @@ static const struct rzv2h_reset r9a09g057_resets[] __initconst = { DEF_RST(12, 14, 5, 31), /* CRU_3_PRESETN */ DEF_RST(12, 15, 6, 0), /* CRU_3_ARESETN */ DEF_RST(13, 0, 6, 1), /* CRU_3_S_RESETN */ + DEF_RST(13, 13, 6, 14), /* GPU_0_RESETN */ + DEF_RST(13, 14, 6, 15), /* GPU_0_AXI_RESETN */ + DEF_RST(13, 15, 6, 16), /* GPU_0_ACE_RESETN */ }; const struct rzv2h_cpg_info r9a09g057_cpg_info __initconst = { diff --git a/drivers/clk/renesas/renesas-cpg-mssr.c b/drivers/clk/renesas/renesas-cpg-mssr.c index da021ee446ec..71431970d6e6 100644 --- a/drivers/clk/renesas/renesas-cpg-mssr.c +++ b/drivers/clk/renesas/renesas-cpg-mssr.c @@ -27,6 +27,7 @@ #include <linux/psci.h> #include <linux/reset-controller.h> #include <linux/slab.h> +#include <linux/string_choices.h> #include <dt-bindings/clock/renesas-cpg-mssr.h> @@ -204,7 +205,7 @@ static int cpg_mstp_clock_endisable(struct clk_hw *hw, bool enable) int error; dev_dbg(dev, "MSTP %u%02u/%pC %s\n", reg, bit, hw->clk, - enable ? 
"ON" : "OFF"); + str_on_off(enable)); spin_lock_irqsave(&priv->rmw_lock, flags); if (priv->reg_layout == CLK_REG_LAYOUT_RZ_A) { diff --git a/drivers/clk/renesas/rzg2l-cpg.c b/drivers/clk/renesas/rzg2l-cpg.c index b91dfbfb01e3..a8628f64a03b 100644 --- a/drivers/clk/renesas/rzg2l-cpg.c +++ b/drivers/clk/renesas/rzg2l-cpg.c @@ -27,6 +27,7 @@ #include <linux/pm_domain.h> #include <linux/reset-controller.h> #include <linux/slab.h> +#include <linux/string_choices.h> #include <linux/units.h> #include <dt-bindings/clock/renesas-cpg-mssr.h> @@ -1217,7 +1218,7 @@ static int rzg2l_mod_clock_endisable(struct clk_hw *hw, bool enable) } dev_dbg(dev, "CLK_ON 0x%x/%pC %s\n", CLK_ON_R(reg), hw->clk, - enable ? "ON" : "OFF"); + str_on_off(enable)); value = bitmask << 16; if (enable) diff --git a/drivers/clk/renesas/rzv2h-cpg.c b/drivers/clk/renesas/rzv2h-cpg.c index 2b9771ab2b3f..bcc496e8cbcd 100644 --- a/drivers/clk/renesas/rzv2h-cpg.c +++ b/drivers/clk/renesas/rzv2h-cpg.c @@ -25,6 +25,7 @@ #include <linux/pm_domain.h> #include <linux/refcount.h> #include <linux/reset-controller.h> +#include <linux/string_choices.h> #include <dt-bindings/clock/renesas-cpg-mssr.h> @@ -44,10 +45,18 @@ #define CPG_BUS_1_MSTOP (0xd00) #define CPG_BUS_MSTOP(m) (CPG_BUS_1_MSTOP + ((m) - 1) * 4) -#define KDIV(val) ((s16)FIELD_GET(GENMASK(31, 16), (val))) -#define MDIV(val) FIELD_GET(GENMASK(15, 6), (val)) -#define PDIV(val) FIELD_GET(GENMASK(5, 0), (val)) -#define SDIV(val) FIELD_GET(GENMASK(2, 0), (val)) +#define CPG_PLL_STBY(x) ((x)) +#define CPG_PLL_STBY_RESETB BIT(0) +#define CPG_PLL_STBY_RESETB_WEN BIT(16) +#define CPG_PLL_CLK1(x) ((x) + 0x004) +#define CPG_PLL_CLK1_KDIV(x) ((s16)FIELD_GET(GENMASK(31, 16), (x))) +#define CPG_PLL_CLK1_MDIV(x) FIELD_GET(GENMASK(15, 6), (x)) +#define CPG_PLL_CLK1_PDIV(x) FIELD_GET(GENMASK(5, 0), (x)) +#define CPG_PLL_CLK2(x) ((x) + 0x008) +#define CPG_PLL_CLK2_SDIV(x) FIELD_GET(GENMASK(2, 0), (x)) +#define CPG_PLL_MON(x) ((x) + 0x010) +#define CPG_PLL_MON_RESETB BIT(0) +#define CPG_PLL_MON_LOCK BIT(4) #define DDIV_DIVCTL_WEN(shift) BIT((shift) + 16) @@ -94,8 +103,7 @@ struct pll_clk { struct rzv2h_cpg_priv *priv; void __iomem *base; struct clk_hw hw; - unsigned int conf; - unsigned int type; + struct pll pll; }; #define to_pll(_hw) container_of(_hw, struct pll_clk, hw) @@ -110,7 +118,7 @@ struct pll_clk { * @on_index: register offset * @on_bit: ON/MON bit * @mon_index: monitor register offset - * @mon_bit: montor bit + * @mon_bit: monitor bit */ struct mod_clock { struct rzv2h_cpg_priv *priv; @@ -140,27 +148,78 @@ struct ddiv_clk { #define to_ddiv_clock(_div) container_of(_div, struct ddiv_clk, div) +static int rzv2h_cpg_pll_clk_is_enabled(struct clk_hw *hw) +{ + struct pll_clk *pll_clk = to_pll(hw); + struct rzv2h_cpg_priv *priv = pll_clk->priv; + u32 val = readl(priv->base + CPG_PLL_MON(pll_clk->pll.offset)); + + /* Ensure both RESETB and LOCK bits are set */ + return (val & (CPG_PLL_MON_RESETB | CPG_PLL_MON_LOCK)) == + (CPG_PLL_MON_RESETB | CPG_PLL_MON_LOCK); +} + +static int rzv2h_cpg_pll_clk_enable(struct clk_hw *hw) +{ + struct pll_clk *pll_clk = to_pll(hw); + struct rzv2h_cpg_priv *priv = pll_clk->priv; + struct pll pll = pll_clk->pll; + u32 stby_offset; + u32 mon_offset; + u32 val; + int ret; + + if (rzv2h_cpg_pll_clk_is_enabled(hw)) + return 0; + + stby_offset = CPG_PLL_STBY(pll.offset); + mon_offset = CPG_PLL_MON(pll.offset); + + writel(CPG_PLL_STBY_RESETB_WEN | CPG_PLL_STBY_RESETB, + priv->base + stby_offset); + + /* + * Ensure PLL enters into normal mode + * + * Note: There is no HW 
information about the worst case latency. + * + * Since this latency might depend on external crystal or PLL rate, + * use a "super" safe timeout value. + */ + ret = readl_poll_timeout_atomic(priv->base + mon_offset, val, + (val & (CPG_PLL_MON_RESETB | CPG_PLL_MON_LOCK)) == + (CPG_PLL_MON_RESETB | CPG_PLL_MON_LOCK), 200, 2000); + if (ret) + dev_err(priv->dev, "Failed to enable PLL 0x%x/%pC\n", + stby_offset, hw->clk); + + return ret; +} + static unsigned long rzv2h_cpg_pll_clk_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) { struct pll_clk *pll_clk = to_pll(hw); struct rzv2h_cpg_priv *priv = pll_clk->priv; + struct pll pll = pll_clk->pll; unsigned int clk1, clk2; u64 rate; - if (!PLL_CLK_ACCESS(pll_clk->conf)) + if (!pll.has_clkn) return 0; - clk1 = readl(priv->base + PLL_CLK1_OFFSET(pll_clk->conf)); - clk2 = readl(priv->base + PLL_CLK2_OFFSET(pll_clk->conf)); + clk1 = readl(priv->base + CPG_PLL_CLK1(pll.offset)); + clk2 = readl(priv->base + CPG_PLL_CLK2(pll.offset)); - rate = mul_u64_u32_shr(parent_rate, (MDIV(clk1) << 16) + KDIV(clk1), - 16 + SDIV(clk2)); + rate = mul_u64_u32_shr(parent_rate, (CPG_PLL_CLK1_MDIV(clk1) << 16) + + CPG_PLL_CLK1_KDIV(clk1), 16 + CPG_PLL_CLK2_SDIV(clk2)); - return DIV_ROUND_CLOSEST_ULL(rate, PDIV(clk1)); + return DIV_ROUND_CLOSEST_ULL(rate, CPG_PLL_CLK1_PDIV(clk1)); } static const struct clk_ops rzv2h_cpg_pll_ops = { + .is_enabled = rzv2h_cpg_pll_clk_is_enabled, + .enable = rzv2h_cpg_pll_clk_enable, .recalc_rate = rzv2h_cpg_pll_clk_recalc_rate, }; @@ -193,10 +252,9 @@ rzv2h_cpg_pll_clk_register(const struct cpg_core_clk *core, init.num_parents = 1; pll_clk->hw.init = &init; - pll_clk->conf = core->cfg.conf; + pll_clk->pll = core->cfg.pll; pll_clk->base = base; pll_clk->priv = priv; - pll_clk->type = core->type; ret = devm_clk_hw_register(dev, &pll_clk->hw); if (ret) @@ -241,6 +299,9 @@ static inline int rzv2h_cpg_wait_ddiv_clk_update_done(void __iomem *base, u8 mon u32 bitmask = BIT(mon); u32 val; + if (mon == CSDIV_NO_MON) + return 0; + return readl_poll_timeout_atomic(base + CPG_CLKSTATUS0, val, !(val & bitmask), 10, 200); } @@ -272,12 +333,6 @@ static int rzv2h_ddiv_set_rate(struct clk_hw *hw, unsigned long rate, writel(val, divider->reg); ret = rzv2h_cpg_wait_ddiv_clk_update_done(priv->base, ddiv->mon); - if (ret) - goto ddiv_timeout; - - spin_unlock_irqrestore(divider->lock, flags); - - return 0; ddiv_timeout: spin_unlock_irqrestore(divider->lock, flags); @@ -320,7 +375,10 @@ rzv2h_cpg_ddiv_clk_register(const struct cpg_core_clk *core, return ERR_PTR(-ENOMEM); init.name = core->name; - init.ops = &rzv2h_ddiv_clk_divider_ops; + if (cfg_ddiv.no_rmw) + init.ops = &clk_divider_ops; + else + init.ops = &rzv2h_ddiv_clk_divider_ops; init.parent_names = &parent_name; init.num_parents = 1; @@ -342,6 +400,24 @@ rzv2h_cpg_ddiv_clk_register(const struct cpg_core_clk *core, return div->hw.clk; } +static struct clk * __init +rzv2h_cpg_mux_clk_register(const struct cpg_core_clk *core, + struct rzv2h_cpg_priv *priv) +{ + struct smuxed mux = core->cfg.smux; + const struct clk_hw *clk_hw; + + clk_hw = devm_clk_hw_register_mux(priv->dev, core->name, + core->parent_names, core->num_parents, + core->flag, priv->base + mux.offset, + mux.shift, mux.width, + core->mux_flags, &priv->rmw_lock); + if (IS_ERR(clk_hw)) + return ERR_CAST(clk_hw); + + return clk_hw->clk; +} + static struct clk *rzv2h_cpg_clk_src_twocell_get(struct of_phandle_args *clkspec, void *data) @@ -426,6 +502,9 @@ rzv2h_cpg_register_core_clk(const struct cpg_core_clk *core, case CLK_TYPE_DDIV: clk = 
rzv2h_cpg_ddiv_clk_register(core, priv); break; + case CLK_TYPE_SMUX: + clk = rzv2h_cpg_mux_clk_register(core, priv); + break; default: goto fail; } @@ -494,11 +573,14 @@ static int rzv2h_mod_clock_is_enabled(struct clk_hw *hw) if (clock->mon_index >= 0) { offset = GET_CLK_MON_OFFSET(clock->mon_index); bitmask = BIT(clock->mon_bit); - } else { - offset = GET_CLK_ON_OFFSET(clock->on_index); - bitmask = BIT(clock->on_bit); + + if (!(readl(priv->base + offset) & bitmask)) + return 0; } + offset = GET_CLK_ON_OFFSET(clock->on_index); + bitmask = BIT(clock->on_bit); + return readl(priv->base + offset) & bitmask; } @@ -514,7 +596,7 @@ static int rzv2h_mod_clock_endisable(struct clk_hw *hw, bool enable) int error; dev_dbg(dev, "CLK_ON 0x%x/%pC %s\n", reg, hw->clk, - enable ? "ON" : "OFF"); + str_on_off(enable)); if (enabled == enable) return 0; @@ -658,8 +740,8 @@ fail: mod->name, PTR_ERR(clk)); } -static int rzv2h_cpg_assert(struct reset_controller_dev *rcdev, - unsigned long id) +static int __rzv2h_cpg_assert(struct reset_controller_dev *rcdev, + unsigned long id, bool assert) { struct rzv2h_cpg_priv *priv = rcdev_to_priv(rcdev); unsigned int reg = GET_RST_OFFSET(priv->resets[id].reset_index); @@ -667,35 +749,31 @@ static int rzv2h_cpg_assert(struct reset_controller_dev *rcdev, u8 monbit = priv->resets[id].mon_bit; u32 value = mask << 16; - dev_dbg(rcdev->dev, "assert id:%ld offset:0x%x\n", id, reg); + dev_dbg(rcdev->dev, "%s id:%ld offset:0x%x\n", + assert ? "assert" : "deassert", id, reg); + if (!assert) + value |= mask; writel(value, priv->base + reg); reg = GET_RST_MON_OFFSET(priv->resets[id].mon_index); mask = BIT(monbit); return readl_poll_timeout_atomic(priv->base + reg, value, - value & mask, 10, 200); + assert ? (value & mask) : !(value & mask), + 10, 200); +} + +static int rzv2h_cpg_assert(struct reset_controller_dev *rcdev, + unsigned long id) +{ + return __rzv2h_cpg_assert(rcdev, id, true); } static int rzv2h_cpg_deassert(struct reset_controller_dev *rcdev, unsigned long id) { - struct rzv2h_cpg_priv *priv = rcdev_to_priv(rcdev); - unsigned int reg = GET_RST_OFFSET(priv->resets[id].reset_index); - u32 mask = BIT(priv->resets[id].reset_bit); - u8 monbit = priv->resets[id].mon_bit; - u32 value = (mask << 16) | mask; - - dev_dbg(rcdev->dev, "deassert id:%ld offset:0x%x\n", id, reg); - - writel(value, priv->base + reg); - - reg = GET_RST_MON_OFFSET(priv->resets[id].mon_index); - mask = BIT(monbit); - - return readl_poll_timeout_atomic(priv->base + reg, value, - !(value & mask), 10, 200); + return __rzv2h_cpg_assert(rcdev, id, false); } static int rzv2h_cpg_reset(struct reset_controller_dev *rcdev, @@ -967,18 +1045,24 @@ static int __init rzv2h_cpg_probe(struct platform_device *pdev) } static const struct of_device_id rzv2h_cpg_match[] = { -#ifdef CONFIG_CLK_R9A09G057 - { - .compatible = "renesas,r9a09g057-cpg", - .data = &r9a09g057_cpg_info, - }, -#endif #ifdef CONFIG_CLK_R9A09G047 { .compatible = "renesas,r9a09g047-cpg", .data = &r9a09g047_cpg_info, }, #endif +#ifdef CONFIG_CLK_R9A09G056 + { + .compatible = "renesas,r9a09g056-cpg", + .data = &r9a09g056_cpg_info, + }, +#endif +#ifdef CONFIG_CLK_R9A09G057 + { + .compatible = "renesas,r9a09g057-cpg", + .data = &r9a09g057_cpg_info, + }, +#endif { /* sentinel */ } }; diff --git a/drivers/clk/renesas/rzv2h-cpg.h b/drivers/clk/renesas/rzv2h-cpg.h index 576a070763cb..9104b1cd276c 100644 --- a/drivers/clk/renesas/rzv2h-cpg.h +++ b/drivers/clk/renesas/rzv2h-cpg.h @@ -11,20 +11,51 @@ #include <linux/bitfield.h> /** + * struct pll - Structure for 
PLL configuration + * + * @offset: STBY register offset + * @has_clkn: Flag to indicate if CLK1/2 are accessible or not + */ +struct pll { + unsigned int offset:9; + unsigned int has_clkn:1; +}; + +#define PLL_PACK(_offset, _has_clkn) \ + ((struct pll){ \ + .offset = _offset, \ + .has_clkn = _has_clkn \ + }) + +#define PLLCA55 PLL_PACK(0x60, 1) +#define PLLGPU PLL_PACK(0x120, 1) + +/** * struct ddiv - Structure for dynamic switching divider * * @offset: register offset * @shift: position of the divider bit * @width: width of the divider * @monbit: monitor bit in CPG_CLKSTATUS0 register + * @no_rmw: flag to indicate if the register is read-modify-write + * (1: no RMW, 0: RMW) */ struct ddiv { unsigned int offset:11; unsigned int shift:4; unsigned int width:4; unsigned int monbit:5; + unsigned int no_rmw:1; }; +/* + * On RZ/V2H(P), the dynamic divider clock supports up to 19 monitor bits, + * while on RZ/G3E, it supports up to 16 monitor bits. Use the maximum value + * `0x1f` to indicate that monitor bits are not supported for static divider + * clocks. + */ +#define CSDIV_NO_MON (0x1f) + #define DDIV_PACK(_offset, _shift, _width, _monbit) \ ((struct ddiv){ \ .offset = _offset, \ @@ -33,10 +64,41 @@ struct ddiv { .monbit = _monbit \ }) +#define DDIV_PACK_NO_RMW(_offset, _shift, _width, _monbit) \ + ((struct ddiv){ \ + .offset = (_offset), \ + .shift = (_shift), \ + .width = (_width), \ + .monbit = (_monbit), \ + .no_rmw = 1 \ + }) + +/** + * struct smuxed - Structure for static muxed clocks + * + * @offset: register offset + * @shift: position of the divider field + * @width: width of the divider field + */ +struct smuxed { + unsigned int offset:11; + unsigned int shift:4; + unsigned int width:4; +}; + +#define SMUX_PACK(_offset, _shift, _width) \ + ((struct smuxed){ \ + .offset = (_offset), \ + .shift = (_shift), \ + .width = (_width), \ + }) + +#define CPG_SSEL1 (0x304) #define CPG_CDDIV0 (0x400) #define CPG_CDDIV1 (0x404) #define CPG_CDDIV3 (0x40C) #define CPG_CDDIV4 (0x410) +#define CPG_CSDIV0 (0x500) #define CDDIV0_DIVCTL1 DDIV_PACK(CPG_CDDIV0, 4, 3, 1) #define CDDIV0_DIVCTL2 DDIV_PACK(CPG_CDDIV0, 8, 3, 2) @@ -44,12 +106,18 @@ struct ddiv { #define CDDIV1_DIVCTL1 DDIV_PACK(CPG_CDDIV1, 4, 2, 5) #define CDDIV1_DIVCTL2 DDIV_PACK(CPG_CDDIV1, 8, 2, 6) #define CDDIV1_DIVCTL3 DDIV_PACK(CPG_CDDIV1, 12, 2, 7) +#define CDDIV3_DIVCTL1 DDIV_PACK(CPG_CDDIV3, 4, 3, 13) #define CDDIV3_DIVCTL2 DDIV_PACK(CPG_CDDIV3, 8, 3, 14) #define CDDIV3_DIVCTL3 DDIV_PACK(CPG_CDDIV3, 12, 1, 15) #define CDDIV4_DIVCTL0 DDIV_PACK(CPG_CDDIV4, 0, 1, 16) #define CDDIV4_DIVCTL1 DDIV_PACK(CPG_CDDIV4, 4, 1, 17) #define CDDIV4_DIVCTL2 DDIV_PACK(CPG_CDDIV4, 8, 1, 18) +#define CSDIV0_DIVCTL3 DDIV_PACK_NO_RMW(CPG_CSDIV0, 12, 2, CSDIV_NO_MON) + +#define SSEL1_SELCTL2 SMUX_PACK(CPG_SSEL1, 8, 1) +#define SSEL1_SELCTL3 SMUX_PACK(CPG_SSEL1, 12, 1) + #define BUS_MSTOP_IDX_MASK GENMASK(31, 16) #define BUS_MSTOP_BITS_MASK GENMASK(15, 0) #define BUS_MSTOP(idx, mask) (FIELD_PREP_CONST(BUS_MSTOP_IDX_MASK, (idx)) | \ @@ -74,8 +142,13 @@ struct cpg_core_clk { union { unsigned int conf; struct ddiv ddiv; + struct pll pll; + struct smuxed smux; } cfg; const struct clk_div_table *dtable; + const char * const *parent_names; + unsigned int num_parents; + u8 mux_flags; u32 flag; }; @@ -85,20 +158,15 @@ enum clk_types { CLK_TYPE_FF, /* Fixed Factor Clock */ CLK_TYPE_PLL, CLK_TYPE_DDIV, /* Dynamic Switching Divider */ + CLK_TYPE_SMUX, /* Static Mux */ }; -/* BIT(31) indicates if CLK1/2 are accessible or not */ -#define PLL_CONF(n) (BIT(31) | ((n) & 
~GENMASK(31, 16))) -#define PLL_CLK_ACCESS(n) ((n) & BIT(31) ? 1 : 0) -#define PLL_CLK1_OFFSET(n) ((n) & ~GENMASK(31, 16)) -#define PLL_CLK2_OFFSET(n) (((n) & ~GENMASK(31, 16)) + (0x4)) - #define DEF_TYPE(_name, _id, _type...) \ { .name = _name, .id = _id, .type = _type } #define DEF_BASE(_name, _id, _type, _parent...) \ DEF_TYPE(_name, _id, _type, .parent = _parent) -#define DEF_PLL(_name, _id, _parent, _conf) \ - DEF_TYPE(_name, _id, CLK_TYPE_PLL, .parent = _parent, .cfg.conf = _conf) +#define DEF_PLL(_name, _id, _parent, _pll_packed) \ + DEF_TYPE(_name, _id, CLK_TYPE_PLL, .parent = _parent, .cfg.pll = _pll_packed) #define DEF_INPUT(_name, _id) \ DEF_TYPE(_name, _id, CLK_TYPE_IN) #define DEF_FIXED(_name, _id, _parent, _mult, _div) \ @@ -109,6 +177,15 @@ enum clk_types { .parent = _parent, \ .dtable = _dtable, \ .flag = CLK_DIVIDER_HIWORD_MASK) +#define DEF_CSDIV(_name, _id, _parent, _ddiv_packed, _dtable) \ + DEF_DDIV(_name, _id, _parent, _ddiv_packed, _dtable) +#define DEF_SMUX(_name, _id, _smux_packed, _parent_names) \ + DEF_TYPE(_name, _id, CLK_TYPE_SMUX, \ + .cfg.smux = _smux_packed, \ + .parent_names = _parent_names, \ + .num_parents = ARRAY_SIZE(_parent_names), \ + .flag = CLK_SET_RATE_PARENT, \ + .mux_flags = CLK_MUX_HIWORD_MASK) /** * struct rzv2h_mod_clk - Module Clocks definitions @@ -221,6 +298,7 @@ struct rzv2h_cpg_info { }; extern const struct rzv2h_cpg_info r9a09g047_cpg_info; +extern const struct rzv2h_cpg_info r9a09g056_cpg_info; extern const struct rzv2h_cpg_info r9a09g057_cpg_info; #endif /* __RENESAS_RZV2H_CPG_H__ */ diff --git a/drivers/clk/rockchip/Makefile b/drivers/clk/rockchip/Makefile index e8ece20aebfd..c281a9738d9f 100644 --- a/drivers/clk/rockchip/Makefile +++ b/drivers/clk/rockchip/Makefile @@ -8,6 +8,7 @@ obj-$(CONFIG_COMMON_CLK_ROCKCHIP) += clk-rockchip.o clk-rockchip-y += clk.o clk-rockchip-y += clk-pll.o clk-rockchip-y += clk-cpu.o +clk-rockchip-y += clk-gate-grf.o clk-rockchip-y += clk-half-divider.o clk-rockchip-y += clk-inverter.o clk-rockchip-y += clk-mmc-phase.o diff --git a/drivers/clk/rockchip/clk-gate-grf.c b/drivers/clk/rockchip/clk-gate-grf.c new file mode 100644 index 000000000000..8122f471f391 --- /dev/null +++ b/drivers/clk/rockchip/clk-gate-grf.c @@ -0,0 +1,105 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright (c) 2025 Collabora Ltd. + * Author: Nicolas Frattaroli <nicolas.frattaroli@collabora.com> + * + * Certain clocks on Rockchip are "gated" behind an additional register bit + * write in a GRF register, such as the SAI MCLKs on RK3576. This code + * implements a clock driver for these types of gates, based on regmaps. + */ + +#include <linux/clk.h> +#include <linux/clk-provider.h> +#include <linux/regmap.h> +#include <linux/slab.h> +#include "clk.h" + +struct rockchip_gate_grf { + struct clk_hw hw; + struct regmap *regmap; + unsigned int reg; + unsigned int shift; + u8 flags; +}; + +#define to_gate_grf(_hw) container_of(_hw, struct rockchip_gate_grf, hw) + +static int rockchip_gate_grf_enable(struct clk_hw *hw) +{ + struct rockchip_gate_grf *gate = to_gate_grf(hw); + u32 val = !(gate->flags & CLK_GATE_SET_TO_DISABLE) ? BIT(gate->shift) : 0; + u32 hiword = ((gate->flags & CLK_GATE_HIWORD_MASK) ? 1 : 0) << (gate->shift + 16); + int ret; + + ret = regmap_update_bits(gate->regmap, gate->reg, + hiword | BIT(gate->shift), hiword | val); + + return ret; +} + +static void rockchip_gate_grf_disable(struct clk_hw *hw) +{ + struct rockchip_gate_grf *gate = to_gate_grf(hw); + u32 val = !(gate->flags & CLK_GATE_SET_TO_DISABLE) ? 
0 : BIT(gate->shift); + u32 hiword = ((gate->flags & CLK_GATE_HIWORD_MASK) ? 1 : 0) << (gate->shift + 16); + + regmap_update_bits(gate->regmap, gate->reg, + hiword | BIT(gate->shift), hiword | val); +} + +static int rockchip_gate_grf_is_enabled(struct clk_hw *hw) +{ + struct rockchip_gate_grf *gate = to_gate_grf(hw); + bool invert = !!(gate->flags & CLK_GATE_SET_TO_DISABLE); + int ret; + + ret = regmap_test_bits(gate->regmap, gate->reg, BIT(gate->shift)); + if (ret < 0) + ret = 0; + + return invert ? 1 - ret : ret; + +} + +static const struct clk_ops rockchip_gate_grf_ops = { + .enable = rockchip_gate_grf_enable, + .disable = rockchip_gate_grf_disable, + .is_enabled = rockchip_gate_grf_is_enabled, +}; + +struct clk *rockchip_clk_register_gate_grf(const char *name, + const char *parent_name, unsigned long flags, + struct regmap *regmap, unsigned int reg, unsigned int shift, + u8 gate_flags) +{ + struct rockchip_gate_grf *gate; + struct clk_init_data init; + struct clk *clk; + + if (IS_ERR(regmap)) { + pr_err("%s: regmap not available\n", __func__); + return ERR_PTR(-EOPNOTSUPP); + } + + gate = kzalloc(sizeof(*gate), GFP_KERNEL); + if (!gate) + return ERR_PTR(-ENOMEM); + + init.name = name; + init.flags = flags; + init.num_parents = parent_name ? 1 : 0; + init.parent_names = parent_name ? &parent_name : NULL; + init.ops = &rockchip_gate_grf_ops; + + gate->hw.init = &init; + gate->regmap = regmap; + gate->reg = reg; + gate->shift = shift; + gate->flags = gate_flags; + + clk = clk_register(NULL, &gate->hw); + if (IS_ERR(clk)) + kfree(gate); + + return clk; +} diff --git a/drivers/clk/rockchip/clk-mmc-phase.c b/drivers/clk/rockchip/clk-mmc-phase.c index 91012078681b..b3ed8e7523e5 100644 --- a/drivers/clk/rockchip/clk-mmc-phase.c +++ b/drivers/clk/rockchip/clk-mmc-phase.c @@ -9,11 +9,14 @@ #include <linux/clk-provider.h> #include <linux/io.h> #include <linux/kernel.h> +#include <linux/regmap.h> #include "clk.h" struct rockchip_mmc_clock { struct clk_hw hw; void __iomem *reg; + struct regmap *grf; + int grf_reg; int shift; int cached_phase; struct notifier_block clk_rate_change_nb; @@ -54,7 +57,12 @@ static int rockchip_mmc_get_phase(struct clk_hw *hw) if (!rate) return 0; - raw_value = readl(mmc_clock->reg) >> (mmc_clock->shift); + if (mmc_clock->grf) + regmap_read(mmc_clock->grf, mmc_clock->grf_reg, &raw_value); + else + raw_value = readl(mmc_clock->reg); + + raw_value >>= mmc_clock->shift; degrees = (raw_value & ROCKCHIP_MMC_DEGREE_MASK) * 90; @@ -134,8 +142,12 @@ static int rockchip_mmc_set_phase(struct clk_hw *hw, int degrees) raw_value = delay_num ? 
ROCKCHIP_MMC_DELAY_SEL : 0; raw_value |= delay_num << ROCKCHIP_MMC_DELAYNUM_OFFSET; raw_value |= nineties; - writel(HIWORD_UPDATE(raw_value, 0x07ff, mmc_clock->shift), - mmc_clock->reg); + raw_value = HIWORD_UPDATE(raw_value, 0x07ff, mmc_clock->shift); + + if (mmc_clock->grf) + regmap_write(mmc_clock->grf, mmc_clock->grf_reg, raw_value); + else + writel(raw_value, mmc_clock->reg); pr_debug("%s->set_phase(%d) delay_nums=%u reg[0x%p]=0x%03x actual_degrees=%d\n", clk_hw_get_name(hw), degrees, delay_num, @@ -189,7 +201,9 @@ static int rockchip_mmc_clk_rate_notify(struct notifier_block *nb, struct clk *rockchip_clk_register_mmc(const char *name, const char *const *parent_names, u8 num_parents, - void __iomem *reg, int shift) + void __iomem *reg, + struct regmap *grf, int grf_reg, + int shift) { struct clk_init_data init; struct rockchip_mmc_clock *mmc_clock; @@ -208,6 +222,8 @@ struct clk *rockchip_clk_register_mmc(const char *name, mmc_clock->hw.init = &init; mmc_clock->reg = reg; + mmc_clock->grf = grf; + mmc_clock->grf_reg = grf_reg; mmc_clock->shift = shift; clk = clk_register(NULL, &mmc_clock->hw); diff --git a/drivers/clk/rockchip/clk-pll.c b/drivers/clk/rockchip/clk-pll.c index 2c2abb3b4210..af74439a7457 100644 --- a/drivers/clk/rockchip/clk-pll.c +++ b/drivers/clk/rockchip/clk-pll.c @@ -1027,16 +1027,6 @@ static int rockchip_rk3588_pll_is_enabled(struct clk_hw *hw) return !(pllcon & RK3588_PLLCON1_PWRDOWN); } -static int rockchip_rk3588_pll_init(struct clk_hw *hw) -{ - struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw); - - if (!(pll->flags & ROCKCHIP_PLL_SYNC_RATE)) - return 0; - - return 0; -} - static const struct clk_ops rockchip_rk3588_pll_clk_norate_ops = { .recalc_rate = rockchip_rk3588_pll_recalc_rate, .enable = rockchip_rk3588_pll_enable, @@ -1051,7 +1041,6 @@ static const struct clk_ops rockchip_rk3588_pll_clk_ops = { .enable = rockchip_rk3588_pll_enable, .disable = rockchip_rk3588_pll_disable, .is_enabled = rockchip_rk3588_pll_is_enabled, - .init = rockchip_rk3588_pll_init, }; /* diff --git a/drivers/clk/rockchip/clk-rk3036.c b/drivers/clk/rockchip/clk-rk3036.c index d341ce0708aa..df9330958c83 100644 --- a/drivers/clk/rockchip/clk-rk3036.c +++ b/drivers/clk/rockchip/clk-rk3036.c @@ -123,6 +123,7 @@ PNAME(mux_timer_p) = { "xin24m", "pclk_peri_src" }; PNAME(mux_pll_src_apll_dpll_gpll_usb480m_p) = { "apll", "dpll", "gpll", "usb480m" }; PNAME(mux_pll_src_dmyapll_dpll_gpll_xin24_p) = { "dummy_apll", "dpll", "gpll", "xin24m" }; +PNAME(mux_usb480m_p) = { "usb480m_phy", "xin24m" }; PNAME(mux_mmc_src_p) = { "apll", "dpll", "gpll", "xin24m" }; PNAME(mux_i2s_pre_p) = { "i2s_src", "i2s_frac", "ext_i2s", "xin12m" }; PNAME(mux_i2s_clkout_p) = { "i2s_pre", "xin12m" }; @@ -423,6 +424,9 @@ static struct rockchip_clk_branch rk3036_clk_branches[] __initdata = { GATE(PCLK_GPIO0, "pclk_gpio0", "pclk_peri", 0, RK2928_CLKGATE_CON(8), 9, GFLAGS), GATE(PCLK_GPIO1, "pclk_gpio1", "pclk_peri", 0, RK2928_CLKGATE_CON(8), 10, GFLAGS), GATE(PCLK_GPIO2, "pclk_gpio2", "pclk_peri", 0, RK2928_CLKGATE_CON(8), 11, GFLAGS), + + MUX(SCLK_USB480M, "usb480m", mux_usb480m_p, CLK_SET_RATE_PARENT, + RK2928_MISC_CON, 15, 1, MFLAGS), }; static const char *const rk3036_critical_clocks[] __initconst = { @@ -431,6 +435,7 @@ static const char *const rk3036_critical_clocks[] __initconst = { "hclk_peri", "pclk_peri", "pclk_ddrupctl", + "ddrphy", }; static void __init rk3036_clk_init(struct device_node *np) @@ -438,7 +443,6 @@ static void __init rk3036_clk_init(struct device_node *np) struct rockchip_clk_provider *ctx; 
unsigned long clk_nr_clks; void __iomem *reg_base; - struct clk *clk; reg_base = of_iomap(np, 0); if (!reg_base) { @@ -462,11 +466,6 @@ static void __init rk3036_clk_init(struct device_node *np) return; } - clk = clk_register_fixed_factor(NULL, "usb480m", "xin24m", 0, 20, 1); - if (IS_ERR(clk)) - pr_warn("%s: could not register clock usb480m: %ld\n", - __func__, PTR_ERR(clk)); - rockchip_clk_register_plls(ctx, rk3036_pll_clks, ARRAY_SIZE(rk3036_pll_clks), RK3036_GRF_SOC_STATUS0); diff --git a/drivers/clk/rockchip/clk-rk3288.c b/drivers/clk/rockchip/clk-rk3288.c index 90d329216064..0a1e017df7c6 100644 --- a/drivers/clk/rockchip/clk-rk3288.c +++ b/drivers/clk/rockchip/clk-rk3288.c @@ -418,7 +418,7 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = { RK3288_CLKSEL_CON(32), 14, 2, MFLAGS, 8, 5, DFLAGS, RK3288_CLKGATE_CON(3), 11, GFLAGS), MUXGRF(0, "aclk_vcodec_pre", mux_aclk_vcodec_pre_p, CLK_SET_RATE_PARENT, - RK3288_GRF_SOC_CON(0), 7, 1, MFLAGS), + RK3288_GRF_SOC_CON(0), 7, 1, MFLAGS, grf_type_sys), GATE(ACLK_VCODEC, "aclk_vcodec", "aclk_vcodec_pre", 0, RK3288_CLKGATE_CON(9), 0, GFLAGS), diff --git a/drivers/clk/rockchip/clk-rk3328.c b/drivers/clk/rockchip/clk-rk3328.c index cf60fcf2fa5c..cd5f65b6cdf5 100644 --- a/drivers/clk/rockchip/clk-rk3328.c +++ b/drivers/clk/rockchip/clk-rk3328.c @@ -677,9 +677,9 @@ static struct rockchip_clk_branch rk3328_clk_branches[] __initdata = { RK3328_CLKSEL_CON(27), 15, 1, MFLAGS, 8, 5, DFLAGS, RK3328_CLKGATE_CON(3), 5, GFLAGS), MUXGRF(SCLK_MAC2IO, "clk_mac2io", mux_mac2io_src_p, CLK_SET_RATE_NO_REPARENT, - RK3328_GRF_MAC_CON1, 10, 1, MFLAGS), + RK3328_GRF_MAC_CON1, 10, 1, MFLAGS, grf_type_sys), MUXGRF(SCLK_MAC2IO_EXT, "clk_mac2io_ext", mux_mac2io_ext_p, CLK_SET_RATE_NO_REPARENT, - RK3328_GRF_SOC_CON4, 14, 1, MFLAGS), + RK3328_GRF_SOC_CON4, 14, 1, MFLAGS, grf_type_sys), COMPOSITE(SCLK_MAC2PHY_SRC, "clk_mac2phy_src", mux_2plls_p, 0, RK3328_CLKSEL_CON(26), 7, 1, MFLAGS, 0, 5, DFLAGS, @@ -692,7 +692,7 @@ static struct rockchip_clk_branch rk3328_clk_branches[] __initdata = { RK3328_CLKSEL_CON(26), 8, 2, DFLAGS, RK3328_CLKGATE_CON(9), 2, GFLAGS), MUXGRF(SCLK_MAC2PHY, "clk_mac2phy", mux_mac2phy_src_p, CLK_SET_RATE_NO_REPARENT, - RK3328_GRF_MAC_CON2, 10, 1, MFLAGS), + RK3328_GRF_MAC_CON2, 10, 1, MFLAGS, grf_type_sys), FACTOR(0, "xin12m", "xin24m", 0, 1, 2), diff --git a/drivers/clk/rockchip/clk-rk3528.c b/drivers/clk/rockchip/clk-rk3528.c index b8b577b902a0..a5ff64b93f8f 100644 --- a/drivers/clk/rockchip/clk-rk3528.c +++ b/drivers/clk/rockchip/clk-rk3528.c @@ -10,6 +10,9 @@ #include <linux/of.h> #include <linux/of_device.h> #include <linux/platform_device.h> +#include <linux/mfd/syscon.h> +#include <linux/minmax.h> +#include <linux/slab.h> #include <dt-bindings/clock/rockchip,rk3528-cru.h> @@ -1061,23 +1064,65 @@ static struct rockchip_clk_branch rk3528_clk_branches[] __initdata = { 0, 1, 1), }; +static struct rockchip_clk_branch rk3528_vo_clk_branches[] __initdata = { + MMC_GRF(SCLK_SDMMC_DRV, "sdmmc_drv", "cclk_src_sdmmc0", + RK3528_SDMMC_CON(0), 1, grf_type_vo), + MMC_GRF(SCLK_SDMMC_SAMPLE, "sdmmc_sample", "cclk_src_sdmmc0", + RK3528_SDMMC_CON(1), 1, grf_type_vo), +}; + +static struct rockchip_clk_branch rk3528_vpu_clk_branches[] __initdata = { + MMC_GRF(SCLK_SDIO0_DRV, "sdio0_drv", "cclk_src_sdio0", + RK3528_SDIO0_CON(0), 1, grf_type_vpu), + MMC_GRF(SCLK_SDIO0_SAMPLE, "sdio0_sample", "cclk_src_sdio0", + RK3528_SDIO0_CON(1), 1, grf_type_vpu), + MMC_GRF(SCLK_SDIO1_DRV, "sdio1_drv", "cclk_src_sdio1", + RK3528_SDIO1_CON(0), 1, grf_type_vpu), + 
MMC_GRF(SCLK_SDIO1_SAMPLE, "sdio1_sample", "cclk_src_sdio1", + RK3528_SDIO1_CON(1), 1, grf_type_vpu), +}; + static int __init clk_rk3528_probe(struct platform_device *pdev) { - struct rockchip_clk_provider *ctx; + unsigned long nr_vpu_branches = ARRAY_SIZE(rk3528_vpu_clk_branches); + unsigned long nr_vo_branches = ARRAY_SIZE(rk3528_vo_clk_branches); + unsigned long nr_branches = ARRAY_SIZE(rk3528_clk_branches); + unsigned long nr_clks, nr_vo_clks, nr_vpu_clks; + struct rockchip_aux_grf *vo_grf_e, *vpu_grf_e; + struct regmap *vo_grf, *vpu_grf; struct device *dev = &pdev->dev; struct device_node *np = dev->of_node; - unsigned long nr_branches = ARRAY_SIZE(rk3528_clk_branches); - unsigned long nr_clks; + struct rockchip_clk_provider *ctx; void __iomem *reg_base; - nr_clks = rockchip_clk_find_max_clk_id(rk3528_clk_branches, - nr_branches) + 1; - reg_base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(reg_base)) return dev_err_probe(dev, PTR_ERR(reg_base), "could not map cru region"); + nr_clks = rockchip_clk_find_max_clk_id(rk3528_clk_branches, + nr_branches) + 1; + + vo_grf = syscon_regmap_lookup_by_compatible("rockchip,rk3528-vo-grf"); + if (!IS_ERR(vo_grf)) { + nr_vo_clks = rockchip_clk_find_max_clk_id(rk3528_vo_clk_branches, + nr_vo_branches) + 1; + nr_clks = max(nr_clks, nr_vo_clks); + } else if (PTR_ERR(vo_grf) != -ENODEV) { + return dev_err_probe(dev, PTR_ERR(vo_grf), + "failed to look up VO GRF\n"); + } + + vpu_grf = syscon_regmap_lookup_by_compatible("rockchip,rk3528-vpu-grf"); + if (!IS_ERR(vpu_grf)) { + nr_vpu_clks = rockchip_clk_find_max_clk_id(rk3528_vpu_clk_branches, + nr_vpu_branches) + 1; + nr_clks = max(nr_clks, nr_vpu_clks); + } else if (PTR_ERR(vpu_grf) != -ENODEV) { + return dev_err_probe(dev, PTR_ERR(vpu_grf), + "failed to look up VPU GRF\n"); + } + ctx = rockchip_clk_init(np, reg_base, nr_clks); if (IS_ERR(ctx)) return dev_err_probe(dev, PTR_ERR(ctx), @@ -1092,6 +1137,32 @@ static int __init clk_rk3528_probe(struct platform_device *pdev) ARRAY_SIZE(rk3528_cpuclk_rates)); rockchip_clk_register_branches(ctx, rk3528_clk_branches, nr_branches); + if (!IS_ERR(vo_grf)) { + vo_grf_e = devm_kzalloc(dev, sizeof(*vo_grf_e), GFP_KERNEL); + if (!vo_grf_e) + return -ENOMEM; + + vo_grf_e->grf = vo_grf; + vo_grf_e->type = grf_type_vo; + hash_add(ctx->aux_grf_table, &vo_grf_e->node, grf_type_vo); + + rockchip_clk_register_branches(ctx, rk3528_vo_clk_branches, + nr_vo_branches); + } + + if (!IS_ERR(vpu_grf)) { + vpu_grf_e = devm_kzalloc(dev, sizeof(*vpu_grf_e), GFP_KERNEL); + if (!vpu_grf_e) + return -ENOMEM; + + vpu_grf_e->grf = vpu_grf; + vpu_grf_e->type = grf_type_vpu; + hash_add(ctx->aux_grf_table, &vpu_grf_e->node, grf_type_vpu); + + rockchip_clk_register_branches(ctx, rk3528_vpu_clk_branches, + nr_vpu_branches); + } + rk3528_rst_init(np, reg_base); rockchip_register_restart_notifier(ctx, RK3528_GLB_SRST_FST, NULL); diff --git a/drivers/clk/rockchip/clk-rk3568.c b/drivers/clk/rockchip/clk-rk3568.c index 7d9279291e76..d48ab9d6c064 100644 --- a/drivers/clk/rockchip/clk-rk3568.c +++ b/drivers/clk/rockchip/clk-rk3568.c @@ -89,6 +89,7 @@ static struct rockchip_pll_rate_table rk3568_pll_rates[] = { RK3036_PLL_RATE(96000000, 1, 96, 6, 4, 1, 0), RK3036_PLL_RATE(78750000, 4, 315, 6, 4, 1, 0), RK3036_PLL_RATE(74250000, 2, 99, 4, 4, 1, 0), + RK3036_PLL_RATE(33300000, 4, 111, 5, 4, 1, 0), { /* sentinel */ }, }; @@ -590,7 +591,7 @@ static struct rockchip_clk_branch rk3568_clk_branches[] __initdata = { RK3568_CLKSEL_CON(9), 6, 2, MFLAGS, 0, 5, DFLAGS, RK3568_CLKGATE_CON(4), 0, GFLAGS), 
MUXGRF(CLK_DDR1X, "clk_ddr1x", clk_ddr1x_p, CLK_SET_RATE_PARENT, - RK3568_CLKSEL_CON(9), 15, 1, MFLAGS), + RK3568_CLKSEL_CON(9), 15, 1, MFLAGS, grf_type_sys), COMPOSITE_NOMUX(CLK_MSCH, "clk_msch", "clk_ddr1x", CLK_IGNORE_UNUSED, RK3568_CLKSEL_CON(10), 0, 2, DFLAGS, diff --git a/drivers/clk/rockchip/clk-rk3576.c b/drivers/clk/rockchip/clk-rk3576.c index be703f250197..9bc0ef51ef68 100644 --- a/drivers/clk/rockchip/clk-rk3576.c +++ b/drivers/clk/rockchip/clk-rk3576.c @@ -10,11 +10,13 @@ #include <linux/platform_device.h> #include <linux/syscore_ops.h> #include <linux/mfd/syscon.h> +#include <linux/slab.h> #include <dt-bindings/clock/rockchip,rk3576-cru.h> #include "clk.h" #define RK3576_GRF_SOC_STATUS0 0x600 #define RK3576_PMU0_GRF_OSC_CON6 0x18 +#define RK3576_VCCIO_IOC_MISC_CON0 0x6400 enum rk3576_plls { bpll, lpll, vpll, aupll, cpll, gpll, ppll, @@ -1481,6 +1483,14 @@ static struct rockchip_clk_branch rk3576_clk_branches[] __initdata = { RK3576_CLKGATE_CON(10), 0, GFLAGS), GATE(CLK_SAI0_MCLKOUT, "clk_sai0_mclkout", "mclk_sai0_8ch", 0, RK3576_CLKGATE_CON(10), 1, GFLAGS), + GATE_GRF(CLK_SAI0_MCLKOUT_TO_IO, "mclk_sai0_to_io", "clk_sai0_mclkout", + 0, RK3576_VCCIO_IOC_MISC_CON0, 0, GFLAGS, grf_type_ioc), + GATE_GRF(CLK_SAI1_MCLKOUT_TO_IO, "mclk_sai1_to_io", "clk_sai1_mclkout", + 0, RK3576_VCCIO_IOC_MISC_CON0, 1, GFLAGS, grf_type_ioc), + GATE_GRF(CLK_SAI2_MCLKOUT_TO_IO, "mclk_sai2_to_io", "clk_sai2_mclkout", + 0, RK3576_VCCIO_IOC_MISC_CON0, 2, GFLAGS, grf_type_ioc), + GATE_GRF(CLK_SAI3_MCLKOUT_TO_IO, "mclk_sai3_to_io", "clk_sai3_mclkout", + 0, RK3576_VCCIO_IOC_MISC_CON0, 3, GFLAGS, grf_type_ioc), /* sdgmac */ COMPOSITE_NODIV(HCLK_SDGMAC_ROOT, "hclk_sdgmac_root", mux_200m_100m_50m_24m_p, 0, @@ -1678,13 +1688,13 @@ static struct rockchip_clk_branch rk3576_clk_branches[] __initdata = { /* phy ref */ MUXGRF(CLK_PHY_REF_SRC, "clk_phy_ref_src", clk_phy_ref_src_p, 0, - RK3576_PMU0_GRF_OSC_CON6, 4, 1, MFLAGS), + RK3576_PMU0_GRF_OSC_CON6, 4, 1, MFLAGS, grf_type_pmu0), MUXGRF(CLK_USBPHY_REF_SRC, "clk_usbphy_ref_src", clk_usbphy_ref_src_p, 0, - RK3576_PMU0_GRF_OSC_CON6, 2, 1, MFLAGS), + RK3576_PMU0_GRF_OSC_CON6, 2, 1, MFLAGS, grf_type_pmu0), MUXGRF(CLK_CPLL_REF_SRC, "clk_cpll_ref_src", clk_cpll_ref_src_p, 0, - RK3576_PMU0_GRF_OSC_CON6, 1, 1, MFLAGS), + RK3576_PMU0_GRF_OSC_CON6, 1, 1, MFLAGS, grf_type_pmu0), MUXGRF(CLK_AUPLL_REF_SRC, "clk_aupll_ref_src", clk_aupll_ref_src_p, 0, - RK3576_PMU0_GRF_OSC_CON6, 0, 1, MFLAGS), + RK3576_PMU0_GRF_OSC_CON6, 0, 1, MFLAGS, grf_type_pmu0), /* secure ns */ COMPOSITE_NODIV(ACLK_SECURE_NS, "aclk_secure_ns", mux_350m_175m_116m_24m_p, CLK_IS_CRITICAL, @@ -1727,17 +1737,26 @@ static void __init rk3576_clk_init(struct device_node *np) struct rockchip_clk_provider *ctx; unsigned long clk_nr_clks; void __iomem *reg_base; - struct regmap *grf; + struct rockchip_aux_grf *ioc_grf_e; + struct rockchip_aux_grf *pmu0_grf_e; + struct regmap *ioc_grf; + struct regmap *pmu0_grf; clk_nr_clks = rockchip_clk_find_max_clk_id(rk3576_clk_branches, ARRAY_SIZE(rk3576_clk_branches)) + 1; - grf = syscon_regmap_lookup_by_compatible("rockchip,rk3576-pmu0-grf"); - if (IS_ERR(grf)) { + pmu0_grf = syscon_regmap_lookup_by_compatible("rockchip,rk3576-pmu0-grf"); + if (IS_ERR(pmu0_grf)) { pr_err("%s: could not get PMU0 GRF syscon\n", __func__); return; } + ioc_grf = syscon_regmap_lookup_by_compatible("rockchip,rk3576-ioc-grf"); + if (IS_ERR(ioc_grf)) { + pr_err("%s: could not get IOC GRF syscon\n", __func__); + return; + } + reg_base = of_iomap(np, 0); if (!reg_base) { pr_err("%s: could not map cru 
region\n", __func__); @@ -1747,11 +1766,24 @@ static void __init rk3576_clk_init(struct device_node *np) ctx = rockchip_clk_init(np, reg_base, clk_nr_clks); if (IS_ERR(ctx)) { pr_err("%s: rockchip clk init failed\n", __func__); - iounmap(reg_base); - return; + goto err_unmap; } - ctx->grf = grf; + pmu0_grf_e = kzalloc(sizeof(*pmu0_grf_e), GFP_KERNEL); + if (!pmu0_grf_e) + goto err_unmap; + + pmu0_grf_e->grf = pmu0_grf; + pmu0_grf_e->type = grf_type_pmu0; + hash_add(ctx->aux_grf_table, &pmu0_grf_e->node, grf_type_pmu0); + + ioc_grf_e = kzalloc(sizeof(*ioc_grf_e), GFP_KERNEL); + if (!ioc_grf_e) + goto err_free_pmu0; + + ioc_grf_e->grf = ioc_grf; + ioc_grf_e->type = grf_type_ioc; + hash_add(ctx->aux_grf_table, &ioc_grf_e->node, grf_type_ioc); rockchip_clk_register_plls(ctx, rk3576_pll_clks, ARRAY_SIZE(rk3576_pll_clks), @@ -1774,6 +1806,14 @@ static void __init rk3576_clk_init(struct device_node *np) rockchip_register_restart_notifier(ctx, RK3576_GLB_SRST_FST, NULL); rockchip_clk_of_add_provider(np, ctx); + + return; + +err_free_pmu0: + kfree(pmu0_grf_e); +err_unmap: + iounmap(reg_base); + return; } CLK_OF_DECLARE(rk3576_cru, "rockchip,rk3576-cru", rk3576_clk_init); diff --git a/drivers/clk/rockchip/clk-rk3588.c b/drivers/clk/rockchip/clk-rk3588.c index 4031733def4e..1694223f4f84 100644 --- a/drivers/clk/rockchip/clk-rk3588.c +++ b/drivers/clk/rockchip/clk-rk3588.c @@ -64,6 +64,7 @@ static struct rockchip_pll_rate_table rk3588_pll_rates[] = { RK3588_PLL_RATE(1560000000, 2, 260, 1, 0), RK3588_PLL_RATE(1536000000, 2, 256, 1, 0), RK3588_PLL_RATE(1512000000, 2, 252, 1, 0), + RK3588_PLL_RATE(1500000000, 2, 250, 1, 0), RK3588_PLL_RATE(1488000000, 2, 248, 1, 0), RK3588_PLL_RATE(1464000000, 2, 244, 1, 0), RK3588_PLL_RATE(1440000000, 2, 240, 1, 0), diff --git a/drivers/clk/rockchip/clk-rv1126.c b/drivers/clk/rockchip/clk-rv1126.c index fc19c5522490..15e7bfe84506 100644 --- a/drivers/clk/rockchip/clk-rv1126.c +++ b/drivers/clk/rockchip/clk-rv1126.c @@ -857,7 +857,7 @@ static struct rockchip_clk_branch rv1126_clk_branches[] __initdata = { RV1126_GMAC_CON, 5, 1, MFLAGS), MUXGRF(CLK_GMAC_SRC, "clk_gmac_src", mux_clk_gmac_src_p, CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT, - RV1126_GRF_IOFUNC_CON1, 12, 1, MFLAGS), + RV1126_GRF_IOFUNC_CON1, 12, 1, MFLAGS, grf_type_sys), GATE(CLK_GMAC_REF, "clk_gmac_ref", "clk_gmac_src", 0, RV1126_CLKGATE_CON(20), 7, GFLAGS), diff --git a/drivers/clk/rockchip/clk.c b/drivers/clk/rockchip/clk.c index cbf93ea119a9..19caf26c991b 100644 --- a/drivers/clk/rockchip/clk.c +++ b/drivers/clk/rockchip/clk.c @@ -382,6 +382,8 @@ static struct rockchip_clk_provider *rockchip_clk_init_base( ctx->cru_node = np; spin_lock_init(&ctx->lock); + hash_init(ctx->aux_grf_table); + ctx->grf = syscon_regmap_lookup_by_phandle(ctx->cru_node, "rockchip,grf"); @@ -496,6 +498,8 @@ void rockchip_clk_register_branches(struct rockchip_clk_provider *ctx, struct rockchip_clk_branch *list, unsigned int nr_clk) { + struct regmap *grf = ctx->grf; + struct rockchip_aux_grf *agrf; struct clk *clk; unsigned int idx; unsigned long flags; @@ -504,6 +508,19 @@ void rockchip_clk_register_branches(struct rockchip_clk_provider *ctx, flags = list->flags; clk = NULL; + /* for GRF-dependent branches, choose the right grf first */ + if ((list->branch_type == branch_grf_mux || + list->branch_type == branch_grf_gate || + list->branch_type == branch_grf_mmc) && + list->grf_type != grf_type_sys) { + hash_for_each_possible(ctx->aux_grf_table, agrf, node, list->grf_type) { + if (agrf->type == list->grf_type) { + grf = agrf->grf; 
+ break; + } + } + } + /* catch simple muxes */ switch (list->branch_type) { case branch_mux: @@ -523,10 +540,10 @@ void rockchip_clk_register_branches(struct rockchip_clk_provider *ctx, list->mux_shift, list->mux_width, list->mux_flags, &ctx->lock); break; - case branch_muxgrf: + case branch_grf_mux: clk = rockchip_clk_register_muxgrf(list->name, list->parent_names, list->num_parents, - flags, ctx->grf, list->muxdiv_offset, + flags, grf, list->muxdiv_offset, list->mux_shift, list->mux_width, list->mux_flags); break; @@ -573,6 +590,13 @@ void rockchip_clk_register_branches(struct rockchip_clk_provider *ctx, ctx->reg_base + list->gate_offset, list->gate_shift, list->gate_flags, &ctx->lock); break; + case branch_grf_gate: + flags |= CLK_SET_RATE_PARENT; + clk = rockchip_clk_register_gate_grf(list->name, + list->parent_names[0], flags, grf, + list->gate_offset, list->gate_shift, + list->gate_flags); + break; case branch_composite: clk = rockchip_clk_register_branch(list->name, list->parent_names, list->num_parents, @@ -590,6 +614,16 @@ void rockchip_clk_register_branches(struct rockchip_clk_provider *ctx, list->name, list->parent_names, list->num_parents, ctx->reg_base + list->muxdiv_offset, + NULL, 0, + list->div_shift + ); + break; + case branch_grf_mmc: + clk = rockchip_clk_register_mmc( + list->name, + list->parent_names, list->num_parents, + NULL, + grf, list->muxdiv_offset, list->div_shift ); break; diff --git a/drivers/clk/rockchip/clk.h b/drivers/clk/rockchip/clk.h index df2b2d706450..1e9c3c0d31e3 100644 --- a/drivers/clk/rockchip/clk.h +++ b/drivers/clk/rockchip/clk.h @@ -19,6 +19,7 @@ #include <linux/io.h> #include <linux/clk-provider.h> +#include <linux/hashtable.h> struct clk; @@ -217,6 +218,9 @@ struct clk; #define RK3528_CLKSEL_CON(x) ((x) * 0x4 + 0x300) #define RK3528_CLKGATE_CON(x) ((x) * 0x4 + 0x800) #define RK3528_SOFTRST_CON(x) ((x) * 0x4 + 0xa00) +#define RK3528_SDMMC_CON(x) ((x) * 0x4 + 0x24) +#define RK3528_SDIO0_CON(x) ((x) * 0x4 + 0x4) +#define RK3528_SDIO1_CON(x) ((x) * 0x4 + 0xc) #define RK3528_PMU_CLKSEL_CON(x) ((x) * 0x4 + 0x300 + RK3528_PMU_CRU_BASE) #define RK3528_PMU_CLKGATE_CON(x) ((x) * 0x4 + 0x800 + RK3528_PMU_CRU_BASE) #define RK3528_PCIE_CLKSEL_CON(x) ((x) * 0x4 + 0x300 + RK3528_PCIE_CRU_BASE) @@ -440,12 +444,37 @@ enum rockchip_pll_type { .k = _k, \ } +enum rockchip_grf_type { + grf_type_sys = 0, + grf_type_pmu0, + grf_type_pmu1, + grf_type_ioc, + grf_type_vo, + grf_type_vpu, +}; + +/* ceil(sqrt(enums in rockchip_grf_type - 1)) */ +#define GRF_HASH_ORDER 2 + +/** + * struct rockchip_aux_grf - entry for the aux_grf_table hashtable + * @grf: pointer to the grf this entry references + * @type: what type of GRF this is + * @node: hlist node + */ +struct rockchip_aux_grf { + struct regmap *grf; + enum rockchip_grf_type type; + struct hlist_node node; +}; + /** * struct rockchip_clk_provider - information about clock provider * @reg_base: virtual address for the register base. * @clk_data: holds clock related data like clk* and number of clocks. * @cru_node: device-node of the clock-provider * @grf: regmap of the general-register-files syscon + * @aux_grf_table: hashtable of auxiliary GRF regmaps, indexed by grf_type * @lock: maintains exclusion between callbacks for a given clock-provider. 
*/ struct rockchip_clk_provider { @@ -453,6 +482,7 @@ struct rockchip_clk_provider { struct clk_onecell_data clk_data; struct device_node *cru_node; struct regmap *grf; + DECLARE_HASHTABLE(aux_grf_table, GRF_HASH_ORDER); spinlock_t lock; }; @@ -594,7 +624,9 @@ struct clk *rockchip_clk_register_cpuclk(const char *name, struct clk *rockchip_clk_register_mmc(const char *name, const char *const *parent_names, u8 num_parents, - void __iomem *reg, int shift); + void __iomem *reg, + struct regmap *grf, int grf_reg, + int shift); /* * DDRCLK flags, including method of setting the rate @@ -622,17 +654,24 @@ struct clk *rockchip_clk_register_muxgrf(const char *name, int flags, struct regmap *grf, int reg, int shift, int width, int mux_flags); +struct clk *rockchip_clk_register_gate_grf(const char *name, + const char *parent_name, unsigned long flags, + struct regmap *regmap, unsigned int reg, + unsigned int shift, u8 gate_flags); + #define PNAME(x) static const char *const x[] __initconst enum rockchip_clk_branch_type { branch_composite, branch_mux, - branch_muxgrf, + branch_grf_mux, branch_divider, branch_fraction_divider, branch_gate, + branch_grf_gate, branch_linked_gate, branch_mmc, + branch_grf_mmc, branch_inverter, branch_factor, branch_ddrclk, @@ -660,6 +699,7 @@ struct rockchip_clk_branch { u8 gate_shift; u8 gate_flags; unsigned int linked_clk_id; + enum rockchip_grf_type grf_type; struct rockchip_clk_branch *child; }; @@ -900,10 +940,10 @@ struct rockchip_clk_branch { .mux_table = mt, \ } -#define MUXGRF(_id, cname, pnames, f, o, s, w, mf) \ +#define MUXGRF(_id, cname, pnames, f, o, s, w, mf, gt) \ { \ .id = _id, \ - .branch_type = branch_muxgrf, \ + .branch_type = branch_grf_mux, \ .name = cname, \ .parent_names = pnames, \ .num_parents = ARRAY_SIZE(pnames), \ @@ -913,6 +953,7 @@ struct rockchip_clk_branch { .mux_width = w, \ .mux_flags = mf, \ .gate_offset = -1, \ + .grf_type = gt, \ } #define DIV(_id, cname, pname, f, o, s, w, df) \ @@ -958,6 +999,20 @@ struct rockchip_clk_branch { .gate_flags = gf, \ } +#define GATE_GRF(_id, cname, pname, f, o, b, gf, gt) \ + { \ + .id = _id, \ + .branch_type = branch_grf_gate, \ + .name = cname, \ + .parent_names = (const char *[]){ pname }, \ + .num_parents = 1, \ + .flags = f, \ + .gate_offset = o, \ + .gate_shift = b, \ + .gate_flags = gf, \ + .grf_type = gt, \ + } + #define GATE_LINK(_id, cname, pname, linkedclk, f, o, b, gf) \ { \ .id = _id, \ @@ -983,6 +1038,18 @@ struct rockchip_clk_branch { .div_shift = shift, \ } +#define MMC_GRF(_id, cname, pname, offset, shift, grftype) \ + { \ + .id = _id, \ + .branch_type = branch_grf_mmc, \ + .name = cname, \ + .parent_names = (const char *[]){ pname }, \ + .num_parents = 1, \ + .muxdiv_offset = offset, \ + .div_shift = shift, \ + .grf_type = grftype, \ + } + #define INVERTER(_id, cname, pname, io, is, if) \ { \ .id = _id, \ diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c index 374c26e5d9fd..cc5c1644c41c 100644 --- a/drivers/clk/samsung/clk-exynos4.c +++ b/drivers/clk/samsung/clk-exynos4.c @@ -1269,6 +1269,45 @@ static const struct samsung_cpu_clock exynos4412_cpu_clks[] __initconst = { CPUCLK_LAYOUT_E4210, e4412_armclk_d), }; +static const struct samsung_cmu_info cmu_info_exynos4 __initconst = { + .mux_clks = exynos4_mux_clks, + .nr_mux_clks = ARRAY_SIZE(exynos4_mux_clks), + .div_clks = exynos4_div_clks, + .nr_div_clks = ARRAY_SIZE(exynos4_div_clks), + .gate_clks = exynos4_gate_clks, + .nr_gate_clks = ARRAY_SIZE(exynos4_gate_clks), + .fixed_factor_clks = 
exynos4_fixed_factor_clks, + .nr_fixed_factor_clks = ARRAY_SIZE(exynos4_fixed_factor_clks), + .fixed_clks = exynos4_fixed_rate_clks, + .nr_fixed_clks = ARRAY_SIZE(exynos4_fixed_rate_clks), +}; + +static const struct samsung_cmu_info cmu_info_exynos4210 __initconst = { + .mux_clks = exynos4210_mux_clks, + .nr_mux_clks = ARRAY_SIZE(exynos4210_mux_clks), + .div_clks = exynos4210_div_clks, + .nr_div_clks = ARRAY_SIZE(exynos4210_div_clks), + .gate_clks = exynos4210_gate_clks, + .nr_gate_clks = ARRAY_SIZE(exynos4210_gate_clks), + .fixed_factor_clks = exynos4210_fixed_factor_clks, + .nr_fixed_factor_clks = ARRAY_SIZE(exynos4210_fixed_factor_clks), + .fixed_clks = exynos4210_fixed_rate_clks, + .nr_fixed_clks = ARRAY_SIZE(exynos4210_fixed_rate_clks), + .cpu_clks = exynos4210_cpu_clks, + .nr_cpu_clks = ARRAY_SIZE(exynos4210_cpu_clks), +}; + +static const struct samsung_cmu_info cmu_info_exynos4x12 __initconst = { + .mux_clks = exynos4x12_mux_clks, + .nr_mux_clks = ARRAY_SIZE(exynos4x12_mux_clks), + .div_clks = exynos4x12_div_clks, + .nr_div_clks = ARRAY_SIZE(exynos4x12_div_clks), + .gate_clks = exynos4x12_gate_clks, + .nr_gate_clks = ARRAY_SIZE(exynos4x12_gate_clks), + .fixed_factor_clks = exynos4x12_fixed_factor_clks, + .nr_fixed_factor_clks = ARRAY_SIZE(exynos4x12_fixed_factor_clks), +}; + /* register exynos4 clocks */ static void __init exynos4_clk_init(struct device_node *np, enum exynos4_soc soc) @@ -1322,41 +1361,12 @@ static void __init exynos4_clk_init(struct device_node *np, ARRAY_SIZE(exynos4x12_plls)); } - samsung_clk_register_fixed_rate(ctx, exynos4_fixed_rate_clks, - ARRAY_SIZE(exynos4_fixed_rate_clks)); - samsung_clk_register_mux(ctx, exynos4_mux_clks, - ARRAY_SIZE(exynos4_mux_clks)); - samsung_clk_register_div(ctx, exynos4_div_clks, - ARRAY_SIZE(exynos4_div_clks)); - samsung_clk_register_gate(ctx, exynos4_gate_clks, - ARRAY_SIZE(exynos4_gate_clks)); - samsung_clk_register_fixed_factor(ctx, exynos4_fixed_factor_clks, - ARRAY_SIZE(exynos4_fixed_factor_clks)); + samsung_cmu_register_clocks(ctx, &cmu_info_exynos4); if (exynos4_soc == EXYNOS4210) { - samsung_clk_register_fixed_rate(ctx, exynos4210_fixed_rate_clks, - ARRAY_SIZE(exynos4210_fixed_rate_clks)); - samsung_clk_register_mux(ctx, exynos4210_mux_clks, - ARRAY_SIZE(exynos4210_mux_clks)); - samsung_clk_register_div(ctx, exynos4210_div_clks, - ARRAY_SIZE(exynos4210_div_clks)); - samsung_clk_register_gate(ctx, exynos4210_gate_clks, - ARRAY_SIZE(exynos4210_gate_clks)); - samsung_clk_register_fixed_factor(ctx, - exynos4210_fixed_factor_clks, - ARRAY_SIZE(exynos4210_fixed_factor_clks)); - samsung_clk_register_cpu(ctx, exynos4210_cpu_clks, - ARRAY_SIZE(exynos4210_cpu_clks)); + samsung_cmu_register_clocks(ctx, &cmu_info_exynos4210); } else { - samsung_clk_register_mux(ctx, exynos4x12_mux_clks, - ARRAY_SIZE(exynos4x12_mux_clks)); - samsung_clk_register_div(ctx, exynos4x12_div_clks, - ARRAY_SIZE(exynos4x12_div_clks)); - samsung_clk_register_gate(ctx, exynos4x12_gate_clks, - ARRAY_SIZE(exynos4x12_gate_clks)); - samsung_clk_register_fixed_factor(ctx, - exynos4x12_fixed_factor_clks, - ARRAY_SIZE(exynos4x12_fixed_factor_clks)); + samsung_cmu_register_clocks(ctx, &cmu_info_exynos4x12); if (soc == EXYNOS4412) samsung_clk_register_cpu(ctx, exynos4412_cpu_clks, ARRAY_SIZE(exynos4412_cpu_clks)); diff --git a/drivers/clk/samsung/clk-exynosautov920.c b/drivers/clk/samsung/clk-exynosautov920.c index dc8d4240f6de..da4afe8ac2ab 100644 --- a/drivers/clk/samsung/clk-exynosautov920.c +++ b/drivers/clk/samsung/clk-exynosautov920.c @@ -18,6 +18,9 @@ /* NOTE: 
Must be equal to the last clock ID increased by one */ #define CLKS_NR_TOP (DOUT_CLKCMU_TAA_NOC + 1) +#define CLKS_NR_CPUCL0 (CLK_DOUT_CPUCL0_NOCP + 1) +#define CLKS_NR_CPUCL1 (CLK_DOUT_CPUCL1_NOCP + 1) +#define CLKS_NR_CPUCL2 (CLK_DOUT_CPUCL2_NOCP + 1) #define CLKS_NR_PERIC0 (CLK_DOUT_PERIC0_I3C + 1) #define CLKS_NR_PERIC1 (CLK_DOUT_PERIC1_I3C + 1) #define CLKS_NR_MISC (CLK_DOUT_MISC_OSC_DIV2 + 1) @@ -1005,6 +1008,339 @@ static void __init exynosautov920_cmu_top_init(struct device_node *np) CLK_OF_DECLARE(exynosautov920_cmu_top, "samsung,exynosautov920-cmu-top", exynosautov920_cmu_top_init); +/* ---- CMU_CPUCL0 --------------------------------------------------------- */ + +/* Register Offset definitions for CMU_CPUCL0 (0x1EC00000) */ +#define PLL_LOCKTIME_PLL_CPUCL0 0x0000 +#define PLL_CON0_PLL_CPUCL0 0x0100 +#define PLL_CON1_PLL_CPUCL0 0x0104 +#define PLL_CON3_PLL_CPUCL0 0x010c +#define PLL_CON0_MUX_CLKCMU_CPUCL0_CLUSTER_USER 0x0600 +#define PLL_CON0_MUX_CLKCMU_CPUCL0_DBG_USER 0x0610 +#define PLL_CON0_MUX_CLKCMU_CPUCL0_SWITCH_USER 0x0620 + +#define CLK_CON_MUX_MUX_CLK_CPUCL0_CLUSTER 0x1000 +#define CLK_CON_MUX_MUX_CLK_CPUCL0_CORE 0x1004 + +#define CLK_CON_DIV_DIV_CLK_CLUSTER0_ACLK 0x1800 +#define CLK_CON_DIV_DIV_CLK_CLUSTER0_ATCLK 0x1804 +#define CLK_CON_DIV_DIV_CLK_CLUSTER0_MPCLK 0x1808 +#define CLK_CON_DIV_DIV_CLK_CLUSTER0_PCLK 0x180c +#define CLK_CON_DIV_DIV_CLK_CLUSTER0_PERIPHCLK 0x1810 +#define CLK_CON_DIV_DIV_CLK_CPUCL0_DBG_NOC 0x181c +#define CLK_CON_DIV_DIV_CLK_CPUCL0_DBG_PCLKDBG 0x1820 +#define CLK_CON_DIV_DIV_CLK_CPUCL0_NOCP 0x1824 + +static const unsigned long cpucl0_clk_regs[] __initconst = { + PLL_LOCKTIME_PLL_CPUCL0, + PLL_CON0_PLL_CPUCL0, + PLL_CON1_PLL_CPUCL0, + PLL_CON3_PLL_CPUCL0, + PLL_CON0_MUX_CLKCMU_CPUCL0_CLUSTER_USER, + PLL_CON0_MUX_CLKCMU_CPUCL0_DBG_USER, + PLL_CON0_MUX_CLKCMU_CPUCL0_SWITCH_USER, + CLK_CON_MUX_MUX_CLK_CPUCL0_CLUSTER, + CLK_CON_MUX_MUX_CLK_CPUCL0_CORE, + CLK_CON_DIV_DIV_CLK_CLUSTER0_ACLK, + CLK_CON_DIV_DIV_CLK_CLUSTER0_ATCLK, + CLK_CON_DIV_DIV_CLK_CLUSTER0_MPCLK, + CLK_CON_DIV_DIV_CLK_CLUSTER0_PCLK, + CLK_CON_DIV_DIV_CLK_CLUSTER0_PERIPHCLK, + CLK_CON_DIV_DIV_CLK_CPUCL0_DBG_NOC, + CLK_CON_DIV_DIV_CLK_CPUCL0_DBG_PCLKDBG, + CLK_CON_DIV_DIV_CLK_CPUCL0_NOCP, +}; + +/* List of parent clocks for Muxes in CMU_CPUCL0 */ +PNAME(mout_pll_cpucl0_p) = { "oscclk", "fout_cpucl0_pll" }; +PNAME(mout_cpucl0_cluster_user_p) = { "oscclk", "dout_clkcmu_cpucl0_cluster" }; +PNAME(mout_cpucl0_dbg_user_p) = { "oscclk", "dout_clkcmu_cpucl0_dbg" }; +PNAME(mout_cpucl0_switch_user_p) = { "oscclk", "dout_clkcmu_cpucl0_switch" }; +PNAME(mout_cpucl0_cluster_p) = { "oscclk", "mout_cpucl0_cluster_user", + "mout_cpucl0_switch_user"}; +PNAME(mout_cpucl0_core_p) = { "oscclk", "mout_pll_cpucl0", + "mout_cpucl0_switch_user"}; + +static const struct samsung_pll_rate_table cpu_pll_rates[] __initconst = { + PLL_35XX_RATE(38400000U, 2400000000U, 250, 4, 0), + PLL_35XX_RATE(38400000U, 2304000000U, 240, 4, 0), + PLL_35XX_RATE(38400000U, 2208000000U, 230, 4, 0), + PLL_35XX_RATE(38400000U, 2112000000U, 220, 4, 0), + PLL_35XX_RATE(38400000U, 2016000000U, 210, 4, 0), + PLL_35XX_RATE(38400000U, 1824000000U, 190, 4, 0), + PLL_35XX_RATE(38400000U, 1680000000U, 175, 4, 0), + PLL_35XX_RATE(38400000U, 1344000000U, 140, 4, 0), + PLL_35XX_RATE(38400000U, 1152000000U, 120, 4, 0), + PLL_35XX_RATE(38400000U, 576000000U, 120, 4, 1), + PLL_35XX_RATE(38400000U, 288000000U, 120, 4, 2), +}; + +static const struct samsung_pll_clock cpucl0_pll_clks[] __initconst = { + /* CMU_CPUCL0_PURECLKCOMP */ + PLL(pll_531x, 
CLK_FOUT_CPUCL0_PLL, "fout_cpucl0_pll", "oscclk", + PLL_LOCKTIME_PLL_CPUCL0, PLL_CON3_PLL_CPUCL0, cpu_pll_rates), +}; + +static const struct samsung_mux_clock cpucl0_mux_clks[] __initconst = { + MUX(CLK_MOUT_PLL_CPUCL0, "mout_pll_cpucl0", mout_pll_cpucl0_p, + PLL_CON0_PLL_CPUCL0, 4, 1), + MUX(CLK_MOUT_CPUCL0_CLUSTER_USER, "mout_cpucl0_cluster_user", mout_cpucl0_cluster_user_p, + PLL_CON0_MUX_CLKCMU_CPUCL0_CLUSTER_USER, 4, 1), + MUX(CLK_MOUT_CPUCL0_DBG_USER, "mout_cpucl0_dbg_user", mout_cpucl0_dbg_user_p, + PLL_CON0_MUX_CLKCMU_CPUCL0_DBG_USER, 4, 1), + MUX(CLK_MOUT_CPUCL0_SWITCH_USER, "mout_cpucl0_switch_user", mout_cpucl0_switch_user_p, + PLL_CON0_MUX_CLKCMU_CPUCL0_SWITCH_USER, 4, 1), + MUX(CLK_MOUT_CPUCL0_CLUSTER, "mout_cpucl0_cluster", mout_cpucl0_cluster_p, + CLK_CON_MUX_MUX_CLK_CPUCL0_CLUSTER, 0, 2), + MUX(CLK_MOUT_CPUCL0_CORE, "mout_cpucl0_core", mout_cpucl0_core_p, + CLK_CON_MUX_MUX_CLK_CPUCL0_CORE, 0, 2), +}; + +static const struct samsung_div_clock cpucl0_div_clks[] __initconst = { + DIV(CLK_DOUT_CLUSTER0_ACLK, "dout_cluster0_aclk", + "mout_cpucl0_cluster", CLK_CON_DIV_DIV_CLK_CLUSTER0_ACLK, 0, 4), + DIV(CLK_DOUT_CLUSTER0_ATCLK, "dout_cluster0_atclk", + "mout_cpucl0_cluster", CLK_CON_DIV_DIV_CLK_CLUSTER0_ATCLK, 0, 4), + DIV(CLK_DOUT_CLUSTER0_MPCLK, "dout_cluster0_mpclk", + "mout_cpucl0_cluster", CLK_CON_DIV_DIV_CLK_CLUSTER0_MPCLK, 0, 4), + DIV(CLK_DOUT_CLUSTER0_PCLK, "dout_cluster0_pclk", + "mout_cpucl0_cluster", CLK_CON_DIV_DIV_CLK_CLUSTER0_PCLK, 0, 4), + DIV(CLK_DOUT_CLUSTER0_PERIPHCLK, "dout_cluster0_periphclk", + "mout_cpucl0_cluster", CLK_CON_DIV_DIV_CLK_CLUSTER0_PERIPHCLK, 0, 4), + DIV(CLK_DOUT_CPUCL0_DBG_NOC, "dout_cpucl0_dbg_noc", + "mout_cpucl0_dbg_user", CLK_CON_DIV_DIV_CLK_CPUCL0_DBG_NOC, 0, 3), + DIV(CLK_DOUT_CPUCL0_DBG_PCLKDBG, "dout_cpucl0_dbg_pclkdbg", + "mout_cpucl0_dbg_user", CLK_CON_DIV_DIV_CLK_CPUCL0_DBG_PCLKDBG, 0, 3), + DIV(CLK_DOUT_CPUCL0_NOCP, "dout_cpucl0_nocp", + "mout_cpucl0_cluster", CLK_CON_DIV_DIV_CLK_CPUCL0_NOCP, 0, 4), +}; + +static const struct samsung_cmu_info cpucl0_cmu_info __initconst = { + .pll_clks = cpucl0_pll_clks, + .nr_pll_clks = ARRAY_SIZE(cpucl0_pll_clks), + .mux_clks = cpucl0_mux_clks, + .nr_mux_clks = ARRAY_SIZE(cpucl0_mux_clks), + .div_clks = cpucl0_div_clks, + .nr_div_clks = ARRAY_SIZE(cpucl0_div_clks), + .nr_clk_ids = CLKS_NR_CPUCL0, + .clk_regs = cpucl0_clk_regs, + .nr_clk_regs = ARRAY_SIZE(cpucl0_clk_regs), + .clk_name = "cpucl0", +}; + +static void __init exynosautov920_cmu_cpucl0_init(struct device_node *np) +{ + exynos_arm64_register_cmu(NULL, np, &cpucl0_cmu_info); +} + +/* Register CMU_CPUCL0 early, as CPU clocks should be available ASAP */ +CLK_OF_DECLARE(exynosautov920_cmu_cpucl0, "samsung,exynosautov920-cmu-cpucl0", + exynosautov920_cmu_cpucl0_init); + +/* ---- CMU_CPUCL1 --------------------------------------------------------- */ + +/* Register Offset definitions for CMU_CPUCL1 (0x1ED00000) */ +#define PLL_LOCKTIME_PLL_CPUCL1 0x0000 +#define PLL_CON0_PLL_CPUCL1 0x0100 +#define PLL_CON1_PLL_CPUCL1 0x0104 +#define PLL_CON3_PLL_CPUCL1 0x010c +#define PLL_CON0_MUX_CLKCMU_CPUCL1_CLUSTER_USER 0x0600 +#define PLL_CON0_MUX_CLKCMU_CPUCL1_SWITCH_USER 0x0610 + +#define CLK_CON_MUX_MUX_CLK_CPUCL1_CLUSTER 0x1000 +#define CLK_CON_MUX_MUX_CLK_CPUCL1_CORE 0x1004 + +#define CLK_CON_DIV_DIV_CLK_CLUSTER1_ACLK 0x1800 +#define CLK_CON_DIV_DIV_CLK_CLUSTER1_ATCLK 0x1804 +#define CLK_CON_DIV_DIV_CLK_CLUSTER1_MPCLK 0x1808 +#define CLK_CON_DIV_DIV_CLK_CLUSTER1_PCLK 0x180c +#define CLK_CON_DIV_DIV_CLK_CLUSTER1_PERIPHCLK 0x1810 +#define 
CLK_CON_DIV_DIV_CLK_CPUCL1_NOCP 0x181c + +static const unsigned long cpucl1_clk_regs[] __initconst = { + PLL_LOCKTIME_PLL_CPUCL1, + PLL_CON0_PLL_CPUCL1, + PLL_CON1_PLL_CPUCL1, + PLL_CON3_PLL_CPUCL1, + PLL_CON0_MUX_CLKCMU_CPUCL1_CLUSTER_USER, + PLL_CON0_MUX_CLKCMU_CPUCL1_SWITCH_USER, + CLK_CON_MUX_MUX_CLK_CPUCL1_CLUSTER, + CLK_CON_MUX_MUX_CLK_CPUCL1_CORE, + CLK_CON_DIV_DIV_CLK_CLUSTER1_ACLK, + CLK_CON_DIV_DIV_CLK_CLUSTER1_ATCLK, + CLK_CON_DIV_DIV_CLK_CLUSTER1_MPCLK, + CLK_CON_DIV_DIV_CLK_CLUSTER1_PCLK, + CLK_CON_DIV_DIV_CLK_CLUSTER1_PERIPHCLK, + CLK_CON_DIV_DIV_CLK_CPUCL1_NOCP, +}; + +/* List of parent clocks for Muxes in CMU_CPUCL1 */ +PNAME(mout_pll_cpucl1_p) = { "oscclk", "fout_cpucl1_pll" }; +PNAME(mout_cpucl1_cluster_user_p) = { "oscclk", "dout_clkcmu_cpucl1_cluster" }; +PNAME(mout_cpucl1_switch_user_p) = { "oscclk", "dout_clkcmu_cpucl1_switch" }; +PNAME(mout_cpucl1_cluster_p) = { "oscclk", "mout_cpucl1_cluster_user", + "mout_cpucl1_switch_user"}; +PNAME(mout_cpucl1_core_p) = { "oscclk", "mout_pll_cpucl1", + "mout_cpucl1_switch_user"}; + +static const struct samsung_pll_clock cpucl1_pll_clks[] __initconst = { + /* CMU_CPUCL1_PURECLKCOMP */ + PLL(pll_531x, CLK_FOUT_CPUCL1_PLL, "fout_cpucl1_pll", "oscclk", + PLL_LOCKTIME_PLL_CPUCL1, PLL_CON3_PLL_CPUCL1, cpu_pll_rates), +}; + +static const struct samsung_mux_clock cpucl1_mux_clks[] __initconst = { + MUX(CLK_MOUT_PLL_CPUCL1, "mout_pll_cpucl1", mout_pll_cpucl1_p, + PLL_CON0_PLL_CPUCL1, 4, 1), + MUX(CLK_MOUT_CPUCL1_CLUSTER_USER, "mout_cpucl1_cluster_user", mout_cpucl1_cluster_user_p, + PLL_CON0_MUX_CLKCMU_CPUCL1_CLUSTER_USER, 4, 1), + MUX(CLK_MOUT_CPUCL1_SWITCH_USER, "mout_cpucl1_switch_user", mout_cpucl1_switch_user_p, + PLL_CON0_MUX_CLKCMU_CPUCL1_SWITCH_USER, 4, 1), + MUX(CLK_MOUT_CPUCL1_CLUSTER, "mout_cpucl1_cluster", mout_cpucl1_cluster_p, + CLK_CON_MUX_MUX_CLK_CPUCL1_CLUSTER, 0, 2), + MUX(CLK_MOUT_CPUCL1_CORE, "mout_cpucl1_core", mout_cpucl1_core_p, + CLK_CON_MUX_MUX_CLK_CPUCL1_CORE, 0, 2), +}; + +static const struct samsung_div_clock cpucl1_div_clks[] __initconst = { + DIV(CLK_DOUT_CLUSTER1_ACLK, "dout_cluster1_aclk", + "mout_cpucl1_cluster", CLK_CON_DIV_DIV_CLK_CLUSTER1_ACLK, 0, 4), + DIV(CLK_DOUT_CLUSTER1_ATCLK, "dout_cluster1_atclk", + "mout_cpucl1_cluster", CLK_CON_DIV_DIV_CLK_CLUSTER1_ATCLK, 0, 4), + DIV(CLK_DOUT_CLUSTER1_MPCLK, "dout_cluster1_mpclk", + "mout_cpucl1_cluster", CLK_CON_DIV_DIV_CLK_CLUSTER1_MPCLK, 0, 4), + DIV(CLK_DOUT_CLUSTER1_PCLK, "dout_cluster1_pclk", + "mout_cpucl1_cluster", CLK_CON_DIV_DIV_CLK_CLUSTER1_PCLK, 0, 4), + DIV(CLK_DOUT_CLUSTER1_PERIPHCLK, "dout_cluster1_periphclk", + "mout_cpucl1_cluster", CLK_CON_DIV_DIV_CLK_CLUSTER1_PERIPHCLK, 0, 4), + DIV(CLK_DOUT_CPUCL1_NOCP, "dout_cpucl1_nocp", + "mout_cpucl1_cluster", CLK_CON_DIV_DIV_CLK_CPUCL1_NOCP, 0, 4), +}; + +static const struct samsung_cmu_info cpucl1_cmu_info __initconst = { + .pll_clks = cpucl1_pll_clks, + .nr_pll_clks = ARRAY_SIZE(cpucl1_pll_clks), + .mux_clks = cpucl1_mux_clks, + .nr_mux_clks = ARRAY_SIZE(cpucl1_mux_clks), + .div_clks = cpucl1_div_clks, + .nr_div_clks = ARRAY_SIZE(cpucl1_div_clks), + .nr_clk_ids = CLKS_NR_CPUCL1, + .clk_regs = cpucl1_clk_regs, + .nr_clk_regs = ARRAY_SIZE(cpucl1_clk_regs), + .clk_name = "cpucl1", +}; + +static void __init exynosautov920_cmu_cpucl1_init(struct device_node *np) +{ + exynos_arm64_register_cmu(NULL, np, &cpucl1_cmu_info); +} + +/* Register CMU_CPUCL1 early, as CPU clocks should be available ASAP */ +CLK_OF_DECLARE(exynosautov920_cmu_cpucl1, "samsung,exynosautov920-cmu-cpucl1", + 
exynosautov920_cmu_cpucl1_init); + +/* ---- CMU_CPUCL2 --------------------------------------------------------- */ + +/* Register Offset definitions for CMU_CPUCL2 (0x1EE00000) */ +#define PLL_LOCKTIME_PLL_CPUCL2 0x0000 +#define PLL_CON0_PLL_CPUCL2 0x0100 +#define PLL_CON1_PLL_CPUCL2 0x0104 +#define PLL_CON3_PLL_CPUCL2 0x010c +#define PLL_CON0_MUX_CLKCMU_CPUCL2_CLUSTER_USER 0x0600 +#define PLL_CON0_MUX_CLKCMU_CPUCL2_SWITCH_USER 0x0610 + +#define CLK_CON_MUX_MUX_CLK_CPUCL2_CLUSTER 0x1000 +#define CLK_CON_MUX_MUX_CLK_CPUCL2_CORE 0x1004 + +#define CLK_CON_DIV_DIV_CLK_CLUSTER2_ACLK 0x1800 +#define CLK_CON_DIV_DIV_CLK_CLUSTER2_ATCLK 0x1804 +#define CLK_CON_DIV_DIV_CLK_CLUSTER2_MPCLK 0x1808 +#define CLK_CON_DIV_DIV_CLK_CLUSTER2_PCLK 0x180c +#define CLK_CON_DIV_DIV_CLK_CLUSTER2_PERIPHCLK 0x1810 +#define CLK_CON_DIV_DIV_CLK_CPUCL2_NOCP 0x181c + +static const unsigned long cpucl2_clk_regs[] __initconst = { + PLL_LOCKTIME_PLL_CPUCL2, + PLL_CON0_PLL_CPUCL2, + PLL_CON1_PLL_CPUCL2, + PLL_CON3_PLL_CPUCL2, + PLL_CON0_MUX_CLKCMU_CPUCL2_CLUSTER_USER, + PLL_CON0_MUX_CLKCMU_CPUCL2_SWITCH_USER, + CLK_CON_MUX_MUX_CLK_CPUCL2_CLUSTER, + CLK_CON_MUX_MUX_CLK_CPUCL2_CORE, + CLK_CON_DIV_DIV_CLK_CLUSTER2_ACLK, + CLK_CON_DIV_DIV_CLK_CLUSTER2_ATCLK, + CLK_CON_DIV_DIV_CLK_CLUSTER2_MPCLK, + CLK_CON_DIV_DIV_CLK_CLUSTER2_PCLK, + CLK_CON_DIV_DIV_CLK_CLUSTER2_PERIPHCLK, + CLK_CON_DIV_DIV_CLK_CPUCL2_NOCP, +}; + +/* List of parent clocks for Muxes in CMU_CPUCL2 */ +PNAME(mout_pll_cpucl2_p) = { "oscclk", "fout_cpucl2_pll" }; +PNAME(mout_cpucl2_cluster_user_p) = { "oscclk", "dout_clkcmu_cpucl2_cluster" }; +PNAME(mout_cpucl2_switch_user_p) = { "oscclk", "dout_clkcmu_cpucl2_switch" }; +PNAME(mout_cpucl2_cluster_p) = { "oscclk", "mout_cpucl2_cluster_user", + "mout_cpucl2_switch_user"}; +PNAME(mout_cpucl2_core_p) = { "oscclk", "mout_pll_cpucl2", + "mout_cpucl2_switch_user"}; + +static const struct samsung_pll_clock cpucl2_pll_clks[] __initconst = { + /* CMU_CPUCL2_PURECLKCOMP */ + PLL(pll_531x, CLK_FOUT_CPUCL2_PLL, "fout_cpucl2_pll", "oscclk", + PLL_LOCKTIME_PLL_CPUCL2, PLL_CON3_PLL_CPUCL2, cpu_pll_rates), +}; + +static const struct samsung_mux_clock cpucl2_mux_clks[] __initconst = { + MUX(CLK_MOUT_PLL_CPUCL2, "mout_pll_cpucl2", mout_pll_cpucl2_p, + PLL_CON0_PLL_CPUCL2, 4, 1), + MUX(CLK_MOUT_CPUCL2_CLUSTER_USER, "mout_cpucl2_cluster_user", mout_cpucl2_cluster_user_p, + PLL_CON0_MUX_CLKCMU_CPUCL2_CLUSTER_USER, 4, 1), + MUX(CLK_MOUT_CPUCL2_SWITCH_USER, "mout_cpucl2_switch_user", mout_cpucl2_switch_user_p, + PLL_CON0_MUX_CLKCMU_CPUCL2_SWITCH_USER, 4, 1), + MUX(CLK_MOUT_CPUCL2_CLUSTER, "mout_cpucl2_cluster", mout_cpucl2_cluster_p, + CLK_CON_MUX_MUX_CLK_CPUCL2_CLUSTER, 0, 2), + MUX(CLK_MOUT_CPUCL2_CORE, "mout_cpucl2_core", mout_cpucl2_core_p, + CLK_CON_MUX_MUX_CLK_CPUCL2_CORE, 0, 2), +}; + +static const struct samsung_div_clock cpucl2_div_clks[] __initconst = { + DIV(CLK_DOUT_CLUSTER2_ACLK, "dout_cluster2_aclk", + "mout_cpucl2_cluster", CLK_CON_DIV_DIV_CLK_CLUSTER2_ACLK, 0, 4), + DIV(CLK_DOUT_CLUSTER2_ATCLK, "dout_cluster2_atclk", + "mout_cpucl2_cluster", CLK_CON_DIV_DIV_CLK_CLUSTER2_ATCLK, 0, 4), + DIV(CLK_DOUT_CLUSTER2_MPCLK, "dout_cluster2_mpclk", + "mout_cpucl2_cluster", CLK_CON_DIV_DIV_CLK_CLUSTER2_MPCLK, 0, 4), + DIV(CLK_DOUT_CLUSTER2_PCLK, "dout_cluster2_pclk", + "mout_cpucl2_cluster", CLK_CON_DIV_DIV_CLK_CLUSTER2_PCLK, 0, 4), + DIV(CLK_DOUT_CLUSTER2_PERIPHCLK, "dout_cluster2_periphclk", + "mout_cpucl2_cluster", CLK_CON_DIV_DIV_CLK_CLUSTER2_PERIPHCLK, 0, 4), + DIV(CLK_DOUT_CPUCL2_NOCP, "dout_cpucl2_nocp", + "mout_cpucl2_cluster", 
CLK_CON_DIV_DIV_CLK_CPUCL2_NOCP, 0, 4), +}; + +static const struct samsung_cmu_info cpucl2_cmu_info __initconst = { + .pll_clks = cpucl2_pll_clks, + .nr_pll_clks = ARRAY_SIZE(cpucl2_pll_clks), + .mux_clks = cpucl2_mux_clks, + .nr_mux_clks = ARRAY_SIZE(cpucl2_mux_clks), + .div_clks = cpucl2_div_clks, + .nr_div_clks = ARRAY_SIZE(cpucl2_div_clks), + .nr_clk_ids = CLKS_NR_CPUCL2, + .clk_regs = cpucl2_clk_regs, + .nr_clk_regs = ARRAY_SIZE(cpucl2_clk_regs), + .clk_name = "cpucl2", +}; + +static void __init exynosautov920_cmu_cpucl2_init(struct device_node *np) +{ + exynos_arm64_register_cmu(NULL, np, &cpucl2_cmu_info); +} + +/* Register CMU_CPUCL2 early, as CPU clocks should be available ASAP */ +CLK_OF_DECLARE(exynosautov920_cmu_cpucl2, "samsung,exynosautov920-cmu-cpucl2", + exynosautov920_cmu_cpucl2_init); + /* ---- CMU_PERIC0 --------------------------------------------------------- */ /* Register Offset definitions for CMU_PERIC0 (0x10800000) */ @@ -1393,7 +1729,7 @@ static const unsigned long hsi1_clk_regs[] __initconst = { /* List of parent clocks for Muxes in CMU_HSI1 */ PNAME(mout_hsi1_mmc_card_user_p) = {"oscclk", "dout_clkcmu_hsi1_mmc_card"}; PNAME(mout_hsi1_noc_user_p) = { "oscclk", "dout_clkcmu_hsi1_noc" }; -PNAME(mout_hsi1_usbdrd_user_p) = { "oscclk", "mout_clkcmu_hsi1_usbdrd" }; +PNAME(mout_hsi1_usbdrd_user_p) = { "oscclk", "dout_clkcmu_hsi1_usbdrd" }; PNAME(mout_hsi1_usbdrd_p) = { "dout_tcxo_div2", "mout_hsi1_usbdrd_user" }; static const struct samsung_mux_clock hsi1_mux_clks[] __initconst = { diff --git a/drivers/clk/socfpga/clk-pll-s10.c b/drivers/clk/socfpga/clk-pll-s10.c index 1d82737befd3..a88c212bda12 100644 --- a/drivers/clk/socfpga/clk-pll-s10.c +++ b/drivers/clk/socfpga/clk-pll-s10.c @@ -83,9 +83,9 @@ static unsigned long clk_pll_recalc_rate(struct clk_hw *hwclk, unsigned long parent_rate) { struct socfpga_pll *socfpgaclk = to_socfpga_clk(hwclk); - unsigned long mdiv; - unsigned long refdiv; - unsigned long reg; + u32 mdiv; + u32 refdiv; + u32 reg; unsigned long long vco_freq; /* read VCO1 reg for numerator and denominator */ diff --git a/drivers/clk/socfpga/clk-pll.c b/drivers/clk/socfpga/clk-pll.c index 9dcc1b2d2cc0..03a96139a576 100644 --- a/drivers/clk/socfpga/clk-pll.c +++ b/drivers/clk/socfpga/clk-pll.c @@ -39,9 +39,9 @@ static unsigned long clk_pll_recalc_rate(struct clk_hw *hwclk, unsigned long parent_rate) { struct socfpga_pll *socfpgaclk = to_socfpga_clk(hwclk); - unsigned long divf, divq, reg; + u32 divf, divq, reg; unsigned long long vco_freq; - unsigned long bypass; + u32 bypass; reg = readl(socfpgaclk->hw.reg); bypass = readl(clk_mgr_base_addr + CLKMGR_BYPASS); diff --git a/drivers/clk/sophgo/Kconfig b/drivers/clk/sophgo/Kconfig index 8b1367e3a95e..e14e802f28bf 100644 --- a/drivers/clk/sophgo/Kconfig +++ b/drivers/clk/sophgo/Kconfig @@ -37,3 +37,22 @@ config CLK_SOPHGO_SG2042_RPGATE This clock IP depends on SG2042 Clock Generator because it uses clock from Clock Generator IP as input. This driver provides Gate function for RP. + +config CLK_SOPHGO_SG2044 + tristate "Sophgo SG2044 clock controller support" + depends on ARCH_SOPHGO || COMPILE_TEST + help + This driver supports the clock controller on the Sophgo SG2044 + SoC. This controller requires multiple PLL clocks as input. + This clock controller provides PLL clocks and common clock functionality + for various IPs on the SoC.
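For orientation, peripheral drivers on the SoC consume the clocks exposed by this controller through the generic Linux clk consumer API. Below is a minimal, hypothetical consumer sketch; the demo device and the "ahb" clock-names entry are illustrative assumptions, not taken from this series:

/* Hypothetical consumer of a clock provided by the SG2044 clock controller. */
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
	struct clk *clk;

	/* "ahb" is an illustrative clock-names entry, not defined by this series */
	clk = devm_clk_get_enabled(&pdev->dev, "ahb");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	dev_info(&pdev->dev, "ahb clock runs at %lu Hz\n", clk_get_rate(clk));

	return 0;
}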
+ +config CLK_SOPHGO_SG2044_PLL + tristate "Sophgo SG2044 PLL clock controller support" + depends on ARCH_SOPHGO || COMPILE_TEST + select MFD_SYSCON + select REGMAP_MMIO + help + This driver supports the PLL clock controller on the Sophgo + SG2044 SoC. This controller requires a 25 MHz oscillator as input. + This clock controller provides PLL clocks on the SoC. diff --git a/drivers/clk/sophgo/Makefile b/drivers/clk/sophgo/Makefile index 53506845a044..26b2fd121582 100644 --- a/drivers/clk/sophgo/Makefile +++ b/drivers/clk/sophgo/Makefile @@ -9,3 +9,5 @@ clk-sophgo-cv1800-y += clk-cv18xx-pll.o obj-$(CONFIG_CLK_SOPHGO_SG2042_CLKGEN) += clk-sg2042-clkgen.o obj-$(CONFIG_CLK_SOPHGO_SG2042_PLL) += clk-sg2042-pll.o obj-$(CONFIG_CLK_SOPHGO_SG2042_RPGATE) += clk-sg2042-rpgate.o +obj-$(CONFIG_CLK_SOPHGO_SG2044) += clk-sg2044.o +obj-$(CONFIG_CLK_SOPHGO_SG2044_PLL) += clk-sg2044-pll.o diff --git a/drivers/clk/sophgo/clk-cv1800.c b/drivers/clk/sophgo/clk-cv1800.c index e0c4dc347579..a4116ac1adcb 100644 --- a/drivers/clk/sophgo/clk-cv1800.c +++ b/drivers/clk/sophgo/clk-cv1800.c @@ -1519,7 +1519,9 @@ static int cv1800_clk_probe(struct platform_device *pdev) static const struct of_device_id cv1800_clk_ids[] = { { .compatible = "sophgo,cv1800-clk", .data = &cv1800_desc }, + { .compatible = "sophgo,cv1800b-clk", .data = &cv1800_desc }, { .compatible = "sophgo,cv1810-clk", .data = &cv1810_desc }, + { .compatible = "sophgo,cv1812h-clk", .data = &cv1810_desc }, { .compatible = "sophgo,sg2000-clk", .data = &sg2000_desc }, { } }; diff --git a/drivers/clk/sophgo/clk-sg2044-pll.c b/drivers/clk/sophgo/clk-sg2044-pll.c new file mode 100644 index 000000000000..94c0f519ba6d --- /dev/null +++ b/drivers/clk/sophgo/clk-sg2044-pll.c @@ -0,0 +1,628 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Sophgo SG2044 PLL clock controller driver + * + * Copyright (C) 2025 Inochi Amaoto <inochiama@gmail.com> + */ + +#include <linux/array_size.h> +#include <linux/bitfield.h> +#include <linux/bits.h> +#include <linux/cleanup.h> +#include <linux/clk.h> +#include <linux/clk-provider.h> +#include <linux/io.h> +#include <linux/iopoll.h> +#include <linux/math64.h> +#include <linux/mfd/syscon.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> +#include <linux/spinlock.h> + +#include <dt-bindings/clock/sophgo,sg2044-pll.h> + +/* Low Control part */ +#define PLL_VCOSEL_MASK GENMASK(17, 16) + +/* High Control part */ +#define PLL_FBDIV_MASK GENMASK(11, 0) +#define PLL_REFDIV_MASK GENMASK(17, 12) +#define PLL_POSTDIV1_MASK GENMASK(20, 18) +#define PLL_POSTDIV2_MASK GENMASK(23, 21) + +#define PLL_CALIBRATE_EN BIT(24) +#define PLL_CALIBRATE_MASK GENMASK(29, 27) +#define PLL_CALIBRATE_DEFAULT FIELD_PREP(PLL_CALIBRATE_MASK, 2) +#define PLL_UPDATE_EN BIT(30) + +#define PLL_HIGH_CTRL_MASK \ + (PLL_FBDIV_MASK | PLL_REFDIV_MASK | \ + PLL_POSTDIV1_MASK | PLL_POSTDIV2_MASK | \ + PLL_CALIBRATE_EN | PLL_CALIBRATE_MASK | \ + PLL_UPDATE_EN) + +#define PLL_HIGH_CTRL_OFFSET 4 + +#define PLL_VCOSEL_1G6 0x2 +#define PLL_VCOSEL_2G4 0x3 + +#define PLL_LIMIT_FOUTVCO 0 +#define PLL_LIMIT_FOUT 1 +#define PLL_LIMIT_REFDIV 2 +#define PLL_LIMIT_FBDIV 3 +#define PLL_LIMIT_POSTDIV1 4 +#define PLL_LIMIT_POSTDIV2 5 + +#define for_each_pll_limit_range(_var, _limit) \ + for (_var = (_limit)->min; _var <= (_limit)->max; _var++) + +struct sg2044_pll_limit { + u64 min; + u64 max; +}; + +struct sg2044_pll_internal { + u32 ctrl_offset; + u32 status_offset; + u32 enable_offset; + + u8 status_lock_bit; + u8 status_updating_bit; + u8 enable_bit; + + const struct 
sg2044_pll_limit *limits; +}; + +struct sg2044_clk_common { + struct clk_hw hw; + struct regmap *regmap; + spinlock_t *lock; + unsigned int id; +}; + +struct sg2044_pll { + struct sg2044_clk_common common; + struct sg2044_pll_internal pll; + unsigned int syscon_offset; +}; + +struct sg2044_pll_desc_data { + struct sg2044_clk_common * const *pll; + u16 num_pll; +}; + +#define SG2044_SYSCON_PLL_OFFSET 0x98 + +struct sg2044_pll_ctrl { + spinlock_t lock; + struct clk_hw_onecell_data data; +}; + +#define hw_to_sg2044_clk_common(_hw) \ + container_of((_hw), struct sg2044_clk_common, hw) + +static inline bool sg2044_clk_fit_limit(u64 value, + const struct sg2044_pll_limit *limit) +{ + return value >= limit->min && value <= limit->max; +} + +static inline struct sg2044_pll *hw_to_sg2044_pll(struct clk_hw *hw) +{ + return container_of(hw_to_sg2044_clk_common(hw), + struct sg2044_pll, common); +} + +static unsigned long sg2044_pll_calc_vco_rate(unsigned long parent_rate, + unsigned long refdiv, + unsigned long fbdiv) +{ + u64 numerator = parent_rate * fbdiv; + + return div64_ul(numerator, refdiv); +} + +static unsigned long sg2044_pll_calc_rate(unsigned long parent_rate, + unsigned long refdiv, + unsigned long fbdiv, + unsigned long postdiv1, + unsigned long postdiv2) +{ + u64 numerator, denominator; + + numerator = parent_rate * fbdiv; + denominator = refdiv * (postdiv1 + 1) * (postdiv2 + 1); + + return div64_u64(numerator, denominator); +} + +static unsigned long sg2044_pll_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct sg2044_pll *pll = hw_to_sg2044_pll(hw); + u32 value; + int ret; + + ret = regmap_read(pll->common.regmap, + pll->syscon_offset + pll->pll.ctrl_offset + PLL_HIGH_CTRL_OFFSET, + &value); + if (ret < 0) + return 0; + + return sg2044_pll_calc_rate(parent_rate, + FIELD_GET(PLL_REFDIV_MASK, value), + FIELD_GET(PLL_FBDIV_MASK, value), + FIELD_GET(PLL_POSTDIV1_MASK, value), + FIELD_GET(PLL_POSTDIV2_MASK, value)); +} + +static bool pll_is_better_rate(unsigned long target, unsigned long now, + unsigned long best) +{ + return abs_diff(target, now) < abs_diff(target, best); +} + +static int sg2042_pll_compute_postdiv(const struct sg2044_pll_limit *limits, + unsigned long target, + unsigned long parent_rate, + unsigned int refdiv, + unsigned int fbdiv, + unsigned int *postdiv1, + unsigned int *postdiv2) +{ + unsigned int div1, div2; + unsigned long tmp, best_rate = 0; + unsigned int best_div1 = 0, best_div2 = 0; + + for_each_pll_limit_range(div2, &limits[PLL_LIMIT_POSTDIV2]) { + for_each_pll_limit_range(div1, &limits[PLL_LIMIT_POSTDIV1]) { + tmp = sg2044_pll_calc_rate(parent_rate, + refdiv, fbdiv, + div1, div2); + + if (tmp > target) + continue; + + if (pll_is_better_rate(target, tmp, best_rate)) { + best_div1 = div1; + best_div2 = div2; + best_rate = tmp; + + if (tmp == target) + goto find; + } + } + } + +find: + if (best_rate) { + *postdiv1 = best_div1; + *postdiv2 = best_div2; + return 0; + } + + return -EINVAL; +} + +static int sg2044_compute_pll_setting(const struct sg2044_pll_limit *limits, + unsigned long req_rate, + unsigned long parent_rate, + unsigned int *value) +{ + unsigned int refdiv, fbdiv, postdiv1, postdiv2; + unsigned int best_refdiv, best_fbdiv, best_postdiv1, best_postdiv2; + unsigned long tmp, best_rate = 0; + int ret; + + for_each_pll_limit_range(fbdiv, &limits[PLL_LIMIT_FBDIV]) { + for_each_pll_limit_range(refdiv, &limits[PLL_LIMIT_REFDIV]) { + u64 vco = sg2044_pll_calc_vco_rate(parent_rate, + refdiv, fbdiv); + if (!sg2044_clk_fit_limit(vco, 
&limits[PLL_LIMIT_FOUTVCO])) + continue; + + ret = sg2042_pll_compute_postdiv(limits, + req_rate, parent_rate, + refdiv, fbdiv, + &postdiv1, &postdiv2); + if (ret) + continue; + + tmp = sg2044_pll_calc_rate(parent_rate, + refdiv, fbdiv, + postdiv1, postdiv2); + + if (pll_is_better_rate(req_rate, tmp, best_rate)) { + best_refdiv = refdiv; + best_fbdiv = fbdiv; + best_postdiv1 = postdiv1; + best_postdiv2 = postdiv2; + best_rate = tmp; + + if (tmp == req_rate) + goto find; + } + } + } + +find: + if (best_rate) { + *value = FIELD_PREP(PLL_REFDIV_MASK, best_refdiv) | + FIELD_PREP(PLL_FBDIV_MASK, best_fbdiv) | + FIELD_PREP(PLL_POSTDIV1_MASK, best_postdiv1) | + FIELD_PREP(PLL_POSTDIV2_MASK, best_postdiv2); + return 0; + } + + return -EINVAL; +} + +static int sg2044_pll_determine_rate(struct clk_hw *hw, + struct clk_rate_request *req) +{ + struct sg2044_pll *pll = hw_to_sg2044_pll(hw); + unsigned int value; + u64 target; + int ret; + + target = clamp(req->rate, pll->pll.limits[PLL_LIMIT_FOUT].min, + pll->pll.limits[PLL_LIMIT_FOUT].max); + + ret = sg2044_compute_pll_setting(pll->pll.limits, target, + req->best_parent_rate, &value); + if (ret < 0) + return ret; + + req->rate = sg2044_pll_calc_rate(req->best_parent_rate, + FIELD_GET(PLL_REFDIV_MASK, value), + FIELD_GET(PLL_FBDIV_MASK, value), + FIELD_GET(PLL_POSTDIV1_MASK, value), + FIELD_GET(PLL_POSTDIV2_MASK, value)); + + return 0; +} + +static int sg2044_pll_poll_update(struct sg2044_pll *pll) +{ + int ret; + unsigned int value; + + ret = regmap_read_poll_timeout_atomic(pll->common.regmap, + pll->syscon_offset + pll->pll.status_offset, + value, + (value & BIT(pll->pll.status_lock_bit)), + 1, 100000); + if (ret) + return ret; + + return regmap_read_poll_timeout_atomic(pll->common.regmap, + pll->syscon_offset + pll->pll.status_offset, + value, + (!(value & BIT(pll->pll.status_updating_bit))), + 1, 100000); +} + +static int sg2044_pll_enable(struct sg2044_pll *pll, bool en) +{ + if (en) { + if (sg2044_pll_poll_update(pll) < 0) + pr_warn("%s: fail to lock pll\n", clk_hw_get_name(&pll->common.hw)); + + return regmap_set_bits(pll->common.regmap, + pll->syscon_offset + pll->pll.enable_offset, + BIT(pll->pll.enable_bit)); + } + + return regmap_clear_bits(pll->common.regmap, + pll->syscon_offset + pll->pll.enable_offset, + BIT(pll->pll.enable_bit)); +} + +static int sg2044_pll_update_vcosel(struct sg2044_pll *pll, u64 rate) +{ + unsigned int sel; + + if (rate < U64_C(2400000000)) + sel = PLL_VCOSEL_1G6; + else + sel = PLL_VCOSEL_2G4; + + return regmap_write_bits(pll->common.regmap, + pll->syscon_offset + pll->pll.ctrl_offset, + PLL_VCOSEL_MASK, + FIELD_PREP(PLL_VCOSEL_MASK, sel)); +} + +static int sg2044_pll_set_rate(struct clk_hw *hw, + unsigned long rate, unsigned long parent_rate) +{ + struct sg2044_pll *pll = hw_to_sg2044_pll(hw); + unsigned int value; + u64 vco; + int ret; + + ret = sg2044_compute_pll_setting(pll->pll.limits, rate, + parent_rate, &value); + if (ret < 0) + return ret; + + vco = sg2044_pll_calc_vco_rate(parent_rate, + FIELD_GET(PLL_REFDIV_MASK, value), + FIELD_GET(PLL_FBDIV_MASK, value)); + + value |= PLL_CALIBRATE_EN; + value |= PLL_CALIBRATE_DEFAULT; + value |= PLL_UPDATE_EN; + + guard(spinlock_irqsave)(pll->common.lock); + + ret = sg2044_pll_enable(pll, false); + if (ret) + return ret; + + sg2044_pll_update_vcosel(pll, vco); + + regmap_write_bits(pll->common.regmap, + pll->syscon_offset + pll->pll.ctrl_offset + + PLL_HIGH_CTRL_OFFSET, + PLL_HIGH_CTRL_MASK, value); + + sg2044_pll_enable(pll, true); + + return ret; +} + +static const 
struct clk_ops sg2044_pll_ops = { + .recalc_rate = sg2044_pll_recalc_rate, + .determine_rate = sg2044_pll_determine_rate, + .set_rate = sg2044_pll_set_rate, +}; + +static const struct clk_ops sg2044_pll_ro_ops = { + .recalc_rate = sg2044_pll_recalc_rate, +}; + +#define SG2044_CLK_COMMON_PDATA(_id, _name, _parents, _op, _flags) \ + { \ + .hw.init = CLK_HW_INIT_PARENTS_DATA(_name, _parents, \ + _op, (_flags)), \ + .id = (_id), \ + } + +#define DEFINE_SG2044_PLL(_id, _name, _parent, _flags, \ + _ctrl_offset, \ + _status_offset, _status_lock_bit, \ + _status_updating_bit, \ + _enable_offset, _enable_bit, \ + _limits) \ + struct sg2044_pll _name = { \ + .common = SG2044_CLK_COMMON_PDATA(_id, #_name, _parent, \ + &sg2044_pll_ops, \ + (_flags)), \ + .pll = { \ + .ctrl_offset = (_ctrl_offset), \ + .status_offset = (_status_offset), \ + .enable_offset = (_enable_offset), \ + .status_lock_bit = (_status_lock_bit), \ + .status_updating_bit = (_status_updating_bit), \ + .enable_bit = (_enable_bit), \ + .limits = (_limits), \ + }, \ + } + +#define DEFINE_SG2044_PLL_RO(_id, _name, _parent, _flags, \ + _ctrl_offset, \ + _status_offset, _status_lock_bit, \ + _status_updating_bit, \ + _enable_offset, _enable_bit, \ + _limits) \ + struct sg2044_pll _name = { \ + .common = SG2044_CLK_COMMON_PDATA(_id, #_name, _parent, \ + &sg2044_pll_ro_ops, \ + (_flags)), \ + .pll = { \ + .ctrl_offset = (_ctrl_offset), \ + .status_offset = (_status_offset), \ + .enable_offset = (_enable_offset), \ + .status_lock_bit = (_status_lock_bit), \ + .status_updating_bit = (_status_updating_bit), \ + .enable_bit = (_enable_bit), \ + .limits = (_limits), \ + }, \ + } + +static const struct clk_parent_data osc_parents[] = { + { .index = 0 }, +}; + +static const struct sg2044_pll_limit pll_limits[] = { + [PLL_LIMIT_FOUTVCO] = { + .min = U64_C(1600000000), + .max = U64_C(3200000000), + }, + [PLL_LIMIT_FOUT] = { + .min = U64_C(25000), + .max = U64_C(3200000000), + }, + [PLL_LIMIT_REFDIV] = { + .min = U64_C(1), + .max = U64_C(63), + }, + [PLL_LIMIT_FBDIV] = { + .min = U64_C(8), + .max = U64_C(1066), + }, + [PLL_LIMIT_POSTDIV1] = { + .min = U64_C(0), + .max = U64_C(7), + }, + [PLL_LIMIT_POSTDIV2] = { + .min = U64_C(0), + .max = U64_C(7), + }, +}; + +static DEFINE_SG2044_PLL_RO(CLK_FPLL0, clk_fpll0, osc_parents, CLK_IS_CRITICAL, + 0x58, 0x00, 22, 6, + 0x04, 6, pll_limits); + +static DEFINE_SG2044_PLL_RO(CLK_FPLL1, clk_fpll1, osc_parents, CLK_IS_CRITICAL, + 0x60, 0x00, 23, 7, + 0x04, 7, pll_limits); + +static DEFINE_SG2044_PLL_RO(CLK_FPLL2, clk_fpll2, osc_parents, CLK_IS_CRITICAL, + 0x20, 0x08, 16, 0, + 0x0c, 0, pll_limits); + +static DEFINE_SG2044_PLL_RO(CLK_DPLL0, clk_dpll0, osc_parents, CLK_IS_CRITICAL, + 0x68, 0x00, 24, 8, + 0x04, 8, pll_limits); + +static DEFINE_SG2044_PLL_RO(CLK_DPLL1, clk_dpll1, osc_parents, CLK_IS_CRITICAL, + 0x70, 0x00, 25, 9, + 0x04, 9, pll_limits); + +static DEFINE_SG2044_PLL_RO(CLK_DPLL2, clk_dpll2, osc_parents, CLK_IS_CRITICAL, + 0x78, 0x00, 26, 10, + 0x04, 10, pll_limits); + +static DEFINE_SG2044_PLL_RO(CLK_DPLL3, clk_dpll3, osc_parents, CLK_IS_CRITICAL, + 0x80, 0x00, 27, 11, + 0x04, 11, pll_limits); + +static DEFINE_SG2044_PLL_RO(CLK_DPLL4, clk_dpll4, osc_parents, CLK_IS_CRITICAL, + 0x88, 0x00, 28, 12, + 0x04, 12, pll_limits); + +static DEFINE_SG2044_PLL_RO(CLK_DPLL5, clk_dpll5, osc_parents, CLK_IS_CRITICAL, + 0x90, 0x00, 29, 13, + 0x04, 13, pll_limits); + +static DEFINE_SG2044_PLL_RO(CLK_DPLL6, clk_dpll6, osc_parents, CLK_IS_CRITICAL, + 0x98, 0x00, 30, 14, + 0x04, 14, pll_limits); + +static 
DEFINE_SG2044_PLL_RO(CLK_DPLL7, clk_dpll7, osc_parents, CLK_IS_CRITICAL, + 0xa0, 0x00, 31, 15, + 0x04, 15, pll_limits); + +static DEFINE_SG2044_PLL(CLK_MPLL0, clk_mpll0, osc_parents, CLK_IS_CRITICAL, + 0x28, 0x00, 16, 0, + 0x04, 0, pll_limits); + +static DEFINE_SG2044_PLL(CLK_MPLL1, clk_mpll1, osc_parents, CLK_IS_CRITICAL, + 0x30, 0x00, 17, 1, + 0x04, 1, pll_limits); + +static DEFINE_SG2044_PLL(CLK_MPLL2, clk_mpll2, osc_parents, CLK_IS_CRITICAL, + 0x38, 0x00, 18, 2, + 0x04, 2, pll_limits); + +static DEFINE_SG2044_PLL(CLK_MPLL3, clk_mpll3, osc_parents, CLK_IS_CRITICAL, + 0x40, 0x00, 19, 3, + 0x04, 3, pll_limits); + +static DEFINE_SG2044_PLL(CLK_MPLL4, clk_mpll4, osc_parents, CLK_IS_CRITICAL, + 0x48, 0x00, 20, 4, + 0x04, 4, pll_limits); + +static DEFINE_SG2044_PLL(CLK_MPLL5, clk_mpll5, osc_parents, CLK_IS_CRITICAL, + 0x50, 0x00, 21, 5, + 0x04, 5, pll_limits); + +static struct sg2044_clk_common * const sg2044_pll_commons[] = { + &clk_fpll0.common, + &clk_fpll1.common, + &clk_fpll2.common, + &clk_dpll0.common, + &clk_dpll1.common, + &clk_dpll2.common, + &clk_dpll3.common, + &clk_dpll4.common, + &clk_dpll5.common, + &clk_dpll6.common, + &clk_dpll7.common, + &clk_mpll0.common, + &clk_mpll1.common, + &clk_mpll2.common, + &clk_mpll3.common, + &clk_mpll4.common, + &clk_mpll5.common, +}; + +static int sg2044_pll_init_ctrl(struct device *dev, struct regmap *regmap, + struct sg2044_pll_ctrl *ctrl, + const struct sg2044_pll_desc_data *desc) +{ + int ret, i; + + spin_lock_init(&ctrl->lock); + + for (i = 0; i < desc->num_pll; i++) { + struct sg2044_clk_common *common = desc->pll[i]; + struct sg2044_pll *pll = hw_to_sg2044_pll(&common->hw); + + common->lock = &ctrl->lock; + common->regmap = regmap; + pll->syscon_offset = SG2044_SYSCON_PLL_OFFSET; + + ret = devm_clk_hw_register(dev, &common->hw); + if (ret) + return ret; + + ctrl->data.hws[common->id] = &common->hw; + } + + return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, + &ctrl->data); +} + +static int sg2044_pll_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct sg2044_pll_ctrl *ctrl; + const struct sg2044_pll_desc_data *desc; + struct regmap *regmap; + + regmap = device_node_to_regmap(pdev->dev.parent->of_node); + if (IS_ERR(regmap)) + return dev_err_probe(dev, PTR_ERR(regmap), + "fail to get the regmap for PLL\n"); + + desc = (const struct sg2044_pll_desc_data *)platform_get_device_id(pdev)->driver_data; + if (!desc) + return dev_err_probe(dev, -EINVAL, "no match data for platform\n"); + + ctrl = devm_kzalloc(dev, struct_size(ctrl, data.hws, desc->num_pll), GFP_KERNEL); + if (!ctrl) + return -ENOMEM; + + ctrl->data.num = desc->num_pll; + + return sg2044_pll_init_ctrl(dev, regmap, ctrl, desc); +} + +static const struct sg2044_pll_desc_data sg2044_pll_desc_data = { + .pll = sg2044_pll_commons, + .num_pll = ARRAY_SIZE(sg2044_pll_commons), +}; + +static const struct platform_device_id sg2044_pll_match[] = { + { .name = "sg2044-pll", + .driver_data = (unsigned long)&sg2044_pll_desc_data }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(platform, sg2044_pll_match); + +static struct platform_driver sg2044_clk_driver = { + .probe = sg2044_pll_probe, + .driver = { + .name = "sg2044-pll", + }, + .id_table = sg2044_pll_match, +}; +module_platform_driver(sg2044_clk_driver); + +MODULE_AUTHOR("Inochi Amaoto <inochiama@gmail.com>"); +MODULE_DESCRIPTION("Sophgo SG2044 pll clock driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/clk/sophgo/clk-sg2044.c b/drivers/clk/sophgo/clk-sg2044.c new file mode 100644 index 
000000000000..f67f99c926b6 --- /dev/null +++ b/drivers/clk/sophgo/clk-sg2044.c @@ -0,0 +1,1812 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Sophgo SG2044 clock controller driver + * + * Copyright (C) 2025 Inochi Amaoto <inochiama@gmail.com> + */ + +#include <linux/array_size.h> +#include <linux/bitfield.h> +#include <linux/bits.h> +#include <linux/cleanup.h> +#include <linux/clk.h> +#include <linux/clk-provider.h> +#include <linux/io.h> +#include <linux/iopoll.h> +#include <linux/math64.h> +#include <linux/mfd/syscon.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> +#include <linux/spinlock.h> + +#include <dt-bindings/clock/sophgo,sg2044-clk.h> + +#define DIV_ASSERT BIT(0) +#define DIV_FACTOR_REG_SOURCE BIT(3) +#define DIV_BRANCH_EN BIT(4) + +#define DIV_ASSERT_TIME 2 + +struct sg2044_div_internal { + u32 offset; + u32 initval; + u8 shift; + u8 width; + u16 flags; +}; + +struct sg2044_mux_internal { + const u32 *table; + u32 offset; + u16 shift; + u16 flags; +}; + +struct sg2044_gate_internal { + u32 offset; + u16 shift; + u16 flags; +}; + +struct sg2044_clk_common { + struct clk_hw hw; + void __iomem *base; + spinlock_t *lock; + unsigned int id; +}; + +struct sg2044_div { + struct sg2044_clk_common common; + struct sg2044_div_internal div; +}; + +struct sg2044_mux { + struct sg2044_clk_common common; + struct sg2044_mux_internal mux; + struct notifier_block nb; + u8 saved_parent; +}; + +struct sg2044_gate { + struct sg2044_clk_common common; + struct sg2044_gate_internal gate; +}; + +struct sg2044_clk_ctrl { + spinlock_t lock; + struct clk_hw_onecell_data data; +}; + +struct sg2044_clk_desc_data { + struct sg2044_clk_common * const *pll; + struct sg2044_clk_common * const *div; + struct sg2044_clk_common * const *mux; + struct sg2044_clk_common * const *gate; + u16 num_pll; + u16 num_div; + u16 num_mux; + u16 num_gate; +}; + +#define hw_to_sg2044_clk_common(_hw) \ + container_of((_hw), struct sg2044_clk_common, hw) + +static inline struct sg2044_div *hw_to_sg2044_div(struct clk_hw *hw) +{ + return container_of(hw_to_sg2044_clk_common(hw), + struct sg2044_div, common); +} + +static u32 sg2044_div_get_reg_div(u32 reg, struct sg2044_div_internal *div) +{ + if ((reg & DIV_FACTOR_REG_SOURCE)) + return (reg >> div->shift) & clk_div_mask(div->width); + + return div->initval == 0 ? 
1 : div->initval; +} + +static unsigned long _sg2044_div_recalc_rate(struct sg2044_clk_common *common, + struct sg2044_div_internal *div, + unsigned long parent_rate) +{ + u32 reg = readl(common->base + div->offset); + u32 val = sg2044_div_get_reg_div(reg, div); + + return divider_recalc_rate(&common->hw, parent_rate, val, NULL, + div->flags, div->width); +} + +static unsigned long sg2044_div_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct sg2044_div *div = hw_to_sg2044_div(hw); + + return _sg2044_div_recalc_rate(&div->common, &div->div, + parent_rate); +} + +static int _sg2044_div_determine_rate(struct sg2044_clk_common *common, + struct sg2044_div_internal *div, + struct clk_rate_request *req) +{ + if (div->flags & CLK_DIVIDER_READ_ONLY) { + u32 reg = readl(common->base + div->offset); + u32 val = sg2044_div_get_reg_div(reg, div); + + return divider_ro_determine_rate(&common->hw, req, NULL, + div->width, div->flags, + val); + } + + return divider_determine_rate(&common->hw, req, NULL, + div->width, div->flags); +} + +static int sg2044_div_determine_rate(struct clk_hw *hw, + struct clk_rate_request *req) +{ + struct sg2044_div *div = hw_to_sg2044_div(hw); + + return _sg2044_div_determine_rate(&div->common, &div->div, req); +} + +static void sg2044_div_set_reg_div(struct sg2044_clk_common *common, + struct sg2044_div_internal *div, + u32 value) +{ + void __iomem *addr = common->base + div->offset; + u32 reg; + + reg = readl(addr); + + /* assert */ + reg &= ~DIV_ASSERT; + writel(reg, addr); + + /* set value */ + reg = readl(addr); + reg &= ~(clk_div_mask(div->width) << div->shift); + reg |= (value << div->shift) | DIV_FACTOR_REG_SOURCE; + writel(reg, addr); + + /* de-assert */ + reg |= DIV_ASSERT; + writel(reg, addr); +} + +static int sg2044_div_set_rate(struct clk_hw *hw, + unsigned long rate, unsigned long parent_rate) +{ + struct sg2044_div *div = hw_to_sg2044_div(hw); + u32 value; + + value = divider_get_val(rate, parent_rate, NULL, + div->div.width, div->div.flags); + + guard(spinlock_irqsave)(div->common.lock); + + sg2044_div_set_reg_div(&div->common, &div->div, value); + + return 0; +} + +static int sg2044_div_enable(struct clk_hw *hw) +{ + struct sg2044_div *div = hw_to_sg2044_div(hw); + void __iomem *addr = div->common.base + div->div.offset; + u32 value; + + guard(spinlock_irqsave)(div->common.lock); + + value = readl(addr); + value |= DIV_BRANCH_EN; + writel(value, addr); + + return 0; +} + +static void sg2044_div_disable(struct clk_hw *hw) +{ + struct sg2044_div *div = hw_to_sg2044_div(hw); + void __iomem *addr = div->common.base + div->div.offset; + u32 value; + + guard(spinlock_irqsave)(div->common.lock); + + value = readl(addr); + value &= ~DIV_BRANCH_EN; + writel(value, addr); +} + +static int sg2044_div_is_enabled(struct clk_hw *hw) +{ + struct sg2044_div *div = hw_to_sg2044_div(hw); + + return readl(div->common.base + div->div.offset) & DIV_BRANCH_EN; +} + +static const struct clk_ops sg2044_gateable_div_ops = { + .enable = sg2044_div_enable, + .disable = sg2044_div_disable, + .is_enabled = sg2044_div_is_enabled, + .recalc_rate = sg2044_div_recalc_rate, + .determine_rate = sg2044_div_determine_rate, + .set_rate = sg2044_div_set_rate, +}; + +static const struct clk_ops sg2044_div_ops = { + .recalc_rate = sg2044_div_recalc_rate, + .determine_rate = sg2044_div_determine_rate, + .set_rate = sg2044_div_set_rate, +}; + +static const struct clk_ops sg2044_div_ro_ops = { + .recalc_rate = sg2044_div_recalc_rate, + .determine_rate = sg2044_div_determine_rate, 
+}; + +static inline struct sg2044_mux *hw_to_sg2044_mux(struct clk_hw *hw) +{ + return container_of(hw_to_sg2044_clk_common(hw), + struct sg2044_mux, common); +} + +static inline struct sg2044_mux *nb_to_sg2044_mux(struct notifier_block *nb) +{ + return container_of(nb, struct sg2044_mux, nb); +} + +static const u32 sg2044_mux_table[] = {0, 1}; + +static int sg2044_mux_notifier_cb(struct notifier_block *nb, + unsigned long event, + void *data) +{ + struct sg2044_mux *mux = nb_to_sg2044_mux(nb); + const struct clk_ops *ops = &clk_mux_ops; + struct clk_notifier_data *ndata = data; + struct clk_hw *hw = __clk_get_hw(ndata->clk); + int ret = 0; + + if (event == PRE_RATE_CHANGE) { + mux->saved_parent = ops->get_parent(hw); + if (mux->saved_parent) + ret = ops->set_parent(hw, 0); + } else if (event == POST_RATE_CHANGE) { + ret = ops->set_parent(hw, mux->saved_parent); + } + + return notifier_from_errno(ret); +} + +static inline struct sg2044_gate *hw_to_sg2044_gate(struct clk_hw *hw) +{ + return container_of(hw_to_sg2044_clk_common(hw), + struct sg2044_gate, common); +} + +#define SG2044_CLK_COMMON_PDATA(_id, _name, _parents, _op, _flags) \ + { \ + .hw.init = CLK_HW_INIT_PARENTS_DATA(_name, _parents, \ + _op, (_flags)), \ + .id = (_id), \ + } + +#define SG2044_CLK_COMMON_PHWS(_id, _name, _parents, _op, _flags) \ + { \ + .hw.init = CLK_HW_INIT_PARENTS_HW(_name, _parents, \ + _op, (_flags)), \ + .id = (_id), \ + } + +#define DEFINE_SG2044_GATEABLE_DIV(_id, _name, _parent, _flags, \ + _div_offset, _div_shift, _div_width, \ + _div_flags, _div_initval) \ + struct sg2044_div _name = { \ + .common = SG2044_CLK_COMMON_PDATA(_id, #_name, _parent, \ + &sg2044_gateable_div_ops,\ + (_flags)), \ + .div = { \ + .offset = (_div_offset), \ + .initval = (_div_initval), \ + .shift = (_div_shift), \ + .width = (_div_width), \ + .flags = (_div_flags), \ + }, \ + } + +#define DEFINE_SG2044_DIV(_id, _name, _parent, _flags, \ + _div_offset, _div_shift, _div_width, \ + _div_flags, _div_initval) \ + struct sg2044_div _name = { \ + .common = SG2044_CLK_COMMON_PHWS(_id, #_name, _parent, \ + &sg2044_div_ops, \ + (_flags)), \ + .div = { \ + .offset = (_div_offset), \ + .initval = (_div_initval), \ + .shift = (_div_shift), \ + .width = (_div_width), \ + .flags = (_div_flags), \ + }, \ + } + +#define DEFINE_SG2044_DIV_PDATA(_id, _name, _parent, _flags, \ + _div_offset, _div_shift, _div_width, \ + _div_flags, _div_initval) \ + struct sg2044_div _name = { \ + .common = SG2044_CLK_COMMON_PDATA(_id, #_name, _parent, \ + &sg2044_div_ops, \ + (_flags)), \ + .div = { \ + .offset = (_div_offset), \ + .initval = (_div_initval), \ + .shift = (_div_shift), \ + .width = (_div_width), \ + .flags = (_div_flags), \ + }, \ + } + +#define DEFINE_SG2044_DIV_RO(_id, _name, _parent, _flags, \ + _div_offset, _div_shift, _div_width, \ + _div_flags, _div_initval) \ + struct sg2044_div _name = { \ + .common = SG2044_CLK_COMMON_PDATA(_id, #_name, _parent, \ + &sg2044_div_ro_ops, \ + (_flags)), \ + .div = { \ + .offset = (_div_offset), \ + .initval = (_div_initval), \ + .shift = (_div_shift), \ + .width = (_div_width), \ + .flags = (_div_flags) | CLK_DIVIDER_READ_ONLY,\ + }, \ + } + +#define DEFINE_SG2044_MUX(_id, _name, _parent, _flags, \ + _mux_offset, _mux_shift, \ + _mux_table, _mux_flags) \ + struct sg2044_mux _name = { \ + .common = SG2044_CLK_COMMON_PDATA(_id, #_name, _parent, \ + &clk_mux_ops, (_flags)),\ + .mux = { \ + .table = (_mux_table), \ + .offset = (_mux_offset), \ + .shift = (_mux_shift), \ + .flags = (_mux_flags), \ + }, \ + } + 
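To make the descriptor macros above easier to follow, here is a hand-expanded approximation of one DEFINE_SG2044_DIV_PDATA() invocation, using the clk_div_top_50m divider defined further down in this file (the expansion is illustrative; the preprocessor performs the actual token pasting and stringification):

/*
 * Rough expansion of:
 *   static DEFINE_SG2044_DIV_PDATA(CLK_DIV_TOP_50M, clk_div_top_50m,
 *                                  clk_fpll0_parent, 0, 0x048, 16, 8,
 *                                  CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO, 40);
 */
static struct sg2044_div clk_div_top_50m = {
	.common = {
		.hw.init = CLK_HW_INIT_PARENTS_DATA("clk_div_top_50m",
						    clk_fpll0_parent,
						    &sg2044_div_ops, 0),
		.id = CLK_DIV_TOP_50M,
	},
	.div = {
		.offset = 0x048,
		.initval = 40,	/* divider used while DIV_FACTOR_REG_SOURCE is clear */
		.shift = 16,
		.width = 8,
		.flags = CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
	},
};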
+#define DEFINE_SG2044_GATE(_id, _name, _parent, _flags, \ + _gate_offset, _gate_shift, _gate_flags) \ + struct sg2044_gate _name = { \ + .common = SG2044_CLK_COMMON_PHWS(_id, #_name, _parent, \ + &clk_gate_ops, (_flags)),\ + .gate = { \ + .offset = (_gate_offset), \ + .shift = (_gate_shift), \ + .flags = (_gate_flags), \ + }, \ + } + +static const struct clk_parent_data clk_fpll0_parent[] = { + { .fw_name = "fpll0" }, +}; + +static const struct clk_parent_data clk_fpll1_parent[] = { + { .fw_name = "fpll1" }, +}; + +static const struct clk_parent_data clk_fpll2_parent[] = { + { .fw_name = "fpll2" }, +}; + +static const struct clk_parent_data clk_dpll0_parent[] = { + { .fw_name = "dpll0" }, +}; + +static const struct clk_parent_data clk_dpll1_parent[] = { + { .fw_name = "dpll1" }, +}; + +static const struct clk_parent_data clk_dpll2_parent[] = { + { .fw_name = "dpll2" }, +}; + +static const struct clk_parent_data clk_dpll3_parent[] = { + { .fw_name = "dpll3" }, +}; + +static const struct clk_parent_data clk_dpll4_parent[] = { + { .fw_name = "dpll4" }, +}; + +static const struct clk_parent_data clk_dpll5_parent[] = { + { .fw_name = "dpll5" }, +}; + +static const struct clk_parent_data clk_dpll6_parent[] = { + { .fw_name = "dpll6" }, +}; + +static const struct clk_parent_data clk_dpll7_parent[] = { + { .fw_name = "dpll7" }, +}; + +static const struct clk_parent_data clk_mpll0_parent[] = { + { .fw_name = "mpll0" }, +}; + +static const struct clk_parent_data clk_mpll1_parent[] = { + { .fw_name = "mpll1" }, +}; + +static const struct clk_parent_data clk_mpll2_parent[] = { + { .fw_name = "mpll2" }, +}; + +static const struct clk_parent_data clk_mpll3_parent[] = { + { .fw_name = "mpll3" }, +}; + +static const struct clk_parent_data clk_mpll4_parent[] = { + { .fw_name = "mpll4" }, +}; + +static const struct clk_parent_data clk_mpll5_parent[] = { + { .fw_name = "mpll5" }, +}; + +static DEFINE_SG2044_GATEABLE_DIV(CLK_DIV_AP_SYS_FIXED, clk_div_ap_sys_fixed, + clk_fpll0_parent, 0, + 0x044, 16, 8, + CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO | + CLK_IS_CRITICAL, + 1); + +static DEFINE_SG2044_GATEABLE_DIV(CLK_DIV_AP_SYS_MAIN, clk_div_ap_sys_main, + clk_mpll0_parent, 0, + 0x040, 16, 8, + CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO | + CLK_IS_CRITICAL, + 1); + +static DEFINE_SG2044_GATEABLE_DIV(CLK_DIV_RP_SYS_FIXED, clk_div_rp_sys_fixed, + clk_fpll0_parent, 0, + 0x050, 16, 8, + CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO | + CLK_IS_CRITICAL, + 1); + +static DEFINE_SG2044_GATEABLE_DIV(CLK_DIV_RP_SYS_MAIN, clk_div_rp_sys_main, + clk_mpll1_parent, 0, + 0x04c, 16, 8, + CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO | + CLK_IS_CRITICAL, + 1); + +static DEFINE_SG2044_GATEABLE_DIV(CLK_DIV_TPU_SYS_FIXED, clk_div_tpu_sys_fixed, + clk_fpll0_parent, 0, + 0x058, 16, 8, + CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO | + CLK_IS_CRITICAL, + 2); + +static DEFINE_SG2044_GATEABLE_DIV(CLK_DIV_TPU_SYS_MAIN, clk_div_tpu_sys_main, + clk_mpll2_parent, 0, + 0x054, 16, 8, + CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO | + CLK_IS_CRITICAL, + 1); + +static DEFINE_SG2044_GATEABLE_DIV(CLK_DIV_NOC_SYS_FIXED, clk_div_noc_sys_fixed, + clk_fpll0_parent, 0, + 0x070, 16, 8, + CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO | + CLK_IS_CRITICAL, + 1); + +static DEFINE_SG2044_GATEABLE_DIV(CLK_DIV_NOC_SYS_MAIN, clk_div_noc_sys_main, + clk_mpll3_parent, 0, + 0x06c, 16, 8, + CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO | + CLK_IS_CRITICAL, + 1); + +static DEFINE_SG2044_GATEABLE_DIV(CLK_DIV_VC_SRC0_FIXED, clk_div_vc_src0_fixed, + 
clk_fpll0_parent, 0, + 0x078, 16, 8, + CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO | + CLK_IS_CRITICAL, + 2); + +static DEFINE_SG2044_GATEABLE_DIV(CLK_DIV_VC_SRC0_MAIN, clk_div_vc_src0_main, + clk_mpll4_parent, 0, + 0x074, 16, 8, + CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO | + CLK_IS_CRITICAL, + 1); + +static DEFINE_SG2044_GATEABLE_DIV(CLK_DIV_VC_SRC1_FIXED, clk_div_vc_src1_fixed, + clk_fpll0_parent, 0, + 0x080, 16, 8, + CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO | + CLK_IS_CRITICAL, + 3); + +static DEFINE_SG2044_GATEABLE_DIV(CLK_DIV_VC_SRC1_MAIN, clk_div_vc_src1_main, + clk_mpll5_parent, 0, + 0x07c, 16, 8, + CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO | + CLK_IS_CRITICAL, + 1); + +static DEFINE_SG2044_GATEABLE_DIV(CLK_DIV_CXP_MAC_FIXED, clk_div_cxp_mac_fixed, + clk_fpll0_parent, 0, + 0x088, 16, 8, + CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO | + CLK_IS_CRITICAL, + 2); + +static DEFINE_SG2044_GATEABLE_DIV(CLK_DIV_CXP_MAC_MAIN, clk_div_cxp_mac_main, + clk_fpll1_parent, 0, + 0x084, 16, 8, + CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO | + CLK_IS_CRITICAL, + 1); + +static DEFINE_SG2044_DIV_RO(CLK_DIV_DDR0_FIXED, clk_div_ddr0_fixed, + clk_fpll0_parent, 0, + 0x124, 16, 8, + CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO, + 2); + +static DEFINE_SG2044_DIV_RO(CLK_DIV_DDR0_MAIN, clk_div_ddr0_main, + clk_dpll0_parent, 0, + 0x120, 16, 8, + CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO, + 1); + +static DEFINE_SG2044_DIV_RO(CLK_DIV_DDR1_FIXED, clk_div_ddr1_fixed, + clk_fpll0_parent, 0, + 0x12c, 16, 8, + CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO, + 2); + +static DEFINE_SG2044_DIV_RO(CLK_DIV_DDR1_MAIN, clk_div_ddr1_main, + clk_dpll1_parent, 0, + 0x128, 16, 8, + CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO, + 1); + +static DEFINE_SG2044_DIV_RO(CLK_DIV_DDR2_FIXED, clk_div_ddr2_fixed, + clk_fpll0_parent, 0, + 0x134, 16, 8, + CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO, + 2); + +static DEFINE_SG2044_DIV_RO(CLK_DIV_DDR2_MAIN, clk_div_ddr2_main, + clk_dpll2_parent, 0, + 0x130, 16, 8, + CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO, + 1); + +static DEFINE_SG2044_DIV_RO(CLK_DIV_DDR3_FIXED, clk_div_ddr3_fixed, + clk_fpll0_parent, 0, + 0x13c, 16, 8, + CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO, + 2); + +static DEFINE_SG2044_DIV_RO(CLK_DIV_DDR3_MAIN, clk_div_ddr3_main, + clk_dpll3_parent, 0, + 0x138, 16, 8, + CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO, + 1); + +static DEFINE_SG2044_DIV_RO(CLK_DIV_DDR4_FIXED, clk_div_ddr4_fixed, + clk_fpll0_parent, 0, + 0x144, 16, 8, + CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO, + 2); + +static DEFINE_SG2044_DIV_RO(CLK_DIV_DDR4_MAIN, clk_div_ddr4_main, + clk_dpll4_parent, 0, + 0x140, 16, 8, + CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO, + 1); + +static DEFINE_SG2044_DIV_RO(CLK_DIV_DDR5_FIXED, clk_div_ddr5_fixed, + clk_fpll0_parent, 0, + 0x14c, 16, 8, + CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO, + 2); + +static DEFINE_SG2044_DIV_RO(CLK_DIV_DDR5_MAIN, clk_div_ddr5_main, + clk_dpll5_parent, 0, + 0x148, 16, 8, + CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO, + 1); + +static DEFINE_SG2044_DIV_RO(CLK_DIV_DDR6_FIXED, clk_div_ddr6_fixed, + clk_fpll0_parent, 0, + 0x154, 16, 8, + CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO, + 2); + +static DEFINE_SG2044_DIV_RO(CLK_DIV_DDR6_MAIN, clk_div_ddr6_main, + clk_dpll6_parent, 0, + 0x150, 16, 8, + CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO, + 1); + +static DEFINE_SG2044_DIV_RO(CLK_DIV_DDR7_FIXED, clk_div_ddr7_fixed, + clk_fpll0_parent, 0, + 0x15c, 16, 8, + CLK_DIVIDER_ONE_BASED | 
CLK_DIVIDER_ALLOW_ZERO, + 2); + +static DEFINE_SG2044_DIV_RO(CLK_DIV_DDR7_MAIN, clk_div_ddr7_main, + clk_dpll7_parent, 0, + 0x158, 16, 8, + CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO, + 1); + +static DEFINE_SG2044_DIV_PDATA(CLK_DIV_TOP_50M, clk_div_top_50m, + clk_fpll0_parent, 0, + 0x048, 16, 8, + CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO, + 40); + +static const struct clk_hw *clk_div_top_50m_parent[] = { + &clk_div_top_50m.common.hw, +}; + +static DEFINE_SG2044_DIV_RO(CLK_DIV_TOP_AXI0, clk_div_top_axi0, + clk_fpll0_parent, 0, + 0x118, 16, 8, + CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO, + 20); + +static const struct clk_hw *clk_div_top_axi0_parent[] = { + &clk_div_top_axi0.common.hw, +}; + +static DEFINE_SG2044_DIV_PDATA(CLK_DIV_TOP_AXI_HSPERI, clk_div_top_axi_hsperi, + clk_fpll0_parent, 0, + 0x11c, 16, 8, + CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO, + 8); + +static const struct clk_hw *clk_div_top_axi_hsperi_parent[] = { + &clk_div_top_axi_hsperi.common.hw, +}; + +static DEFINE_SG2044_DIV(CLK_DIV_TIMER0, clk_div_timer0, + clk_div_top_50m_parent, 0, + 0x0d0, 16, 16, + CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO, + 1); + +static DEFINE_SG2044_DIV(CLK_DIV_TIMER1, clk_div_timer1, + clk_div_top_50m_parent, 0, + 0x0d4, 16, 16, + CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO, + 1); + +static DEFINE_SG2044_DIV(CLK_DIV_TIMER2, clk_div_timer2, + clk_div_top_50m_parent, 0, + 0x0d8, 16, 16, + CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO, + 1); + +static DEFINE_SG2044_DIV(CLK_DIV_TIMER3, clk_div_timer3, + clk_div_top_50m_parent, 0, + 0x0dc, 16, 16, + CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO, + 1); + +static DEFINE_SG2044_DIV(CLK_DIV_TIMER4, clk_div_timer4, + clk_div_top_50m_parent, 0, + 0x0e0, 16, 16, + CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO, + 1); + +static DEFINE_SG2044_DIV(CLK_DIV_TIMER5, clk_div_timer5, + clk_div_top_50m_parent, 0, + 0x0e4, 16, 16, + CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO, + 1); + +static DEFINE_SG2044_DIV(CLK_DIV_TIMER6, clk_div_timer6, + clk_div_top_50m_parent, 0, + 0x0e8, 16, 16, + CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO, + 1); + +static DEFINE_SG2044_DIV(CLK_DIV_TIMER7, clk_div_timer7, + clk_div_top_50m_parent, 0, + 0x0ec, 16, 16, + CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO, + 1); + +static DEFINE_SG2044_DIV_PDATA(CLK_DIV_CXP_TEST_PHY, clk_div_cxp_test_phy, + clk_fpll0_parent, 0, + 0x064, 16, 8, + CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO, + 1); + +static DEFINE_SG2044_DIV_PDATA(CLK_DIV_CXP_TEST_ETH_PHY, clk_div_cxp_test_eth_phy, + clk_fpll2_parent, 0, + 0x068, 16, 8, + CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO, + 1); + +static DEFINE_SG2044_DIV_PDATA(CLK_DIV_C2C0_TEST_PHY, clk_div_c2c0_test_phy, + clk_fpll0_parent, 0, + 0x05c, 16, 8, + CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO, + 1); + +static DEFINE_SG2044_DIV_PDATA(CLK_DIV_C2C1_TEST_PHY, clk_div_c2c1_test_phy, + clk_fpll0_parent, 0, + 0x060, 16, 8, + CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO, + 1); + +static DEFINE_SG2044_DIV_PDATA(CLK_DIV_PCIE_1G, clk_div_pcie_1g, + clk_fpll1_parent, 0, + 0x160, 16, 8, + CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO, + 1); + +static DEFINE_SG2044_DIV_PDATA(CLK_DIV_UART_500M, clk_div_uart_500m, + clk_fpll0_parent, 0, + 0x0cc, 16, 8, + CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO, + 4); + +static DEFINE_SG2044_DIV(CLK_DIV_GPIO_DB, clk_div_gpio_db, + clk_div_top_axi0_parent, 0, + 0x0f8, 16, 16, + CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO, + 1000); + +static DEFINE_SG2044_DIV_PDATA(CLK_DIV_SD, clk_div_sd, + 
clk_fpll0_parent, 0, + 0x110, 16, 16, + CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO, + 5); + +static DEFINE_SG2044_DIV(CLK_DIV_SD_100K, clk_div_sd_100k, + clk_div_top_axi0_parent, 0, + 0x114, 16, 16, + CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO, + 1000); + +static DEFINE_SG2044_DIV_PDATA(CLK_DIV_EMMC, clk_div_emmc, + clk_fpll0_parent, 0, + 0x108, 16, 16, + CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO, + 5); + +static DEFINE_SG2044_DIV(CLK_DIV_EMMC_100K, clk_div_emmc_100k, + clk_div_top_axi0_parent, 0, + 0x10c, 16, 16, + CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO, + 1000); + +static DEFINE_SG2044_DIV_PDATA(CLK_DIV_EFUSE, clk_div_efuse, + clk_fpll0_parent, 0, + 0x0f4, 16, 8, + CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO, + 80); + +static DEFINE_SG2044_DIV_PDATA(CLK_DIV_TX_ETH0, clk_div_tx_eth0, + clk_fpll0_parent, 0, + 0x0fc, 16, 8, + CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO, + 16); + +static DEFINE_SG2044_DIV_PDATA(CLK_DIV_PTP_REF_I_ETH0, clk_div_ptp_ref_i_eth0, + clk_fpll0_parent, 0, + 0x100, 16, 8, + CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO, + 40); + +static DEFINE_SG2044_DIV_PDATA(CLK_DIV_REF_ETH0, clk_div_ref_eth0, + clk_fpll0_parent, 0, + 0x104, 16, 8, + CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO, + 80); + +static DEFINE_SG2044_DIV_PDATA(CLK_DIV_PKA, clk_div_pka, + clk_fpll0_parent, 0, + 0x0f0, 16, 8, + CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO, + 2); + +static const struct clk_parent_data clk_mux_ddr0_parents[] = { + { .hw = &clk_div_ddr0_fixed.common.hw }, + { .hw = &clk_div_ddr0_main.common.hw }, +}; + +static DEFINE_SG2044_MUX(CLK_MUX_DDR0, clk_mux_ddr0, + clk_mux_ddr0_parents, + CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT, + 0x020, 7, sg2044_mux_table, CLK_MUX_READ_ONLY); + +static const struct clk_parent_data clk_mux_ddr1_parents[] = { + { .hw = &clk_div_ddr1_fixed.common.hw }, + { .hw = &clk_div_ddr1_main.common.hw }, +}; + +static DEFINE_SG2044_MUX(CLK_MUX_DDR1, clk_mux_ddr1, + clk_mux_ddr1_parents, + CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT, + 0x020, 8, sg2044_mux_table, CLK_MUX_READ_ONLY); + +static const struct clk_parent_data clk_mux_ddr2_parents[] = { + { .hw = &clk_div_ddr2_fixed.common.hw }, + { .hw = &clk_div_ddr2_main.common.hw }, +}; + +static DEFINE_SG2044_MUX(CLK_MUX_DDR2, clk_mux_ddr2, + clk_mux_ddr2_parents, + CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT, + 0x020, 9, sg2044_mux_table, CLK_MUX_READ_ONLY); + +static const struct clk_parent_data clk_mux_ddr3_parents[] = { + { .hw = &clk_div_ddr3_fixed.common.hw }, + { .hw = &clk_div_ddr3_main.common.hw }, +}; + +static DEFINE_SG2044_MUX(CLK_MUX_DDR3, clk_mux_ddr3, + clk_mux_ddr3_parents, + CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT, + 0x020, 10, sg2044_mux_table, CLK_MUX_READ_ONLY); + +static const struct clk_parent_data clk_mux_ddr4_parents[] = { + { .hw = &clk_div_ddr4_fixed.common.hw }, + { .hw = &clk_div_ddr4_main.common.hw }, +}; + +static DEFINE_SG2044_MUX(CLK_MUX_DDR4, clk_mux_ddr4, + clk_mux_ddr4_parents, + CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT, + 0x020, 11, sg2044_mux_table, CLK_MUX_READ_ONLY); + +static const struct clk_parent_data clk_mux_ddr5_parents[] = { + { .hw = &clk_div_ddr5_fixed.common.hw }, + { .hw = &clk_div_ddr5_main.common.hw }, +}; + +static DEFINE_SG2044_MUX(CLK_MUX_DDR5, clk_mux_ddr5, + clk_mux_ddr5_parents, + CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT, + 0x020, 12, sg2044_mux_table, CLK_MUX_READ_ONLY); + +static const struct clk_parent_data clk_mux_ddr6_parents[] = { + { .hw = &clk_div_ddr6_fixed.common.hw }, + { .hw = 
&clk_div_ddr6_main.common.hw }, +}; + +static DEFINE_SG2044_MUX(CLK_MUX_DDR6, clk_mux_ddr6, + clk_mux_ddr6_parents, + CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT, + 0x020, 13, sg2044_mux_table, CLK_MUX_READ_ONLY); + +static const struct clk_parent_data clk_mux_ddr7_parents[] = { + { .hw = &clk_div_ddr7_fixed.common.hw }, + { .hw = &clk_div_ddr7_main.common.hw }, +}; + +static DEFINE_SG2044_MUX(CLK_MUX_DDR7, clk_mux_ddr7, + clk_mux_ddr7_parents, + CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT, + 0x020, 14, sg2044_mux_table, CLK_MUX_READ_ONLY); + +static const struct clk_parent_data clk_mux_noc_sys_parents[] = { + { .hw = &clk_div_noc_sys_fixed.common.hw }, + { .hw = &clk_div_noc_sys_main.common.hw }, +}; + +static DEFINE_SG2044_MUX(CLK_MUX_NOC_SYS, clk_mux_noc_sys, + clk_mux_noc_sys_parents, + CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT, + 0x020, 3, sg2044_mux_table, 0); + +static const struct clk_parent_data clk_mux_tpu_sys_parents[] = { + { .hw = &clk_div_tpu_sys_fixed.common.hw }, + { .hw = &clk_div_tpu_sys_main.common.hw }, +}; + +static DEFINE_SG2044_MUX(CLK_MUX_TPU_SYS, clk_mux_tpu_sys, + clk_mux_tpu_sys_parents, + CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT, + 0x020, 2, sg2044_mux_table, 0); + +static const struct clk_parent_data clk_mux_rp_sys_parents[] = { + { .hw = &clk_div_rp_sys_fixed.common.hw }, + { .hw = &clk_div_rp_sys_main.common.hw }, +}; + +static DEFINE_SG2044_MUX(CLK_MUX_RP_SYS, clk_mux_rp_sys, + clk_mux_rp_sys_parents, + CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT, + 0x020, 1, sg2044_mux_table, 0); + +static const struct clk_parent_data clk_mux_ap_sys_parents[] = { + { .hw = &clk_div_ap_sys_fixed.common.hw }, + { .hw = &clk_div_ap_sys_main.common.hw }, +}; + +static DEFINE_SG2044_MUX(CLK_MUX_AP_SYS, clk_mux_ap_sys, + clk_mux_ap_sys_parents, + CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT, + 0x020, 0, sg2044_mux_table, 0); + +static const struct clk_parent_data clk_mux_vc_src0_parents[] = { + { .hw = &clk_div_vc_src0_fixed.common.hw }, + { .hw = &clk_div_vc_src0_main.common.hw }, +}; + +static DEFINE_SG2044_MUX(CLK_MUX_VC_SRC0, clk_mux_vc_src0, + clk_mux_vc_src0_parents, + CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT, + 0x020, 4, sg2044_mux_table, 0); + +static const struct clk_parent_data clk_mux_vc_src1_parents[] = { + { .hw = &clk_div_vc_src1_fixed.common.hw }, + { .hw = &clk_div_vc_src1_main.common.hw }, +}; + +static DEFINE_SG2044_MUX(CLK_MUX_VC_SRC1, clk_mux_vc_src1, + clk_mux_vc_src1_parents, + CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT, + 0x020, 5, sg2044_mux_table, 0); + +static const struct clk_parent_data clk_mux_cxp_mac_parents[] = { + { .hw = &clk_div_cxp_mac_fixed.common.hw }, + { .hw = &clk_div_cxp_mac_main.common.hw }, +}; + +static DEFINE_SG2044_MUX(CLK_MUX_CXP_MAC, clk_mux_cxp_mac, + clk_mux_cxp_mac_parents, + CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT, + 0x020, 6, sg2044_mux_table, 0); + +static const struct clk_hw *clk_gate_ap_sys_parent[] = { + &clk_mux_ap_sys.common.hw, +}; + +static DEFINE_SG2044_GATE(CLK_GATE_AP_SYS, clk_gate_ap_sys, + clk_gate_ap_sys_parent, + CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, + 0x000, 0, 0); + +static const struct clk_hw *clk_gate_rp_sys_parent[] = { + &clk_mux_rp_sys.common.hw, +}; + +static DEFINE_SG2044_GATE(CLK_GATE_RP_SYS, clk_gate_rp_sys, + clk_gate_rp_sys_parent, + CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, + 0x000, 2, 0); + +static const struct clk_hw *clk_gate_tpu_sys_parent[] = { + &clk_mux_tpu_sys.common.hw, +}; + +static DEFINE_SG2044_GATE(CLK_GATE_TPU_SYS, clk_gate_tpu_sys, + 
clk_gate_tpu_sys_parent, + CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, + 0x000, 3, 0); + +static const struct clk_hw *clk_gate_noc_sys_parent[] = { + &clk_mux_noc_sys.common.hw, +}; + +static DEFINE_SG2044_GATE(CLK_GATE_NOC_SYS, clk_gate_noc_sys, + clk_gate_noc_sys_parent, + CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, + 0x000, 8, 0); + +static const struct clk_hw *clk_gate_vc_src0_parent[] = { + &clk_mux_vc_src0.common.hw, +}; + +static DEFINE_SG2044_GATE(CLK_GATE_VC_SRC0, clk_gate_vc_src0, + clk_gate_vc_src0_parent, + CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, + 0x000, 9, 0); + +static const struct clk_hw *clk_gate_vc_src1_parent[] = { + &clk_mux_vc_src1.common.hw, +}; + +static DEFINE_SG2044_GATE(CLK_GATE_VC_SRC1, clk_gate_vc_src1, + clk_gate_vc_src1_parent, + CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, + 0x000, 10, 0); + +static const struct clk_hw *clk_gate_ddr0_parent[] = { + &clk_mux_ddr0.common.hw, +}; + +static DEFINE_SG2044_GATE(CLK_GATE_DDR0, clk_gate_ddr0, + clk_gate_ddr0_parent, + CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, + 0x008, 7, 0); + +static const struct clk_hw *clk_gate_ddr1_parent[] = { + &clk_mux_ddr1.common.hw, +}; + +static DEFINE_SG2044_GATE(CLK_GATE_DDR1, clk_gate_ddr1, + clk_gate_ddr1_parent, + CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, + 0x008, 8, 0); + +static const struct clk_hw *clk_gate_ddr2_parent[] = { + &clk_mux_ddr2.common.hw, +}; + +static DEFINE_SG2044_GATE(CLK_GATE_DDR2, clk_gate_ddr2, + clk_gate_ddr2_parent, + CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, + 0x008, 9, 0); + +static const struct clk_hw *clk_gate_ddr3_parent[] = { + &clk_mux_ddr3.common.hw, +}; + +static DEFINE_SG2044_GATE(CLK_GATE_DDR3, clk_gate_ddr3, + clk_gate_ddr3_parent, + CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, + 0x008, 10, 0); + +static const struct clk_hw *clk_gate_ddr4_parent[] = { + &clk_mux_ddr4.common.hw, +}; + +static DEFINE_SG2044_GATE(CLK_GATE_DDR4, clk_gate_ddr4, + clk_gate_ddr4_parent, + CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, + 0x008, 11, 0); + +static const struct clk_hw *clk_gate_ddr5_parent[] = { + &clk_mux_ddr5.common.hw, +}; + +static DEFINE_SG2044_GATE(CLK_GATE_DDR5, clk_gate_ddr5, + clk_gate_ddr5_parent, + CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, + 0x008, 12, 0); + +static const struct clk_hw *clk_gate_ddr6_parent[] = { + &clk_mux_ddr6.common.hw, +}; + +static DEFINE_SG2044_GATE(CLK_GATE_DDR6, clk_gate_ddr6, + clk_gate_ddr6_parent, + CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, + 0x008, 13, 0); + +static const struct clk_hw *clk_gate_ddr7_parent[] = { + &clk_mux_ddr7.common.hw, +}; + +static DEFINE_SG2044_GATE(CLK_GATE_DDR7, clk_gate_ddr7, + clk_gate_ddr7_parent, + CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, + 0x008, 14, 0); + +static const struct clk_hw *clk_gate_top_50m_parent[] = { + &clk_div_top_50m.common.hw, +}; + +static DEFINE_SG2044_GATE(CLK_GATE_TOP_50M, clk_gate_top_50m, + clk_gate_top_50m_parent, + CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, + 0x000, 1, 0); + +static const struct clk_hw *clk_gate_sc_rx_parent[] = { + &clk_div_top_50m.common.hw, +}; + +static DEFINE_SG2044_GATE(CLK_GATE_SC_RX, clk_gate_sc_rx, + clk_gate_sc_rx_parent, + CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, + 0x000, 12, 0); + +static const struct clk_hw *clk_gate_sc_rx_x0y1_parent[] = { + &clk_div_top_50m.common.hw, +}; + +static DEFINE_SG2044_GATE(CLK_GATE_SC_RX_X0Y1, clk_gate_sc_rx_x0y1, + clk_gate_sc_rx_x0y1_parent, + CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, + 0x000, 13, 0); + +static DEFINE_SG2044_GATE(CLK_GATE_TOP_AXI0, clk_gate_top_axi0, + clk_div_top_axi0_parent, + CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, + 0x008, 5, 0); + +static 
const struct clk_hw *clk_gate_mailbox_intc_parent[] = { + &clk_gate_top_axi0.common.hw, +}; + +static DEFINE_SG2044_GATE(CLK_GATE_INTC0, clk_gate_intc0, + clk_gate_mailbox_intc_parent, + CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, + 0x020, 20, 0); + +static DEFINE_SG2044_GATE(CLK_GATE_INTC1, clk_gate_intc1, + clk_gate_mailbox_intc_parent, + CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, + 0x020, 21, 0); + +static DEFINE_SG2044_GATE(CLK_GATE_INTC2, clk_gate_intc2, + clk_gate_mailbox_intc_parent, + CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, + 0x020, 22, 0); + +static DEFINE_SG2044_GATE(CLK_GATE_INTC3, clk_gate_intc3, + clk_gate_mailbox_intc_parent, + CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, + 0x020, 23, 0); + +static DEFINE_SG2044_GATE(CLK_GATE_MAILBOX0, clk_gate_mailbox0, + clk_gate_mailbox_intc_parent, + CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, + 0x020, 16, 0); + +static DEFINE_SG2044_GATE(CLK_GATE_MAILBOX1, clk_gate_mailbox1, + clk_gate_mailbox_intc_parent, + CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, + 0x020, 17, 0); + +static DEFINE_SG2044_GATE(CLK_GATE_MAILBOX2, clk_gate_mailbox2, + clk_gate_mailbox_intc_parent, + CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, + 0x020, 18, 0); + +static DEFINE_SG2044_GATE(CLK_GATE_MAILBOX3, clk_gate_mailbox3, + clk_gate_mailbox_intc_parent, + CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, + 0x020, 19, 0); + +static DEFINE_SG2044_GATE(CLK_GATE_TOP_AXI_HSPERI, clk_gate_top_axi_hsperi, + clk_div_top_axi_hsperi_parent, + CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, + 0x008, 6, 0); + +static DEFINE_SG2044_GATE(CLK_GATE_APB_TIMER, clk_gate_apb_timer, + clk_div_top_axi0_parent, + CLK_SET_RATE_PARENT, + 0x004, 7, 0); + +static const struct clk_hw *clk_gate_timer0_parent[] = { + &clk_div_timer0.common.hw, +}; + +static DEFINE_SG2044_GATE(CLK_GATE_TIMER0, clk_gate_timer0, + clk_gate_timer0_parent, + CLK_SET_RATE_PARENT, + 0x004, 8, 0); + +static const struct clk_hw *clk_gate_timer1_parent[] = { + &clk_div_timer1.common.hw, +}; + +static DEFINE_SG2044_GATE(CLK_GATE_TIMER1, clk_gate_timer1, + clk_gate_timer1_parent, + CLK_SET_RATE_PARENT, + 0x004, 9, 0); + +static const struct clk_hw *clk_gate_timer2_parent[] = { + &clk_div_timer2.common.hw, +}; + +static DEFINE_SG2044_GATE(CLK_GATE_TIMER2, clk_gate_timer2, + clk_gate_timer2_parent, + CLK_SET_RATE_PARENT, + 0x004, 10, 0); + +static const struct clk_hw *clk_gate_timer3_parent[] = { + &clk_div_timer3.common.hw, +}; + +static DEFINE_SG2044_GATE(CLK_GATE_TIMER3, clk_gate_timer3, + clk_gate_timer3_parent, + CLK_SET_RATE_PARENT, + 0x004, 11, 0); + +static const struct clk_hw *clk_gate_timer4_parent[] = { + &clk_div_timer4.common.hw, +}; + +static DEFINE_SG2044_GATE(CLK_GATE_TIMER4, clk_gate_timer4, + clk_gate_timer4_parent, + CLK_SET_RATE_PARENT, + 0x004, 12, 0); + +static const struct clk_hw *clk_gate_timer5_parent[] = { + &clk_div_timer5.common.hw, +}; + +static DEFINE_SG2044_GATE(CLK_GATE_TIMER5, clk_gate_timer5, + clk_gate_timer5_parent, + CLK_SET_RATE_PARENT, + 0x004, 13, 0); + +static const struct clk_hw *clk_gate_timer6_parent[] = { + &clk_div_timer6.common.hw, +}; + +static DEFINE_SG2044_GATE(CLK_GATE_TIMER6, clk_gate_timer6, + clk_gate_timer6_parent, + CLK_SET_RATE_PARENT, + 0x004, 14, 0); + +static const struct clk_hw *clk_gate_timer7_parent[] = { + &clk_div_timer7.common.hw, +}; + +static DEFINE_SG2044_GATE(CLK_GATE_TIMER7, clk_gate_timer7, + clk_gate_timer7_parent, + CLK_SET_RATE_PARENT, + 0x004, 15, 0); + +static DEFINE_SG2044_GATE(CLK_GATE_CXP_CFG, clk_gate_cxp_cfg, + clk_div_top_axi0_parent, + CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, + 0x000, 15, 0); + 
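The gate clocks defined in the hunk above are consumed through the standard clk API; a minimal consumer-side sketch (the "timer0" clock-names entry and the probe function are hypothetical, not part of this patch):

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

/*
 * Minimal consumer-side sketch: a hypothetical peripheral driver taking one
 * of the gate clocks defined above (e.g. the timer0 gate) by an assumed
 * "timer0" clock-names entry.  Only standard clk consumer API is used.
 */
static int example_timer_probe(struct platform_device *pdev)
{
	struct clk *clk;
	int ret;

	clk = devm_clk_get(&pdev->dev, "timer0");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_prepare_enable(clk);
	if (ret)
		return ret;

	dev_info(&pdev->dev, "timer clock at %lu Hz\n", clk_get_rate(clk));

	/* balanced by clk_disable_unprepare(clk) on remove */
	return 0;
}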
+static const struct clk_hw *clk_gate_cxp_mac_parent[] = { + &clk_mux_cxp_mac.common.hw, +}; + +static DEFINE_SG2044_GATE(CLK_GATE_CXP_MAC, clk_gate_cxp_mac, + clk_gate_cxp_mac_parent, + CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, + 0x000, 14, 0); + +static const struct clk_hw *clk_gate_cxp_test_phy_parent[] = { + &clk_div_cxp_test_phy.common.hw, +}; + +static DEFINE_SG2044_GATE(CLK_GATE_CXP_TEST_PHY, clk_gate_cxp_test_phy, + clk_gate_cxp_test_phy_parent, + CLK_SET_RATE_PARENT, + 0x000, 6, 0); + +static const struct clk_hw *clk_gate_cxp_test_eth_phy_parent[] = { + &clk_div_cxp_test_eth_phy.common.hw, +}; + +static DEFINE_SG2044_GATE(CLK_GATE_CXP_TEST_ETH_PHY, clk_gate_cxp_test_eth_phy, + clk_gate_cxp_test_eth_phy_parent, + CLK_SET_RATE_PARENT, + 0x000, 7, 0); + +static const struct clk_hw *clk_gate_pcie_1g_parent[] = { + &clk_div_pcie_1g.common.hw, +}; + +static DEFINE_SG2044_GATE(CLK_GATE_PCIE_1G, clk_gate_pcie_1g, + clk_gate_pcie_1g_parent, + CLK_SET_RATE_PARENT, + 0x008, 15, 0); + +static const struct clk_hw *clk_gate_c2c0_test_phy_parent[] = { + &clk_div_c2c0_test_phy.common.hw, +}; + +static DEFINE_SG2044_GATE(CLK_GATE_C2C0_TEST_PHY, clk_gate_c2c0_test_phy, + clk_gate_c2c0_test_phy_parent, + CLK_SET_RATE_PARENT, + 0x000, 4, 0); + +static const struct clk_hw *clk_gate_c2c1_test_phy_parent[] = { + &clk_div_c2c1_test_phy.common.hw, +}; + +static DEFINE_SG2044_GATE(CLK_GATE_C2C1_TEST_PHY, clk_gate_c2c1_test_phy, + clk_gate_c2c1_test_phy_parent, + CLK_SET_RATE_PARENT, + 0x000, 5, 0); + +static const struct clk_hw *clk_gate_uart_500m_parent[] = { + &clk_div_uart_500m.common.hw, +}; + +static DEFINE_SG2044_GATE(CLK_GATE_UART_500M, clk_gate_uart_500m, + clk_gate_uart_500m_parent, + CLK_SET_RATE_PARENT, + 0x004, 1, 0); + +static DEFINE_SG2044_GATE(CLK_GATE_APB_UART, clk_gate_apb_uart, + clk_div_top_axi_hsperi_parent, + CLK_SET_RATE_PARENT, + 0x004, 2, 0); + +static DEFINE_SG2044_GATE(CLK_GATE_APB_SPI, clk_gate_apb_spi, + clk_div_top_axi_hsperi_parent, + CLK_SET_RATE_PARENT, + 0x004, 22, 0); + +static DEFINE_SG2044_GATE(CLK_GATE_AHB_SPIFMC, clk_gate_ahb_spifmc, + clk_div_top_axi0_parent, + CLK_SET_RATE_PARENT, + 0x004, 5, 0); + +static DEFINE_SG2044_GATE(CLK_GATE_APB_I2C, clk_gate_apb_i2c, + clk_div_top_axi0_parent, + CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, + 0x004, 23, 0); + +static DEFINE_SG2044_GATE(CLK_GATE_AXI_DBG_I2C, clk_gate_axi_dbg_i2c, + clk_div_top_axi_hsperi_parent, + CLK_SET_RATE_PARENT, + 0x004, 3, 0); + +static const struct clk_hw *clk_gate_gpio_db_parent[] = { + &clk_div_gpio_db.common.hw, +}; + +static DEFINE_SG2044_GATE(CLK_GATE_GPIO_DB, clk_gate_gpio_db, + clk_gate_gpio_db_parent, + CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, + 0x004, 21, 0); + +static DEFINE_SG2044_GATE(CLK_GATE_APB_GPIO_INTR, clk_gate_apb_gpio_intr, + clk_div_top_axi0_parent, + CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, + 0x004, 20, 0); + +static DEFINE_SG2044_GATE(CLK_GATE_APB_GPIO, clk_gate_apb_gpio, + clk_div_top_axi0_parent, + CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, + 0x004, 19, 0); + +static const struct clk_hw *clk_gate_sd_parent[] = { + &clk_div_sd.common.hw, +}; + +static DEFINE_SG2044_GATE(CLK_GATE_SD, clk_gate_sd, + clk_gate_sd_parent, + CLK_SET_RATE_PARENT, + 0x008, 3, 0); + +static DEFINE_SG2044_GATE(CLK_GATE_AXI_SD, clk_gate_axi_sd, + clk_div_top_axi_hsperi_parent, + CLK_SET_RATE_PARENT, + 0x008, 2, 0); + +static const struct clk_hw *clk_gate_sd_100k_parent[] = { + &clk_div_sd_100k.common.hw, +}; + +static DEFINE_SG2044_GATE(CLK_GATE_SD_100K, clk_gate_sd_100k, + clk_gate_sd_100k_parent, + CLK_SET_RATE_PARENT, 
+ 0x008, 4, 0); + +static const struct clk_hw *clk_gate_emmc_parent[] = { + &clk_div_emmc.common.hw, +}; + +static DEFINE_SG2044_GATE(CLK_GATE_EMMC, clk_gate_emmc, + clk_gate_emmc_parent, + CLK_SET_RATE_PARENT, + 0x008, 0, 0); + +static DEFINE_SG2044_GATE(CLK_GATE_AXI_EMMC, clk_gate_axi_emmc, + clk_div_top_axi_hsperi_parent, + CLK_SET_RATE_PARENT, + 0x004, 31, 0); + +static const struct clk_hw *clk_gate_emmc_100k_parent[] = { + &clk_div_emmc_100k.common.hw, +}; + +static DEFINE_SG2044_GATE(CLK_GATE_EMMC_100K, clk_gate_emmc_100k, + clk_gate_emmc_100k_parent, + CLK_SET_RATE_PARENT, + 0x008, 1, 0); + +static const struct clk_hw *clk_gate_efuse_parent[] = { + &clk_div_efuse.common.hw, +}; + +static DEFINE_SG2044_GATE(CLK_GATE_EFUSE, clk_gate_efuse, + clk_gate_efuse_parent, + CLK_SET_RATE_PARENT, + 0x004, 17, 0); + +static DEFINE_SG2044_GATE(CLK_GATE_APB_EFUSE, clk_gate_apb_efuse, + clk_div_top_axi0_parent, + CLK_SET_RATE_PARENT, + 0x004, 18, 0); + +static DEFINE_SG2044_GATE(CLK_GATE_SYSDMA_AXI, clk_gate_sysdma_axi, + clk_div_top_axi_hsperi_parent, + CLK_SET_RATE_PARENT, + 0x004, 0, 0); + +static const struct clk_hw *clk_gate_tx_eth0_parent[] = { + &clk_div_tx_eth0.common.hw, +}; + +static DEFINE_SG2044_GATE(CLK_GATE_TX_ETH0, clk_gate_tx_eth0, + clk_gate_tx_eth0_parent, + CLK_SET_RATE_PARENT, + 0x004, 27, 0); + +static DEFINE_SG2044_GATE(CLK_GATE_AXI_ETH0, clk_gate_axi_eth0, + clk_div_top_axi_hsperi_parent, + CLK_SET_RATE_PARENT, + 0x004, 28, 0); + +static const struct clk_hw *clk_gate_ptp_ref_i_eth0_parent[] = { + &clk_div_ptp_ref_i_eth0.common.hw, +}; + +static DEFINE_SG2044_GATE(CLK_GATE_PTP_REF_I_ETH0, clk_gate_ptp_ref_i_eth0, + clk_gate_ptp_ref_i_eth0_parent, + CLK_SET_RATE_PARENT, + 0x004, 29, 0); + +static const struct clk_hw *clk_gate_ref_eth0_parent[] = { + &clk_div_ref_eth0.common.hw, +}; + +static DEFINE_SG2044_GATE(CLK_GATE_REF_ETH0, clk_gate_ref_eth0, + clk_gate_ref_eth0_parent, + CLK_SET_RATE_PARENT, + 0x004, 30, 0); + +static DEFINE_SG2044_GATE(CLK_GATE_APB_RTC, clk_gate_apb_rtc, + clk_div_top_axi0_parent, + CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, + 0x004, 26, 0); + +static DEFINE_SG2044_GATE(CLK_GATE_APB_PWM, clk_gate_apb_pwm, + clk_div_top_axi0_parent, + CLK_SET_RATE_PARENT, + 0x004, 25, 0); + +static DEFINE_SG2044_GATE(CLK_GATE_APB_WDT, clk_gate_apb_wdt, + clk_div_top_axi0_parent, + CLK_SET_RATE_PARENT, + 0x004, 24, 0); + +static DEFINE_SG2044_GATE(CLK_GATE_AXI_SRAM, clk_gate_axi_sram, + clk_div_top_axi0_parent, + CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, + 0x004, 6, 0); + +static DEFINE_SG2044_GATE(CLK_GATE_AHB_ROM, clk_gate_ahb_rom, + clk_div_top_axi0_parent, + CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, + 0x004, 4, 0); + +static const struct clk_hw *clk_gate_pka_parent[] = { + &clk_div_pka.common.hw, +}; + +static DEFINE_SG2044_GATE(CLK_GATE_PKA, clk_gate_pka, + clk_gate_pka_parent, + CLK_SET_RATE_PARENT, + 0x004, 16, 0); + +static struct sg2044_clk_common * const sg2044_div_commons[] = { + &clk_div_ap_sys_fixed.common, + &clk_div_ap_sys_main.common, + &clk_div_rp_sys_fixed.common, + &clk_div_rp_sys_main.common, + &clk_div_tpu_sys_fixed.common, + &clk_div_tpu_sys_main.common, + &clk_div_noc_sys_fixed.common, + &clk_div_noc_sys_main.common, + &clk_div_vc_src0_fixed.common, + &clk_div_vc_src0_main.common, + &clk_div_vc_src1_fixed.common, + &clk_div_vc_src1_main.common, + &clk_div_cxp_mac_fixed.common, + &clk_div_cxp_mac_main.common, + &clk_div_ddr0_fixed.common, + &clk_div_ddr0_main.common, + &clk_div_ddr1_fixed.common, + &clk_div_ddr1_main.common, + &clk_div_ddr2_fixed.common, + 
&clk_div_ddr2_main.common, + &clk_div_ddr3_fixed.common, + &clk_div_ddr3_main.common, + &clk_div_ddr4_fixed.common, + &clk_div_ddr4_main.common, + &clk_div_ddr5_fixed.common, + &clk_div_ddr5_main.common, + &clk_div_ddr6_fixed.common, + &clk_div_ddr6_main.common, + &clk_div_ddr7_fixed.common, + &clk_div_ddr7_main.common, + &clk_div_top_50m.common, + &clk_div_top_axi0.common, + &clk_div_top_axi_hsperi.common, + &clk_div_timer0.common, + &clk_div_timer1.common, + &clk_div_timer2.common, + &clk_div_timer3.common, + &clk_div_timer4.common, + &clk_div_timer5.common, + &clk_div_timer6.common, + &clk_div_timer7.common, + &clk_div_cxp_test_phy.common, + &clk_div_cxp_test_eth_phy.common, + &clk_div_c2c0_test_phy.common, + &clk_div_c2c1_test_phy.common, + &clk_div_pcie_1g.common, + &clk_div_uart_500m.common, + &clk_div_gpio_db.common, + &clk_div_sd.common, + &clk_div_sd_100k.common, + &clk_div_emmc.common, + &clk_div_emmc_100k.common, + &clk_div_efuse.common, + &clk_div_tx_eth0.common, + &clk_div_ptp_ref_i_eth0.common, + &clk_div_ref_eth0.common, + &clk_div_pka.common, +}; + +static struct sg2044_clk_common * const sg2044_mux_commons[] = { + &clk_mux_ddr0.common, + &clk_mux_ddr1.common, + &clk_mux_ddr2.common, + &clk_mux_ddr3.common, + &clk_mux_ddr4.common, + &clk_mux_ddr5.common, + &clk_mux_ddr6.common, + &clk_mux_ddr7.common, + &clk_mux_noc_sys.common, + &clk_mux_tpu_sys.common, + &clk_mux_rp_sys.common, + &clk_mux_ap_sys.common, + &clk_mux_vc_src0.common, + &clk_mux_vc_src1.common, + &clk_mux_cxp_mac.common, +}; + +static struct sg2044_clk_common * const sg2044_gate_commons[] = { + &clk_gate_ap_sys.common, + &clk_gate_rp_sys.common, + &clk_gate_tpu_sys.common, + &clk_gate_noc_sys.common, + &clk_gate_vc_src0.common, + &clk_gate_vc_src1.common, + &clk_gate_ddr0.common, + &clk_gate_ddr1.common, + &clk_gate_ddr2.common, + &clk_gate_ddr3.common, + &clk_gate_ddr4.common, + &clk_gate_ddr5.common, + &clk_gate_ddr6.common, + &clk_gate_ddr7.common, + &clk_gate_top_50m.common, + &clk_gate_sc_rx.common, + &clk_gate_sc_rx_x0y1.common, + &clk_gate_top_axi0.common, + &clk_gate_intc0.common, + &clk_gate_intc1.common, + &clk_gate_intc2.common, + &clk_gate_intc3.common, + &clk_gate_mailbox0.common, + &clk_gate_mailbox1.common, + &clk_gate_mailbox2.common, + &clk_gate_mailbox3.common, + &clk_gate_top_axi_hsperi.common, + &clk_gate_apb_timer.common, + &clk_gate_timer0.common, + &clk_gate_timer1.common, + &clk_gate_timer2.common, + &clk_gate_timer3.common, + &clk_gate_timer4.common, + &clk_gate_timer5.common, + &clk_gate_timer6.common, + &clk_gate_timer7.common, + &clk_gate_cxp_cfg.common, + &clk_gate_cxp_mac.common, + &clk_gate_cxp_test_phy.common, + &clk_gate_cxp_test_eth_phy.common, + &clk_gate_pcie_1g.common, + &clk_gate_c2c0_test_phy.common, + &clk_gate_c2c1_test_phy.common, + &clk_gate_uart_500m.common, + &clk_gate_apb_uart.common, + &clk_gate_apb_spi.common, + &clk_gate_ahb_spifmc.common, + &clk_gate_apb_i2c.common, + &clk_gate_axi_dbg_i2c.common, + &clk_gate_gpio_db.common, + &clk_gate_apb_gpio_intr.common, + &clk_gate_apb_gpio.common, + &clk_gate_sd.common, + &clk_gate_axi_sd.common, + &clk_gate_sd_100k.common, + &clk_gate_emmc.common, + &clk_gate_axi_emmc.common, + &clk_gate_emmc_100k.common, + &clk_gate_efuse.common, + &clk_gate_apb_efuse.common, + &clk_gate_sysdma_axi.common, + &clk_gate_tx_eth0.common, + &clk_gate_axi_eth0.common, + &clk_gate_ptp_ref_i_eth0.common, + &clk_gate_ref_eth0.common, + &clk_gate_apb_rtc.common, + &clk_gate_apb_pwm.common, + &clk_gate_apb_wdt.common, + &clk_gate_axi_sram.common, 
+ &clk_gate_ahb_rom.common, + &clk_gate_pka.common, +}; + +static void sg2044_clk_fix_init_parent(struct clk_hw **pdata, + const struct clk_init_data *init, + struct clk_hw_onecell_data *data) +{ + u8 i; + const struct clk_hw *hw; + const struct sg2044_clk_common *common; + + for (i = 0; i < init->num_parents; i++) { + hw = init->parent_hws[i]; + common = hw_to_sg2044_clk_common(hw); + + WARN(!data->hws[common->id], "clk %u is not register\n", + common->id); + pdata[i] = data->hws[common->id]; + } +} + +static int sg2044_clk_init_ctrl(struct device *dev, void __iomem *reg, + struct sg2044_clk_ctrl *ctrl, + const struct sg2044_clk_desc_data *desc) +{ + int ret, i; + struct clk_hw *hw; + + spin_lock_init(&ctrl->lock); + + for (i = 0; i < desc->num_div; i++) { + struct sg2044_clk_common *common = desc->div[i]; + + common->lock = &ctrl->lock; + common->base = reg; + + ret = devm_clk_hw_register(dev, &common->hw); + if (ret) + return ret; + + ctrl->data.hws[common->id] = &common->hw; + } + + for (i = 0; i < desc->num_mux; i++) { + struct sg2044_clk_common *common = desc->mux[i]; + struct sg2044_mux *mux = hw_to_sg2044_mux(&common->hw); + const struct clk_init_data *init = common->hw.init; + + common->lock = &ctrl->lock; + common->base = reg; + + hw = devm_clk_hw_register_mux_parent_data_table(dev, + init->name, + init->parent_data, + init->num_parents, + init->flags, + reg + mux->mux.offset, + mux->mux.shift, + 1, + mux->mux.flags, + mux->mux.table, + &ctrl->lock); + if (IS_ERR(hw)) + return PTR_ERR(hw); + + if (!(mux->mux.flags & CLK_MUX_READ_ONLY)) { + mux->nb.notifier_call = sg2044_mux_notifier_cb; + ret = devm_clk_notifier_register(dev, hw->clk, + &mux->nb); + if (ret < 0) + return dev_err_probe(dev, ret, + "%s: failed to register notifier\n", + clk_hw_get_name(hw)); + } + + ctrl->data.hws[common->id] = hw; + } + + for (i = 0; i < desc->num_gate; i++) { + struct sg2044_clk_common *common = desc->gate[i]; + struct sg2044_gate *gate = hw_to_sg2044_gate(&common->hw); + const struct clk_init_data *init = common->hw.init; + struct clk_hw *parent_hws[1] = { }; + + sg2044_clk_fix_init_parent(parent_hws, init, &ctrl->data); + common->lock = &ctrl->lock; + common->base = reg; + + hw = devm_clk_hw_register_gate_parent_hw(dev, init->name, + parent_hws[0], + init->flags, + reg + gate->gate.offset, + gate->gate.shift, + gate->gate.flags, + &ctrl->lock); + if (IS_ERR(hw)) + return PTR_ERR(hw); + + ctrl->data.hws[common->id] = hw; + } + + return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, + &ctrl->data); +} + +static int sg2044_clk_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct sg2044_clk_ctrl *ctrl; + const struct sg2044_clk_desc_data *desc; + void __iomem *reg; + u32 num_clks; + + reg = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(reg)) + return PTR_ERR(reg); + + desc = device_get_match_data(dev); + if (!desc) + return dev_err_probe(dev, -EINVAL, "no match data for platform\n"); + + num_clks = desc->num_div + desc->num_gate + desc->num_mux; + + ctrl = devm_kzalloc(dev, struct_size(ctrl, data.hws, num_clks), GFP_KERNEL); + if (!ctrl) + return -ENOMEM; + + ctrl->data.num = num_clks; + + return sg2044_clk_init_ctrl(dev, reg, ctrl, desc); +} + +static const struct sg2044_clk_desc_data sg2044_clk_desc_data = { + .div = sg2044_div_commons, + .mux = sg2044_mux_commons, + .gate = sg2044_gate_commons, + .num_div = ARRAY_SIZE(sg2044_div_commons), + .num_mux = ARRAY_SIZE(sg2044_mux_commons), + .num_gate = ARRAY_SIZE(sg2044_gate_commons), +}; + +static const 
struct of_device_id sg2044_clk_match[] = { + { .compatible = "sophgo,sg2044-clk", .data = &sg2044_clk_desc_data }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, sg2044_clk_match); + +static struct platform_driver sg2044_clk_driver = { + .probe = sg2044_clk_probe, + .driver = { + .name = "sg2044-clk", + .of_match_table = sg2044_clk_match, + }, +}; +module_platform_driver(sg2044_clk_driver); + +MODULE_AUTHOR("Inochi Amaoto <inochiama@gmail.com>"); +MODULE_DESCRIPTION("Sophgo SG2044 clock driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/clk/spacemit/Kconfig b/drivers/clk/spacemit/Kconfig new file mode 100644 index 000000000000..4c4df845b3cb --- /dev/null +++ b/drivers/clk/spacemit/Kconfig @@ -0,0 +1,18 @@ +# SPDX-License-Identifier: GPL-2.0-only + +config SPACEMIT_CCU + tristate "Clock support for SpacemiT SoCs" + depends on ARCH_SPACEMIT || COMPILE_TEST + select MFD_SYSCON + help + Say Y to enable clock controller unit support for SpacemiT SoCs. + +if SPACEMIT_CCU + +config SPACEMIT_K1_CCU + tristate "Support for SpacemiT K1 SoC" + depends on ARCH_SPACEMIT || COMPILE_TEST + help + Support for clock controller unit in SpacemiT K1 SoC. + +endif diff --git a/drivers/clk/spacemit/Makefile b/drivers/clk/spacemit/Makefile new file mode 100644 index 000000000000..5ec6da61db98 --- /dev/null +++ b/drivers/clk/spacemit/Makefile @@ -0,0 +1,5 @@ +# SPDX-License-Identifier: GPL-2.0 + +obj-$(CONFIG_SPACEMIT_K1_CCU) = spacemit-ccu-k1.o +spacemit-ccu-k1-y = ccu_pll.o ccu_mix.o ccu_ddn.o +spacemit-ccu-k1-y += ccu-k1.o diff --git a/drivers/clk/spacemit/ccu-k1.c b/drivers/clk/spacemit/ccu-k1.c new file mode 100644 index 000000000000..cdde37a05235 --- /dev/null +++ b/drivers/clk/spacemit/ccu-k1.c @@ -0,0 +1,1164 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2024 SpacemiT Technology Co. 
Ltd + * Copyright (c) 2024-2025 Haylen Chu <heylenay@4d2.org> + */ + +#include <linux/array_size.h> +#include <linux/clk-provider.h> +#include <linux/delay.h> +#include <linux/mfd/syscon.h> +#include <linux/minmax.h> +#include <linux/module.h> +#include <linux/platform_device.h> + +#include "ccu_common.h" +#include "ccu_pll.h" +#include "ccu_mix.h" +#include "ccu_ddn.h" + +#include <dt-bindings/clock/spacemit,k1-syscon.h> + +/* APBS register offset */ +#define APBS_PLL1_SWCR1 0x100 +#define APBS_PLL1_SWCR2 0x104 +#define APBS_PLL1_SWCR3 0x108 +#define APBS_PLL2_SWCR1 0x118 +#define APBS_PLL2_SWCR2 0x11c +#define APBS_PLL2_SWCR3 0x120 +#define APBS_PLL3_SWCR1 0x124 +#define APBS_PLL3_SWCR2 0x128 +#define APBS_PLL3_SWCR3 0x12c + +/* MPMU register offset */ +#define MPMU_POSR 0x0010 +#define POSR_PLL1_LOCK BIT(27) +#define POSR_PLL2_LOCK BIT(28) +#define POSR_PLL3_LOCK BIT(29) +#define MPMU_SUCCR 0x0014 +#define MPMU_ISCCR 0x0044 +#define MPMU_WDTPCR 0x0200 +#define MPMU_RIPCCR 0x0210 +#define MPMU_ACGR 0x1024 +#define MPMU_APBCSCR 0x1050 +#define MPMU_SUCCR_1 0x10b0 + +/* APBC register offset */ +#define APBC_UART1_CLK_RST 0x00 +#define APBC_UART2_CLK_RST 0x04 +#define APBC_GPIO_CLK_RST 0x08 +#define APBC_PWM0_CLK_RST 0x0c +#define APBC_PWM1_CLK_RST 0x10 +#define APBC_PWM2_CLK_RST 0x14 +#define APBC_PWM3_CLK_RST 0x18 +#define APBC_TWSI8_CLK_RST 0x20 +#define APBC_UART3_CLK_RST 0x24 +#define APBC_RTC_CLK_RST 0x28 +#define APBC_TWSI0_CLK_RST 0x2c +#define APBC_TWSI1_CLK_RST 0x30 +#define APBC_TIMERS1_CLK_RST 0x34 +#define APBC_TWSI2_CLK_RST 0x38 +#define APBC_AIB_CLK_RST 0x3c +#define APBC_TWSI4_CLK_RST 0x40 +#define APBC_TIMERS2_CLK_RST 0x44 +#define APBC_ONEWIRE_CLK_RST 0x48 +#define APBC_TWSI5_CLK_RST 0x4c +#define APBC_DRO_CLK_RST 0x58 +#define APBC_IR_CLK_RST 0x5c +#define APBC_TWSI6_CLK_RST 0x60 +#define APBC_COUNTER_CLK_SEL 0x64 +#define APBC_TWSI7_CLK_RST 0x68 +#define APBC_TSEN_CLK_RST 0x6c +#define APBC_UART4_CLK_RST 0x70 +#define APBC_UART5_CLK_RST 0x74 +#define APBC_UART6_CLK_RST 0x78 +#define APBC_SSP3_CLK_RST 0x7c +#define APBC_SSPA0_CLK_RST 0x80 +#define APBC_SSPA1_CLK_RST 0x84 +#define APBC_IPC_AP2AUD_CLK_RST 0x90 +#define APBC_UART7_CLK_RST 0x94 +#define APBC_UART8_CLK_RST 0x98 +#define APBC_UART9_CLK_RST 0x9c +#define APBC_CAN0_CLK_RST 0xa0 +#define APBC_PWM4_CLK_RST 0xa8 +#define APBC_PWM5_CLK_RST 0xac +#define APBC_PWM6_CLK_RST 0xb0 +#define APBC_PWM7_CLK_RST 0xb4 +#define APBC_PWM8_CLK_RST 0xb8 +#define APBC_PWM9_CLK_RST 0xbc +#define APBC_PWM10_CLK_RST 0xc0 +#define APBC_PWM11_CLK_RST 0xc4 +#define APBC_PWM12_CLK_RST 0xc8 +#define APBC_PWM13_CLK_RST 0xcc +#define APBC_PWM14_CLK_RST 0xd0 +#define APBC_PWM15_CLK_RST 0xd4 +#define APBC_PWM16_CLK_RST 0xd8 +#define APBC_PWM17_CLK_RST 0xdc +#define APBC_PWM18_CLK_RST 0xe0 +#define APBC_PWM19_CLK_RST 0xe4 + +/* APMU register offset */ +#define APMU_JPG_CLK_RES_CTRL 0x020 +#define APMU_CSI_CCIC2_CLK_RES_CTRL 0x024 +#define APMU_ISP_CLK_RES_CTRL 0x038 +#define APMU_LCD_CLK_RES_CTRL1 0x044 +#define APMU_LCD_SPI_CLK_RES_CTRL 0x048 +#define APMU_LCD_CLK_RES_CTRL2 0x04c +#define APMU_CCIC_CLK_RES_CTRL 0x050 +#define APMU_SDH0_CLK_RES_CTRL 0x054 +#define APMU_SDH1_CLK_RES_CTRL 0x058 +#define APMU_USB_CLK_RES_CTRL 0x05c +#define APMU_QSPI_CLK_RES_CTRL 0x060 +#define APMU_DMA_CLK_RES_CTRL 0x064 +#define APMU_AES_CLK_RES_CTRL 0x068 +#define APMU_VPU_CLK_RES_CTRL 0x0a4 +#define APMU_GPU_CLK_RES_CTRL 0x0cc +#define APMU_SDH2_CLK_RES_CTRL 0x0e0 +#define APMU_PMUA_MC_CTRL 0x0e8 +#define APMU_PMU_CC2_AP 0x100 +#define APMU_PMUA_EM_CLK_RES_CTRL 
0x104 +#define APMU_AUDIO_CLK_RES_CTRL 0x14c +#define APMU_HDMI_CLK_RES_CTRL 0x1b8 +#define APMU_CCI550_CLK_CTRL 0x300 +#define APMU_ACLK_CLK_CTRL 0x388 +#define APMU_CPU_C0_CLK_CTRL 0x38C +#define APMU_CPU_C1_CLK_CTRL 0x390 +#define APMU_PCIE_CLK_RES_CTRL_0 0x3cc +#define APMU_PCIE_CLK_RES_CTRL_1 0x3d4 +#define APMU_PCIE_CLK_RES_CTRL_2 0x3dc +#define APMU_EMAC0_CLK_RES_CTRL 0x3e4 +#define APMU_EMAC1_CLK_RES_CTRL 0x3ec + +struct spacemit_ccu_data { + struct clk_hw **hws; + size_t num; +}; + +/* APBS clocks start, APBS region contains and only contains all PLL clocks */ + +/* + * PLL{1,2} must run at fixed frequencies to provide clocks in correct rates for + * peripherals. + */ +static const struct ccu_pll_rate_tbl pll1_rate_tbl[] = { + CCU_PLL_RATE(2457600000UL, 0x0050dd64, 0x330ccccd), +}; + +static const struct ccu_pll_rate_tbl pll2_rate_tbl[] = { + CCU_PLL_RATE(3000000000UL, 0x0050dd66, 0x3fe00000), +}; + +static const struct ccu_pll_rate_tbl pll3_rate_tbl[] = { + CCU_PLL_RATE(1600000000UL, 0x0050cd61, 0x43eaaaab), + CCU_PLL_RATE(1800000000UL, 0x0050cd61, 0x4b000000), + CCU_PLL_RATE(2000000000UL, 0x0050dd62, 0x2aeaaaab), + CCU_PLL_RATE(2457600000UL, 0x0050dd64, 0x330ccccd), + CCU_PLL_RATE(3000000000UL, 0x0050dd66, 0x3fe00000), + CCU_PLL_RATE(3200000000UL, 0x0050dd67, 0x43eaaaab), +}; + +CCU_PLL_DEFINE(pll1, pll1_rate_tbl, APBS_PLL1_SWCR1, APBS_PLL1_SWCR3, MPMU_POSR, POSR_PLL1_LOCK, + CLK_SET_RATE_GATE); +CCU_PLL_DEFINE(pll2, pll2_rate_tbl, APBS_PLL2_SWCR1, APBS_PLL2_SWCR3, MPMU_POSR, POSR_PLL2_LOCK, + CLK_SET_RATE_GATE); +CCU_PLL_DEFINE(pll3, pll3_rate_tbl, APBS_PLL3_SWCR1, APBS_PLL3_SWCR3, MPMU_POSR, POSR_PLL3_LOCK, + CLK_SET_RATE_GATE); + +CCU_FACTOR_GATE_DEFINE(pll1_d2, CCU_PARENT_HW(pll1), APBS_PLL1_SWCR2, BIT(1), 2, 1); +CCU_FACTOR_GATE_DEFINE(pll1_d3, CCU_PARENT_HW(pll1), APBS_PLL1_SWCR2, BIT(2), 3, 1); +CCU_FACTOR_GATE_DEFINE(pll1_d4, CCU_PARENT_HW(pll1), APBS_PLL1_SWCR2, BIT(3), 4, 1); +CCU_FACTOR_GATE_DEFINE(pll1_d5, CCU_PARENT_HW(pll1), APBS_PLL1_SWCR2, BIT(4), 5, 1); +CCU_FACTOR_GATE_DEFINE(pll1_d6, CCU_PARENT_HW(pll1), APBS_PLL1_SWCR2, BIT(5), 6, 1); +CCU_FACTOR_GATE_DEFINE(pll1_d7, CCU_PARENT_HW(pll1), APBS_PLL1_SWCR2, BIT(6), 7, 1); +CCU_FACTOR_GATE_DEFINE(pll1_d8, CCU_PARENT_HW(pll1), APBS_PLL1_SWCR2, BIT(7), 8, 1); +CCU_FACTOR_GATE_DEFINE(pll1_d11_223p4, CCU_PARENT_HW(pll1), APBS_PLL1_SWCR2, BIT(15), 11, 1); +CCU_FACTOR_GATE_DEFINE(pll1_d13_189, CCU_PARENT_HW(pll1), APBS_PLL1_SWCR2, BIT(16), 13, 1); +CCU_FACTOR_GATE_DEFINE(pll1_d23_106p8, CCU_PARENT_HW(pll1), APBS_PLL1_SWCR2, BIT(20), 23, 1); +CCU_FACTOR_GATE_DEFINE(pll1_d64_38p4, CCU_PARENT_HW(pll1), APBS_PLL1_SWCR2, BIT(0), 64, 1); +CCU_FACTOR_GATE_DEFINE(pll1_aud_245p7, CCU_PARENT_HW(pll1), APBS_PLL1_SWCR2, BIT(10), 10, 1); +CCU_FACTOR_GATE_DEFINE(pll1_aud_24p5, CCU_PARENT_HW(pll1), APBS_PLL1_SWCR2, BIT(11), 100, 1); + +CCU_FACTOR_GATE_DEFINE(pll2_d1, CCU_PARENT_HW(pll2), APBS_PLL2_SWCR2, BIT(0), 1, 1); +CCU_FACTOR_GATE_DEFINE(pll2_d2, CCU_PARENT_HW(pll2), APBS_PLL2_SWCR2, BIT(1), 2, 1); +CCU_FACTOR_GATE_DEFINE(pll2_d3, CCU_PARENT_HW(pll2), APBS_PLL2_SWCR2, BIT(2), 3, 1); +CCU_FACTOR_GATE_DEFINE(pll2_d4, CCU_PARENT_HW(pll2), APBS_PLL2_SWCR2, BIT(3), 4, 1); +CCU_FACTOR_GATE_DEFINE(pll2_d5, CCU_PARENT_HW(pll2), APBS_PLL2_SWCR2, BIT(4), 5, 1); +CCU_FACTOR_GATE_DEFINE(pll2_d6, CCU_PARENT_HW(pll2), APBS_PLL2_SWCR2, BIT(5), 6, 1); +CCU_FACTOR_GATE_DEFINE(pll2_d7, CCU_PARENT_HW(pll2), APBS_PLL2_SWCR2, BIT(6), 7, 1); +CCU_FACTOR_GATE_DEFINE(pll2_d8, CCU_PARENT_HW(pll2), APBS_PLL2_SWCR2, BIT(7), 8, 1); + 
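The CCU_FACTOR_GATE_DEFINE entries above are fixed-ratio children of the PLLs, and the rate suffixes used by the MPMU-level names that follow (307p2, 409p6, ...) are simply the parent rate scaled by mult/div. A worked sketch, assuming pll1 is locked at the single 2457600000 Hz entry of pll1_rate_tbl:

/*
 * Worked example of the fixed-factor arithmetic behind the CCU_FACTOR_*
 * clocks, assuming pll1 is locked at the 2457600000 Hz entry of
 * pll1_rate_tbl.  Illustration only, not part of the driver.
 */
static unsigned long ccu_factor_rate(unsigned long parent_rate,
				     unsigned int mult, unsigned int div)
{
	return parent_rate * mult / div;
}

/*
 * pll1_d8:  2457600000 * 1 / 8  = 307200000 Hz  ("pll1_d8_307p2")
 * pll1_d6:  2457600000 * 1 / 6  = 409600000 Hz  ("pll1_d6_409p6")
 * pll1_d13: 2457600000 * 1 / 13 = 189046153 Hz  ("pll1_d13_189")
 */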
+CCU_FACTOR_GATE_DEFINE(pll3_d1, CCU_PARENT_HW(pll3), APBS_PLL3_SWCR2, BIT(0), 1, 1); +CCU_FACTOR_GATE_DEFINE(pll3_d2, CCU_PARENT_HW(pll3), APBS_PLL3_SWCR2, BIT(1), 2, 1); +CCU_FACTOR_GATE_DEFINE(pll3_d3, CCU_PARENT_HW(pll3), APBS_PLL3_SWCR2, BIT(2), 3, 1); +CCU_FACTOR_GATE_DEFINE(pll3_d4, CCU_PARENT_HW(pll3), APBS_PLL3_SWCR2, BIT(3), 4, 1); +CCU_FACTOR_GATE_DEFINE(pll3_d5, CCU_PARENT_HW(pll3), APBS_PLL3_SWCR2, BIT(4), 5, 1); +CCU_FACTOR_GATE_DEFINE(pll3_d6, CCU_PARENT_HW(pll3), APBS_PLL3_SWCR2, BIT(5), 6, 1); +CCU_FACTOR_GATE_DEFINE(pll3_d7, CCU_PARENT_HW(pll3), APBS_PLL3_SWCR2, BIT(6), 7, 1); +CCU_FACTOR_GATE_DEFINE(pll3_d8, CCU_PARENT_HW(pll3), APBS_PLL3_SWCR2, BIT(7), 8, 1); + +CCU_FACTOR_DEFINE(pll3_20, CCU_PARENT_HW(pll3_d8), 20, 1); +CCU_FACTOR_DEFINE(pll3_40, CCU_PARENT_HW(pll3_d8), 10, 1); +CCU_FACTOR_DEFINE(pll3_80, CCU_PARENT_HW(pll3_d8), 5, 1); + +/* APBS clocks end */ + +/* MPMU clocks start */ +CCU_GATE_DEFINE(pll1_d8_307p2, CCU_PARENT_HW(pll1_d8), MPMU_ACGR, BIT(13), 0); + +CCU_FACTOR_DEFINE(pll1_d32_76p8, CCU_PARENT_HW(pll1_d8_307p2), 4, 1); + +CCU_FACTOR_DEFINE(pll1_d40_61p44, CCU_PARENT_HW(pll1_d8_307p2), 5, 1); + +CCU_FACTOR_DEFINE(pll1_d16_153p6, CCU_PARENT_HW(pll1_d8), 2, 1); +CCU_FACTOR_GATE_DEFINE(pll1_d24_102p4, CCU_PARENT_HW(pll1_d8), MPMU_ACGR, BIT(12), 3, 1); +CCU_FACTOR_GATE_DEFINE(pll1_d48_51p2, CCU_PARENT_HW(pll1_d8), MPMU_ACGR, BIT(7), 6, 1); +CCU_FACTOR_GATE_DEFINE(pll1_d48_51p2_ap, CCU_PARENT_HW(pll1_d8), MPMU_ACGR, BIT(11), 6, 1); +CCU_FACTOR_GATE_DEFINE(pll1_m3d128_57p6, CCU_PARENT_HW(pll1_d8), MPMU_ACGR, BIT(8), 16, 3); +CCU_FACTOR_GATE_DEFINE(pll1_d96_25p6, CCU_PARENT_HW(pll1_d8), MPMU_ACGR, BIT(4), 12, 1); +CCU_FACTOR_GATE_DEFINE(pll1_d192_12p8, CCU_PARENT_HW(pll1_d8), MPMU_ACGR, BIT(3), 24, 1); +CCU_FACTOR_GATE_DEFINE(pll1_d192_12p8_wdt, CCU_PARENT_HW(pll1_d8), MPMU_ACGR, BIT(19), 24, 1); +CCU_FACTOR_GATE_DEFINE(pll1_d384_6p4, CCU_PARENT_HW(pll1_d8), MPMU_ACGR, BIT(2), 48, 1); + +CCU_FACTOR_DEFINE(pll1_d768_3p2, CCU_PARENT_HW(pll1_d384_6p4), 2, 1); +CCU_FACTOR_DEFINE(pll1_d1536_1p6, CCU_PARENT_HW(pll1_d384_6p4), 4, 1); +CCU_FACTOR_DEFINE(pll1_d3072_0p8, CCU_PARENT_HW(pll1_d384_6p4), 8, 1); + +CCU_GATE_DEFINE(pll1_d6_409p6, CCU_PARENT_HW(pll1_d6), MPMU_ACGR, BIT(0), 0); +CCU_FACTOR_GATE_DEFINE(pll1_d12_204p8, CCU_PARENT_HW(pll1_d6), MPMU_ACGR, BIT(5), 2, 1); + +CCU_GATE_DEFINE(pll1_d5_491p52, CCU_PARENT_HW(pll1_d5), MPMU_ACGR, BIT(21), 0); +CCU_FACTOR_GATE_DEFINE(pll1_d10_245p76, CCU_PARENT_HW(pll1_d5), MPMU_ACGR, BIT(18), 2, 1); + +CCU_GATE_DEFINE(pll1_d4_614p4, CCU_PARENT_HW(pll1_d4), MPMU_ACGR, BIT(15), 0); +CCU_FACTOR_GATE_DEFINE(pll1_d52_47p26, CCU_PARENT_HW(pll1_d4), MPMU_ACGR, BIT(10), 13, 1); +CCU_FACTOR_GATE_DEFINE(pll1_d78_31p5, CCU_PARENT_HW(pll1_d4), MPMU_ACGR, BIT(6), 39, 2); + +CCU_GATE_DEFINE(pll1_d3_819p2, CCU_PARENT_HW(pll1_d3), MPMU_ACGR, BIT(14), 0); + +CCU_GATE_DEFINE(pll1_d2_1228p8, CCU_PARENT_HW(pll1_d2), MPMU_ACGR, BIT(16), 0); + +CCU_GATE_DEFINE(slow_uart, CCU_PARENT_NAME(osc), MPMU_ACGR, BIT(1), CLK_IGNORE_UNUSED); +CCU_DDN_DEFINE(slow_uart1_14p74, pll1_d16_153p6, MPMU_SUCCR, 16, 13, 0, 13, 0); +CCU_DDN_DEFINE(slow_uart2_48, pll1_d4_614p4, MPMU_SUCCR_1, 16, 13, 0, 13, 0); + +CCU_GATE_DEFINE(wdt_clk, CCU_PARENT_HW(pll1_d96_25p6), MPMU_WDTPCR, BIT(1), 0); + +CCU_FACTOR_GATE_DEFINE(i2s_sysclk, CCU_PARENT_HW(pll1_d16_153p6), MPMU_ISCCR, BIT(31), 50, 1); +CCU_FACTOR_GATE_DEFINE(i2s_bclk, CCU_PARENT_HW(i2s_sysclk), MPMU_ISCCR, BIT(29), 1, 1); + +static const struct clk_parent_data apb_parents[] = { + CCU_PARENT_HW(pll1_d96_25p6), + 
CCU_PARENT_HW(pll1_d48_51p2), + CCU_PARENT_HW(pll1_d96_25p6), + CCU_PARENT_HW(pll1_d24_102p4), +}; +CCU_MUX_DEFINE(apb_clk, apb_parents, MPMU_APBCSCR, 0, 2, 0); + +CCU_GATE_DEFINE(wdt_bus_clk, CCU_PARENT_HW(apb_clk), MPMU_WDTPCR, BIT(0), 0); + +CCU_GATE_DEFINE(ripc_clk, CCU_PARENT_HW(apb_clk), MPMU_RIPCCR, 0x1, 0); +/* MPMU clocks end */ + +/* APBC clocks start */ +static const struct clk_parent_data uart_clk_parents[] = { + CCU_PARENT_HW(pll1_m3d128_57p6), + CCU_PARENT_HW(slow_uart1_14p74), + CCU_PARENT_HW(slow_uart2_48), +}; +CCU_MUX_GATE_DEFINE(uart0_clk, uart_clk_parents, APBC_UART1_CLK_RST, 4, 3, BIT(1), 0); +CCU_MUX_GATE_DEFINE(uart2_clk, uart_clk_parents, APBC_UART2_CLK_RST, 4, 3, BIT(1), 0); +CCU_MUX_GATE_DEFINE(uart3_clk, uart_clk_parents, APBC_UART3_CLK_RST, 4, 3, BIT(1), 0); +CCU_MUX_GATE_DEFINE(uart4_clk, uart_clk_parents, APBC_UART4_CLK_RST, 4, 3, BIT(1), 0); +CCU_MUX_GATE_DEFINE(uart5_clk, uart_clk_parents, APBC_UART5_CLK_RST, 4, 3, BIT(1), 0); +CCU_MUX_GATE_DEFINE(uart6_clk, uart_clk_parents, APBC_UART6_CLK_RST, 4, 3, BIT(1), 0); +CCU_MUX_GATE_DEFINE(uart7_clk, uart_clk_parents, APBC_UART7_CLK_RST, 4, 3, BIT(1), 0); +CCU_MUX_GATE_DEFINE(uart8_clk, uart_clk_parents, APBC_UART8_CLK_RST, 4, 3, BIT(1), 0); +CCU_MUX_GATE_DEFINE(uart9_clk, uart_clk_parents, APBC_UART9_CLK_RST, 4, 3, BIT(1), 0); + +CCU_GATE_DEFINE(gpio_clk, CCU_PARENT_NAME(vctcxo_24m), APBC_GPIO_CLK_RST, BIT(1), 0); + +static const struct clk_parent_data pwm_parents[] = { + CCU_PARENT_HW(pll1_d192_12p8), + CCU_PARENT_NAME(osc), +}; +CCU_MUX_GATE_DEFINE(pwm0_clk, pwm_parents, APBC_PWM0_CLK_RST, 4, 3, BIT(1), 0); +CCU_MUX_GATE_DEFINE(pwm1_clk, pwm_parents, APBC_PWM1_CLK_RST, 4, 3, BIT(1), 0); +CCU_MUX_GATE_DEFINE(pwm2_clk, pwm_parents, APBC_PWM2_CLK_RST, 4, 3, BIT(1), 0); +CCU_MUX_GATE_DEFINE(pwm3_clk, pwm_parents, APBC_PWM3_CLK_RST, 4, 3, BIT(1), 0); +CCU_MUX_GATE_DEFINE(pwm4_clk, pwm_parents, APBC_PWM4_CLK_RST, 4, 3, BIT(1), 0); +CCU_MUX_GATE_DEFINE(pwm5_clk, pwm_parents, APBC_PWM5_CLK_RST, 4, 3, BIT(1), 0); +CCU_MUX_GATE_DEFINE(pwm6_clk, pwm_parents, APBC_PWM6_CLK_RST, 4, 3, BIT(1), 0); +CCU_MUX_GATE_DEFINE(pwm7_clk, pwm_parents, APBC_PWM7_CLK_RST, 4, 3, BIT(1), 0); +CCU_MUX_GATE_DEFINE(pwm8_clk, pwm_parents, APBC_PWM8_CLK_RST, 4, 3, BIT(1), 0); +CCU_MUX_GATE_DEFINE(pwm9_clk, pwm_parents, APBC_PWM9_CLK_RST, 4, 3, BIT(1), 0); +CCU_MUX_GATE_DEFINE(pwm10_clk, pwm_parents, APBC_PWM10_CLK_RST, 4, 3, BIT(1), 0); +CCU_MUX_GATE_DEFINE(pwm11_clk, pwm_parents, APBC_PWM11_CLK_RST, 4, 3, BIT(1), 0); +CCU_MUX_GATE_DEFINE(pwm12_clk, pwm_parents, APBC_PWM12_CLK_RST, 4, 3, BIT(1), 0); +CCU_MUX_GATE_DEFINE(pwm13_clk, pwm_parents, APBC_PWM13_CLK_RST, 4, 3, BIT(1), 0); +CCU_MUX_GATE_DEFINE(pwm14_clk, pwm_parents, APBC_PWM14_CLK_RST, 4, 3, BIT(1), 0); +CCU_MUX_GATE_DEFINE(pwm15_clk, pwm_parents, APBC_PWM15_CLK_RST, 4, 3, BIT(1), 0); +CCU_MUX_GATE_DEFINE(pwm16_clk, pwm_parents, APBC_PWM16_CLK_RST, 4, 3, BIT(1), 0); +CCU_MUX_GATE_DEFINE(pwm17_clk, pwm_parents, APBC_PWM17_CLK_RST, 4, 3, BIT(1), 0); +CCU_MUX_GATE_DEFINE(pwm18_clk, pwm_parents, APBC_PWM18_CLK_RST, 4, 3, BIT(1), 0); +CCU_MUX_GATE_DEFINE(pwm19_clk, pwm_parents, APBC_PWM19_CLK_RST, 4, 3, BIT(1), 0); + +static const struct clk_parent_data ssp_parents[] = { + CCU_PARENT_HW(pll1_d384_6p4), + CCU_PARENT_HW(pll1_d192_12p8), + CCU_PARENT_HW(pll1_d96_25p6), + CCU_PARENT_HW(pll1_d48_51p2), + CCU_PARENT_HW(pll1_d768_3p2), + CCU_PARENT_HW(pll1_d1536_1p6), + CCU_PARENT_HW(pll1_d3072_0p8), +}; +CCU_MUX_GATE_DEFINE(ssp3_clk, ssp_parents, APBC_SSP3_CLK_RST, 4, 3, BIT(1), 0); + 
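Each CCU_MUX_GATE_DEFINE above packs the parent selector (shift 4, width 3) and the function-clock gate (BIT(1)) into one APBC *_CLK_RST register, while the matching *_bus_clk gates further down use BIT(0). A decode sketch under that assumed layout (mask names are illustrative, not from the patch):

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/printk.h>
#include <linux/types.h>

/*
 * Decode sketch for the APBC *_CLK_RST layout implied by the macro
 * arguments above: mux select at bits [6:4], function-clock gate at bit 1,
 * bus-clock gate at bit 0.  Field names are assumptions for illustration.
 */
#define APBC_CLK_RST_MUX	GENMASK(6, 4)
#define APBC_CLK_RST_FNCLK_EN	BIT(1)
#define APBC_CLK_RST_BUSCLK_EN	BIT(0)

static void apbc_clk_rst_decode(u32 val)
{
	unsigned int sel = FIELD_GET(APBC_CLK_RST_MUX, val);

	pr_info("parent %u, fnclk %s, busclk %s\n", sel,
		val & APBC_CLK_RST_FNCLK_EN ? "on" : "off",
		val & APBC_CLK_RST_BUSCLK_EN ? "on" : "off");
}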
+CCU_GATE_DEFINE(rtc_clk, CCU_PARENT_NAME(osc), APBC_RTC_CLK_RST, + BIT(7) | BIT(1), 0); + +static const struct clk_parent_data twsi_parents[] = { + CCU_PARENT_HW(pll1_d78_31p5), + CCU_PARENT_HW(pll1_d48_51p2), + CCU_PARENT_HW(pll1_d40_61p44), +}; +CCU_MUX_GATE_DEFINE(twsi0_clk, twsi_parents, APBC_TWSI0_CLK_RST, 4, 3, BIT(1), 0); +CCU_MUX_GATE_DEFINE(twsi1_clk, twsi_parents, APBC_TWSI1_CLK_RST, 4, 3, BIT(1), 0); +CCU_MUX_GATE_DEFINE(twsi2_clk, twsi_parents, APBC_TWSI2_CLK_RST, 4, 3, BIT(1), 0); +CCU_MUX_GATE_DEFINE(twsi4_clk, twsi_parents, APBC_TWSI4_CLK_RST, 4, 3, BIT(1), 0); +CCU_MUX_GATE_DEFINE(twsi5_clk, twsi_parents, APBC_TWSI5_CLK_RST, 4, 3, BIT(1), 0); +CCU_MUX_GATE_DEFINE(twsi6_clk, twsi_parents, APBC_TWSI6_CLK_RST, 4, 3, BIT(1), 0); +CCU_MUX_GATE_DEFINE(twsi7_clk, twsi_parents, APBC_TWSI7_CLK_RST, 4, 3, BIT(1), 0); +/* + * APBC_TWSI8_CLK_RST has a quirk that reading always results in zero. + * Combine functional and bus bits together as a gate to avoid sharing the + * write-only register between different clock hardwares. + */ +CCU_GATE_DEFINE(twsi8_clk, CCU_PARENT_HW(pll1_d78_31p5), APBC_TWSI8_CLK_RST, BIT(1) | BIT(0), 0); + +static const struct clk_parent_data timer_parents[] = { + CCU_PARENT_HW(pll1_d192_12p8), + CCU_PARENT_NAME(osc), + CCU_PARENT_HW(pll1_d384_6p4), + CCU_PARENT_NAME(vctcxo_3m), + CCU_PARENT_NAME(vctcxo_1m), +}; +CCU_MUX_GATE_DEFINE(timers1_clk, timer_parents, APBC_TIMERS1_CLK_RST, 4, 3, BIT(1), 0); +CCU_MUX_GATE_DEFINE(timers2_clk, timer_parents, APBC_TIMERS2_CLK_RST, 4, 3, BIT(1), 0); + +CCU_GATE_DEFINE(aib_clk, CCU_PARENT_NAME(vctcxo_24m), APBC_AIB_CLK_RST, BIT(1), 0); + +CCU_GATE_DEFINE(onewire_clk, CCU_PARENT_NAME(vctcxo_24m), APBC_ONEWIRE_CLK_RST, BIT(1), 0); + +static const struct clk_parent_data sspa_parents[] = { + CCU_PARENT_HW(pll1_d384_6p4), + CCU_PARENT_HW(pll1_d192_12p8), + CCU_PARENT_HW(pll1_d96_25p6), + CCU_PARENT_HW(pll1_d48_51p2), + CCU_PARENT_HW(pll1_d768_3p2), + CCU_PARENT_HW(pll1_d1536_1p6), + CCU_PARENT_HW(pll1_d3072_0p8), + CCU_PARENT_HW(i2s_bclk), +}; +CCU_MUX_GATE_DEFINE(sspa0_clk, sspa_parents, APBC_SSPA0_CLK_RST, 4, 3, BIT(1), 0); +CCU_MUX_GATE_DEFINE(sspa1_clk, sspa_parents, APBC_SSPA1_CLK_RST, 4, 3, BIT(1), 0); +CCU_GATE_DEFINE(dro_clk, CCU_PARENT_HW(apb_clk), APBC_DRO_CLK_RST, BIT(1), 0); +CCU_GATE_DEFINE(ir_clk, CCU_PARENT_HW(apb_clk), APBC_IR_CLK_RST, BIT(1), 0); +CCU_GATE_DEFINE(tsen_clk, CCU_PARENT_HW(apb_clk), APBC_TSEN_CLK_RST, BIT(1), 0); +CCU_GATE_DEFINE(ipc_ap2aud_clk, CCU_PARENT_HW(apb_clk), APBC_IPC_AP2AUD_CLK_RST, BIT(1), 0); + +static const struct clk_parent_data can_parents[] = { + CCU_PARENT_HW(pll3_20), + CCU_PARENT_HW(pll3_40), + CCU_PARENT_HW(pll3_80), +}; +CCU_MUX_GATE_DEFINE(can0_clk, can_parents, APBC_CAN0_CLK_RST, 4, 3, BIT(1), 0); +CCU_GATE_DEFINE(can0_bus_clk, CCU_PARENT_NAME(vctcxo_24m), APBC_CAN0_CLK_RST, BIT(0), 0); + +CCU_GATE_DEFINE(uart0_bus_clk, CCU_PARENT_HW(apb_clk), APBC_UART1_CLK_RST, BIT(0), 0); +CCU_GATE_DEFINE(uart2_bus_clk, CCU_PARENT_HW(apb_clk), APBC_UART2_CLK_RST, BIT(0), 0); +CCU_GATE_DEFINE(uart3_bus_clk, CCU_PARENT_HW(apb_clk), APBC_UART3_CLK_RST, BIT(0), 0); +CCU_GATE_DEFINE(uart4_bus_clk, CCU_PARENT_HW(apb_clk), APBC_UART4_CLK_RST, BIT(0), 0); +CCU_GATE_DEFINE(uart5_bus_clk, CCU_PARENT_HW(apb_clk), APBC_UART5_CLK_RST, BIT(0), 0); +CCU_GATE_DEFINE(uart6_bus_clk, CCU_PARENT_HW(apb_clk), APBC_UART6_CLK_RST, BIT(0), 0); +CCU_GATE_DEFINE(uart7_bus_clk, CCU_PARENT_HW(apb_clk), APBC_UART7_CLK_RST, BIT(0), 0); +CCU_GATE_DEFINE(uart8_bus_clk, CCU_PARENT_HW(apb_clk), APBC_UART8_CLK_RST, BIT(0), 
0); +CCU_GATE_DEFINE(uart9_bus_clk, CCU_PARENT_HW(apb_clk), APBC_UART9_CLK_RST, BIT(0), 0); + +CCU_GATE_DEFINE(gpio_bus_clk, CCU_PARENT_HW(apb_clk), APBC_GPIO_CLK_RST, BIT(0), 0); + +CCU_GATE_DEFINE(pwm0_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM0_CLK_RST, BIT(0), 0); +CCU_GATE_DEFINE(pwm1_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM1_CLK_RST, BIT(0), 0); +CCU_GATE_DEFINE(pwm2_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM2_CLK_RST, BIT(0), 0); +CCU_GATE_DEFINE(pwm3_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM3_CLK_RST, BIT(0), 0); +CCU_GATE_DEFINE(pwm4_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM4_CLK_RST, BIT(0), 0); +CCU_GATE_DEFINE(pwm5_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM5_CLK_RST, BIT(0), 0); +CCU_GATE_DEFINE(pwm6_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM6_CLK_RST, BIT(0), 0); +CCU_GATE_DEFINE(pwm7_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM7_CLK_RST, BIT(0), 0); +CCU_GATE_DEFINE(pwm8_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM8_CLK_RST, BIT(0), 0); +CCU_GATE_DEFINE(pwm9_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM9_CLK_RST, BIT(0), 0); +CCU_GATE_DEFINE(pwm10_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM10_CLK_RST, BIT(0), 0); +CCU_GATE_DEFINE(pwm11_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM11_CLK_RST, BIT(0), 0); +CCU_GATE_DEFINE(pwm12_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM12_CLK_RST, BIT(0), 0); +CCU_GATE_DEFINE(pwm13_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM13_CLK_RST, BIT(0), 0); +CCU_GATE_DEFINE(pwm14_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM14_CLK_RST, BIT(0), 0); +CCU_GATE_DEFINE(pwm15_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM15_CLK_RST, BIT(0), 0); +CCU_GATE_DEFINE(pwm16_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM16_CLK_RST, BIT(0), 0); +CCU_GATE_DEFINE(pwm17_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM17_CLK_RST, BIT(0), 0); +CCU_GATE_DEFINE(pwm18_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM18_CLK_RST, BIT(0), 0); +CCU_GATE_DEFINE(pwm19_bus_clk, CCU_PARENT_HW(apb_clk), APBC_PWM19_CLK_RST, BIT(0), 0); + +CCU_GATE_DEFINE(ssp3_bus_clk, CCU_PARENT_HW(apb_clk), APBC_SSP3_CLK_RST, BIT(0), 0); + +CCU_GATE_DEFINE(rtc_bus_clk, CCU_PARENT_HW(apb_clk), APBC_RTC_CLK_RST, BIT(0), 0); + +CCU_GATE_DEFINE(twsi0_bus_clk, CCU_PARENT_HW(apb_clk), APBC_TWSI0_CLK_RST, BIT(0), 0); +CCU_GATE_DEFINE(twsi1_bus_clk, CCU_PARENT_HW(apb_clk), APBC_TWSI1_CLK_RST, BIT(0), 0); +CCU_GATE_DEFINE(twsi2_bus_clk, CCU_PARENT_HW(apb_clk), APBC_TWSI2_CLK_RST, BIT(0), 0); +CCU_GATE_DEFINE(twsi4_bus_clk, CCU_PARENT_HW(apb_clk), APBC_TWSI4_CLK_RST, BIT(0), 0); +CCU_GATE_DEFINE(twsi5_bus_clk, CCU_PARENT_HW(apb_clk), APBC_TWSI5_CLK_RST, BIT(0), 0); +CCU_GATE_DEFINE(twsi6_bus_clk, CCU_PARENT_HW(apb_clk), APBC_TWSI6_CLK_RST, BIT(0), 0); +CCU_GATE_DEFINE(twsi7_bus_clk, CCU_PARENT_HW(apb_clk), APBC_TWSI7_CLK_RST, BIT(0), 0); +/* Placeholder to workaround quirk of the register */ +CCU_FACTOR_DEFINE(twsi8_bus_clk, CCU_PARENT_HW(apb_clk), 1, 1); + +CCU_GATE_DEFINE(timers1_bus_clk, CCU_PARENT_HW(apb_clk), APBC_TIMERS1_CLK_RST, BIT(0), 0); +CCU_GATE_DEFINE(timers2_bus_clk, CCU_PARENT_HW(apb_clk), APBC_TIMERS2_CLK_RST, BIT(0), 0); + +CCU_GATE_DEFINE(aib_bus_clk, CCU_PARENT_HW(apb_clk), APBC_AIB_CLK_RST, BIT(0), 0); + +CCU_GATE_DEFINE(onewire_bus_clk, CCU_PARENT_HW(apb_clk), APBC_ONEWIRE_CLK_RST, BIT(0), 0); + +CCU_GATE_DEFINE(sspa0_bus_clk, CCU_PARENT_HW(apb_clk), APBC_SSPA0_CLK_RST, BIT(0), 0); +CCU_GATE_DEFINE(sspa1_bus_clk, CCU_PARENT_HW(apb_clk), APBC_SSPA1_CLK_RST, BIT(0), 0); + +CCU_GATE_DEFINE(tsen_bus_clk, CCU_PARENT_HW(apb_clk), APBC_TSEN_CLK_RST, BIT(0), 0); + +CCU_GATE_DEFINE(ipc_ap2aud_bus_clk, CCU_PARENT_HW(apb_clk), APBC_IPC_AP2AUD_CLK_RST, 
BIT(0), 0); +/* APBC clocks end */ + +/* APMU clocks start */ +static const struct clk_parent_data pmua_aclk_parents[] = { + CCU_PARENT_HW(pll1_d10_245p76), + CCU_PARENT_HW(pll1_d8_307p2), +}; +CCU_MUX_DIV_FC_DEFINE(pmua_aclk, pmua_aclk_parents, APMU_ACLK_CLK_CTRL, 1, 2, BIT(4), 0, 1, 0); + +static const struct clk_parent_data cci550_clk_parents[] = { + CCU_PARENT_HW(pll1_d5_491p52), + CCU_PARENT_HW(pll1_d4_614p4), + CCU_PARENT_HW(pll1_d3_819p2), + CCU_PARENT_HW(pll2_d3), +}; +CCU_MUX_DIV_FC_DEFINE(cci550_clk, cci550_clk_parents, APMU_CCI550_CLK_CTRL, 8, 3, BIT(12), 0, 2, + CLK_IS_CRITICAL); + +static const struct clk_parent_data cpu_c0_hi_clk_parents[] = { + CCU_PARENT_HW(pll3_d2), + CCU_PARENT_HW(pll3_d1), +}; +CCU_MUX_DEFINE(cpu_c0_hi_clk, cpu_c0_hi_clk_parents, APMU_CPU_C0_CLK_CTRL, 13, 1, 0); +static const struct clk_parent_data cpu_c0_clk_parents[] = { + CCU_PARENT_HW(pll1_d4_614p4), + CCU_PARENT_HW(pll1_d3_819p2), + CCU_PARENT_HW(pll1_d6_409p6), + CCU_PARENT_HW(pll1_d5_491p52), + CCU_PARENT_HW(pll1_d2_1228p8), + CCU_PARENT_HW(pll3_d3), + CCU_PARENT_HW(pll2_d3), + CCU_PARENT_HW(cpu_c0_hi_clk), +}; +CCU_MUX_FC_DEFINE(cpu_c0_core_clk, cpu_c0_clk_parents, APMU_CPU_C0_CLK_CTRL, BIT(12), 0, 3, + CLK_IS_CRITICAL); +CCU_DIV_DEFINE(cpu_c0_ace_clk, CCU_PARENT_HW(cpu_c0_core_clk), APMU_CPU_C0_CLK_CTRL, 6, 3, + CLK_IS_CRITICAL); +CCU_DIV_DEFINE(cpu_c0_tcm_clk, CCU_PARENT_HW(cpu_c0_core_clk), APMU_CPU_C0_CLK_CTRL, 9, 3, + CLK_IS_CRITICAL); + +static const struct clk_parent_data cpu_c1_hi_clk_parents[] = { + CCU_PARENT_HW(pll3_d2), + CCU_PARENT_HW(pll3_d1), +}; +CCU_MUX_DEFINE(cpu_c1_hi_clk, cpu_c1_hi_clk_parents, APMU_CPU_C1_CLK_CTRL, 13, 1, 0); +static const struct clk_parent_data cpu_c1_clk_parents[] = { + CCU_PARENT_HW(pll1_d4_614p4), + CCU_PARENT_HW(pll1_d3_819p2), + CCU_PARENT_HW(pll1_d6_409p6), + CCU_PARENT_HW(pll1_d5_491p52), + CCU_PARENT_HW(pll1_d2_1228p8), + CCU_PARENT_HW(pll3_d3), + CCU_PARENT_HW(pll2_d3), + CCU_PARENT_HW(cpu_c1_hi_clk), +}; +CCU_MUX_FC_DEFINE(cpu_c1_core_clk, cpu_c1_clk_parents, APMU_CPU_C1_CLK_CTRL, BIT(12), 0, 3, + CLK_IS_CRITICAL); +CCU_DIV_DEFINE(cpu_c1_ace_clk, CCU_PARENT_HW(cpu_c1_core_clk), APMU_CPU_C1_CLK_CTRL, 6, 3, + CLK_IS_CRITICAL); + +static const struct clk_parent_data jpg_parents[] = { + CCU_PARENT_HW(pll1_d4_614p4), + CCU_PARENT_HW(pll1_d6_409p6), + CCU_PARENT_HW(pll1_d5_491p52), + CCU_PARENT_HW(pll1_d3_819p2), + CCU_PARENT_HW(pll1_d2_1228p8), + CCU_PARENT_HW(pll2_d4), + CCU_PARENT_HW(pll2_d3), +}; +CCU_MUX_DIV_GATE_FC_DEFINE(jpg_clk, jpg_parents, APMU_JPG_CLK_RES_CTRL, 5, 3, BIT(15), 2, 3, + BIT(1), 0); + +static const struct clk_parent_data ccic2phy_parents[] = { + CCU_PARENT_HW(pll1_d24_102p4), + CCU_PARENT_HW(pll1_d48_51p2_ap), +}; +CCU_MUX_GATE_DEFINE(ccic2phy_clk, ccic2phy_parents, APMU_CSI_CCIC2_CLK_RES_CTRL, 7, 1, BIT(5), 0); + +static const struct clk_parent_data ccic3phy_parents[] = { + CCU_PARENT_HW(pll1_d24_102p4), + CCU_PARENT_HW(pll1_d48_51p2_ap), +}; +CCU_MUX_GATE_DEFINE(ccic3phy_clk, ccic3phy_parents, APMU_CSI_CCIC2_CLK_RES_CTRL, 31, 1, BIT(30), 0); + +static const struct clk_parent_data csi_parents[] = { + CCU_PARENT_HW(pll1_d5_491p52), + CCU_PARENT_HW(pll1_d6_409p6), + CCU_PARENT_HW(pll1_d4_614p4), + CCU_PARENT_HW(pll1_d3_819p2), + CCU_PARENT_HW(pll2_d2), + CCU_PARENT_HW(pll2_d3), + CCU_PARENT_HW(pll2_d4), + CCU_PARENT_HW(pll1_d2_1228p8), +}; +CCU_MUX_DIV_GATE_FC_DEFINE(csi_clk, csi_parents, APMU_CSI_CCIC2_CLK_RES_CTRL, 20, 3, BIT(15), + 16, 3, BIT(4), 0); + +static const struct clk_parent_data camm_parents[] = { + 
CCU_PARENT_HW(pll1_d8_307p2), + CCU_PARENT_HW(pll2_d5), + CCU_PARENT_HW(pll1_d6_409p6), + CCU_PARENT_NAME(vctcxo_24m), +}; +CCU_MUX_DIV_GATE_DEFINE(camm0_clk, camm_parents, APMU_CSI_CCIC2_CLK_RES_CTRL, 23, 4, 8, 2, + BIT(28), 0); +CCU_MUX_DIV_GATE_DEFINE(camm1_clk, camm_parents, APMU_CSI_CCIC2_CLK_RES_CTRL, 23, 4, 8, 2, + BIT(6), 0); +CCU_MUX_DIV_GATE_DEFINE(camm2_clk, camm_parents, APMU_CSI_CCIC2_CLK_RES_CTRL, 23, 4, 8, 2, + BIT(3), 0); + +static const struct clk_parent_data isp_cpp_parents[] = { + CCU_PARENT_HW(pll1_d8_307p2), + CCU_PARENT_HW(pll1_d6_409p6), +}; +CCU_MUX_DIV_GATE_DEFINE(isp_cpp_clk, isp_cpp_parents, APMU_ISP_CLK_RES_CTRL, 24, 2, 26, 1, + BIT(28), 0); +static const struct clk_parent_data isp_bus_parents[] = { + CCU_PARENT_HW(pll1_d6_409p6), + CCU_PARENT_HW(pll1_d5_491p52), + CCU_PARENT_HW(pll1_d8_307p2), + CCU_PARENT_HW(pll1_d10_245p76), +}; +CCU_MUX_DIV_GATE_FC_DEFINE(isp_bus_clk, isp_bus_parents, APMU_ISP_CLK_RES_CTRL, 18, 3, BIT(23), + 21, 2, BIT(17), 0); +static const struct clk_parent_data isp_parents[] = { + CCU_PARENT_HW(pll1_d6_409p6), + CCU_PARENT_HW(pll1_d5_491p52), + CCU_PARENT_HW(pll1_d4_614p4), + CCU_PARENT_HW(pll1_d8_307p2), +}; +CCU_MUX_DIV_GATE_FC_DEFINE(isp_clk, isp_parents, APMU_ISP_CLK_RES_CTRL, 4, 3, BIT(7), 8, 2, + BIT(1), 0); + +static const struct clk_parent_data dpumclk_parents[] = { + CCU_PARENT_HW(pll1_d6_409p6), + CCU_PARENT_HW(pll1_d5_491p52), + CCU_PARENT_HW(pll1_d4_614p4), + CCU_PARENT_HW(pll1_d8_307p2), +}; +CCU_MUX_DIV_GATE_SPLIT_FC_DEFINE(dpu_mclk, dpumclk_parents, APMU_LCD_CLK_RES_CTRL2, + APMU_LCD_CLK_RES_CTRL1, 1, 4, BIT(29), 5, 3, BIT(0), 0); + +static const struct clk_parent_data dpuesc_parents[] = { + CCU_PARENT_HW(pll1_d48_51p2_ap), + CCU_PARENT_HW(pll1_d52_47p26), + CCU_PARENT_HW(pll1_d96_25p6), + CCU_PARENT_HW(pll1_d32_76p8), +}; +CCU_MUX_GATE_DEFINE(dpu_esc_clk, dpuesc_parents, APMU_LCD_CLK_RES_CTRL1, 0, 2, BIT(2), 0); + +static const struct clk_parent_data dpubit_parents[] = { + CCU_PARENT_HW(pll1_d3_819p2), + CCU_PARENT_HW(pll2_d2), + CCU_PARENT_HW(pll2_d3), + CCU_PARENT_HW(pll1_d2_1228p8), + CCU_PARENT_HW(pll2_d4), + CCU_PARENT_HW(pll2_d5), + CCU_PARENT_HW(pll2_d7), + CCU_PARENT_HW(pll2_d8), +}; +CCU_MUX_DIV_GATE_FC_DEFINE(dpu_bit_clk, dpubit_parents, APMU_LCD_CLK_RES_CTRL1, 17, 3, BIT(31), + 20, 3, BIT(16), 0); + +static const struct clk_parent_data dpupx_parents[] = { + CCU_PARENT_HW(pll1_d6_409p6), + CCU_PARENT_HW(pll1_d5_491p52), + CCU_PARENT_HW(pll1_d4_614p4), + CCU_PARENT_HW(pll1_d8_307p2), + CCU_PARENT_HW(pll2_d7), + CCU_PARENT_HW(pll2_d8), +}; +CCU_MUX_DIV_GATE_SPLIT_FC_DEFINE(dpu_pxclk, dpupx_parents, APMU_LCD_CLK_RES_CTRL2, + APMU_LCD_CLK_RES_CTRL1, 17, 4, BIT(30), 21, 3, BIT(16), 0); + +CCU_GATE_DEFINE(dpu_hclk, CCU_PARENT_HW(pmua_aclk), APMU_LCD_CLK_RES_CTRL1, + BIT(5), 0); + +static const struct clk_parent_data dpu_spi_parents[] = { + CCU_PARENT_HW(pll1_d8_307p2), + CCU_PARENT_HW(pll1_d6_409p6), + CCU_PARENT_HW(pll1_d10_245p76), + CCU_PARENT_HW(pll1_d11_223p4), + CCU_PARENT_HW(pll1_d13_189), + CCU_PARENT_HW(pll1_d23_106p8), + CCU_PARENT_HW(pll2_d3), + CCU_PARENT_HW(pll2_d5), +}; +CCU_MUX_DIV_GATE_FC_DEFINE(dpu_spi_clk, dpu_spi_parents, APMU_LCD_SPI_CLK_RES_CTRL, 8, 3, + BIT(7), 12, 3, BIT(1), 0); +CCU_GATE_DEFINE(dpu_spi_hbus_clk, CCU_PARENT_HW(pmua_aclk), APMU_LCD_SPI_CLK_RES_CTRL, BIT(3), 0); +CCU_GATE_DEFINE(dpu_spi_bus_clk, CCU_PARENT_HW(pmua_aclk), APMU_LCD_SPI_CLK_RES_CTRL, BIT(5), 0); +CCU_GATE_DEFINE(dpu_spi_aclk, CCU_PARENT_HW(pmua_aclk), APMU_LCD_SPI_CLK_RES_CTRL, BIT(6), 0); + +static const struct 
clk_parent_data v2d_parents[] = { + CCU_PARENT_HW(pll1_d5_491p52), + CCU_PARENT_HW(pll1_d6_409p6), + CCU_PARENT_HW(pll1_d8_307p2), + CCU_PARENT_HW(pll1_d4_614p4), +}; +CCU_MUX_DIV_GATE_FC_DEFINE(v2d_clk, v2d_parents, APMU_LCD_CLK_RES_CTRL1, 9, 3, BIT(28), 12, 2, + BIT(8), 0); + +static const struct clk_parent_data ccic_4x_parents[] = { + CCU_PARENT_HW(pll1_d5_491p52), + CCU_PARENT_HW(pll1_d6_409p6), + CCU_PARENT_HW(pll1_d4_614p4), + CCU_PARENT_HW(pll1_d3_819p2), + CCU_PARENT_HW(pll2_d2), + CCU_PARENT_HW(pll2_d3), + CCU_PARENT_HW(pll2_d4), + CCU_PARENT_HW(pll1_d2_1228p8), +}; +CCU_MUX_DIV_GATE_FC_DEFINE(ccic_4x_clk, ccic_4x_parents, APMU_CCIC_CLK_RES_CTRL, 18, 3, + BIT(15), 23, 2, BIT(4), 0); + +static const struct clk_parent_data ccic1phy_parents[] = { + CCU_PARENT_HW(pll1_d24_102p4), + CCU_PARENT_HW(pll1_d48_51p2_ap), +}; +CCU_MUX_GATE_DEFINE(ccic1phy_clk, ccic1phy_parents, APMU_CCIC_CLK_RES_CTRL, 7, 1, BIT(5), 0); + +CCU_GATE_DEFINE(sdh_axi_aclk, CCU_PARENT_HW(pmua_aclk), APMU_SDH0_CLK_RES_CTRL, BIT(3), 0); +static const struct clk_parent_data sdh01_parents[] = { + CCU_PARENT_HW(pll1_d6_409p6), + CCU_PARENT_HW(pll1_d4_614p4), + CCU_PARENT_HW(pll2_d8), + CCU_PARENT_HW(pll2_d5), + CCU_PARENT_HW(pll1_d11_223p4), + CCU_PARENT_HW(pll1_d13_189), + CCU_PARENT_HW(pll1_d23_106p8), +}; +CCU_MUX_DIV_GATE_FC_DEFINE(sdh0_clk, sdh01_parents, APMU_SDH0_CLK_RES_CTRL, 8, 3, BIT(11), 5, 3, + BIT(4), 0); +CCU_MUX_DIV_GATE_FC_DEFINE(sdh1_clk, sdh01_parents, APMU_SDH1_CLK_RES_CTRL, 8, 3, BIT(11), 5, 3, + BIT(4), 0); +static const struct clk_parent_data sdh2_parents[] = { + CCU_PARENT_HW(pll1_d6_409p6), + CCU_PARENT_HW(pll1_d4_614p4), + CCU_PARENT_HW(pll2_d8), + CCU_PARENT_HW(pll1_d3_819p2), + CCU_PARENT_HW(pll1_d11_223p4), + CCU_PARENT_HW(pll1_d13_189), + CCU_PARENT_HW(pll1_d23_106p8), +}; +CCU_MUX_DIV_GATE_FC_DEFINE(sdh2_clk, sdh2_parents, APMU_SDH2_CLK_RES_CTRL, 8, 3, BIT(11), 5, 3, + BIT(4), 0); + +CCU_GATE_DEFINE(usb_axi_clk, CCU_PARENT_HW(pmua_aclk), APMU_USB_CLK_RES_CTRL, BIT(1), 0); +CCU_GATE_DEFINE(usb_p1_aclk, CCU_PARENT_HW(pmua_aclk), APMU_USB_CLK_RES_CTRL, BIT(5), 0); +CCU_GATE_DEFINE(usb30_clk, CCU_PARENT_HW(pmua_aclk), APMU_USB_CLK_RES_CTRL, BIT(8), 0); + +static const struct clk_parent_data qspi_parents[] = { + CCU_PARENT_HW(pll1_d6_409p6), + CCU_PARENT_HW(pll2_d8), + CCU_PARENT_HW(pll1_d8_307p2), + CCU_PARENT_HW(pll1_d10_245p76), + CCU_PARENT_HW(pll1_d11_223p4), + CCU_PARENT_HW(pll1_d23_106p8), + CCU_PARENT_HW(pll1_d5_491p52), + CCU_PARENT_HW(pll1_d13_189), +}; +CCU_MUX_DIV_GATE_FC_DEFINE(qspi_clk, qspi_parents, APMU_QSPI_CLK_RES_CTRL, 9, 3, BIT(12), 6, 3, + BIT(4), 0); +CCU_GATE_DEFINE(qspi_bus_clk, CCU_PARENT_HW(pmua_aclk), APMU_QSPI_CLK_RES_CTRL, BIT(3), 0); +CCU_GATE_DEFINE(dma_clk, CCU_PARENT_HW(pmua_aclk), APMU_DMA_CLK_RES_CTRL, BIT(3), 0); + +static const struct clk_parent_data aes_parents[] = { + CCU_PARENT_HW(pll1_d12_204p8), + CCU_PARENT_HW(pll1_d24_102p4), +}; +CCU_MUX_GATE_DEFINE(aes_clk, aes_parents, APMU_AES_CLK_RES_CTRL, 6, 1, BIT(5), 0); + +static const struct clk_parent_data vpu_parents[] = { + CCU_PARENT_HW(pll1_d4_614p4), + CCU_PARENT_HW(pll1_d5_491p52), + CCU_PARENT_HW(pll1_d3_819p2), + CCU_PARENT_HW(pll1_d6_409p6), + CCU_PARENT_HW(pll3_d6), + CCU_PARENT_HW(pll2_d3), + CCU_PARENT_HW(pll2_d4), + CCU_PARENT_HW(pll2_d5), +}; +CCU_MUX_DIV_GATE_FC_DEFINE(vpu_clk, vpu_parents, APMU_VPU_CLK_RES_CTRL, 13, 3, BIT(21), 10, 3, + BIT(3), 0); + +static const struct clk_parent_data gpu_parents[] = { + CCU_PARENT_HW(pll1_d4_614p4), + CCU_PARENT_HW(pll1_d5_491p52), + 
CCU_PARENT_HW(pll1_d3_819p2), + CCU_PARENT_HW(pll1_d6_409p6), + CCU_PARENT_HW(pll3_d6), + CCU_PARENT_HW(pll2_d3), + CCU_PARENT_HW(pll2_d4), + CCU_PARENT_HW(pll2_d5), +}; +CCU_MUX_DIV_GATE_FC_DEFINE(gpu_clk, gpu_parents, APMU_GPU_CLK_RES_CTRL, 12, 3, BIT(15), 18, 3, + BIT(4), 0); + +static const struct clk_parent_data emmc_parents[] = { + CCU_PARENT_HW(pll1_d6_409p6), + CCU_PARENT_HW(pll1_d4_614p4), + CCU_PARENT_HW(pll1_d52_47p26), + CCU_PARENT_HW(pll1_d3_819p2), +}; +CCU_MUX_DIV_GATE_FC_DEFINE(emmc_clk, emmc_parents, APMU_PMUA_EM_CLK_RES_CTRL, 8, 3, BIT(11), + 6, 2, BIT(4), 0); +CCU_DIV_GATE_DEFINE(emmc_x_clk, CCU_PARENT_HW(pll1_d2_1228p8), APMU_PMUA_EM_CLK_RES_CTRL, 12, + 3, BIT(15), 0); + +static const struct clk_parent_data audio_parents[] = { + CCU_PARENT_HW(pll1_aud_245p7), + CCU_PARENT_HW(pll1_d8_307p2), + CCU_PARENT_HW(pll1_d6_409p6), +}; +CCU_MUX_DIV_GATE_FC_DEFINE(audio_clk, audio_parents, APMU_AUDIO_CLK_RES_CTRL, 4, 3, BIT(15), + 7, 3, BIT(12), 0); + +static const struct clk_parent_data hdmi_parents[] = { + CCU_PARENT_HW(pll1_d6_409p6), + CCU_PARENT_HW(pll1_d5_491p52), + CCU_PARENT_HW(pll1_d4_614p4), + CCU_PARENT_HW(pll1_d8_307p2), +}; +CCU_MUX_DIV_GATE_FC_DEFINE(hdmi_mclk, hdmi_parents, APMU_HDMI_CLK_RES_CTRL, 1, 4, BIT(29), 5, + 3, BIT(0), 0); + +CCU_GATE_DEFINE(pcie0_master_clk, CCU_PARENT_HW(pmua_aclk), APMU_PCIE_CLK_RES_CTRL_0, BIT(2), 0); +CCU_GATE_DEFINE(pcie0_slave_clk, CCU_PARENT_HW(pmua_aclk), APMU_PCIE_CLK_RES_CTRL_0, BIT(1), 0); +CCU_GATE_DEFINE(pcie0_dbi_clk, CCU_PARENT_HW(pmua_aclk), APMU_PCIE_CLK_RES_CTRL_0, BIT(0), 0); + +CCU_GATE_DEFINE(pcie1_master_clk, CCU_PARENT_HW(pmua_aclk), APMU_PCIE_CLK_RES_CTRL_1, BIT(2), 0); +CCU_GATE_DEFINE(pcie1_slave_clk, CCU_PARENT_HW(pmua_aclk), APMU_PCIE_CLK_RES_CTRL_1, BIT(1), 0); +CCU_GATE_DEFINE(pcie1_dbi_clk, CCU_PARENT_HW(pmua_aclk), APMU_PCIE_CLK_RES_CTRL_1, BIT(0), 0); + +CCU_GATE_DEFINE(pcie2_master_clk, CCU_PARENT_HW(pmua_aclk), APMU_PCIE_CLK_RES_CTRL_2, BIT(2), 0); +CCU_GATE_DEFINE(pcie2_slave_clk, CCU_PARENT_HW(pmua_aclk), APMU_PCIE_CLK_RES_CTRL_2, BIT(1), 0); +CCU_GATE_DEFINE(pcie2_dbi_clk, CCU_PARENT_HW(pmua_aclk), APMU_PCIE_CLK_RES_CTRL_2, BIT(0), 0); + +CCU_GATE_DEFINE(emac0_bus_clk, CCU_PARENT_HW(pmua_aclk), APMU_EMAC0_CLK_RES_CTRL, BIT(0), 0); +CCU_GATE_DEFINE(emac0_ptp_clk, CCU_PARENT_HW(pll2_d6), APMU_EMAC0_CLK_RES_CTRL, BIT(15), 0); +CCU_GATE_DEFINE(emac1_bus_clk, CCU_PARENT_HW(pmua_aclk), APMU_EMAC1_CLK_RES_CTRL, BIT(0), 0); +CCU_GATE_DEFINE(emac1_ptp_clk, CCU_PARENT_HW(pll2_d6), APMU_EMAC1_CLK_RES_CTRL, BIT(15), 0); + +CCU_GATE_DEFINE(emmc_bus_clk, CCU_PARENT_HW(pmua_aclk), APMU_PMUA_EM_CLK_RES_CTRL, BIT(3), 0); +/* APMU clocks end */ + +static struct clk_hw *k1_ccu_pll_hws[] = { + [CLK_PLL1] = &pll1.common.hw, + [CLK_PLL2] = &pll2.common.hw, + [CLK_PLL3] = &pll3.common.hw, + [CLK_PLL1_D2] = &pll1_d2.common.hw, + [CLK_PLL1_D3] = &pll1_d3.common.hw, + [CLK_PLL1_D4] = &pll1_d4.common.hw, + [CLK_PLL1_D5] = &pll1_d5.common.hw, + [CLK_PLL1_D6] = &pll1_d6.common.hw, + [CLK_PLL1_D7] = &pll1_d7.common.hw, + [CLK_PLL1_D8] = &pll1_d8.common.hw, + [CLK_PLL1_D11] = &pll1_d11_223p4.common.hw, + [CLK_PLL1_D13] = &pll1_d13_189.common.hw, + [CLK_PLL1_D23] = &pll1_d23_106p8.common.hw, + [CLK_PLL1_D64] = &pll1_d64_38p4.common.hw, + [CLK_PLL1_D10_AUD] = &pll1_aud_245p7.common.hw, + [CLK_PLL1_D100_AUD] = &pll1_aud_24p5.common.hw, + [CLK_PLL2_D1] = &pll2_d1.common.hw, + [CLK_PLL2_D2] = &pll2_d2.common.hw, + [CLK_PLL2_D3] = &pll2_d3.common.hw, + [CLK_PLL2_D4] = &pll2_d4.common.hw, + [CLK_PLL2_D5] = &pll2_d5.common.hw, + 
[CLK_PLL2_D6] = &pll2_d6.common.hw, + [CLK_PLL2_D7] = &pll2_d7.common.hw, + [CLK_PLL2_D8] = &pll2_d8.common.hw, + [CLK_PLL3_D1] = &pll3_d1.common.hw, + [CLK_PLL3_D2] = &pll3_d2.common.hw, + [CLK_PLL3_D3] = &pll3_d3.common.hw, + [CLK_PLL3_D4] = &pll3_d4.common.hw, + [CLK_PLL3_D5] = &pll3_d5.common.hw, + [CLK_PLL3_D6] = &pll3_d6.common.hw, + [CLK_PLL3_D7] = &pll3_d7.common.hw, + [CLK_PLL3_D8] = &pll3_d8.common.hw, + [CLK_PLL3_80] = &pll3_80.common.hw, + [CLK_PLL3_40] = &pll3_40.common.hw, + [CLK_PLL3_20] = &pll3_20.common.hw, +}; + +static const struct spacemit_ccu_data k1_ccu_pll_data = { + .hws = k1_ccu_pll_hws, + .num = ARRAY_SIZE(k1_ccu_pll_hws), +}; + +static struct clk_hw *k1_ccu_mpmu_hws[] = { + [CLK_PLL1_307P2] = &pll1_d8_307p2.common.hw, + [CLK_PLL1_76P8] = &pll1_d32_76p8.common.hw, + [CLK_PLL1_61P44] = &pll1_d40_61p44.common.hw, + [CLK_PLL1_153P6] = &pll1_d16_153p6.common.hw, + [CLK_PLL1_102P4] = &pll1_d24_102p4.common.hw, + [CLK_PLL1_51P2] = &pll1_d48_51p2.common.hw, + [CLK_PLL1_51P2_AP] = &pll1_d48_51p2_ap.common.hw, + [CLK_PLL1_57P6] = &pll1_m3d128_57p6.common.hw, + [CLK_PLL1_25P6] = &pll1_d96_25p6.common.hw, + [CLK_PLL1_12P8] = &pll1_d192_12p8.common.hw, + [CLK_PLL1_12P8_WDT] = &pll1_d192_12p8_wdt.common.hw, + [CLK_PLL1_6P4] = &pll1_d384_6p4.common.hw, + [CLK_PLL1_3P2] = &pll1_d768_3p2.common.hw, + [CLK_PLL1_1P6] = &pll1_d1536_1p6.common.hw, + [CLK_PLL1_0P8] = &pll1_d3072_0p8.common.hw, + [CLK_PLL1_409P6] = &pll1_d6_409p6.common.hw, + [CLK_PLL1_204P8] = &pll1_d12_204p8.common.hw, + [CLK_PLL1_491] = &pll1_d5_491p52.common.hw, + [CLK_PLL1_245P76] = &pll1_d10_245p76.common.hw, + [CLK_PLL1_614] = &pll1_d4_614p4.common.hw, + [CLK_PLL1_47P26] = &pll1_d52_47p26.common.hw, + [CLK_PLL1_31P5] = &pll1_d78_31p5.common.hw, + [CLK_PLL1_819] = &pll1_d3_819p2.common.hw, + [CLK_PLL1_1228] = &pll1_d2_1228p8.common.hw, + [CLK_SLOW_UART] = &slow_uart.common.hw, + [CLK_SLOW_UART1] = &slow_uart1_14p74.common.hw, + [CLK_SLOW_UART2] = &slow_uart2_48.common.hw, + [CLK_WDT] = &wdt_clk.common.hw, + [CLK_RIPC] = &ripc_clk.common.hw, + [CLK_I2S_SYSCLK] = &i2s_sysclk.common.hw, + [CLK_I2S_BCLK] = &i2s_bclk.common.hw, + [CLK_APB] = &apb_clk.common.hw, + [CLK_WDT_BUS] = &wdt_bus_clk.common.hw, +}; + +static const struct spacemit_ccu_data k1_ccu_mpmu_data = { + .hws = k1_ccu_mpmu_hws, + .num = ARRAY_SIZE(k1_ccu_mpmu_hws), +}; + +static struct clk_hw *k1_ccu_apbc_hws[] = { + [CLK_UART0] = &uart0_clk.common.hw, + [CLK_UART2] = &uart2_clk.common.hw, + [CLK_UART3] = &uart3_clk.common.hw, + [CLK_UART4] = &uart4_clk.common.hw, + [CLK_UART5] = &uart5_clk.common.hw, + [CLK_UART6] = &uart6_clk.common.hw, + [CLK_UART7] = &uart7_clk.common.hw, + [CLK_UART8] = &uart8_clk.common.hw, + [CLK_UART9] = &uart9_clk.common.hw, + [CLK_GPIO] = &gpio_clk.common.hw, + [CLK_PWM0] = &pwm0_clk.common.hw, + [CLK_PWM1] = &pwm1_clk.common.hw, + [CLK_PWM2] = &pwm2_clk.common.hw, + [CLK_PWM3] = &pwm3_clk.common.hw, + [CLK_PWM4] = &pwm4_clk.common.hw, + [CLK_PWM5] = &pwm5_clk.common.hw, + [CLK_PWM6] = &pwm6_clk.common.hw, + [CLK_PWM7] = &pwm7_clk.common.hw, + [CLK_PWM8] = &pwm8_clk.common.hw, + [CLK_PWM9] = &pwm9_clk.common.hw, + [CLK_PWM10] = &pwm10_clk.common.hw, + [CLK_PWM11] = &pwm11_clk.common.hw, + [CLK_PWM12] = &pwm12_clk.common.hw, + [CLK_PWM13] = &pwm13_clk.common.hw, + [CLK_PWM14] = &pwm14_clk.common.hw, + [CLK_PWM15] = &pwm15_clk.common.hw, + [CLK_PWM16] = &pwm16_clk.common.hw, + [CLK_PWM17] = &pwm17_clk.common.hw, + [CLK_PWM18] = &pwm18_clk.common.hw, + [CLK_PWM19] = &pwm19_clk.common.hw, + [CLK_SSP3] = &ssp3_clk.common.hw, + 
[CLK_RTC] = &rtc_clk.common.hw, + [CLK_TWSI0] = &twsi0_clk.common.hw, + [CLK_TWSI1] = &twsi1_clk.common.hw, + [CLK_TWSI2] = &twsi2_clk.common.hw, + [CLK_TWSI4] = &twsi4_clk.common.hw, + [CLK_TWSI5] = &twsi5_clk.common.hw, + [CLK_TWSI6] = &twsi6_clk.common.hw, + [CLK_TWSI7] = &twsi7_clk.common.hw, + [CLK_TWSI8] = &twsi8_clk.common.hw, + [CLK_TIMERS1] = &timers1_clk.common.hw, + [CLK_TIMERS2] = &timers2_clk.common.hw, + [CLK_AIB] = &aib_clk.common.hw, + [CLK_ONEWIRE] = &onewire_clk.common.hw, + [CLK_SSPA0] = &sspa0_clk.common.hw, + [CLK_SSPA1] = &sspa1_clk.common.hw, + [CLK_DRO] = &dro_clk.common.hw, + [CLK_IR] = &ir_clk.common.hw, + [CLK_TSEN] = &tsen_clk.common.hw, + [CLK_IPC_AP2AUD] = &ipc_ap2aud_clk.common.hw, + [CLK_CAN0] = &can0_clk.common.hw, + [CLK_CAN0_BUS] = &can0_bus_clk.common.hw, + [CLK_UART0_BUS] = &uart0_bus_clk.common.hw, + [CLK_UART2_BUS] = &uart2_bus_clk.common.hw, + [CLK_UART3_BUS] = &uart3_bus_clk.common.hw, + [CLK_UART4_BUS] = &uart4_bus_clk.common.hw, + [CLK_UART5_BUS] = &uart5_bus_clk.common.hw, + [CLK_UART6_BUS] = &uart6_bus_clk.common.hw, + [CLK_UART7_BUS] = &uart7_bus_clk.common.hw, + [CLK_UART8_BUS] = &uart8_bus_clk.common.hw, + [CLK_UART9_BUS] = &uart9_bus_clk.common.hw, + [CLK_GPIO_BUS] = &gpio_bus_clk.common.hw, + [CLK_PWM0_BUS] = &pwm0_bus_clk.common.hw, + [CLK_PWM1_BUS] = &pwm1_bus_clk.common.hw, + [CLK_PWM2_BUS] = &pwm2_bus_clk.common.hw, + [CLK_PWM3_BUS] = &pwm3_bus_clk.common.hw, + [CLK_PWM4_BUS] = &pwm4_bus_clk.common.hw, + [CLK_PWM5_BUS] = &pwm5_bus_clk.common.hw, + [CLK_PWM6_BUS] = &pwm6_bus_clk.common.hw, + [CLK_PWM7_BUS] = &pwm7_bus_clk.common.hw, + [CLK_PWM8_BUS] = &pwm8_bus_clk.common.hw, + [CLK_PWM9_BUS] = &pwm9_bus_clk.common.hw, + [CLK_PWM10_BUS] = &pwm10_bus_clk.common.hw, + [CLK_PWM11_BUS] = &pwm11_bus_clk.common.hw, + [CLK_PWM12_BUS] = &pwm12_bus_clk.common.hw, + [CLK_PWM13_BUS] = &pwm13_bus_clk.common.hw, + [CLK_PWM14_BUS] = &pwm14_bus_clk.common.hw, + [CLK_PWM15_BUS] = &pwm15_bus_clk.common.hw, + [CLK_PWM16_BUS] = &pwm16_bus_clk.common.hw, + [CLK_PWM17_BUS] = &pwm17_bus_clk.common.hw, + [CLK_PWM18_BUS] = &pwm18_bus_clk.common.hw, + [CLK_PWM19_BUS] = &pwm19_bus_clk.common.hw, + [CLK_SSP3_BUS] = &ssp3_bus_clk.common.hw, + [CLK_RTC_BUS] = &rtc_bus_clk.common.hw, + [CLK_TWSI0_BUS] = &twsi0_bus_clk.common.hw, + [CLK_TWSI1_BUS] = &twsi1_bus_clk.common.hw, + [CLK_TWSI2_BUS] = &twsi2_bus_clk.common.hw, + [CLK_TWSI4_BUS] = &twsi4_bus_clk.common.hw, + [CLK_TWSI5_BUS] = &twsi5_bus_clk.common.hw, + [CLK_TWSI6_BUS] = &twsi6_bus_clk.common.hw, + [CLK_TWSI7_BUS] = &twsi7_bus_clk.common.hw, + [CLK_TWSI8_BUS] = &twsi8_bus_clk.common.hw, + [CLK_TIMERS1_BUS] = &timers1_bus_clk.common.hw, + [CLK_TIMERS2_BUS] = &timers2_bus_clk.common.hw, + [CLK_AIB_BUS] = &aib_bus_clk.common.hw, + [CLK_ONEWIRE_BUS] = &onewire_bus_clk.common.hw, + [CLK_SSPA0_BUS] = &sspa0_bus_clk.common.hw, + [CLK_SSPA1_BUS] = &sspa1_bus_clk.common.hw, + [CLK_TSEN_BUS] = &tsen_bus_clk.common.hw, + [CLK_IPC_AP2AUD_BUS] = &ipc_ap2aud_bus_clk.common.hw, +}; + +static const struct spacemit_ccu_data k1_ccu_apbc_data = { + .hws = k1_ccu_apbc_hws, + .num = ARRAY_SIZE(k1_ccu_apbc_hws), +}; + +static struct clk_hw *k1_ccu_apmu_hws[] = { + [CLK_CCI550] = &cci550_clk.common.hw, + [CLK_CPU_C0_HI] = &cpu_c0_hi_clk.common.hw, + [CLK_CPU_C0_CORE] = &cpu_c0_core_clk.common.hw, + [CLK_CPU_C0_ACE] = &cpu_c0_ace_clk.common.hw, + [CLK_CPU_C0_TCM] = &cpu_c0_tcm_clk.common.hw, + [CLK_CPU_C1_HI] = &cpu_c1_hi_clk.common.hw, + [CLK_CPU_C1_CORE] = &cpu_c1_core_clk.common.hw, + [CLK_CPU_C1_ACE] = &cpu_c1_ace_clk.common.hw, 
+ [CLK_CCIC_4X] = &ccic_4x_clk.common.hw, + [CLK_CCIC1PHY] = &ccic1phy_clk.common.hw, + [CLK_SDH_AXI] = &sdh_axi_aclk.common.hw, + [CLK_SDH0] = &sdh0_clk.common.hw, + [CLK_SDH1] = &sdh1_clk.common.hw, + [CLK_SDH2] = &sdh2_clk.common.hw, + [CLK_USB_P1] = &usb_p1_aclk.common.hw, + [CLK_USB_AXI] = &usb_axi_clk.common.hw, + [CLK_USB30] = &usb30_clk.common.hw, + [CLK_QSPI] = &qspi_clk.common.hw, + [CLK_QSPI_BUS] = &qspi_bus_clk.common.hw, + [CLK_DMA] = &dma_clk.common.hw, + [CLK_AES] = &aes_clk.common.hw, + [CLK_VPU] = &vpu_clk.common.hw, + [CLK_GPU] = &gpu_clk.common.hw, + [CLK_EMMC] = &emmc_clk.common.hw, + [CLK_EMMC_X] = &emmc_x_clk.common.hw, + [CLK_AUDIO] = &audio_clk.common.hw, + [CLK_HDMI] = &hdmi_mclk.common.hw, + [CLK_PMUA_ACLK] = &pmua_aclk.common.hw, + [CLK_PCIE0_MASTER] = &pcie0_master_clk.common.hw, + [CLK_PCIE0_SLAVE] = &pcie0_slave_clk.common.hw, + [CLK_PCIE0_DBI] = &pcie0_dbi_clk.common.hw, + [CLK_PCIE1_MASTER] = &pcie1_master_clk.common.hw, + [CLK_PCIE1_SLAVE] = &pcie1_slave_clk.common.hw, + [CLK_PCIE1_DBI] = &pcie1_dbi_clk.common.hw, + [CLK_PCIE2_MASTER] = &pcie2_master_clk.common.hw, + [CLK_PCIE2_SLAVE] = &pcie2_slave_clk.common.hw, + [CLK_PCIE2_DBI] = &pcie2_dbi_clk.common.hw, + [CLK_EMAC0_BUS] = &emac0_bus_clk.common.hw, + [CLK_EMAC0_PTP] = &emac0_ptp_clk.common.hw, + [CLK_EMAC1_BUS] = &emac1_bus_clk.common.hw, + [CLK_EMAC1_PTP] = &emac1_ptp_clk.common.hw, + [CLK_JPG] = &jpg_clk.common.hw, + [CLK_CCIC2PHY] = &ccic2phy_clk.common.hw, + [CLK_CCIC3PHY] = &ccic3phy_clk.common.hw, + [CLK_CSI] = &csi_clk.common.hw, + [CLK_CAMM0] = &camm0_clk.common.hw, + [CLK_CAMM1] = &camm1_clk.common.hw, + [CLK_CAMM2] = &camm2_clk.common.hw, + [CLK_ISP_CPP] = &isp_cpp_clk.common.hw, + [CLK_ISP_BUS] = &isp_bus_clk.common.hw, + [CLK_ISP] = &isp_clk.common.hw, + [CLK_DPU_MCLK] = &dpu_mclk.common.hw, + [CLK_DPU_ESC] = &dpu_esc_clk.common.hw, + [CLK_DPU_BIT] = &dpu_bit_clk.common.hw, + [CLK_DPU_PXCLK] = &dpu_pxclk.common.hw, + [CLK_DPU_HCLK] = &dpu_hclk.common.hw, + [CLK_DPU_SPI] = &dpu_spi_clk.common.hw, + [CLK_DPU_SPI_HBUS] = &dpu_spi_hbus_clk.common.hw, + [CLK_DPU_SPIBUS] = &dpu_spi_bus_clk.common.hw, + [CLK_DPU_SPI_ACLK] = &dpu_spi_aclk.common.hw, + [CLK_V2D] = &v2d_clk.common.hw, + [CLK_EMMC_BUS] = &emmc_bus_clk.common.hw, +}; + +static const struct spacemit_ccu_data k1_ccu_apmu_data = { + .hws = k1_ccu_apmu_hws, + .num = ARRAY_SIZE(k1_ccu_apmu_hws), +}; + +static int spacemit_ccu_register(struct device *dev, + struct regmap *regmap, + struct regmap *lock_regmap, + const struct spacemit_ccu_data *data) +{ + struct clk_hw_onecell_data *clk_data; + int i, ret; + + clk_data = devm_kzalloc(dev, struct_size(clk_data, hws, data->num), + GFP_KERNEL); + if (!clk_data) + return -ENOMEM; + + for (i = 0; i < data->num; i++) { + struct clk_hw *hw = data->hws[i]; + struct ccu_common *common; + const char *name; + + if (!hw) { + clk_data->hws[i] = ERR_PTR(-ENOENT); + continue; + } + + name = hw->init->name; + + common = hw_to_ccu_common(hw); + common->regmap = regmap; + common->lock_regmap = lock_regmap; + + ret = devm_clk_hw_register(dev, hw); + if (ret) { + dev_err(dev, "Cannot register clock %d - %s\n", + i, name); + return ret; + } + + clk_data->hws[i] = hw; + } + + clk_data->num = data->num; + + ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, clk_data); + if (ret) + dev_err(dev, "failed to add clock hardware provider (%d)\n", ret); + + return ret; +} + +static int k1_ccu_probe(struct platform_device *pdev) +{ + struct regmap *base_regmap, *lock_regmap = NULL; + struct device *dev = 
&pdev->dev; + int ret; + + base_regmap = device_node_to_regmap(dev->of_node); + if (IS_ERR(base_regmap)) + return dev_err_probe(dev, PTR_ERR(base_regmap), + "failed to get regmap\n"); + + /* + * The lock status of PLLs locate in MPMU region, while PLLs themselves + * are in APBS region. Reference to MPMU syscon is required to check PLL + * status. + */ + if (of_device_is_compatible(dev->of_node, "spacemit,k1-pll")) { + struct device_node *mpmu = of_parse_phandle(dev->of_node, + "spacemit,mpmu", 0); + if (!mpmu) + return dev_err_probe(dev, -ENODEV, + "Cannot parse MPMU region\n"); + + lock_regmap = device_node_to_regmap(mpmu); + of_node_put(mpmu); + + if (IS_ERR(lock_regmap)) + return dev_err_probe(dev, PTR_ERR(lock_regmap), + "failed to get lock regmap\n"); + } + + ret = spacemit_ccu_register(dev, base_regmap, lock_regmap, + of_device_get_match_data(dev)); + if (ret) + return dev_err_probe(dev, ret, "failed to register clocks\n"); + + return 0; +} + +static const struct of_device_id of_k1_ccu_match[] = { + { + .compatible = "spacemit,k1-pll", + .data = &k1_ccu_pll_data, + }, + { + .compatible = "spacemit,k1-syscon-mpmu", + .data = &k1_ccu_mpmu_data, + }, + { + .compatible = "spacemit,k1-syscon-apbc", + .data = &k1_ccu_apbc_data, + }, + { + .compatible = "spacemit,k1-syscon-apmu", + .data = &k1_ccu_apmu_data, + }, + { } +}; +MODULE_DEVICE_TABLE(of, of_k1_ccu_match); + +static struct platform_driver k1_ccu_driver = { + .driver = { + .name = "spacemit,k1-ccu", + .of_match_table = of_k1_ccu_match, + }, + .probe = k1_ccu_probe, +}; +module_platform_driver(k1_ccu_driver); + +MODULE_DESCRIPTION("SpacemiT K1 CCU driver"); +MODULE_AUTHOR("Haylen Chu <heylenay@4d2.org>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/clk/spacemit/ccu_common.h b/drivers/clk/spacemit/ccu_common.h new file mode 100644 index 000000000000..da72f3836e0b --- /dev/null +++ b/drivers/clk/spacemit/ccu_common.h @@ -0,0 +1,48 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2024 SpacemiT Technology Co. Ltd + * Copyright (c) 2024-2025 Haylen Chu <heylenay@4d2.org> + */ + +#ifndef _CCU_COMMON_H_ +#define _CCU_COMMON_H_ + +#include <linux/regmap.h> + +struct ccu_common { + struct regmap *regmap; + struct regmap *lock_regmap; + + union { + /* For DDN and MIX */ + struct { + u32 reg_ctrl; + u32 reg_fc; + u32 mask_fc; + }; + + /* For PLL */ + struct { + u32 reg_swcr1; + u32 reg_swcr3; + }; + }; + + struct clk_hw hw; +}; + +static inline struct ccu_common *hw_to_ccu_common(struct clk_hw *hw) +{ + return container_of(hw, struct ccu_common, hw); +} + +#define ccu_read(c, reg) \ + ({ \ + u32 tmp; \ + regmap_read((c)->regmap, (c)->reg_##reg, &tmp); \ + tmp; \ + }) +#define ccu_update(c, reg, mask, val) \ + regmap_update_bits((c)->regmap, (c)->reg_##reg, mask, val) + +#endif /* _CCU_COMMON_H_ */ diff --git a/drivers/clk/spacemit/ccu_ddn.c b/drivers/clk/spacemit/ccu_ddn.c new file mode 100644 index 000000000000..be311b045698 --- /dev/null +++ b/drivers/clk/spacemit/ccu_ddn.c @@ -0,0 +1,83 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2024 SpacemiT Technology Co. Ltd + * Copyright (c) 2024-2025 Haylen Chu <heylenay@4d2.org> + * + * DDN stands for "Divider Denominator Numerator", it's M/N clock with a + * constant x2 factor. 
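 * For example, assuming an illustrative 2457.6 MHz input with numerator 24
 * and denominator 1, the output would be 2457.6 / 2 / 24 = 51.2 MHz.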
This clock hardware follows the equation below, + * + * numerator Fin + * 2 * ------------- = ------- + * denominator Fout + * + * Thus, Fout could be calculated with, + * + * Fin denominator + * Fout = ----- * ------------- + * 2 numerator + */ + +#include <linux/clk-provider.h> +#include <linux/rational.h> + +#include "ccu_ddn.h" + +static unsigned long ccu_ddn_calc_rate(unsigned long prate, + unsigned long num, unsigned long den) +{ + return prate * den / 2 / num; +} + +static unsigned long ccu_ddn_calc_best_rate(struct ccu_ddn *ddn, + unsigned long rate, unsigned long prate, + unsigned long *num, unsigned long *den) +{ + rational_best_approximation(rate, prate / 2, + ddn->den_mask >> ddn->den_shift, + ddn->num_mask >> ddn->num_shift, + den, num); + return ccu_ddn_calc_rate(prate, *num, *den); +} + +static long ccu_ddn_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *prate) +{ + struct ccu_ddn *ddn = hw_to_ccu_ddn(hw); + unsigned long num, den; + + return ccu_ddn_calc_best_rate(ddn, rate, *prate, &num, &den); +} + +static unsigned long ccu_ddn_recalc_rate(struct clk_hw *hw, unsigned long prate) +{ + struct ccu_ddn *ddn = hw_to_ccu_ddn(hw); + unsigned int val, num, den; + + val = ccu_read(&ddn->common, ctrl); + + num = (val & ddn->num_mask) >> ddn->num_shift; + den = (val & ddn->den_mask) >> ddn->den_shift; + + return ccu_ddn_calc_rate(prate, num, den); +} + +static int ccu_ddn_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long prate) +{ + struct ccu_ddn *ddn = hw_to_ccu_ddn(hw); + unsigned long num, den; + + ccu_ddn_calc_best_rate(ddn, rate, prate, &num, &den); + + ccu_update(&ddn->common, ctrl, + ddn->num_mask | ddn->den_mask, + (num << ddn->num_shift) | (den << ddn->den_shift)); + + return 0; +} + +const struct clk_ops spacemit_ccu_ddn_ops = { + .recalc_rate = ccu_ddn_recalc_rate, + .round_rate = ccu_ddn_round_rate, + .set_rate = ccu_ddn_set_rate, +}; diff --git a/drivers/clk/spacemit/ccu_ddn.h b/drivers/clk/spacemit/ccu_ddn.h new file mode 100644 index 000000000000..a52fabe77d62 --- /dev/null +++ b/drivers/clk/spacemit/ccu_ddn.h @@ -0,0 +1,48 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2024 SpacemiT Technology Co. 
Ltd + * Copyright (c) 2024-2025 Haylen Chu <heylenay@4d2.org> + */ + +#ifndef _CCU_DDN_H_ +#define _CCU_DDN_H_ + +#include <linux/bitops.h> +#include <linux/clk-provider.h> + +#include "ccu_common.h" + +struct ccu_ddn { + struct ccu_common common; + unsigned int num_mask; + unsigned int num_shift; + unsigned int den_mask; + unsigned int den_shift; +}; + +#define CCU_DDN_INIT(_name, _parent, _flags) \ + CLK_HW_INIT_HW(#_name, &_parent.common.hw, &spacemit_ccu_ddn_ops, _flags) + +#define CCU_DDN_DEFINE(_name, _parent, _reg_ctrl, _num_shift, _num_width, \ + _den_shift, _den_width, _flags) \ +static struct ccu_ddn _name = { \ + .common = { \ + .reg_ctrl = _reg_ctrl, \ + .hw.init = CCU_DDN_INIT(_name, _parent, _flags), \ + }, \ + .num_mask = GENMASK(_num_shift + _num_width - 1, _num_shift), \ + .num_shift = _num_shift, \ + .den_mask = GENMASK(_den_shift + _den_width - 1, _den_shift), \ + .den_shift = _den_shift, \ +} + +static inline struct ccu_ddn *hw_to_ccu_ddn(struct clk_hw *hw) +{ + struct ccu_common *common = hw_to_ccu_common(hw); + + return container_of(common, struct ccu_ddn, common); +} + +extern const struct clk_ops spacemit_ccu_ddn_ops; + +#endif diff --git a/drivers/clk/spacemit/ccu_mix.c b/drivers/clk/spacemit/ccu_mix.c new file mode 100644 index 000000000000..9b852aa61f78 --- /dev/null +++ b/drivers/clk/spacemit/ccu_mix.c @@ -0,0 +1,268 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2024 SpacemiT Technology Co. Ltd + * Copyright (c) 2024-2025 Haylen Chu <heylenay@4d2.org> + * + * MIX clock type is the combination of mux, factor or divider, and gate + */ + +#include <linux/clk-provider.h> + +#include "ccu_mix.h" + +#define MIX_FC_TIMEOUT_US 10000 +#define MIX_FC_DELAY_US 5 + +static void ccu_gate_disable(struct clk_hw *hw) +{ + struct ccu_mix *mix = hw_to_ccu_mix(hw); + + ccu_update(&mix->common, ctrl, mix->gate.mask, 0); +} + +static int ccu_gate_enable(struct clk_hw *hw) +{ + struct ccu_mix *mix = hw_to_ccu_mix(hw); + struct ccu_gate_config *gate = &mix->gate; + + ccu_update(&mix->common, ctrl, gate->mask, gate->mask); + + return 0; +} + +static int ccu_gate_is_enabled(struct clk_hw *hw) +{ + struct ccu_mix *mix = hw_to_ccu_mix(hw); + struct ccu_gate_config *gate = &mix->gate; + + return (ccu_read(&mix->common, ctrl) & gate->mask) == gate->mask; +} + +static unsigned long ccu_factor_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct ccu_mix *mix = hw_to_ccu_mix(hw); + + return parent_rate * mix->factor.mul / mix->factor.div; +} + +static unsigned long ccu_div_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct ccu_mix *mix = hw_to_ccu_mix(hw); + struct ccu_div_config *div = &mix->div; + unsigned long val; + + val = ccu_read(&mix->common, ctrl) >> div->shift; + val &= (1 << div->width) - 1; + + return divider_recalc_rate(hw, parent_rate, val, NULL, 0, div->width); +} + +/* + * Some clocks require a "FC" (frequency change) bit to be set after changing + * their rates or reparenting. This bit will be automatically cleared by + * hardware in MIX_FC_TIMEOUT_US, which indicates the operation is completed. 
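 *
 * A rate change or reparent on such a clock therefore writes the new divider
 * or mux selection first, then sets the FC bit and polls the FC register
 * until the hardware clears the bit again, which is what
 * ccu_mix_trigger_fc() below does via regmap_read_poll_timeout_atomic().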
+ */ +static int ccu_mix_trigger_fc(struct clk_hw *hw) +{ + struct ccu_common *common = hw_to_ccu_common(hw); + unsigned int val; + + if (common->reg_fc) + return 0; + + ccu_update(common, fc, common->mask_fc, common->mask_fc); + + return regmap_read_poll_timeout_atomic(common->regmap, common->reg_fc, + val, !(val & common->mask_fc), + MIX_FC_DELAY_US, + MIX_FC_TIMEOUT_US); +} + +static long ccu_factor_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *prate) +{ + return ccu_factor_recalc_rate(hw, *prate); +} + +static int ccu_factor_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + return 0; +} + +static unsigned long +ccu_mix_calc_best_rate(struct clk_hw *hw, unsigned long rate, + struct clk_hw **best_parent, + unsigned long *best_parent_rate, + u32 *div_val) +{ + struct ccu_mix *mix = hw_to_ccu_mix(hw); + unsigned int parent_num = clk_hw_get_num_parents(hw); + struct ccu_div_config *div = &mix->div; + u32 div_max = 1 << div->width; + unsigned long best_rate = 0; + + for (int i = 0; i < parent_num; i++) { + struct clk_hw *parent = clk_hw_get_parent_by_index(hw, i); + unsigned long parent_rate; + + if (!parent) + continue; + + parent_rate = clk_hw_get_rate(parent); + + for (int j = 1; j <= div_max; j++) { + unsigned long tmp = DIV_ROUND_CLOSEST_ULL(parent_rate, j); + + if (abs(tmp - rate) < abs(best_rate - rate)) { + best_rate = tmp; + + if (div_val) + *div_val = j - 1; + + if (best_parent) { + *best_parent = parent; + *best_parent_rate = parent_rate; + } + } + } + } + + return best_rate; +} + +static int ccu_mix_determine_rate(struct clk_hw *hw, + struct clk_rate_request *req) +{ + req->rate = ccu_mix_calc_best_rate(hw, req->rate, + &req->best_parent_hw, + &req->best_parent_rate, + NULL); + return 0; +} + +static int ccu_mix_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct ccu_mix *mix = hw_to_ccu_mix(hw); + struct ccu_common *common = &mix->common; + struct ccu_div_config *div = &mix->div; + u32 current_div, target_div, mask; + + ccu_mix_calc_best_rate(hw, rate, NULL, NULL, &target_div); + + current_div = ccu_read(common, ctrl) >> div->shift; + current_div &= (1 << div->width) - 1; + + if (current_div == target_div) + return 0; + + mask = GENMASK(div->width + div->shift - 1, div->shift); + + ccu_update(common, ctrl, mask, target_div << div->shift); + + return ccu_mix_trigger_fc(hw); +} + +static u8 ccu_mux_get_parent(struct clk_hw *hw) +{ + struct ccu_mix *mix = hw_to_ccu_mix(hw); + struct ccu_mux_config *mux = &mix->mux; + u8 parent; + + parent = ccu_read(&mix->common, ctrl) >> mux->shift; + parent &= (1 << mux->width) - 1; + + return parent; +} + +static int ccu_mux_set_parent(struct clk_hw *hw, u8 index) +{ + struct ccu_mix *mix = hw_to_ccu_mix(hw); + struct ccu_mux_config *mux = &mix->mux; + u32 mask; + + mask = GENMASK(mux->width + mux->shift - 1, mux->shift); + + ccu_update(&mix->common, ctrl, mask, index << mux->shift); + + return ccu_mix_trigger_fc(hw); +} + +const struct clk_ops spacemit_ccu_gate_ops = { + .disable = ccu_gate_disable, + .enable = ccu_gate_enable, + .is_enabled = ccu_gate_is_enabled, +}; + +const struct clk_ops spacemit_ccu_factor_ops = { + .round_rate = ccu_factor_round_rate, + .recalc_rate = ccu_factor_recalc_rate, + .set_rate = ccu_factor_set_rate, +}; + +const struct clk_ops spacemit_ccu_mux_ops = { + .determine_rate = ccu_mix_determine_rate, + .get_parent = ccu_mux_get_parent, + .set_parent = ccu_mux_set_parent, +}; + +const struct clk_ops spacemit_ccu_div_ops = { + 
.determine_rate = ccu_mix_determine_rate, + .recalc_rate = ccu_div_recalc_rate, + .set_rate = ccu_mix_set_rate, +}; + +const struct clk_ops spacemit_ccu_factor_gate_ops = { + .disable = ccu_gate_disable, + .enable = ccu_gate_enable, + .is_enabled = ccu_gate_is_enabled, + + .round_rate = ccu_factor_round_rate, + .recalc_rate = ccu_factor_recalc_rate, + .set_rate = ccu_factor_set_rate, +}; + +const struct clk_ops spacemit_ccu_mux_gate_ops = { + .disable = ccu_gate_disable, + .enable = ccu_gate_enable, + .is_enabled = ccu_gate_is_enabled, + + .determine_rate = ccu_mix_determine_rate, + .get_parent = ccu_mux_get_parent, + .set_parent = ccu_mux_set_parent, +}; + +const struct clk_ops spacemit_ccu_div_gate_ops = { + .disable = ccu_gate_disable, + .enable = ccu_gate_enable, + .is_enabled = ccu_gate_is_enabled, + + .determine_rate = ccu_mix_determine_rate, + .recalc_rate = ccu_div_recalc_rate, + .set_rate = ccu_mix_set_rate, +}; + +const struct clk_ops spacemit_ccu_mux_div_gate_ops = { + .disable = ccu_gate_disable, + .enable = ccu_gate_enable, + .is_enabled = ccu_gate_is_enabled, + + .get_parent = ccu_mux_get_parent, + .set_parent = ccu_mux_set_parent, + + .determine_rate = ccu_mix_determine_rate, + .recalc_rate = ccu_div_recalc_rate, + .set_rate = ccu_mix_set_rate, +}; + +const struct clk_ops spacemit_ccu_mux_div_ops = { + .get_parent = ccu_mux_get_parent, + .set_parent = ccu_mux_set_parent, + + .determine_rate = ccu_mix_determine_rate, + .recalc_rate = ccu_div_recalc_rate, + .set_rate = ccu_mix_set_rate, +}; diff --git a/drivers/clk/spacemit/ccu_mix.h b/drivers/clk/spacemit/ccu_mix.h new file mode 100644 index 000000000000..51d19f5d6aac --- /dev/null +++ b/drivers/clk/spacemit/ccu_mix.h @@ -0,0 +1,218 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2024 SpacemiT Technology Co. Ltd + * Copyright (c) 2024-2025 Haylen Chu <heylenay@4d2.org> + */ + +#ifndef _CCU_MIX_H_ +#define _CCU_MIX_H_ + +#include <linux/clk-provider.h> + +#include "ccu_common.h" + +/** + * struct ccu_gate_config - Gate configuration + * + * @mask: Mask to enable the gate. Some clocks may have more than one bit + * set in this field. 
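 *        For example, a gate whose enable needs two bits asserted at once
 *        could use something like BIT(1) | BIT(4) here; the gate ops apply
 *        and check the whole mask as a unit when enabling, disabling and
 *        reading back the state.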
+ */ +struct ccu_gate_config { + u32 mask; +}; + +struct ccu_factor_config { + u32 div; + u32 mul; +}; + +struct ccu_mux_config { + u8 shift; + u8 width; +}; + +struct ccu_div_config { + u8 shift; + u8 width; +}; + +struct ccu_mix { + struct ccu_factor_config factor; + struct ccu_gate_config gate; + struct ccu_div_config div; + struct ccu_mux_config mux; + struct ccu_common common; +}; + +#define CCU_GATE_INIT(_mask) { .mask = _mask } +#define CCU_FACTOR_INIT(_div, _mul) { .div = _div, .mul = _mul } +#define CCU_MUX_INIT(_shift, _width) { .shift = _shift, .width = _width } +#define CCU_DIV_INIT(_shift, _width) { .shift = _shift, .width = _width } + +#define CCU_PARENT_HW(_parent) { .hw = &_parent.common.hw } +#define CCU_PARENT_NAME(_name) { .fw_name = #_name } + +#define CCU_MIX_INITHW(_name, _parent, _ops, _flags) \ + .hw.init = &(struct clk_init_data) { \ + .flags = _flags, \ + .name = #_name, \ + .parent_data = (const struct clk_parent_data[]) \ + { _parent }, \ + .num_parents = 1, \ + .ops = &_ops, \ + } + +#define CCU_MIX_INITHW_PARENTS(_name, _parents, _ops, _flags) \ + .hw.init = CLK_HW_INIT_PARENTS_DATA(#_name, _parents, &_ops, _flags) + +#define CCU_GATE_DEFINE(_name, _parent, _reg_ctrl, _mask_gate, _flags) \ +static struct ccu_mix _name = { \ + .gate = CCU_GATE_INIT(_mask_gate), \ + .common = { \ + .reg_ctrl = _reg_ctrl, \ + CCU_MIX_INITHW(_name, _parent, spacemit_ccu_gate_ops, _flags), \ + } \ +} + +#define CCU_FACTOR_DEFINE(_name, _parent, _div, _mul) \ +static struct ccu_mix _name = { \ + .factor = CCU_FACTOR_INIT(_div, _mul), \ + .common = { \ + CCU_MIX_INITHW(_name, _parent, spacemit_ccu_factor_ops, 0), \ + } \ +} + +#define CCU_MUX_DEFINE(_name, _parents, _reg_ctrl, _shift, _width, _flags) \ +static struct ccu_mix _name = { \ + .mux = CCU_MUX_INIT(_shift, _width), \ + .common = { \ + .reg_ctrl = _reg_ctrl, \ + CCU_MIX_INITHW_PARENTS(_name, _parents, spacemit_ccu_mux_ops, \ + _flags), \ + } \ +} + +#define CCU_DIV_DEFINE(_name, _parent, _reg_ctrl, _shift, _width, _flags) \ +static struct ccu_mix _name = { \ + .div = CCU_DIV_INIT(_shift, _width), \ + .common = { \ + .reg_ctrl = _reg_ctrl, \ + CCU_MIX_INITHW(_name, _parent, spacemit_ccu_div_ops, _flags) \ + } \ +} + +#define CCU_FACTOR_GATE_DEFINE(_name, _parent, _reg_ctrl, _mask_gate, _div, \ + _mul) \ +static struct ccu_mix _name = { \ + .gate = CCU_GATE_INIT(_mask_gate), \ + .factor = CCU_FACTOR_INIT(_div, _mul), \ + .common = { \ + .reg_ctrl = _reg_ctrl, \ + CCU_MIX_INITHW(_name, _parent, spacemit_ccu_factor_gate_ops, 0) \ + } \ +} + +#define CCU_MUX_GATE_DEFINE(_name, _parents, _reg_ctrl, _shift, _width, \ + _mask_gate, _flags) \ +static struct ccu_mix _name = { \ + .gate = CCU_GATE_INIT(_mask_gate), \ + .mux = CCU_MUX_INIT(_shift, _width), \ + .common = { \ + .reg_ctrl = _reg_ctrl, \ + CCU_MIX_INITHW_PARENTS(_name, _parents, \ + spacemit_ccu_mux_gate_ops, _flags), \ + } \ +} + +#define CCU_DIV_GATE_DEFINE(_name, _parent, _reg_ctrl, _shift, _width, \ + _mask_gate, _flags) \ +static struct ccu_mix _name = { \ + .gate = CCU_GATE_INIT(_mask_gate), \ + .div = CCU_DIV_INIT(_shift, _width), \ + .common = { \ + .reg_ctrl = _reg_ctrl, \ + CCU_MIX_INITHW(_name, _parent, spacemit_ccu_div_gate_ops, \ + _flags), \ + } \ +} + +#define CCU_MUX_DIV_GATE_DEFINE(_name, _parents, _reg_ctrl, _mshift, _mwidth, \ + _muxshift, _muxwidth, _mask_gate, _flags) \ +static struct ccu_mix _name = { \ + .gate = CCU_GATE_INIT(_mask_gate), \ + .div = CCU_DIV_INIT(_mshift, _mwidth), \ + .mux = CCU_MUX_INIT(_muxshift, _muxwidth), \ + .common = { \ + 
.reg_ctrl = _reg_ctrl, \ + CCU_MIX_INITHW_PARENTS(_name, _parents, \ + spacemit_ccu_mux_div_gate_ops, _flags), \ + }, \ +} + +#define CCU_MUX_DIV_GATE_SPLIT_FC_DEFINE(_name, _parents, _reg_ctrl, _reg_fc, \ + _mshift, _mwidth, _mask_fc, _muxshift, \ + _muxwidth, _mask_gate, _flags) \ +static struct ccu_mix _name = { \ + .gate = CCU_GATE_INIT(_mask_gate), \ + .div = CCU_DIV_INIT(_mshift, _mwidth), \ + .mux = CCU_MUX_INIT(_muxshift, _muxwidth), \ + .common = { \ + .reg_ctrl = _reg_ctrl, \ + .reg_fc = _reg_fc, \ + .mask_fc = _mask_fc, \ + CCU_MIX_INITHW_PARENTS(_name, _parents, \ + spacemit_ccu_mux_div_gate_ops, _flags), \ + }, \ +} + +#define CCU_MUX_DIV_GATE_FC_DEFINE(_name, _parents, _reg_ctrl, _mshift, _mwidth,\ + _mask_fc, _muxshift, _muxwidth, _mask_gate, \ + _flags) \ +CCU_MUX_DIV_GATE_SPLIT_FC_DEFINE(_name, _parents, _reg_ctrl, _reg_ctrl, _mshift,\ + _mwidth, _mask_fc, _muxshift, _muxwidth, \ + _mask_gate, _flags) + +#define CCU_MUX_DIV_FC_DEFINE(_name, _parents, _reg_ctrl, _mshift, _mwidth, \ + _mask_fc, _muxshift, _muxwidth, _flags) \ +static struct ccu_mix _name = { \ + .div = CCU_DIV_INIT(_mshift, _mwidth), \ + .mux = CCU_MUX_INIT(_muxshift, _muxwidth), \ + .common = { \ + .reg_ctrl = _reg_ctrl, \ + .reg_fc = _reg_ctrl, \ + .mask_fc = _mask_fc, \ + CCU_MIX_INITHW_PARENTS(_name, _parents, \ + spacemit_ccu_mux_div_ops, _flags), \ + }, \ +} + +#define CCU_MUX_FC_DEFINE(_name, _parents, _reg_ctrl, _mask_fc, _muxshift, \ + _muxwidth, _flags) \ +static struct ccu_mix _name = { \ + .mux = CCU_MUX_INIT(_muxshift, _muxwidth), \ + .common = { \ + .reg_ctrl = _reg_ctrl, \ + .reg_fc = _reg_ctrl, \ + .mask_fc = _mask_fc, \ + CCU_MIX_INITHW_PARENTS(_name, _parents, spacemit_ccu_mux_ops, \ + _flags) \ + }, \ +} + +static inline struct ccu_mix *hw_to_ccu_mix(struct clk_hw *hw) +{ + struct ccu_common *common = hw_to_ccu_common(hw); + + return container_of(common, struct ccu_mix, common); +} + +extern const struct clk_ops spacemit_ccu_gate_ops; +extern const struct clk_ops spacemit_ccu_factor_ops; +extern const struct clk_ops spacemit_ccu_mux_ops; +extern const struct clk_ops spacemit_ccu_div_ops; +extern const struct clk_ops spacemit_ccu_factor_gate_ops; +extern const struct clk_ops spacemit_ccu_div_gate_ops; +extern const struct clk_ops spacemit_ccu_mux_gate_ops; +extern const struct clk_ops spacemit_ccu_mux_div_ops; +extern const struct clk_ops spacemit_ccu_mux_div_gate_ops; +#endif /* _CCU_DIV_H_ */ diff --git a/drivers/clk/spacemit/ccu_pll.c b/drivers/clk/spacemit/ccu_pll.c new file mode 100644 index 000000000000..4427dcfbbb97 --- /dev/null +++ b/drivers/clk/spacemit/ccu_pll.c @@ -0,0 +1,157 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2024 SpacemiT Technology Co. 
Ltd + * Copyright (c) 2024-2025 Haylen Chu <heylenay@4d2.org> + */ + +#include <linux/clk-provider.h> +#include <linux/math.h> +#include <linux/regmap.h> + +#include "ccu_common.h" +#include "ccu_pll.h" + +#define PLL_TIMEOUT_US 3000 +#define PLL_DELAY_US 5 + +#define PLL_SWCR3_EN ((u32)BIT(31)) +#define PLL_SWCR3_MASK GENMASK(30, 0) + +static const struct ccu_pll_rate_tbl *ccu_pll_lookup_best_rate(struct ccu_pll *pll, + unsigned long rate) +{ + struct ccu_pll_config *config = &pll->config; + const struct ccu_pll_rate_tbl *best_entry; + unsigned long best_delta = ULONG_MAX; + int i; + + for (i = 0; i < config->tbl_num; i++) { + const struct ccu_pll_rate_tbl *entry = &config->rate_tbl[i]; + unsigned long delta = abs_diff(entry->rate, rate); + + if (delta < best_delta) { + best_delta = delta; + best_entry = entry; + } + } + + return best_entry; +} + +static const struct ccu_pll_rate_tbl *ccu_pll_lookup_matched_entry(struct ccu_pll *pll) +{ + struct ccu_pll_config *config = &pll->config; + u32 swcr1, swcr3; + int i; + + swcr1 = ccu_read(&pll->common, swcr1); + swcr3 = ccu_read(&pll->common, swcr3); + swcr3 &= PLL_SWCR3_MASK; + + for (i = 0; i < config->tbl_num; i++) { + const struct ccu_pll_rate_tbl *entry = &config->rate_tbl[i]; + + if (swcr1 == entry->swcr1 && swcr3 == entry->swcr3) + return entry; + } + + return NULL; +} + +static void ccu_pll_update_param(struct ccu_pll *pll, const struct ccu_pll_rate_tbl *entry) +{ + struct ccu_common *common = &pll->common; + + regmap_write(common->regmap, common->reg_swcr1, entry->swcr1); + ccu_update(common, swcr3, PLL_SWCR3_MASK, entry->swcr3); +} + +static int ccu_pll_is_enabled(struct clk_hw *hw) +{ + struct ccu_common *common = hw_to_ccu_common(hw); + + return ccu_read(common, swcr3) & PLL_SWCR3_EN; +} + +static int ccu_pll_enable(struct clk_hw *hw) +{ + struct ccu_pll *pll = hw_to_ccu_pll(hw); + struct ccu_common *common = &pll->common; + unsigned int tmp; + + ccu_update(common, swcr3, PLL_SWCR3_EN, PLL_SWCR3_EN); + + /* check lock status */ + return regmap_read_poll_timeout_atomic(common->lock_regmap, + pll->config.reg_lock, + tmp, + tmp & pll->config.mask_lock, + PLL_DELAY_US, PLL_TIMEOUT_US); +} + +static void ccu_pll_disable(struct clk_hw *hw) +{ + struct ccu_common *common = hw_to_ccu_common(hw); + + ccu_update(common, swcr3, PLL_SWCR3_EN, 0); +} + +/* + * PLLs must be gated before changing rate, which is ensured by + * flag CLK_SET_RATE_GATE. + */ +static int ccu_pll_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct ccu_pll *pll = hw_to_ccu_pll(hw); + const struct ccu_pll_rate_tbl *entry; + + entry = ccu_pll_lookup_best_rate(pll, rate); + ccu_pll_update_param(pll, entry); + + return 0; +} + +static unsigned long ccu_pll_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct ccu_pll *pll = hw_to_ccu_pll(hw); + const struct ccu_pll_rate_tbl *entry; + + entry = ccu_pll_lookup_matched_entry(pll); + + WARN_ON_ONCE(!entry); + + return entry ? 
entry->rate : -EINVAL; +} + +static long ccu_pll_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *prate) +{ + struct ccu_pll *pll = hw_to_ccu_pll(hw); + + return ccu_pll_lookup_best_rate(pll, rate)->rate; +} + +static int ccu_pll_init(struct clk_hw *hw) +{ + struct ccu_pll *pll = hw_to_ccu_pll(hw); + + if (ccu_pll_lookup_matched_entry(pll)) + return 0; + + ccu_pll_disable(hw); + ccu_pll_update_param(pll, &pll->config.rate_tbl[0]); + + return 0; +} + +const struct clk_ops spacemit_ccu_pll_ops = { + .init = ccu_pll_init, + .enable = ccu_pll_enable, + .disable = ccu_pll_disable, + .set_rate = ccu_pll_set_rate, + .recalc_rate = ccu_pll_recalc_rate, + .round_rate = ccu_pll_round_rate, + .is_enabled = ccu_pll_is_enabled, +}; diff --git a/drivers/clk/spacemit/ccu_pll.h b/drivers/clk/spacemit/ccu_pll.h new file mode 100644 index 000000000000..0592f4c3068c --- /dev/null +++ b/drivers/clk/spacemit/ccu_pll.h @@ -0,0 +1,86 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2024 SpacemiT Technology Co. Ltd + * Copyright (c) 2024-2025 Haylen Chu <heylenay@4d2.org> + */ + +#ifndef _CCU_PLL_H_ +#define _CCU_PLL_H_ + +#include <linux/clk-provider.h> + +#include "ccu_common.h" + +/** + * struct ccu_pll_rate_tbl - Structure mapping between PLL rate and register + * configuration. + * + * @rate: PLL rate + * @swcr1: Register value of PLLX_SW1_CTRL (PLLx_SWCR1). + * @swcr3: Register value of the PLLx_SW3_CTRL's lowest 31 bits of + * PLLx_SW3_CTRL (PLLx_SWCR3). This highest bit is for enabling + * the PLL and not contained in this field. + */ +struct ccu_pll_rate_tbl { + unsigned long rate; + u32 swcr1; + u32 swcr3; +}; + +struct ccu_pll_config { + const struct ccu_pll_rate_tbl *rate_tbl; + u32 tbl_num; + u32 reg_lock; + u32 mask_lock; +}; + +#define CCU_PLL_RATE(_rate, _swcr1, _swcr3) \ + { \ + .rate = _rate, \ + .swcr1 = _swcr1, \ + .swcr3 = _swcr3, \ + } + +struct ccu_pll { + struct ccu_common common; + struct ccu_pll_config config; +}; + +#define CCU_PLL_CONFIG(_table, _reg_lock, _mask_lock) \ + { \ + .rate_tbl = _table, \ + .tbl_num = ARRAY_SIZE(_table), \ + .reg_lock = (_reg_lock), \ + .mask_lock = (_mask_lock), \ + } + +#define CCU_PLL_HWINIT(_name, _flags) \ + (&(struct clk_init_data) { \ + .name = #_name, \ + .ops = &spacemit_ccu_pll_ops, \ + .parent_data = &(struct clk_parent_data) { .index = 0 }, \ + .num_parents = 1, \ + .flags = _flags, \ + }) + +#define CCU_PLL_DEFINE(_name, _table, _reg_swcr1, _reg_swcr3, _reg_lock, \ + _mask_lock, _flags) \ +static struct ccu_pll _name = { \ + .config = CCU_PLL_CONFIG(_table, _reg_lock, _mask_lock), \ + .common = { \ + .reg_swcr1 = _reg_swcr1, \ + .reg_swcr3 = _reg_swcr3, \ + .hw.init = CCU_PLL_HWINIT(_name, _flags) \ + } \ +} + +static inline struct ccu_pll *hw_to_ccu_pll(struct clk_hw *hw) +{ + struct ccu_common *common = hw_to_ccu_common(hw); + + return container_of(common, struct ccu_pll, common); +} + +extern const struct clk_ops spacemit_ccu_pll_ops; + +#endif diff --git a/drivers/clk/sunxi-ng/Kconfig b/drivers/clk/sunxi-ng/Kconfig index 5830a9d87bf2..8896fd052ef1 100644 --- a/drivers/clk/sunxi-ng/Kconfig +++ b/drivers/clk/sunxi-ng/Kconfig @@ -9,123 +9,123 @@ if SUNXI_CCU config SUNIV_F1C100S_CCU tristate "Support for the Allwinner newer F1C100s CCU" - default y + default ARCH_SUNXI depends on MACH_SUNIV || COMPILE_TEST config SUN20I_D1_CCU tristate "Support for the Allwinner D1/R528/T113 CCU" - default y + default ARCH_SUNXI depends on MACH_SUN8I || RISCV || COMPILE_TEST config SUN20I_D1_R_CCU tristate "Support for the 
Allwinner D1/R528/T113 PRCM CCU" - default y + default ARCH_SUNXI depends on MACH_SUN8I || RISCV || COMPILE_TEST config SUN50I_A64_CCU tristate "Support for the Allwinner A64 CCU" - default y + default ARCH_SUNXI depends on ARM64 || COMPILE_TEST config SUN50I_A100_CCU tristate "Support for the Allwinner A100 CCU" - default y + default ARCH_SUNXI depends on ARM64 || COMPILE_TEST config SUN50I_A100_R_CCU tristate "Support for the Allwinner A100 PRCM CCU" - default y + default ARCH_SUNXI depends on ARM64 || COMPILE_TEST config SUN50I_H6_CCU tristate "Support for the Allwinner H6 CCU" - default y + default ARCH_SUNXI depends on ARM64 || COMPILE_TEST config SUN50I_H616_CCU tristate "Support for the Allwinner H616 CCU" - default y + default ARCH_SUNXI depends on ARM64 || COMPILE_TEST config SUN50I_H6_R_CCU tristate "Support for the Allwinner H6 and H616 PRCM CCU" - default y + default ARCH_SUNXI depends on ARM64 || COMPILE_TEST config SUN55I_A523_CCU tristate "Support for the Allwinner A523/T527 CCU" - default y + default ARCH_SUNXI depends on ARM64 || COMPILE_TEST config SUN55I_A523_R_CCU tristate "Support for the Allwinner A523/T527 PRCM CCU" - default y + default ARCH_SUNXI depends on ARM64 || COMPILE_TEST config SUN4I_A10_CCU tristate "Support for the Allwinner A10/A20 CCU" - default y + default ARCH_SUNXI depends on MACH_SUN4I || MACH_SUN7I || COMPILE_TEST config SUN5I_CCU bool "Support for the Allwinner sun5i family CCM" - default y + default ARCH_SUNXI depends on MACH_SUN5I || COMPILE_TEST depends on SUNXI_CCU=y config SUN6I_A31_CCU tristate "Support for the Allwinner A31/A31s CCU" - default y + default ARCH_SUNXI depends on MACH_SUN6I || COMPILE_TEST config SUN6I_RTC_CCU tristate "Support for the Allwinner H616/R329 RTC CCU" - default y + default ARCH_SUNXI depends on MACH_SUN8I || ARM64 || RISCV || COMPILE_TEST config SUN8I_A23_CCU tristate "Support for the Allwinner A23 CCU" - default y + default ARCH_SUNXI depends on MACH_SUN8I || COMPILE_TEST config SUN8I_A33_CCU tristate "Support for the Allwinner A33 CCU" - default y + default ARCH_SUNXI depends on MACH_SUN8I || COMPILE_TEST config SUN8I_A83T_CCU tristate "Support for the Allwinner A83T CCU" - default y + default ARCH_SUNXI depends on MACH_SUN8I || COMPILE_TEST config SUN8I_H3_CCU tristate "Support for the Allwinner H3 CCU" - default y + default ARCH_SUNXI depends on MACH_SUN8I || ARM64 || COMPILE_TEST config SUN8I_V3S_CCU tristate "Support for the Allwinner V3s CCU" - default y + default ARCH_SUNXI depends on MACH_SUN8I || COMPILE_TEST config SUN8I_DE2_CCU tristate "Support for the Allwinner SoCs DE2 CCU" - default y + default ARCH_SUNXI depends on MACH_SUN8I || ARM64 || RISCV || COMPILE_TEST config SUN8I_R40_CCU tristate "Support for the Allwinner R40 CCU" - default y + default ARCH_SUNXI depends on MACH_SUN8I || COMPILE_TEST config SUN9I_A80_CCU tristate "Support for the Allwinner A80 CCU" - default y + default ARCH_SUNXI depends on MACH_SUN9I || COMPILE_TEST config SUN8I_R_CCU tristate "Support for Allwinner SoCs' PRCM CCUs" - default y + default ARCH_SUNXI depends on MACH_SUN8I || ARM64 || COMPILE_TEST endif diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h616.c b/drivers/clk/sunxi-ng/ccu-sun50i-h616.c index daa462c7d477..955c614830fa 100644 --- a/drivers/clk/sunxi-ng/ccu-sun50i-h616.c +++ b/drivers/clk/sunxi-ng/ccu-sun50i-h616.c @@ -1094,6 +1094,7 @@ static const struct ccu_reset_map sun50i_h616_ccu_resets[] = { [RST_BUS_TCON_LCD1] = { 0xb7c, BIT(17) }, [RST_BUS_TCON_TV0] = { 0xb9c, BIT(16) }, [RST_BUS_TCON_TV1] = { 0xb9c, 
BIT(17) }, + [RST_BUS_LVDS] = { 0xbac, BIT(16) }, [RST_BUS_TVE_TOP] = { 0xbbc, BIT(16) }, [RST_BUS_TVE0] = { 0xbbc, BIT(17) }, [RST_BUS_HDCP] = { 0xc4c, BIT(16) }, diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-de2.c b/drivers/clk/sunxi-ng/ccu-sun8i-de2.c index f2aa71206bc2..a6cd0f988859 100644 --- a/drivers/clk/sunxi-ng/ccu-sun8i-de2.c +++ b/drivers/clk/sunxi-ng/ccu-sun8i-de2.c @@ -5,6 +5,7 @@ #include <linux/clk.h> #include <linux/clk-provider.h> +#include <linux/io.h> #include <linux/module.h> #include <linux/of.h> #include <linux/platform_device.h> @@ -239,6 +240,16 @@ static const struct sunxi_ccu_desc sun50i_h5_de2_clk_desc = { .num_resets = ARRAY_SIZE(sun50i_h5_de2_resets), }; +static const struct sunxi_ccu_desc sun50i_h616_de33_clk_desc = { + .ccu_clks = sun8i_de2_ccu_clks, + .num_ccu_clks = ARRAY_SIZE(sun8i_de2_ccu_clks), + + .hw_clks = &sun8i_h3_de2_hw_clks, + + .resets = sun50i_h5_de2_resets, + .num_resets = ARRAY_SIZE(sun50i_h5_de2_resets), +}; + static int sunxi_de2_clk_probe(struct platform_device *pdev) { struct clk *bus_clk, *mod_clk; @@ -291,6 +302,16 @@ static int sunxi_de2_clk_probe(struct platform_device *pdev) goto err_disable_mod_clk; } + /* + * The DE33 requires these additional (unknown) registers set + * during initialisation. + */ + if (of_device_is_compatible(pdev->dev.of_node, + "allwinner,sun50i-h616-de33-clk")) { + writel(0, reg + 0x24); + writel(0x0000a980, reg + 0x28); + } + ret = devm_sunxi_ccu_probe(&pdev->dev, reg, ccu_desc); if (ret) goto err_assert_reset; @@ -335,6 +356,10 @@ static const struct of_device_id sunxi_de2_clk_ids[] = { .compatible = "allwinner,sun50i-h6-de3-clk", .data = &sun50i_h5_de2_clk_desc, }, + { + .compatible = "allwinner,sun50i-h616-de33-clk", + .data = &sun50i_h616_de33_clk_desc, + }, { } }; MODULE_DEVICE_TABLE(of, sunxi_de2_clk_ids); diff --git a/drivers/clk/sunxi/Kconfig b/drivers/clk/sunxi/Kconfig index 1c4e543366dd..5e2f92bfe412 100644 --- a/drivers/clk/sunxi/Kconfig +++ b/drivers/clk/sunxi/Kconfig @@ -2,13 +2,13 @@ menuconfig CLK_SUNXI bool "Legacy clock support for Allwinner SoCs" depends on (ARM && ARCH_SUNXI) || COMPILE_TEST - default y + default (ARM && ARCH_SUNXI) if CLK_SUNXI config CLK_SUNXI_CLOCKS bool "Legacy clock drivers" - default y + default ARCH_SUNXI help Legacy clock drivers being used on older (A10, A13, A20, A23, A31, A80) SoCs. These drivers are kept around for @@ -19,14 +19,14 @@ config CLK_SUNXI_CLOCKS config CLK_SUNXI_PRCM_SUN6I bool "Legacy A31 PRCM driver" - default y + default ARCH_SUNXI help Legacy clock driver for the A31 PRCM clocks. Those are usually needed for the PMIC communication, mostly. config CLK_SUNXI_PRCM_SUN8I bool "Legacy sun8i PRCM driver" - default y + default ARCH_SUNXI help Legacy clock driver for the sun8i family PRCM clocks. Those are usually needed for the PMIC communication, @@ -34,7 +34,7 @@ config CLK_SUNXI_PRCM_SUN8I config CLK_SUNXI_PRCM_SUN9I bool "Legacy A80 PRCM driver" - default y + default ARCH_SUNXI help Legacy clock driver for the A80 PRCM clocks. Those are usually needed for the PMIC communication, mostly. 
diff --git a/drivers/clk/thead/clk-th1520-ap.c b/drivers/clk/thead/clk-th1520-ap.c index 4c9555fc6184..ebfb1d59401d 100644 --- a/drivers/clk/thead/clk-th1520-ap.c +++ b/drivers/clk/thead/clk-th1520-ap.c @@ -847,6 +847,67 @@ static CCU_GATE(CLK_SRAM1, sram1_clk, "sram1", axi_aclk_pd, 0x20c, BIT(3), 0); static CCU_GATE(CLK_SRAM2, sram2_clk, "sram2", axi_aclk_pd, 0x20c, BIT(2), 0); static CCU_GATE(CLK_SRAM3, sram3_clk, "sram3", axi_aclk_pd, 0x20c, BIT(1), 0); +static CCU_GATE(CLK_AXI4_VO_ACLK, axi4_vo_aclk, "axi4-vo-aclk", + video_pll_clk_pd, 0x0, BIT(0), 0); +static CCU_GATE(CLK_GPU_CORE, gpu_core_clk, "gpu-core-clk", video_pll_clk_pd, + 0x0, BIT(3), 0); +static CCU_GATE(CLK_GPU_CFG_ACLK, gpu_cfg_aclk, "gpu-cfg-aclk", + video_pll_clk_pd, 0x0, BIT(4), 0); +static CCU_GATE(CLK_DPU_PIXELCLK0, dpu0_pixelclk, "dpu0-pixelclk", + video_pll_clk_pd, 0x0, BIT(5), 0); +static CCU_GATE(CLK_DPU_PIXELCLK1, dpu1_pixelclk, "dpu1-pixelclk", + video_pll_clk_pd, 0x0, BIT(6), 0); +static CCU_GATE(CLK_DPU_HCLK, dpu_hclk, "dpu-hclk", video_pll_clk_pd, 0x0, + BIT(7), 0); +static CCU_GATE(CLK_DPU_ACLK, dpu_aclk, "dpu-aclk", video_pll_clk_pd, 0x0, + BIT(8), 0); +static CCU_GATE(CLK_DPU_CCLK, dpu_cclk, "dpu-cclk", video_pll_clk_pd, 0x0, + BIT(9), 0); +static CCU_GATE(CLK_HDMI_SFR, hdmi_sfr_clk, "hdmi-sfr-clk", video_pll_clk_pd, + 0x0, BIT(10), 0); +static CCU_GATE(CLK_HDMI_PCLK, hdmi_pclk, "hdmi-pclk", video_pll_clk_pd, 0x0, + BIT(11), 0); +static CCU_GATE(CLK_HDMI_CEC, hdmi_cec_clk, "hdmi-cec-clk", video_pll_clk_pd, + 0x0, BIT(12), 0); +static CCU_GATE(CLK_MIPI_DSI0_PCLK, mipi_dsi0_pclk, "mipi-dsi0-pclk", + video_pll_clk_pd, 0x0, BIT(13), 0); +static CCU_GATE(CLK_MIPI_DSI1_PCLK, mipi_dsi1_pclk, "mipi-dsi1-pclk", + video_pll_clk_pd, 0x0, BIT(14), 0); +static CCU_GATE(CLK_MIPI_DSI0_CFG, mipi_dsi0_cfg_clk, "mipi-dsi0-cfg-clk", + video_pll_clk_pd, 0x0, BIT(15), 0); +static CCU_GATE(CLK_MIPI_DSI1_CFG, mipi_dsi1_cfg_clk, "mipi-dsi1-cfg-clk", + video_pll_clk_pd, 0x0, BIT(16), 0); +static CCU_GATE(CLK_MIPI_DSI0_REFCLK, mipi_dsi0_refclk, "mipi-dsi0-refclk", + video_pll_clk_pd, 0x0, BIT(17), 0); +static CCU_GATE(CLK_MIPI_DSI1_REFCLK, mipi_dsi1_refclk, "mipi-dsi1-refclk", + video_pll_clk_pd, 0x0, BIT(18), 0); +static CCU_GATE(CLK_HDMI_I2S, hdmi_i2s_clk, "hdmi-i2s-clk", video_pll_clk_pd, + 0x0, BIT(19), 0); +static CCU_GATE(CLK_X2H_DPU1_ACLK, x2h_dpu1_aclk, "x2h-dpu1-aclk", + video_pll_clk_pd, 0x0, BIT(20), 0); +static CCU_GATE(CLK_X2H_DPU_ACLK, x2h_dpu_aclk, "x2h-dpu-aclk", + video_pll_clk_pd, 0x0, BIT(21), 0); +static CCU_GATE(CLK_AXI4_VO_PCLK, axi4_vo_pclk, "axi4-vo-pclk", + video_pll_clk_pd, 0x0, BIT(22), 0); +static CCU_GATE(CLK_IOPMP_VOSYS_DPU_PCLK, iopmp_vosys_dpu_pclk, + "iopmp-vosys-dpu-pclk", video_pll_clk_pd, 0x0, BIT(23), 0); +static CCU_GATE(CLK_IOPMP_VOSYS_DPU1_PCLK, iopmp_vosys_dpu1_pclk, + "iopmp-vosys-dpu1-pclk", video_pll_clk_pd, 0x0, BIT(24), 0); +static CCU_GATE(CLK_IOPMP_VOSYS_GPU_PCLK, iopmp_vosys_gpu_pclk, + "iopmp-vosys-gpu-pclk", video_pll_clk_pd, 0x0, BIT(25), 0); +static CCU_GATE(CLK_IOPMP_DPU1_ACLK, iopmp_dpu1_aclk, "iopmp-dpu1-aclk", + video_pll_clk_pd, 0x0, BIT(27), 0); +static CCU_GATE(CLK_IOPMP_DPU_ACLK, iopmp_dpu_aclk, "iopmp-dpu-aclk", + video_pll_clk_pd, 0x0, BIT(28), 0); +static CCU_GATE(CLK_IOPMP_GPU_ACLK, iopmp_gpu_aclk, "iopmp-gpu-aclk", + video_pll_clk_pd, 0x0, BIT(29), 0); +static CCU_GATE(CLK_MIPIDSI0_PIXCLK, mipi_dsi0_pixclk, "mipi-dsi0-pixclk", + video_pll_clk_pd, 0x0, BIT(30), 0); +static CCU_GATE(CLK_MIPIDSI1_PIXCLK, mipi_dsi1_pixclk, "mipi-dsi1-pixclk", + video_pll_clk_pd, 0x0, 
BIT(31), 0); +static CCU_GATE(CLK_HDMI_PIXCLK, hdmi_pixclk, "hdmi-pixclk", video_pll_clk_pd, + 0x4, BIT(0), 0); + static CLK_FIXED_FACTOR_HW(gmac_pll_clk_100m, "gmac-pll-clk-100m", &gmac_pll_clk.common.hw, 10, 1, 0); @@ -963,7 +1024,38 @@ static struct ccu_common *th1520_gate_clks[] = { &sram3_clk.common, }; -#define NR_CLKS (CLK_UART_SCLK + 1) +static struct ccu_common *th1520_vo_gate_clks[] = { + &axi4_vo_aclk.common, + &gpu_core_clk.common, + &gpu_cfg_aclk.common, + &dpu0_pixelclk.common, + &dpu1_pixelclk.common, + &dpu_hclk.common, + &dpu_aclk.common, + &dpu_cclk.common, + &hdmi_sfr_clk.common, + &hdmi_pclk.common, + &hdmi_cec_clk.common, + &mipi_dsi0_pclk.common, + &mipi_dsi1_pclk.common, + &mipi_dsi0_cfg_clk.common, + &mipi_dsi1_cfg_clk.common, + &mipi_dsi0_refclk.common, + &mipi_dsi1_refclk.common, + &hdmi_i2s_clk.common, + &x2h_dpu1_aclk.common, + &x2h_dpu_aclk.common, + &axi4_vo_pclk.common, + &iopmp_vosys_dpu_pclk.common, + &iopmp_vosys_dpu1_pclk.common, + &iopmp_vosys_gpu_pclk.common, + &iopmp_dpu1_aclk.common, + &iopmp_dpu_aclk.common, + &iopmp_gpu_aclk.common, + &mipi_dsi0_pixclk.common, + &mipi_dsi1_pixclk.common, + &hdmi_pixclk.common +}; static const struct regmap_config th1520_clk_regmap_config = { .reg_bits = 32, @@ -972,8 +1064,44 @@ static const struct regmap_config th1520_clk_regmap_config = { .fast_io = true, }; +struct th1520_plat_data { + struct ccu_common **th1520_pll_clks; + struct ccu_common **th1520_div_clks; + struct ccu_common **th1520_mux_clks; + struct ccu_common **th1520_gate_clks; + + int nr_clks; + int nr_pll_clks; + int nr_div_clks; + int nr_mux_clks; + int nr_gate_clks; +}; + +static const struct th1520_plat_data th1520_ap_platdata = { + .th1520_pll_clks = th1520_pll_clks, + .th1520_div_clks = th1520_div_clks, + .th1520_mux_clks = th1520_mux_clks, + .th1520_gate_clks = th1520_gate_clks, + + .nr_clks = CLK_UART_SCLK + 1, + + .nr_pll_clks = ARRAY_SIZE(th1520_pll_clks), + .nr_div_clks = ARRAY_SIZE(th1520_div_clks), + .nr_mux_clks = ARRAY_SIZE(th1520_mux_clks), + .nr_gate_clks = ARRAY_SIZE(th1520_gate_clks), +}; + +static const struct th1520_plat_data th1520_vo_platdata = { + .th1520_gate_clks = th1520_vo_gate_clks, + + .nr_clks = CLK_HDMI_PIXCLK + 1, + + .nr_gate_clks = ARRAY_SIZE(th1520_vo_gate_clks), +}; + static int th1520_clk_probe(struct platform_device *pdev) { + const struct th1520_plat_data *plat_data; struct device *dev = &pdev->dev; struct clk_hw_onecell_data *priv; @@ -982,11 +1110,16 @@ static int th1520_clk_probe(struct platform_device *pdev) struct clk_hw *hw; int ret, i; - priv = devm_kzalloc(dev, struct_size(priv, hws, NR_CLKS), GFP_KERNEL); + plat_data = device_get_match_data(&pdev->dev); + if (!plat_data) + return dev_err_probe(&pdev->dev, -ENODEV, + "No device match data found\n"); + + priv = devm_kzalloc(dev, struct_size(priv, hws, plat_data->nr_clks), GFP_KERNEL); if (!priv) return -ENOMEM; - priv->num = NR_CLKS; + priv->num = plat_data->nr_clks; base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(base)) @@ -996,35 +1129,35 @@ static int th1520_clk_probe(struct platform_device *pdev) if (IS_ERR(map)) return PTR_ERR(map); - for (i = 0; i < ARRAY_SIZE(th1520_pll_clks); i++) { - struct ccu_pll *cp = hw_to_ccu_pll(&th1520_pll_clks[i]->hw); + for (i = 0; i < plat_data->nr_pll_clks; i++) { + struct ccu_pll *cp = hw_to_ccu_pll(&plat_data->th1520_pll_clks[i]->hw); - th1520_pll_clks[i]->map = map; + plat_data->th1520_pll_clks[i]->map = map; - ret = devm_clk_hw_register(dev, &th1520_pll_clks[i]->hw); + ret = devm_clk_hw_register(dev, 
&plat_data->th1520_pll_clks[i]->hw); if (ret) return ret; priv->hws[cp->common.clkid] = &cp->common.hw; } - for (i = 0; i < ARRAY_SIZE(th1520_div_clks); i++) { - struct ccu_div *cd = hw_to_ccu_div(&th1520_div_clks[i]->hw); + for (i = 0; i < plat_data->nr_div_clks; i++) { + struct ccu_div *cd = hw_to_ccu_div(&plat_data->th1520_div_clks[i]->hw); - th1520_div_clks[i]->map = map; + plat_data->th1520_div_clks[i]->map = map; - ret = devm_clk_hw_register(dev, &th1520_div_clks[i]->hw); + ret = devm_clk_hw_register(dev, &plat_data->th1520_div_clks[i]->hw); if (ret) return ret; priv->hws[cd->common.clkid] = &cd->common.hw; } - for (i = 0; i < ARRAY_SIZE(th1520_mux_clks); i++) { - struct ccu_mux *cm = hw_to_ccu_mux(&th1520_mux_clks[i]->hw); + for (i = 0; i < plat_data->nr_mux_clks; i++) { + struct ccu_mux *cm = hw_to_ccu_mux(&plat_data->th1520_mux_clks[i]->hw); const struct clk_init_data *init = cm->common.hw.init; - th1520_mux_clks[i]->map = map; + plat_data->th1520_mux_clks[i]->map = map; hw = devm_clk_hw_register_mux_parent_data_table(dev, init->name, init->parent_data, @@ -1040,10 +1173,10 @@ static int th1520_clk_probe(struct platform_device *pdev) priv->hws[cm->common.clkid] = hw; } - for (i = 0; i < ARRAY_SIZE(th1520_gate_clks); i++) { - struct ccu_gate *cg = hw_to_ccu_gate(&th1520_gate_clks[i]->hw); + for (i = 0; i < plat_data->nr_gate_clks; i++) { + struct ccu_gate *cg = hw_to_ccu_gate(&plat_data->th1520_gate_clks[i]->hw); - th1520_gate_clks[i]->map = map; + plat_data->th1520_gate_clks[i]->map = map; hw = devm_clk_hw_register_gate_parent_data(dev, cg->common.hw.init->name, @@ -1057,19 +1190,21 @@ static int th1520_clk_probe(struct platform_device *pdev) priv->hws[cg->common.clkid] = hw; } - ret = devm_clk_hw_register(dev, &osc12m_clk.hw); - if (ret) - return ret; - priv->hws[CLK_OSC12M] = &osc12m_clk.hw; + if (plat_data == &th1520_ap_platdata) { + ret = devm_clk_hw_register(dev, &osc12m_clk.hw); + if (ret) + return ret; + priv->hws[CLK_OSC12M] = &osc12m_clk.hw; - ret = devm_clk_hw_register(dev, &gmac_pll_clk_100m.hw); - if (ret) - return ret; - priv->hws[CLK_PLL_GMAC_100M] = &gmac_pll_clk_100m.hw; + ret = devm_clk_hw_register(dev, &gmac_pll_clk_100m.hw); + if (ret) + return ret; + priv->hws[CLK_PLL_GMAC_100M] = &gmac_pll_clk_100m.hw; - ret = devm_clk_hw_register(dev, &emmc_sdio_ref_clk.hw); - if (ret) - return ret; + ret = devm_clk_hw_register(dev, &emmc_sdio_ref_clk.hw); + if (ret) + return ret; + } ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, priv); if (ret) @@ -1081,6 +1216,11 @@ static int th1520_clk_probe(struct platform_device *pdev) static const struct of_device_id th1520_clk_match[] = { { .compatible = "thead,th1520-clk-ap", + .data = &th1520_ap_platdata, + }, + { + .compatible = "thead,th1520-clk-vo", + .data = &th1520_vo_platdata, }, { /* sentinel */ }, }; diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig index d64b07ec48e5..78702a08364f 100644 --- a/drivers/cpufreq/Kconfig +++ b/drivers/cpufreq/Kconfig @@ -217,6 +217,18 @@ config CPUFREQ_DT If in doubt, say N. +config CPUFREQ_DT_RUST + tristate "Rust based Generic DT based cpufreq driver" + depends on HAVE_CLK && OF && RUST + select CPUFREQ_DT_PLATDEV + select PM_OPP + help + This adds a Rust based generic DT based cpufreq driver for frequency + management. It supports both uniprocessor (UP) and symmetric + multiprocessor (SMP) systems. + + If in doubt, say N. 
+ config CPUFREQ_VIRT tristate "Virtual cpufreq driver" depends on GENERIC_ARCH_TOPOLOGY diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index 22ab45209f9b..d38526b8e063 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -15,6 +15,7 @@ obj-$(CONFIG_CPU_FREQ_GOV_COMMON) += cpufreq_governor.o obj-$(CONFIG_CPU_FREQ_GOV_ATTR_SET) += cpufreq_governor_attr_set.o obj-$(CONFIG_CPUFREQ_DT) += cpufreq-dt.o +obj-$(CONFIG_CPUFREQ_DT_RUST) += rcpufreq_dt.o obj-$(CONFIG_CPUFREQ_DT_PLATDEV) += cpufreq-dt-platdev.o obj-$(CONFIG_CPUFREQ_VIRT) += virtual-cpufreq.o diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c index ea4b8f220a05..4f7f9201598d 100644 --- a/drivers/cpufreq/acpi-cpufreq.c +++ b/drivers/cpufreq/acpi-cpufreq.c @@ -660,7 +660,7 @@ static u64 get_max_boost_ratio(unsigned int cpu, u64 *nominal_freq) nominal_perf = perf_caps.nominal_perf; if (nominal_freq) - *nominal_freq = perf_caps.nominal_freq; + *nominal_freq = perf_caps.nominal_freq * 1000; if (!highest_perf || !nominal_perf) { pr_debug("CPU%d: highest or nominal performance missing\n", cpu); diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c index 4e3ba6e68c32..f7512b4e923e 100644 --- a/drivers/cpufreq/powernow-k8.c +++ b/drivers/cpufreq/powernow-k8.c @@ -482,7 +482,7 @@ static void check_supported_cpu(void *_rc) cpuid(CPUID_FREQ_VOLT_CAPABILITIES, &eax, &ebx, &ecx, &edx); if ((edx & P_STATE_TRANSITION_CAPABLE) != P_STATE_TRANSITION_CAPABLE) { - pr_info("Power state transitions not supported\n"); + pr_info_once("Power state transitions not supported\n"); return; } *rc = 0; diff --git a/drivers/cpufreq/rcpufreq_dt.rs b/drivers/cpufreq/rcpufreq_dt.rs new file mode 100644 index 000000000000..94ed81644fe1 --- /dev/null +++ b/drivers/cpufreq/rcpufreq_dt.rs @@ -0,0 +1,226 @@ +// SPDX-License-Identifier: GPL-2.0 + +//! Rust based implementation of the cpufreq-dt driver. + +use kernel::{ + c_str, + clk::Clk, + cpu, cpufreq, + cpumask::CpumaskVar, + device::{Core, Device}, + error::code::*, + fmt, + macros::vtable, + module_platform_driver, of, opp, platform, + prelude::*, + str::CString, + sync::Arc, +}; + +/// Finds exact supply name from the OF node. +fn find_supply_name_exact(dev: &Device, name: &str) -> Option<CString> { + let prop_name = CString::try_from_fmt(fmt!("{}-supply", name)).ok()?; + dev.property_present(&prop_name) + .then(|| CString::try_from_fmt(fmt!("{name}")).ok()) + .flatten() +} + +/// Finds supply name for the CPU from DT. +fn find_supply_names(dev: &Device, cpu: u32) -> Option<KVec<CString>> { + // Try "cpu0" for older DTs, fallback to "cpu". + let name = (cpu == 0) + .then(|| find_supply_name_exact(dev, "cpu0")) + .flatten() + .or_else(|| find_supply_name_exact(dev, "cpu"))?; + + let mut list = KVec::with_capacity(1, GFP_KERNEL).ok()?; + list.push(name, GFP_KERNEL).ok()?; + + Some(list) +} + +/// Represents the cpufreq dt device. 
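///
/// Everything kept here must live for as long as the cpufreq policy does: the
/// OPP table and its frequency table are used on every frequency transition,
/// while the CPU mask, OPP configuration token and clock are held mainly to
/// keep their underlying resources alive until the policy is torn down.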
+struct CPUFreqDTDevice { + opp_table: opp::Table, + freq_table: opp::FreqTable, + _mask: CpumaskVar, + _token: Option<opp::ConfigToken>, + _clk: Clk, +} + +#[derive(Default)] +struct CPUFreqDTDriver; + +#[vtable] +impl opp::ConfigOps for CPUFreqDTDriver {} + +#[vtable] +impl cpufreq::Driver for CPUFreqDTDriver { + const NAME: &'static CStr = c_str!("cpufreq-dt"); + const FLAGS: u16 = cpufreq::flags::NEED_INITIAL_FREQ_CHECK | cpufreq::flags::IS_COOLING_DEV; + const BOOST_ENABLED: bool = true; + + type PData = Arc<CPUFreqDTDevice>; + + fn init(policy: &mut cpufreq::Policy) -> Result<Self::PData> { + let cpu = policy.cpu(); + // SAFETY: The CPU device is only used during init; it won't get hot-unplugged. The cpufreq + // core registers with CPU notifiers and the cpufreq core/driver won't use the CPU device, + // once the CPU is hot-unplugged. + let dev = unsafe { cpu::from_cpu(cpu)? }; + let mut mask = CpumaskVar::new_zero(GFP_KERNEL)?; + + mask.set(cpu); + + let token = find_supply_names(dev, cpu) + .map(|names| { + opp::Config::<Self>::new() + .set_regulator_names(names)? + .set(dev) + }) + .transpose()?; + + // Get OPP-sharing information from "operating-points-v2" bindings. + let fallback = match opp::Table::of_sharing_cpus(dev, &mut mask) { + Ok(()) => false, + Err(e) if e == ENOENT => { + // "operating-points-v2" not supported. If the platform hasn't + // set sharing CPUs, fallback to all CPUs share the `Policy` + // for backward compatibility. + opp::Table::sharing_cpus(dev, &mut mask).is_err() + } + Err(e) => return Err(e), + }; + + // Initialize OPP tables for all policy cpus. + // + // For platforms not using "operating-points-v2" bindings, we do this + // before updating policy cpus. Otherwise, we will end up creating + // duplicate OPPs for the CPUs. + // + // OPPs might be populated at runtime, don't fail for error here unless + // it is -EPROBE_DEFER. + let mut opp_table = match opp::Table::from_of_cpumask(dev, &mut mask) { + Ok(table) => table, + Err(e) => { + if e == EPROBE_DEFER { + return Err(e); + } + + // The table is added dynamically ? + opp::Table::from_dev(dev)? + } + }; + + // The OPP table must be initialized, statically or dynamically, by this point. + opp_table.opp_count()?; + + // Set sharing cpus for fallback scenario. + if fallback { + mask.setall(); + opp_table.set_sharing_cpus(&mut mask)?; + } + + let mut transition_latency = opp_table.max_transition_latency_ns() as u32; + if transition_latency == 0 { + transition_latency = cpufreq::ETERNAL_LATENCY_NS; + } + + policy + .set_dvfs_possible_from_any_cpu(true) + .set_suspend_freq(opp_table.suspend_freq()) + .set_transition_latency_ns(transition_latency); + + let freq_table = opp_table.cpufreq_table()?; + // SAFETY: The `freq_table` is not dropped while it is getting used by the C code. + unsafe { policy.set_freq_table(&freq_table) }; + + // SAFETY: The returned `clk` is not dropped while it is getting used by the C code. + let clk = unsafe { policy.set_clk(dev, None)? }; + + mask.copy(policy.cpus()); + + Ok(Arc::new( + CPUFreqDTDevice { + opp_table, + freq_table, + _mask: mask, + _token: token, + _clk: clk, + }, + GFP_KERNEL, + )?) + } + + fn exit(_policy: &mut cpufreq::Policy, _data: Option<Self::PData>) -> Result { + Ok(()) + } + + fn online(_policy: &mut cpufreq::Policy) -> Result { + // We did light-weight tear down earlier, nothing to do here. + Ok(()) + } + + fn offline(_policy: &mut cpufreq::Policy) -> Result { + // Preserve policy->data and don't free resources on light-weight + // tear down. 
+ Ok(()) + } + + fn suspend(policy: &mut cpufreq::Policy) -> Result { + policy.generic_suspend() + } + + fn verify(data: &mut cpufreq::PolicyData) -> Result { + data.generic_verify() + } + + fn target_index(policy: &mut cpufreq::Policy, index: cpufreq::TableIndex) -> Result { + let Some(data) = policy.data::<Self::PData>() else { + return Err(ENOENT); + }; + + let freq = data.freq_table.freq(index)?; + data.opp_table.set_rate(freq) + } + + fn get(policy: &mut cpufreq::Policy) -> Result<u32> { + policy.generic_get() + } + + fn set_boost(_policy: &mut cpufreq::Policy, _state: i32) -> Result { + Ok(()) + } + + fn register_em(policy: &mut cpufreq::Policy) { + policy.register_em_opp() + } +} + +kernel::of_device_table!( + OF_TABLE, + MODULE_OF_TABLE, + <CPUFreqDTDriver as platform::Driver>::IdInfo, + [(of::DeviceId::new(c_str!("operating-points-v2")), ())] +); + +impl platform::Driver for CPUFreqDTDriver { + type IdInfo = (); + const OF_ID_TABLE: Option<of::IdTable<Self::IdInfo>> = Some(&OF_TABLE); + + fn probe( + pdev: &platform::Device<Core>, + _id_info: Option<&Self::IdInfo>, + ) -> Result<Pin<KBox<Self>>> { + cpufreq::Registration::<CPUFreqDTDriver>::new_foreign_owned(pdev.as_ref())?; + Ok(KBox::new(Self {}, GFP_KERNEL)?.into()) + } +} + +module_platform_driver! { + type: CPUFreqDTDriver, + name: "cpufreq-dt", + author: "Viresh Kumar <viresh.kumar@linaro.org>", + description: "Generic CPUFreq DT driver", + license: "GPL v2", +} diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c index 944e899eb1be..ef078426bfd5 100644 --- a/drivers/cpufreq/scmi-cpufreq.c +++ b/drivers/cpufreq/scmi-cpufreq.c @@ -393,6 +393,40 @@ static struct cpufreq_driver scmi_cpufreq_driver = { .set_boost = cpufreq_boost_set_sw, }; +static bool scmi_dev_used_by_cpus(struct device *scmi_dev) +{ + struct device_node *scmi_np = dev_of_node(scmi_dev); + struct device_node *cpu_np, *np; + struct device *cpu_dev; + int cpu, idx; + + if (!scmi_np) + return false; + + for_each_possible_cpu(cpu) { + cpu_dev = get_cpu_device(cpu); + if (!cpu_dev) + continue; + + cpu_np = dev_of_node(cpu_dev); + + np = of_parse_phandle(cpu_np, "clocks", 0); + of_node_put(np); + + if (np == scmi_np) + return true; + + idx = of_property_match_string(cpu_np, "power-domain-names", "perf"); + np = of_parse_phandle(cpu_np, "power-domains", idx); + of_node_put(np); + + if (np == scmi_np) + return true; + } + + return false; +} + static int scmi_cpufreq_probe(struct scmi_device *sdev) { int ret; @@ -401,7 +435,7 @@ static int scmi_cpufreq_probe(struct scmi_device *sdev) handle = sdev->handle; - if (!handle) + if (!handle || !scmi_dev_used_by_cpus(dev)) return -ENODEV; scmi_cpufreq_driver.driver_data = sdev; diff --git a/drivers/cpuidle/cpuidle-psci.c b/drivers/cpuidle/cpuidle-psci.c index 3c2756a539c4..4e1ba35deda9 100644 --- a/drivers/cpuidle/cpuidle-psci.c +++ b/drivers/cpuidle/cpuidle-psci.c @@ -456,14 +456,13 @@ static struct faux_device_ops psci_cpuidle_ops = { static bool __init dt_idle_state_present(void) { - struct device_node *cpu_node __free(device_node); - struct device_node *state_node __free(device_node); - - cpu_node = of_cpu_device_node_get(cpumask_first(cpu_possible_mask)); + struct device_node *cpu_node __free(device_node) = + of_cpu_device_node_get(cpumask_first(cpu_possible_mask)); if (!cpu_node) return false; - state_node = of_get_cpu_state_node(cpu_node, 0); + struct device_node *state_node __free(device_node) = + of_get_cpu_state_node(cpu_node, 0); if (!state_node) return false; diff --git a/drivers/dax/kmem.c 
b/drivers/dax/kmem.c index e97d47f42ee2..584c70a34b52 100644 --- a/drivers/dax/kmem.c +++ b/drivers/dax/kmem.c @@ -13,6 +13,7 @@ #include <linux/mman.h> #include <linux/memory-tiers.h> #include <linux/memory_hotplug.h> +#include <linux/string_helpers.h> #include "dax-private.h" #include "bus.h" @@ -68,7 +69,7 @@ static void kmem_put_memory_types(void) static int dev_dax_kmem_probe(struct dev_dax *dev_dax) { struct device *dev = &dev_dax->dev; - unsigned long total_len = 0; + unsigned long total_len = 0, orig_len = 0; struct dax_kmem_data *data; struct memory_dev_type *mtype; int i, rc, mapped = 0; @@ -97,6 +98,7 @@ static int dev_dax_kmem_probe(struct dev_dax *dev_dax) for (i = 0; i < dev_dax->nr_range; i++) { struct range range; + orig_len += range_len(&dev_dax->ranges[i].range); rc = dax_kmem_range(dev_dax, i, &range); if (rc) { dev_info(dev, "mapping%d: %#llx-%#llx too small after alignment\n", @@ -109,6 +111,12 @@ static int dev_dax_kmem_probe(struct dev_dax *dev_dax) if (!total_len) { dev_warn(dev, "rejecting DAX region without any memory after alignment\n"); return -EINVAL; + } else if (total_len != orig_len) { + char buf[16]; + + string_get_size(orig_len - total_len, 1, STRING_UNITS_2, + buf, sizeof(buf)); + dev_warn(dev, "DAX region truncated by %s due to alignment\n", buf); } init_node_memory_type(numa_node, mtype); diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c index 760b7d81fcd8..80355d03004d 100644 --- a/drivers/dma/idxd/init.c +++ b/drivers/dma/idxd/init.c @@ -702,27 +702,6 @@ static void idxd_disable_system_pasid(struct idxd_device *idxd) idxd->pasid = IOMMU_PASID_INVALID; } -static int idxd_enable_sva(struct pci_dev *pdev) -{ - int ret; - - ret = iommu_dev_enable_feature(&pdev->dev, IOMMU_DEV_FEAT_IOPF); - if (ret) - return ret; - - ret = iommu_dev_enable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA); - if (ret) - iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_IOPF); - - return ret; -} - -static void idxd_disable_sva(struct pci_dev *pdev) -{ - iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA); - iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_IOPF); -} - static int idxd_probe(struct idxd_device *idxd) { struct pci_dev *pdev = idxd->pdev; @@ -737,17 +716,13 @@ static int idxd_probe(struct idxd_device *idxd) dev_dbg(dev, "IDXD reset complete\n"); if (IS_ENABLED(CONFIG_INTEL_IDXD_SVM) && sva) { - if (idxd_enable_sva(pdev)) { - dev_warn(dev, "Unable to turn on user SVA feature.\n"); - } else { - set_bit(IDXD_FLAG_USER_PASID_ENABLED, &idxd->flags); + set_bit(IDXD_FLAG_USER_PASID_ENABLED, &idxd->flags); - rc = idxd_enable_system_pasid(idxd); - if (rc) - dev_warn(dev, "No in-kernel DMA with PASID. %d\n", rc); - else - set_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags); - } + rc = idxd_enable_system_pasid(idxd); + if (rc) + dev_warn(dev, "No in-kernel DMA with PASID. 
%d\n", rc); + else + set_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags); } else if (!sva) { dev_warn(dev, "User forced SVA off via module param.\n"); } @@ -785,8 +760,6 @@ static int idxd_probe(struct idxd_device *idxd) err: if (device_pasid_enabled(idxd)) idxd_disable_system_pasid(idxd); - if (device_user_pasid_enabled(idxd)) - idxd_disable_sva(pdev); return rc; } @@ -797,8 +770,6 @@ static void idxd_cleanup(struct idxd_device *idxd) idxd_cleanup_internals(idxd); if (device_pasid_enabled(idxd)) idxd_disable_system_pasid(idxd); - if (device_user_pasid_enabled(idxd)) - idxd_disable_sva(idxd->pdev); } /* diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c index 20333608b983..cae52c654a15 100644 --- a/drivers/edac/altera_edac.c +++ b/drivers/edac/altera_edac.c @@ -1746,9 +1746,9 @@ altr_edac_a10_device_trig(struct file *file, const char __user *user_buf, local_irq_save(flags); if (trig_type == ALTR_UE_TRIGGER_CHAR) - writel(priv->ue_set_mask, set_addr); + writew(priv->ue_set_mask, set_addr); else - writel(priv->ce_set_mask, set_addr); + writew(priv->ce_set_mask, set_addr); /* Ensure the interrupt test bits are set */ wmb(); @@ -1778,7 +1778,7 @@ altr_edac_a10_device_trig2(struct file *file, const char __user *user_buf, local_irq_save(flags); if (trig_type == ALTR_UE_TRIGGER_CHAR) { - writel(priv->ue_set_mask, set_addr); + writew(priv->ue_set_mask, set_addr); } else { /* Setup read/write of 4 bytes */ writel(ECC_WORD_WRITE, drvdata->base + ECC_BLK_DBYTECTRL_OFST); diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig index 7df19d82aa68..bbd2155d8483 100644 --- a/drivers/firmware/Kconfig +++ b/drivers/firmware/Kconfig @@ -267,6 +267,23 @@ config TURRIS_MOX_RWTM other manufacturing data and also utilize the Entropy Bit Generator for hardware random number generation. +if TURRIS_MOX_RWTM + +config TURRIS_MOX_RWTM_KEYCTL + bool "Turris Mox rWTM ECDSA message signing" + default y + depends on KEYS + depends on ASYMMETRIC_KEY_TYPE + select CZNIC_PLATFORMS + select TURRIS_SIGNING_KEY + help + Say Y here to add support for ECDSA message signing with board private + key (each Turris Mox has an ECDSA private key generated in the secure + coprocessor when manufactured). This functionality is exposed via the + keyctl() syscall. + +endif # TURRIS_MOX_RWTM + source "drivers/firmware/arm_ffa/Kconfig" source "drivers/firmware/broadcom/Kconfig" source "drivers/firmware/cirrus/Kconfig" diff --git a/drivers/firmware/arm_scmi/Kconfig b/drivers/firmware/arm_scmi/Kconfig index dabd874641d0..e3fb36825978 100644 --- a/drivers/firmware/arm_scmi/Kconfig +++ b/drivers/firmware/arm_scmi/Kconfig @@ -69,6 +69,19 @@ config ARM_SCMI_DEBUG_COUNTERS such useful debug counters. This can be helpful for debugging and SCMI monitoring. +config ARM_SCMI_QUIRKS + bool "Enable SCMI Quirks framework" + depends on JUMP_LABEL || COMPILE_TEST + default y + help + Enables support for SCMI Quirks framework to workaround SCMI platform + firmware bugs on system already deployed in the wild. + + The framework allows the definition of platform-specific code quirks + that will be associated and enabled only on the desired platforms + depending on the SCMI firmware advertised versions and/or machine + compatibles. 
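ARM_SCMI_QUIRKS depends on JUMP_LABEL because each quirk is backed by a static key: the quirked code path costs only a patched-out branch until the probed firmware matches the quirk's vendor/sub-vendor/version pattern, at which point the key is flipped once at probe time (the full framework is added later in this series as quirks.c/quirks.h). A stripped-down sketch of that gating idea, using only the generic static-key API and invented names (demo_quirk, demo_fixup, "SomeVendor"), might look like this:

/* Sketch of static-key based quirk gating; names are illustrative only. */
#include <linux/bits.h>
#include <linux/jump_label.h>
#include <linux/string.h>
#include <linux/types.h>

/* One static key per quirk; false (disabled) by default. */
static DEFINE_STATIC_KEY_FALSE(demo_quirk);

/* Hot path: a single patched branch when the quirk is not enabled. */
static inline void demo_fixup(u32 *val)
{
	if (static_branch_unlikely(&demo_quirk))
		*val |= BIT(0);		/* the workaround itself */
}

/* Probe time: enable the key once the firmware has identified itself. */
static void demo_enable_matching_quirks(const char *vendor, u32 impl_ver)
{
	if (!strcmp(vendor, "SomeVendor") && impl_ver < 0x30000)
		static_branch_enable(&demo_quirk);
}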
+ source "drivers/firmware/arm_scmi/transports/Kconfig" source "drivers/firmware/arm_scmi/vendors/imx/Kconfig" diff --git a/drivers/firmware/arm_scmi/Makefile b/drivers/firmware/arm_scmi/Makefile index 9ac81adff567..780cd62b2f78 100644 --- a/drivers/firmware/arm_scmi/Makefile +++ b/drivers/firmware/arm_scmi/Makefile @@ -3,6 +3,7 @@ scmi-bus-y = bus.o scmi-core-objs := $(scmi-bus-y) scmi-driver-y = driver.o notify.o +scmi-driver-$(CONFIG_ARM_SCMI_QUIRKS) += quirks.o scmi-driver-$(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT) += raw_mode.o scmi-transport-$(CONFIG_ARM_SCMI_HAVE_SHMEM) = shmem.o scmi-transport-$(CONFIG_ARM_SCMI_HAVE_MSG) += msg.o diff --git a/drivers/firmware/arm_scmi/bus.c b/drivers/firmware/arm_scmi/bus.c index 3a5474015f7d..1adef0389475 100644 --- a/drivers/firmware/arm_scmi/bus.c +++ b/drivers/firmware/arm_scmi/bus.c @@ -201,55 +201,51 @@ scmi_protocol_table_unregister(const struct scmi_device_id *id_table) scmi_protocol_device_unrequest(entry); } -static const struct scmi_device_id * -scmi_dev_match_id(struct scmi_device *scmi_dev, const struct scmi_driver *scmi_drv) +static int scmi_dev_match_by_id_table(struct scmi_device *scmi_dev, + const struct scmi_device_id *id_table) { - const struct scmi_device_id *id = scmi_drv->id_table; - - if (!id) - return NULL; - - for (; id->protocol_id; id++) - if (id->protocol_id == scmi_dev->protocol_id) { - if (!id->name) - return id; - else if (!strcmp(id->name, scmi_dev->name)) - return id; - } + if (!id_table || !id_table->name) + return 0; + + /* Always skip transport devices from matching */ + for (; id_table->protocol_id && id_table->name; id_table++) + if (id_table->protocol_id == scmi_dev->protocol_id && + strncmp(scmi_dev->name, "__scmi_transport_device", 23) && + !strcmp(id_table->name, scmi_dev->name)) + return 1; + return 0; +} - return NULL; +static int scmi_dev_match_id(struct scmi_device *scmi_dev, + const struct scmi_driver *scmi_drv) +{ + return scmi_dev_match_by_id_table(scmi_dev, scmi_drv->id_table); } static int scmi_dev_match(struct device *dev, const struct device_driver *drv) { const struct scmi_driver *scmi_drv = to_scmi_driver(drv); struct scmi_device *scmi_dev = to_scmi_dev(dev); - const struct scmi_device_id *id; - - id = scmi_dev_match_id(scmi_dev, scmi_drv); - if (id) - return 1; - return 0; + return scmi_dev_match_id(scmi_dev, scmi_drv); } static int scmi_match_by_id_table(struct device *dev, const void *data) { - struct scmi_device *sdev = to_scmi_dev(dev); + struct scmi_device *scmi_dev = to_scmi_dev(dev); const struct scmi_device_id *id_table = data; - return sdev->protocol_id == id_table->protocol_id && - (id_table->name && !strcmp(sdev->name, id_table->name)); + return scmi_dev_match_by_id_table(scmi_dev, id_table); } static struct scmi_device *scmi_child_dev_find(struct device *parent, int prot_id, const char *name) { - struct scmi_device_id id_table; + struct scmi_device_id id_table[2] = { 0 }; struct device *dev; - id_table.protocol_id = prot_id; - id_table.name = name; + id_table[0].protocol_id = prot_id; + id_table[0].name = name; dev = device_find_child(parent, &id_table, scmi_match_by_id_table); if (!dev) @@ -463,6 +459,20 @@ put_dev: return NULL; } +static struct scmi_device * +_scmi_device_create(struct device_node *np, struct device *parent, + int protocol, const char *name) +{ + struct scmi_device *sdev; + + sdev = __scmi_device_create(np, parent, protocol, name); + if (!sdev) + pr_err("(%s) Failed to create device for protocol 0x%x (%s)\n", + of_node_full_name(parent->of_node), protocol, name); + + 
return sdev; +} + /** * scmi_device_create - A method to create one or more SCMI devices * @@ -495,7 +505,7 @@ struct scmi_device *scmi_device_create(struct device_node *np, struct scmi_device *scmi_dev = NULL; if (name) - return __scmi_device_create(np, parent, protocol, name); + return _scmi_device_create(np, parent, protocol, name); mutex_lock(&scmi_requested_devices_mtx); phead = idr_find(&scmi_requested_devices, protocol); @@ -509,18 +519,13 @@ struct scmi_device *scmi_device_create(struct device_node *np, list_for_each_entry(rdev, phead, node) { struct scmi_device *sdev; - sdev = __scmi_device_create(np, parent, - rdev->id_table->protocol_id, - rdev->id_table->name); - /* Report errors and carry on... */ + sdev = _scmi_device_create(np, parent, + rdev->id_table->protocol_id, + rdev->id_table->name); if (sdev) scmi_dev = sdev; - else - pr_err("(%s) Failed to create device for protocol 0x%x (%s)\n", - of_node_full_name(parent->of_node), - rdev->id_table->protocol_id, - rdev->id_table->name); } + mutex_unlock(&scmi_requested_devices_mtx); return scmi_dev; diff --git a/drivers/firmware/arm_scmi/clock.c b/drivers/firmware/arm_scmi/clock.c index 2ed2279388f0..afa7981efe82 100644 --- a/drivers/firmware/arm_scmi/clock.c +++ b/drivers/firmware/arm_scmi/clock.c @@ -11,6 +11,7 @@ #include "protocols.h" #include "notify.h" +#include "quirks.h" /* Updated only after ALL the mandatory features for that version are merged */ #define SCMI_PROTOCOL_SUPPORTED_VERSION 0x30000 @@ -429,6 +430,23 @@ static void iter_clk_describe_prepare_message(void *message, msg->rate_index = cpu_to_le32(desc_index); } +#define QUIRK_OUT_OF_SPEC_TRIPLET \ + ({ \ + /* \ + * A known quirk: a triplet is returned but num_returned != 3 \ + * Check for a safe payload size and fix. \ + */ \ + if (st->num_returned != 3 && st->num_remaining == 0 && \ + st->rx_len == sizeof(*r) + sizeof(__le32) * 2 * 3) { \ + st->num_returned = 3; \ + st->num_remaining = 0; \ + } else { \ + dev_err(p->dev, \ + "Cannot fix out-of-spec reply !\n"); \ + return -EPROTO; \ + } \ + }) + static int iter_clk_describe_update_state(struct scmi_iterator_state *st, const void *response, void *priv) @@ -450,19 +468,8 @@ iter_clk_describe_update_state(struct scmi_iterator_state *st, p->clk->name, st->num_returned, st->num_remaining, st->rx_len); - /* - * A known quirk: a triplet is returned but num_returned != 3 - * Check for a safe payload size and fix. - */ - if (st->num_returned != 3 && st->num_remaining == 0 && - st->rx_len == sizeof(*r) + sizeof(__le32) * 2 * 3) { - st->num_returned = 3; - st->num_remaining = 0; - } else { - dev_err(p->dev, - "Cannot fix out-of-spec reply !\n"); - return -EPROTO; - } + SCMI_QUIRK(clock_rates_triplet_out_of_spec, + QUIRK_OUT_OF_SPEC_TRIPLET); } return 0; diff --git a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h index 10ea7962323e..dab758c5fdea 100644 --- a/drivers/firmware/arm_scmi/common.h +++ b/drivers/firmware/arm_scmi/common.h @@ -475,6 +475,7 @@ static int __tag##_probe(struct platform_device *pdev) \ if (ret) \ goto err; \ \ + spdev->dev.parent = dev; \ ret = platform_device_add(spdev); \ if (ret) \ goto err; \ diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c index 0390d5ff195e..395fe9289035 100644 --- a/drivers/firmware/arm_scmi/driver.c +++ b/drivers/firmware/arm_scmi/driver.c @@ -11,7 +11,7 @@ * various power domain DVFS including the core/cluster, certain system * clocks configuration, thermal sensors and many others. 
* - * Copyright (C) 2018-2024 ARM Ltd. + * Copyright (C) 2018-2025 ARM Ltd. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt @@ -38,6 +38,7 @@ #include "common.h" #include "notify.h" +#include "quirks.h" #include "raw_mode.h" @@ -439,14 +440,8 @@ static void scmi_create_protocol_devices(struct device_node *np, struct scmi_info *info, int prot_id, const char *name) { - struct scmi_device *sdev; - mutex_lock(&info->devreq_mtx); - sdev = scmi_device_create(np, info->dev, prot_id, name); - if (name && !sdev) - dev_err(info->dev, - "failed to create device for protocol 0x%X (%s)\n", - prot_id, name); + scmi_device_create(np, info->dev, prot_id, name); mutex_unlock(&info->devreq_mtx); } @@ -1190,7 +1185,8 @@ static void scmi_handle_response(struct scmi_chan_info *cinfo, * RX path since it will be already queued at the end of the TX * poll loop. */ - if (!xfer->hdr.poll_completion) + if (!xfer->hdr.poll_completion || + xfer->hdr.type == MSG_TYPE_DELAYED_RESP) scmi_raw_message_report(info->raw, xfer, SCMI_RAW_REPLY_QUEUE, cinfo->id); @@ -1738,6 +1734,39 @@ static int scmi_common_get_max_msg_size(const struct scmi_protocol_handle *ph) } /** + * scmi_protocol_msg_check - Check protocol message attributes + * + * @ph: A reference to the protocol handle. + * @message_id: The ID of the message to check. + * @attributes: A parameter to optionally return the retrieved message + * attributes, in case of Success. + * + * An helper to check protocol message attributes for a specific protocol + * and message pair. + * + * Return: 0 on SUCCESS + */ +static int scmi_protocol_msg_check(const struct scmi_protocol_handle *ph, + u32 message_id, u32 *attributes) +{ + int ret; + struct scmi_xfer *t; + + ret = xfer_get_init(ph, PROTOCOL_MESSAGE_ATTRIBUTES, + sizeof(__le32), 0, &t); + if (ret) + return ret; + + put_unaligned_le32(message_id, t->tx.buf); + ret = do_xfer(ph, t); + if (!ret && attributes) + *attributes = get_unaligned_le32(t->rx.buf); + xfer_put(ph, t); + + return ret; +} + +/** * struct scmi_iterator - Iterator descriptor * @msg: A reference to the message TX buffer; filled by @prepare_message with * a proper custom command payload for each multi-part command request. 
@@ -1869,6 +1898,13 @@ struct scmi_msg_resp_desc_fc { __le32 db_preserve_hmask; }; +#define QUIRK_PERF_FC_FORCE \ + ({ \ + if (pi->proto->id == SCMI_PROTOCOL_PERF && \ + message_id == 0x8 /* PERF_LEVEL_GET */) \ + attributes |= BIT(0); \ + }) + static void scmi_common_fastchannel_init(const struct scmi_protocol_handle *ph, u8 describe_id, u32 message_id, u32 valid_size, @@ -1878,6 +1914,7 @@ scmi_common_fastchannel_init(const struct scmi_protocol_handle *ph, int ret; u32 flags; u64 phys_addr; + u32 attributes; u8 size; void __iomem *addr; struct scmi_xfer *t; @@ -1886,6 +1923,16 @@ scmi_common_fastchannel_init(const struct scmi_protocol_handle *ph, struct scmi_msg_resp_desc_fc *resp; const struct scmi_protocol_instance *pi = ph_to_pi(ph); + /* Check if the MSG_ID supports fastchannel */ + ret = scmi_protocol_msg_check(ph, message_id, &attributes); + SCMI_QUIRK(perf_level_get_fc_force, QUIRK_PERF_FC_FORCE); + if (ret || !MSG_SUPPORTS_FASTCHANNEL(attributes)) { + dev_dbg(ph->dev, + "Skip FC init for 0x%02X/%d domain:%d - ret:%d\n", + pi->proto->id, message_id, domain, ret); + return; + } + if (!p_addr) { ret = -EINVAL; goto err_out; @@ -2003,39 +2050,6 @@ static void scmi_common_fastchannel_db_ring(struct scmi_fc_db_info *db) SCMI_PROTO_FC_RING_DB(64); } -/** - * scmi_protocol_msg_check - Check protocol message attributes - * - * @ph: A reference to the protocol handle. - * @message_id: The ID of the message to check. - * @attributes: A parameter to optionally return the retrieved message - * attributes, in case of Success. - * - * An helper to check protocol message attributes for a specific protocol - * and message pair. - * - * Return: 0 on SUCCESS - */ -static int scmi_protocol_msg_check(const struct scmi_protocol_handle *ph, - u32 message_id, u32 *attributes) -{ - int ret; - struct scmi_xfer *t; - - ret = xfer_get_init(ph, PROTOCOL_MESSAGE_ATTRIBUTES, - sizeof(__le32), 0, &t); - if (ret) - return ret; - - put_unaligned_le32(message_id, t->tx.buf); - ret = do_xfer(ph, t); - if (!ret && attributes) - *attributes = get_unaligned_le32(t->rx.buf); - xfer_put(ph, t); - - return ret; -} - static const struct scmi_proto_helpers_ops helpers_ops = { .extended_name_get = scmi_common_extended_name_get, .get_max_msg_size = scmi_common_get_max_msg_size, @@ -2828,9 +2842,8 @@ static int scmi_bus_notifier(struct notifier_block *nb, struct scmi_info *info = bus_nb_to_scmi_info(nb); struct scmi_device *sdev = to_scmi_dev(data); - /* Skip transport devices and devices of different SCMI instances */ - if (!strncmp(sdev->name, "__scmi_transport_device", 23) || - sdev->dev.parent != info->dev) + /* Skip devices of different SCMI instances */ + if (sdev->dev.parent != info->dev) return NOTIFY_DONE; switch (action) { @@ -3101,6 +3114,18 @@ static const struct scmi_desc *scmi_transport_setup(struct device *dev) return &trans->desc; } +static void scmi_enable_matching_quirks(struct scmi_info *info) +{ + struct scmi_revision_info *rev = &info->version; + + dev_dbg(info->dev, "Looking for quirks matching: %s/%s/0x%08X\n", + rev->vendor_id, rev->sub_vendor_id, rev->impl_ver); + + /* Enable applicable quirks */ + scmi_quirks_enable(info->dev, rev->vendor_id, + rev->sub_vendor_id, rev->impl_ver); +} + static int scmi_probe(struct platform_device *pdev) { int ret; @@ -3222,6 +3247,8 @@ static int scmi_probe(struct platform_device *pdev) list_add_tail(&info->node, &scmi_list); mutex_unlock(&scmi_list_mutex); + scmi_enable_matching_quirks(info); + for_each_available_child_of_node(np, child) { u32 prot_id; @@ -3380,6 
+3407,8 @@ static struct dentry *scmi_debugfs_init(void) static int __init scmi_driver_init(void) { + scmi_quirks_initialize(); + /* Bail out if no SCMI transport was configured */ if (WARN_ON(!IS_ENABLED(CONFIG_ARM_SCMI_HAVE_TRANSPORT))) return -EINVAL; diff --git a/drivers/firmware/arm_scmi/protocols.h b/drivers/firmware/arm_scmi/protocols.h index aaee57cdcd55..d62c4469d1fd 100644 --- a/drivers/firmware/arm_scmi/protocols.h +++ b/drivers/firmware/arm_scmi/protocols.h @@ -31,6 +31,8 @@ #define SCMI_PROTOCOL_VENDOR_BASE 0x80 +#define MSG_SUPPORTS_FASTCHANNEL(x) ((x) & BIT(0)) + enum scmi_common_cmd { PROTOCOL_VERSION = 0x0, PROTOCOL_ATTRIBUTES = 0x1, diff --git a/drivers/firmware/arm_scmi/quirks.c b/drivers/firmware/arm_scmi/quirks.c new file mode 100644 index 000000000000..03960aca3610 --- /dev/null +++ b/drivers/firmware/arm_scmi/quirks.c @@ -0,0 +1,322 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * System Control and Management Interface (SCMI) Message Protocol Quirks + * + * Copyright (C) 2025 ARM Ltd. + */ + +/** + * DOC: Theory of operation + * + * A framework to define SCMI quirks and their activation conditions based on + * existing static_keys kernel facilities. + * + * Quirks are named and their activation conditions defined using the macro + * DEFINE_SCMI_QUIRK() in this file. + * + * After a quirk is defined, a corresponding entry must also be added to the + * global @scmi_quirks_table in this file using __DECLARE_SCMI_QUIRK_ENTRY(). + * + * Additionally a corresponding quirk declaration must be added also to the + * quirk.h file using DECLARE_SCMI_QUIRK(). + * + * The needed quirk code-snippet itself will be defined local to the SCMI code + * that is meant to fix and will be associated to the previously defined quirk + * and related activation conditions using the macro SCMI_QUIRK(). + * + * At runtime, during the SCMI stack probe sequence, once the SCMI Server had + * advertised the running platform Vendor, SubVendor and Implementation Version + * data, all the defined quirks matching the activation conditions will be + * enabled. + * + * Example + * + * quirk.c + * ------- + * DEFINE_SCMI_QUIRK(fix_me, "vendor", "subvend", "0x12000-0x30000", + * "someone,plat_A", "another,plat_b", "vend,sku"); + * + * static struct scmi_quirk *scmi_quirks_table[] = { + * ... + * __DECLARE_SCMI_QUIRK_ENTRY(fix_me), + * NULL + * }; + * + * quirk.h + * ------- + * DECLARE_SCMI_QUIRK(fix_me); + * + * <somewhere_in_the_scmi_stack.c> + * ------------------------------ + * + * #define QUIRK_CODE_SNIPPET_FIX_ME() \ + * ({ \ + * if (p->condition) \ + * a_ptr->calculated_val = 123; \ + * }) + * + * + * int some_function_to_fix(int param, struct something *p) + * { + * struct some_strut *a_ptr; + * + * a_ptr = some_load_func(p); + * SCMI_QUIRK(fix_me, QUIRK_CODE_SNIPPET_FIX_ME); + * some_more_func(a_ptr); + * ... 
+ * + * return 0; + * } + * + */ + +#include <linux/ctype.h> +#include <linux/device.h> +#include <linux/export.h> +#include <linux/hashtable.h> +#include <linux/kstrtox.h> +#include <linux/of.h> +#include <linux/slab.h> +#include <linux/static_key.h> +#include <linux/string.h> +#include <linux/stringhash.h> +#include <linux/types.h> + +#include "quirks.h" + +#define SCMI_QUIRKS_HT_SZ 4 + +struct scmi_quirk { + bool enabled; + const char *name; + char *vendor; + char *sub_vendor_id; + char *impl_ver_range; + u32 start_range; + u32 end_range; + struct static_key_false *key; + struct hlist_node hash; + unsigned int hkey; + const char *const compats[]; +}; + +#define __DEFINE_SCMI_QUIRK_ENTRY(_qn, _ven, _sub, _impl, ...) \ + static struct scmi_quirk scmi_quirk_entry_ ## _qn = { \ + .name = __stringify(quirk_ ## _qn), \ + .vendor = _ven, \ + .sub_vendor_id = _sub, \ + .impl_ver_range = _impl, \ + .key = &(scmi_quirk_ ## _qn), \ + .compats = { __VA_ARGS__ __VA_OPT__(,) NULL }, \ + } + +#define __DECLARE_SCMI_QUIRK_ENTRY(_qn) (&(scmi_quirk_entry_ ## _qn)) + +/* + * Define a quirk by name and provide the matching tokens where: + * + * _qn: A string which will be used to build the quirk and the global + * static_key names. + * _ven : SCMI Vendor ID string match, NULL means any. + * _sub : SCMI SubVendor ID string match, NULL means any. + * _impl : SCMI Implementation Version string match, NULL means any. + * This string can be used to express version ranges which will be + * interpreted as follows: + * + * NULL [0, 0xFFFFFFFF] + * "X" [X, X] + * "X-" [X, 0xFFFFFFFF] + * "-X" [0, X] + * "X-Y" [X, Y] + * + * with X <= Y and <v> in [X, Y] meaning X <= <v> <= Y + * + * ... : An optional variadic macros argument used to provide a comma-separated + * list of compatible strings matches; when no variadic argument is + * provided, ANY compatible will match this quirk. + * + * This implicitly define also a properly named global static-key that + * will be used to dynamically enable the quirk at initialization time. + * + * Note that it is possible to associate multiple quirks to the same + * matching pattern, if your firmware quality is really astounding :P + * + * Example: + * + * Compatibles list NOT provided, so ANY compatible will match: + * + * DEFINE_SCMI_QUIRK(my_new_issue, "Vend", "SVend", "0x12000-0x30000"); + * + * + * A few compatibles provided to match against: + * + * DEFINE_SCMI_QUIRK(my_new_issue, "Vend", "SVend", "0x12000-0x30000", + * "xvend,plat_a", "xvend,plat_b", "xvend,sku_name"); + */ +#define DEFINE_SCMI_QUIRK(_qn, _ven, _sub, _impl, ...) \ + DEFINE_STATIC_KEY_FALSE(scmi_quirk_ ## _qn); \ + __DEFINE_SCMI_QUIRK_ENTRY(_qn, _ven, _sub, _impl, ##__VA_ARGS__) + +/* + * Same as DEFINE_SCMI_QUIRK but EXPORTED: this is meant to address quirks + * that possibly reside in code that is included in loadable kernel modules + * that needs to be able to access the global static keys at runtime to + * determine if enabled or not. (see SCMI_QUIRK to understand usage) + */ +#define DEFINE_SCMI_QUIRK_EXPORTED(_qn, _ven, _sub, _impl, ...) 
\ + DEFINE_STATIC_KEY_FALSE(scmi_quirk_ ## _qn); \ + EXPORT_SYMBOL_GPL(scmi_quirk_ ## _qn); \ + __DEFINE_SCMI_QUIRK_ENTRY(_qn, _ven, _sub, _impl, ##__VA_ARGS__) + +/* Global Quirks Definitions */ +DEFINE_SCMI_QUIRK(clock_rates_triplet_out_of_spec, NULL, NULL, NULL); +DEFINE_SCMI_QUIRK(perf_level_get_fc_force, "Qualcomm", NULL, "0x20000-"); + +/* + * Quirks Pointers Array + * + * This is filled at compile-time with the list of pointers to all the currently + * defined quirks descriptors. + */ +static struct scmi_quirk *scmi_quirks_table[] = { + __DECLARE_SCMI_QUIRK_ENTRY(clock_rates_triplet_out_of_spec), + __DECLARE_SCMI_QUIRK_ENTRY(perf_level_get_fc_force), + NULL +}; + +/* + * Quirks HashTable + * + * A run-time populated hashtable containing all the defined quirks descriptors + * hashed by matching pattern. + */ +static DEFINE_READ_MOSTLY_HASHTABLE(scmi_quirks_ht, SCMI_QUIRKS_HT_SZ); + +static unsigned int scmi_quirk_signature(const char *vend, const char *sub_vend) +{ + char *signature, *p; + unsigned int hash32; + unsigned long hash = 0; + + /* vendor_id/sub_vendor_id guaranteed <= SCMI_SHORT_NAME_MAX_SIZE */ + signature = kasprintf(GFP_KERNEL, "|%s|%s|", vend ?: "", sub_vend ?: ""); + if (!signature) + return 0; + + pr_debug("SCMI Quirk Signature >>>%s<<<\n", signature); + + p = signature; + while (*p) + hash = partial_name_hash(tolower(*p++), hash); + hash32 = end_name_hash(hash); + + kfree(signature); + + return hash32; +} + +static int scmi_quirk_range_parse(struct scmi_quirk *quirk) +{ + const char *last, *first = quirk->impl_ver_range; + size_t len; + char *sep; + int ret; + + quirk->start_range = 0; + quirk->end_range = 0xFFFFFFFF; + len = quirk->impl_ver_range ? strlen(quirk->impl_ver_range) : 0; + if (!len) + return 0; + + last = first + len - 1; + sep = strchr(quirk->impl_ver_range, '-'); + if (sep) + *sep = '\0'; + + if (sep == first) /* -X */ + ret = kstrtouint(first + 1, 0, &quirk->end_range); + else /* X OR X- OR X-y */ + ret = kstrtouint(first, 0, &quirk->start_range); + if (ret) + return ret; + + if (!sep) + quirk->end_range = quirk->start_range; + else if (sep != last) /* x-Y */ + ret = kstrtouint(sep + 1, 0, &quirk->end_range); + + if (quirk->start_range > quirk->end_range) + return -EINVAL; + + return ret; +} + +void scmi_quirks_initialize(void) +{ + struct scmi_quirk *quirk; + int i; + + for (i = 0, quirk = scmi_quirks_table[0]; quirk; + i++, quirk = scmi_quirks_table[i]) { + int ret; + + ret = scmi_quirk_range_parse(quirk); + if (ret) { + pr_err("SCMI skip QUIRK [%s] - BAD RANGE - |%s|\n", + quirk->name, quirk->impl_ver_range); + continue; + } + quirk->hkey = scmi_quirk_signature(quirk->vendor, + quirk->sub_vendor_id); + + hash_add(scmi_quirks_ht, &quirk->hash, quirk->hkey); + + pr_debug("Registered SCMI QUIRK [%s] -- %p - Key [0x%08X] - %s/%s/[0x%08X-0x%08X]\n", + quirk->name, quirk, quirk->hkey, + quirk->vendor, quirk->sub_vendor_id, + quirk->start_range, quirk->end_range); + } + + pr_debug("SCMI Quirks initialized\n"); +} + +void scmi_quirks_enable(struct device *dev, const char *vend, + const char *subv, const u32 impl) +{ + for (int i = 3; i >= 0; i--) { + struct scmi_quirk *quirk; + unsigned int hkey; + + hkey = scmi_quirk_signature(i > 1 ? vend : NULL, + i > 2 ? subv : NULL); + + /* + * Note that there could be multiple matches so we + * will enable multiple quirk part of a hash collision + * domain...BUT we cannot assume that ALL quirks on the + * same collision domain are a full match. 
+ */ + hash_for_each_possible(scmi_quirks_ht, quirk, hash, hkey) { + if (quirk->enabled || quirk->hkey != hkey || + impl < quirk->start_range || + impl > quirk->end_range) + continue; + + if (quirk->compats[0] && + !of_machine_compatible_match(quirk->compats)) + continue; + + dev_info(dev, "Enabling SCMI Quirk [%s]\n", + quirk->name); + + dev_dbg(dev, + "Quirk matched on: %s/%s/%s/[0x%08X-0x%08X]\n", + quirk->compats[0], quirk->vendor, + quirk->sub_vendor_id, + quirk->start_range, quirk->end_range); + + static_branch_enable(quirk->key); + quirk->enabled = true; + } + } +} diff --git a/drivers/firmware/arm_scmi/quirks.h b/drivers/firmware/arm_scmi/quirks.h new file mode 100644 index 000000000000..a71fde85a527 --- /dev/null +++ b/drivers/firmware/arm_scmi/quirks.h @@ -0,0 +1,52 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * System Control and Management Interface (SCMI) Message Protocol Quirks + * + * Copyright (C) 2025 ARM Ltd. + */ +#ifndef _SCMI_QUIRKS_H +#define _SCMI_QUIRKS_H + +#include <linux/static_key.h> +#include <linux/types.h> + +#ifdef CONFIG_ARM_SCMI_QUIRKS + +#define DECLARE_SCMI_QUIRK(_qn) \ + DECLARE_STATIC_KEY_FALSE(scmi_quirk_ ## _qn) + +/* + * A helper to associate the actual code snippet to use as a quirk + * named as _qn. + */ +#define SCMI_QUIRK(_qn, _blk) \ + do { \ + if (static_branch_unlikely(&(scmi_quirk_ ## _qn))) \ + (_blk); \ + } while (0) + +void scmi_quirks_initialize(void); +void scmi_quirks_enable(struct device *dev, const char *vend, + const char *subv, const u32 impl); + +#else + +#define DECLARE_SCMI_QUIRK(_qn) +/* Force quirks compilation even when SCMI Quirks are disabled */ +#define SCMI_QUIRK(_qn, _blk) \ + do { \ + if (0) \ + (_blk); \ + } while (0) + +static inline void scmi_quirks_initialize(void) { } +static inline void scmi_quirks_enable(struct device *dev, const char *vend, + const char *sub_vend, const u32 impl) { } + +#endif /* CONFIG_ARM_SCMI_QUIRKS */ + +/* Quirk delarations */ +DECLARE_SCMI_QUIRK(clock_rates_triplet_out_of_spec); +DECLARE_SCMI_QUIRK(perf_level_get_fc_force); + +#endif /* _SCMI_QUIRKS_H */ diff --git a/drivers/firmware/arm_scmi/raw_mode.c b/drivers/firmware/arm_scmi/raw_mode.c index 7cc0d616b8de..3d543b1d8947 100644 --- a/drivers/firmware/arm_scmi/raw_mode.c +++ b/drivers/firmware/arm_scmi/raw_mode.c @@ -671,11 +671,13 @@ static int scmi_do_xfer_raw_start(struct scmi_raw_mode_info *raw, * @len: Length of the message in @buf. * @chan_id: The channel ID to use. * @async: A flag stating if an asynchronous command is required. + * @poll: A flag stating if a polling transmission is required. 
* * Return: 0 on Success */ static int scmi_raw_message_send(struct scmi_raw_mode_info *raw, - void *buf, size_t len, u8 chan_id, bool async) + void *buf, size_t len, u8 chan_id, + bool async, bool poll) { int ret; struct scmi_xfer *xfer; @@ -684,6 +686,16 @@ static int scmi_raw_message_send(struct scmi_raw_mode_info *raw, if (ret) return ret; + if (poll) { + if (is_transport_polling_capable(raw->desc)) { + xfer->hdr.poll_completion = true; + } else { + dev_err(raw->handle->dev, + "Failed to send RAW message - Polling NOT supported\n"); + return -EINVAL; + } + } + ret = scmi_do_xfer_raw_start(raw, xfer, chan_id, async); if (ret) scmi_xfer_raw_put(raw->handle, xfer); @@ -801,7 +813,7 @@ static ssize_t scmi_dbg_raw_mode_common_read(struct file *filp, static ssize_t scmi_dbg_raw_mode_common_write(struct file *filp, const char __user *buf, size_t count, loff_t *ppos, - bool async) + bool async, bool poll) { int ret; struct scmi_dbg_raw_data *rd = filp->private_data; @@ -831,7 +843,7 @@ static ssize_t scmi_dbg_raw_mode_common_write(struct file *filp, } ret = scmi_raw_message_send(rd->raw, rd->tx.buf, rd->tx_size, - rd->chan_id, async); + rd->chan_id, async, poll); /* Reset ppos for next message ... */ rd->tx_size = 0; @@ -875,7 +887,8 @@ static ssize_t scmi_dbg_raw_mode_message_write(struct file *filp, const char __user *buf, size_t count, loff_t *ppos) { - return scmi_dbg_raw_mode_common_write(filp, buf, count, ppos, false); + return scmi_dbg_raw_mode_common_write(filp, buf, count, ppos, + false, false); } static __poll_t scmi_dbg_raw_mode_message_poll(struct file *filp, @@ -964,7 +977,8 @@ static ssize_t scmi_dbg_raw_mode_message_async_write(struct file *filp, const char __user *buf, size_t count, loff_t *ppos) { - return scmi_dbg_raw_mode_common_write(filp, buf, count, ppos, true); + return scmi_dbg_raw_mode_common_write(filp, buf, count, ppos, + true, false); } static const struct file_operations scmi_dbg_raw_mode_message_async_fops = { @@ -976,6 +990,40 @@ static const struct file_operations scmi_dbg_raw_mode_message_async_fops = { .owner = THIS_MODULE, }; +static ssize_t scmi_dbg_raw_mode_message_poll_write(struct file *filp, + const char __user *buf, + size_t count, loff_t *ppos) +{ + return scmi_dbg_raw_mode_common_write(filp, buf, count, ppos, + false, true); +} + +static const struct file_operations scmi_dbg_raw_mode_message_poll_fops = { + .open = scmi_dbg_raw_mode_open, + .release = scmi_dbg_raw_mode_release, + .read = scmi_dbg_raw_mode_message_read, + .write = scmi_dbg_raw_mode_message_poll_write, + .poll = scmi_dbg_raw_mode_message_poll, + .owner = THIS_MODULE, +}; + +static ssize_t scmi_dbg_raw_mode_message_poll_async_write(struct file *filp, + const char __user *buf, + size_t count, loff_t *ppos) +{ + return scmi_dbg_raw_mode_common_write(filp, buf, count, ppos, + true, true); +} + +static const struct file_operations scmi_dbg_raw_mode_message_poll_async_fops = { + .open = scmi_dbg_raw_mode_open, + .release = scmi_dbg_raw_mode_release, + .read = scmi_dbg_raw_mode_message_read, + .write = scmi_dbg_raw_mode_message_poll_async_write, + .poll = scmi_dbg_raw_mode_message_poll, + .owner = THIS_MODULE, +}; + static ssize_t scmi_test_dbg_raw_mode_notif_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos) @@ -1199,6 +1247,12 @@ void *scmi_raw_mode_init(const struct scmi_handle *handle, debugfs_create_file("message_async", 0600, raw->dentry, raw, &scmi_dbg_raw_mode_message_async_fops); + debugfs_create_file("message_poll", 0600, raw->dentry, raw, + 
&scmi_dbg_raw_mode_message_poll_fops); + + debugfs_create_file("message_poll_async", 0600, raw->dentry, raw, + &scmi_dbg_raw_mode_message_poll_async_fops); + debugfs_create_file("notification", 0400, raw->dentry, raw, &scmi_dbg_raw_mode_notification_fops); @@ -1230,6 +1284,14 @@ void *scmi_raw_mode_init(const struct scmi_handle *handle, debugfs_create_file_aux_num("message_async", 0600, chd, raw, channels[i], &scmi_dbg_raw_mode_message_async_fops); + + debugfs_create_file_aux_num("message_poll", 0600, chd, + raw, channels[i], + &scmi_dbg_raw_mode_message_poll_fops); + + debugfs_create_file_aux_num("message_poll_async", 0600, + chd, raw, channels[i], + &scmi_dbg_raw_mode_message_poll_async_fops); } } diff --git a/drivers/firmware/arm_scmi/vendors/imx/Kconfig b/drivers/firmware/arm_scmi/vendors/imx/Kconfig index a01bf5e47301..c34c8c837441 100644 --- a/drivers/firmware/arm_scmi/vendors/imx/Kconfig +++ b/drivers/firmware/arm_scmi/vendors/imx/Kconfig @@ -12,6 +12,30 @@ config IMX_SCMI_BBM_EXT To compile this driver as a module, choose M here: the module will be called imx-sm-bbm. +config IMX_SCMI_CPU_EXT + tristate "i.MX SCMI CPU EXTENSION" + depends on ARM_SCMI_PROTOCOL || (COMPILE_TEST && OF) + depends on IMX_SCMI_CPU_DRV + default y if ARCH_MXC + help + This enables i.MX System CPU Protocol to manage cpu + start, stop and etc. + + To compile this driver as a module, choose M here: the + module will be called imx-sm-cpu. + +config IMX_SCMI_LMM_EXT + tristate "i.MX SCMI LMM EXTENSION" + depends on ARM_SCMI_PROTOCOL || (COMPILE_TEST && OF) + depends on IMX_SCMI_LMM_DRV + default y if ARCH_MXC + help + This enables i.MX System Logical Machine Protocol to + manage Logical Machines boot, shutdown and etc. + + To compile this driver as a module, choose M here: the + module will be called imx-sm-lmm. 
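The imx-sm-lmm driver added later in this series exposes its functionality through scmi_imx_lmm_proto_ops (lmm_info, lmm_power_boot, lmm_reset_vector_set, lmm_shutdown). As a rough illustration of how a consumer might drive those ops once it holds a protocol handle, the hypothetical helper below queries, boots and then gracefully shuts down a logical machine; demo_cycle_lm() and the way the ops/handle pair is obtained are invented, and the op signatures are assumed to follow the assignments in the imx-sm-lmm.c hunk below (BIT(0) mirrors its SCMI_IMX_LMM_SHUTDOWN_GRACEFUL flag).

/* Hypothetical consumer of the i.MX LMM vendor protocol ops (sketch). */
#include <linux/bits.h>
#include <linux/scmi_protocol.h>
#include <linux/scmi_imx_protocol.h>
#include <linux/types.h>

static int demo_cycle_lm(const struct scmi_imx_lmm_proto_ops *ops,
			 const struct scmi_protocol_handle *ph, u32 lmid)
{
	struct scmi_imx_lmm_info info;
	int ret;

	/* Name, state and error status of the target logical machine. */
	ret = ops->lmm_info(ph, lmid, &info);
	if (ret)
		return ret;

	/* Boot the LM ... */
	ret = ops->lmm_power_boot(ph, lmid, true);
	if (ret)
		return ret;

	/* ... then request a graceful shutdown (flags bit 0, per the driver). */
	return ops->lmm_shutdown(ph, lmid, BIT(0));
}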
+ config IMX_SCMI_MISC_EXT tristate "i.MX SCMI MISC EXTENSION" depends on ARM_SCMI_PROTOCOL || (COMPILE_TEST && OF) diff --git a/drivers/firmware/arm_scmi/vendors/imx/Makefile b/drivers/firmware/arm_scmi/vendors/imx/Makefile index d3ee6d544924..e3a5ea46345c 100644 --- a/drivers/firmware/arm_scmi/vendors/imx/Makefile +++ b/drivers/firmware/arm_scmi/vendors/imx/Makefile @@ -1,3 +1,5 @@ # SPDX-License-Identifier: GPL-2.0-only obj-$(CONFIG_IMX_SCMI_BBM_EXT) += imx-sm-bbm.o +obj-$(CONFIG_IMX_SCMI_CPU_EXT) += imx-sm-cpu.o +obj-$(CONFIG_IMX_SCMI_LMM_EXT) += imx-sm-lmm.o obj-$(CONFIG_IMX_SCMI_MISC_EXT) += imx-sm-misc.o diff --git a/drivers/firmware/arm_scmi/vendors/imx/imx-sm-cpu.c b/drivers/firmware/arm_scmi/vendors/imx/imx-sm-cpu.c new file mode 100644 index 000000000000..66f47f5371e5 --- /dev/null +++ b/drivers/firmware/arm_scmi/vendors/imx/imx-sm-cpu.c @@ -0,0 +1,276 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * System control and Management Interface (SCMI) NXP CPU Protocol + * + * Copyright 2025 NXP + */ + +#include <linux/bits.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/scmi_protocol.h> +#include <linux/scmi_imx_protocol.h> + +#include "../../protocols.h" +#include "../../notify.h" + +#define SCMI_PROTOCOL_SUPPORTED_VERSION 0x10000 + +enum scmi_imx_cpu_protocol_cmd { + SCMI_IMX_CPU_ATTRIBUTES = 0x3, + SCMI_IMX_CPU_START = 0x4, + SCMI_IMX_CPU_STOP = 0x5, + SCMI_IMX_CPU_RESET_VECTOR_SET = 0x6, + SCMI_IMX_CPU_INFO_GET = 0xC, +}; + +struct scmi_imx_cpu_info { + u32 nr_cpu; +}; + +#define SCMI_IMX_CPU_NR_CPU_MASK GENMASK(15, 0) +struct scmi_msg_imx_cpu_protocol_attributes { + __le32 attributes; +}; + +struct scmi_msg_imx_cpu_attributes_out { + __le32 attributes; +#define CPU_MAX_NAME 16 + u8 name[CPU_MAX_NAME]; +}; + +struct scmi_imx_cpu_reset_vector_set_in { + __le32 cpuid; +#define CPU_VEC_FLAGS_RESUME BIT(31) +#define CPU_VEC_FLAGS_START BIT(30) +#define CPU_VEC_FLAGS_BOOT BIT(29) + __le32 flags; + __le32 resetvectorlow; + __le32 resetvectorhigh; +}; + +struct scmi_imx_cpu_info_get_out { +#define CPU_RUN_MODE_START 0 +#define CPU_RUN_MODE_HOLD 1 +#define CPU_RUN_MODE_STOP 2 +#define CPU_RUN_MODE_SLEEP 3 + __le32 runmode; + __le32 sleepmode; + __le32 resetvectorlow; + __le32 resetvectorhigh; +}; + +static int scmi_imx_cpu_validate_cpuid(const struct scmi_protocol_handle *ph, + u32 cpuid) +{ + struct scmi_imx_cpu_info *info = ph->get_priv(ph); + + if (cpuid >= info->nr_cpu) + return -EINVAL; + + return 0; +} + +static int scmi_imx_cpu_start(const struct scmi_protocol_handle *ph, + u32 cpuid, bool start) +{ + struct scmi_xfer *t; + u8 msg_id; + int ret; + + ret = scmi_imx_cpu_validate_cpuid(ph, cpuid); + if (ret) + return ret; + + if (start) + msg_id = SCMI_IMX_CPU_START; + else + msg_id = SCMI_IMX_CPU_STOP; + + ret = ph->xops->xfer_get_init(ph, msg_id, sizeof(u32), 0, &t); + if (ret) + return ret; + + put_unaligned_le32(cpuid, t->tx.buf); + ret = ph->xops->do_xfer(ph, t); + + ph->xops->xfer_put(ph, t); + + return ret; +} + +static int scmi_imx_cpu_reset_vector_set(const struct scmi_protocol_handle *ph, + u32 cpuid, u64 vector, bool start, + bool boot, bool resume) +{ + struct scmi_imx_cpu_reset_vector_set_in *in; + struct scmi_xfer *t; + int ret; + + ret = scmi_imx_cpu_validate_cpuid(ph, cpuid); + if (ret) + return ret; + + ret = ph->xops->xfer_get_init(ph, SCMI_IMX_CPU_RESET_VECTOR_SET, sizeof(*in), + 0, &t); + if (ret) + return ret; + + in = t->tx.buf; + in->cpuid = cpu_to_le32(cpuid); + in->flags = 
cpu_to_le32(0); + if (start) + in->flags |= le32_encode_bits(1, CPU_VEC_FLAGS_START); + if (boot) + in->flags |= le32_encode_bits(1, CPU_VEC_FLAGS_BOOT); + if (resume) + in->flags |= le32_encode_bits(1, CPU_VEC_FLAGS_RESUME); + in->resetvectorlow = cpu_to_le32(lower_32_bits(vector)); + in->resetvectorhigh = cpu_to_le32(upper_32_bits(vector)); + ret = ph->xops->do_xfer(ph, t); + + ph->xops->xfer_put(ph, t); + + return ret; +} + +static int scmi_imx_cpu_started(const struct scmi_protocol_handle *ph, u32 cpuid, + bool *started) +{ + struct scmi_imx_cpu_info_get_out *out; + struct scmi_xfer *t; + u32 mode; + int ret; + + if (!started) + return -EINVAL; + + *started = false; + ret = scmi_imx_cpu_validate_cpuid(ph, cpuid); + if (ret) + return ret; + + ret = ph->xops->xfer_get_init(ph, SCMI_IMX_CPU_INFO_GET, sizeof(u32), + 0, &t); + if (ret) + return ret; + + put_unaligned_le32(cpuid, t->tx.buf); + ret = ph->xops->do_xfer(ph, t); + if (!ret) { + out = t->rx.buf; + mode = le32_to_cpu(out->runmode); + if (mode == CPU_RUN_MODE_START || mode == CPU_RUN_MODE_SLEEP) + *started = true; + } + + ph->xops->xfer_put(ph, t); + + return ret; +} + +static const struct scmi_imx_cpu_proto_ops scmi_imx_cpu_proto_ops = { + .cpu_reset_vector_set = scmi_imx_cpu_reset_vector_set, + .cpu_start = scmi_imx_cpu_start, + .cpu_started = scmi_imx_cpu_started, +}; + +static int scmi_imx_cpu_protocol_attributes_get(const struct scmi_protocol_handle *ph, + struct scmi_imx_cpu_info *info) +{ + struct scmi_msg_imx_cpu_protocol_attributes *attr; + struct scmi_xfer *t; + int ret; + + ret = ph->xops->xfer_get_init(ph, PROTOCOL_ATTRIBUTES, 0, + sizeof(*attr), &t); + if (ret) + return ret; + + attr = t->rx.buf; + + ret = ph->xops->do_xfer(ph, t); + if (!ret) { + info->nr_cpu = le32_get_bits(attr->attributes, SCMI_IMX_CPU_NR_CPU_MASK); + dev_info(ph->dev, "i.MX SM CPU: %d cpus\n", + info->nr_cpu); + } + + ph->xops->xfer_put(ph, t); + + return ret; +} + +static int scmi_imx_cpu_attributes_get(const struct scmi_protocol_handle *ph, + u32 cpuid) +{ + struct scmi_msg_imx_cpu_attributes_out *out; + char name[SCMI_SHORT_NAME_MAX_SIZE] = {'\0'}; + struct scmi_xfer *t; + int ret; + + ret = ph->xops->xfer_get_init(ph, SCMI_IMX_CPU_ATTRIBUTES, sizeof(u32), 0, &t); + if (ret) + return ret; + + put_unaligned_le32(cpuid, t->tx.buf); + ret = ph->xops->do_xfer(ph, t); + if (!ret) { + out = t->rx.buf; + strscpy(name, out->name, SCMI_SHORT_NAME_MAX_SIZE); + dev_info(ph->dev, "i.MX CPU: name: %s\n", name); + } else { + dev_err(ph->dev, "i.MX cpu: Failed to get info of cpu(%u)\n", cpuid); + } + + ph->xops->xfer_put(ph, t); + + return ret; +} + +static int scmi_imx_cpu_protocol_init(const struct scmi_protocol_handle *ph) +{ + struct scmi_imx_cpu_info *info; + u32 version; + int ret, i; + + ret = ph->xops->version_get(ph, &version); + if (ret) + return ret; + + dev_info(ph->dev, "NXP SM CPU Protocol Version %d.%d\n", + PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version)); + + info = devm_kzalloc(ph->dev, sizeof(*info), GFP_KERNEL); + if (!info) + return -ENOMEM; + + ret = scmi_imx_cpu_protocol_attributes_get(ph, info); + if (ret) + return ret; + + for (i = 0; i < info->nr_cpu; i++) { + ret = scmi_imx_cpu_attributes_get(ph, i); + if (ret) + return ret; + } + + return ph->set_priv(ph, info, version); +} + +static const struct scmi_protocol scmi_imx_cpu = { + .id = SCMI_PROTOCOL_IMX_CPU, + .owner = THIS_MODULE, + .instance_init = &scmi_imx_cpu_protocol_init, + .ops = &scmi_imx_cpu_proto_ops, + .supported_version = SCMI_PROTOCOL_SUPPORTED_VERSION, + 
.vendor_id = SCMI_IMX_VENDOR, + .sub_vendor_id = SCMI_IMX_SUBVENDOR, +}; +module_scmi_protocol(scmi_imx_cpu); + +MODULE_ALIAS("scmi-protocol-" __stringify(SCMI_PROTOCOL_IMX_CPU) "-" SCMI_IMX_VENDOR); +MODULE_DESCRIPTION("i.MX SCMI CPU driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/firmware/arm_scmi/vendors/imx/imx-sm-lmm.c b/drivers/firmware/arm_scmi/vendors/imx/imx-sm-lmm.c new file mode 100644 index 000000000000..b519c67fe920 --- /dev/null +++ b/drivers/firmware/arm_scmi/vendors/imx/imx-sm-lmm.c @@ -0,0 +1,263 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * System control and Management Interface (SCMI) NXP LMM Protocol + * + * Copyright 2025 NXP + */ + +#include <linux/bits.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/scmi_protocol.h> +#include <linux/scmi_imx_protocol.h> + +#include "../../protocols.h" +#include "../../notify.h" + +#define SCMI_PROTOCOL_SUPPORTED_VERSION 0x10000 + +enum scmi_imx_lmm_protocol_cmd { + SCMI_IMX_LMM_ATTRIBUTES = 0x3, + SCMI_IMX_LMM_BOOT = 0x4, + SCMI_IMX_LMM_RESET = 0x5, + SCMI_IMX_LMM_SHUTDOWN = 0x6, + SCMI_IMX_LMM_WAKE = 0x7, + SCMI_IMX_LMM_SUSPEND = 0x8, + SCMI_IMX_LMM_NOTIFY = 0x9, + SCMI_IMX_LMM_RESET_REASON = 0xA, + SCMI_IMX_LMM_POWER_ON = 0xB, + SCMI_IMX_LMM_RESET_VECTOR_SET = 0xC, +}; + +struct scmi_imx_lmm_priv { + u32 nr_lmm; +}; + +#define SCMI_IMX_LMM_NR_LM_MASK GENMASK(5, 0) +#define SCMI_IMX_LMM_NR_MAX 16 +struct scmi_msg_imx_lmm_protocol_attributes { + __le32 attributes; +}; + +struct scmi_msg_imx_lmm_attributes_out { + __le32 lmid; + __le32 attributes; + __le32 state; + __le32 errstatus; + u8 name[LMM_MAX_NAME]; +}; + +struct scmi_imx_lmm_reset_vector_set_in { + __le32 lmid; + __le32 cpuid; + __le32 flags; /* reserved for future extension */ + __le32 resetvectorlow; + __le32 resetvectorhigh; +}; + +struct scmi_imx_lmm_shutdown_in { + __le32 lmid; +#define SCMI_IMX_LMM_SHUTDOWN_GRACEFUL BIT(0) + __le32 flags; +}; + +static int scmi_imx_lmm_validate_lmid(const struct scmi_protocol_handle *ph, u32 lmid) +{ + struct scmi_imx_lmm_priv *priv = ph->get_priv(ph); + + if (lmid >= priv->nr_lmm) + return -EINVAL; + + return 0; +} + +static int scmi_imx_lmm_attributes(const struct scmi_protocol_handle *ph, + u32 lmid, struct scmi_imx_lmm_info *info) +{ + struct scmi_msg_imx_lmm_attributes_out *out; + struct scmi_xfer *t; + int ret; + + ret = ph->xops->xfer_get_init(ph, SCMI_IMX_LMM_ATTRIBUTES, sizeof(u32), 0, &t); + if (ret) + return ret; + + put_unaligned_le32(lmid, t->tx.buf); + ret = ph->xops->do_xfer(ph, t); + if (!ret) { + out = t->rx.buf; + info->lmid = le32_to_cpu(out->lmid); + info->state = le32_to_cpu(out->state); + info->errstatus = le32_to_cpu(out->errstatus); + strscpy(info->name, out->name); + dev_dbg(ph->dev, "i.MX LMM: Logical Machine(%d), name: %s\n", + info->lmid, info->name); + } else { + dev_err(ph->dev, "i.MX LMM: Failed to get info of Logical Machine(%u)\n", lmid); + } + + ph->xops->xfer_put(ph, t); + + return ret; +} + +static int +scmi_imx_lmm_power_boot(const struct scmi_protocol_handle *ph, u32 lmid, bool boot) +{ + struct scmi_xfer *t; + u8 msg_id; + int ret; + + ret = scmi_imx_lmm_validate_lmid(ph, lmid); + if (ret) + return ret; + + if (boot) + msg_id = SCMI_IMX_LMM_BOOT; + else + msg_id = SCMI_IMX_LMM_POWER_ON; + + ret = ph->xops->xfer_get_init(ph, msg_id, sizeof(u32), 0, &t); + if (ret) + return ret; + + put_unaligned_le32(lmid, t->tx.buf); + ret = ph->xops->do_xfer(ph, t); + + ph->xops->xfer_put(ph, t); + + return ret; +} + +static 
int scmi_imx_lmm_reset_vector_set(const struct scmi_protocol_handle *ph, + u32 lmid, u32 cpuid, u32 flags, u64 vector) +{ + struct scmi_imx_lmm_reset_vector_set_in *in; + struct scmi_xfer *t; + int ret; + + ret = ph->xops->xfer_get_init(ph, SCMI_IMX_LMM_RESET_VECTOR_SET, sizeof(*in), + 0, &t); + if (ret) + return ret; + + in = t->tx.buf; + in->lmid = cpu_to_le32(lmid); + in->cpuid = cpu_to_le32(cpuid); + in->flags = cpu_to_le32(0); + in->resetvectorlow = cpu_to_le32(lower_32_bits(vector)); + in->resetvectorhigh = cpu_to_le32(upper_32_bits(vector)); + ret = ph->xops->do_xfer(ph, t); + + ph->xops->xfer_put(ph, t); + + return ret; +} + +static int scmi_imx_lmm_shutdown(const struct scmi_protocol_handle *ph, u32 lmid, + u32 flags) +{ + struct scmi_imx_lmm_shutdown_in *in; + struct scmi_xfer *t; + int ret; + + ret = scmi_imx_lmm_validate_lmid(ph, lmid); + if (ret) + return ret; + + ret = ph->xops->xfer_get_init(ph, SCMI_IMX_LMM_SHUTDOWN, sizeof(*in), + 0, &t); + if (ret) + return ret; + + in = t->tx.buf; + in->lmid = cpu_to_le32(lmid); + if (flags & SCMI_IMX_LMM_SHUTDOWN_GRACEFUL) + in->flags = cpu_to_le32(SCMI_IMX_LMM_SHUTDOWN_GRACEFUL); + else + in->flags = cpu_to_le32(0); + ret = ph->xops->do_xfer(ph, t); + + ph->xops->xfer_put(ph, t); + + return ret; +} + +static const struct scmi_imx_lmm_proto_ops scmi_imx_lmm_proto_ops = { + .lmm_power_boot = scmi_imx_lmm_power_boot, + .lmm_info = scmi_imx_lmm_attributes, + .lmm_reset_vector_set = scmi_imx_lmm_reset_vector_set, + .lmm_shutdown = scmi_imx_lmm_shutdown, +}; + +static int scmi_imx_lmm_protocol_attributes_get(const struct scmi_protocol_handle *ph, + struct scmi_imx_lmm_priv *priv) +{ + struct scmi_msg_imx_lmm_protocol_attributes *attr; + struct scmi_xfer *t; + int ret; + + ret = ph->xops->xfer_get_init(ph, PROTOCOL_ATTRIBUTES, 0, + sizeof(*attr), &t); + if (ret) + return ret; + + attr = t->rx.buf; + + ret = ph->xops->do_xfer(ph, t); + if (!ret) { + priv->nr_lmm = le32_get_bits(attr->attributes, SCMI_IMX_LMM_NR_LM_MASK); + if (priv->nr_lmm > SCMI_IMX_LMM_NR_MAX) { + dev_err(ph->dev, "i.MX LMM: %d:Exceed max supported Logical Machines\n", + priv->nr_lmm); + ret = -EINVAL; + } else { + dev_info(ph->dev, "i.MX LMM: %d Logical Machines\n", priv->nr_lmm); + } + } + + ph->xops->xfer_put(ph, t); + + return ret; +} + +static int scmi_imx_lmm_protocol_init(const struct scmi_protocol_handle *ph) +{ + struct scmi_imx_lmm_priv *info; + u32 version; + int ret; + + ret = ph->xops->version_get(ph, &version); + if (ret) + return ret; + + dev_info(ph->dev, "NXP SM LMM Version %d.%d\n", + PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version)); + + info = devm_kzalloc(ph->dev, sizeof(*info), GFP_KERNEL); + if (!info) + return -ENOMEM; + + ret = scmi_imx_lmm_protocol_attributes_get(ph, info); + if (ret) + return ret; + + return ph->set_priv(ph, info, version); +} + +static const struct scmi_protocol scmi_imx_lmm = { + .id = SCMI_PROTOCOL_IMX_LMM, + .owner = THIS_MODULE, + .instance_init = &scmi_imx_lmm_protocol_init, + .ops = &scmi_imx_lmm_proto_ops, + .supported_version = SCMI_PROTOCOL_SUPPORTED_VERSION, + .vendor_id = SCMI_IMX_VENDOR, + .sub_vendor_id = SCMI_IMX_SUBVENDOR, +}; +module_scmi_protocol(scmi_imx_lmm); + +MODULE_ALIAS("scmi-protocol-" __stringify(SCMI_PROTOCOL_IMX_LMM) "-" SCMI_IMX_VENDOR); +MODULE_DESCRIPTION("i.MX SCMI LMM driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/firmware/arm_scmi/vendors/imx/imx95.rst b/drivers/firmware/arm_scmi/vendors/imx/imx95.rst index b2dfd6c46ca2..4e246a78a042 100644 --- 
a/drivers/firmware/arm_scmi/vendors/imx/imx95.rst +++ b/drivers/firmware/arm_scmi/vendors/imx/imx95.rst @@ -32,6 +32,518 @@ port, and deploy the SM on supported processors. The SM implements an interface compliant with the Arm SCMI Specification with additional vendor specific extensions. +System Control and Management Logical Machine Management Vendor Protocol +======================================================================== + +The SM adds the concept of logical machines (LMs). These are analogous to +VMs and each has its own instance of SCMI. All normal SCMI calls apply only to +the LM running the calling agent. That includes boot, shutdown, reset, +suspend, wake, etc. If a caller makes the SCMI base call to get a list +of agents, it will only get those on that LM. Each LM is completely isolated +from the others. This is mandatory for these to operate independently. + +This protocol is intended to support boot, shutdown, and reset of other logical +machines (LMs). It is usually used to allow one LM (e.g. OSPM) to manage +another LM, which is usually an offload or accelerator engine. Notifications +from this protocol can also be used to manage a communication link to another +LM. The LMM protocol provides commands to: + +- Describe the protocol version. +- Discover implementation attributes. +- Discover all the LMs defined in the system. +- Boot a target LM. +- Shutdown a target LM (gracefully or forcibly). +- Reset a target LM (gracefully or forcibly). +- Wake a target LM from suspend. +- Suspend a target LM (gracefully). +- Read boot/shutdown/reset information for a target LM. +- Get notifications when a target LM boots or shuts down (e.g. if LM 'X' has +  requested notifications for LM 'Y', the SCMI firmware will notify LM 'X' +  whenever LM 'Y' boots or shuts down). + +'Graceful' means asking the LM itself to shut down/reset/etc. (e.g. a +notification is sent to Linux, and Linux then reboots or powers itself down). +Graceful requests are asynchronous: SUCCESS only means the request was +delivered successfully, not that the reboot/reset has actually completed. + +'Forceful' means the SM forcibly shuts down/resets/etc. the LM. Forceful +requests are synchronous: SUCCESS means the LM really has been shut +down/reset/etc. +Commands that have no Graceful/Forceful flag, such as WAKE and SUSPEND, are +Graceful commands. + +Commands: +_________ + +PROTOCOL_VERSION +~~~~~~~~~~~~~~~~ + +message_id: 0x0 +protocol_id: 0x80 +This command is mandatory. + ++---------------+--------------------------------------------------------------+ +|Return values                                                                 | ++---------------+--------------------------------------------------------------+ +|Name           |Description                                                   | ++---------------+--------------------------------------------------------------+ +|int32 status   | See ARM SCMI Specification for status code definitions.      | ++---------------+--------------------------------------------------------------+ +|uint32 version | For this revision of the specification, this value must be   | +|               | 0x10000.                                                     | ++---------------+--------------------------------------------------------------+ + +PROTOCOL_ATTRIBUTES +~~~~~~~~~~~~~~~~~~~ + +message_id: 0x1 +protocol_id: 0x80 +This command is mandatory. 
+ ++------------------+-----------------------------------------------------------+ +|Return values | ++------------------+-----------------------------------------------------------+ +|Name |Description | ++------------------+-----------------------------------------------------------+ +|int32 status | See ARM SCMI Specification for status code definitions. | ++------------------+-----------------------------------------------------------+ +|uint32 attributes |Protocol attributes: | +| |Bits[31:5] Reserved, must be zero. | +| |Bits[4:0] Number of Logical Machines | +| |Note that due to both hardware limitations and reset reason| +| |field limitations, the max number of LM is 16. The minimum | +| |is 1. | ++------------------+-----------------------------------------------------------+ + +PROTOCOL_MESSAGE_ATTRIBUTES +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +message_id: 0x2 +protocol_id: 0x80 +This command is mandatory. + ++------------------+-----------------------------------------------------------+ +|Return values | ++------------------+-----------------------------------------------------------+ +|Name |Description | ++------------------+-----------------------------------------------------------+ +|int32 status |SUCCESS: in case the message is implemented and available | +| |to use. | +| |NOT_FOUND: if the message identified by message_id is | +| |invalid or not implemented | ++------------------+-----------------------------------------------------------+ +|uint32 attributes |Flags that are associated with a specific command in the | +| |protocol. For all commands in this protocol, this | +| |parameter has a value of 0 | ++------------------+-----------------------------------------------------------+ + +LMM_ATTRIBUTES +~~~~~~~~~~~~~~ + +message_id: 0x3 +protocol_id: 0x80 +This command is mandatory. + ++------------------+-----------------------------------------------------------+ +|Parameters | ++------------------+-----------------------------------------------------------+ +|Name |Description | ++------------------+-----------------------------------------------------------+ +|uint32 lmid |ID of the Logical Machine | ++------------------+-----------------------------------------------------------+ +|Return values | ++------------------+-----------------------------------------------------------+ +|Name |Description | ++------------------+-----------------------------------------------------------+ +|int32 status |SUCCESS: if valid attributes are returned. | +| |NOT_FOUND: if lmid not points to a valid logical machine. | +| |DENIED: if the agent does not have permission to get info | +| |for the LM specified by lmid. | ++------------------+-----------------------------------------------------------+ +|uint32 lmid |Identifier of the LM whose identification is requested. | +| |This field is: Populated with the lmid of the calling | +| |agent, when the lmid parameter passed via the command is | +| |0xFFFFFFFF. Identical to the lmid field passed via the | +| |calling parameters, in all other cases | ++------------------+-----------------------------------------------------------+ +|uint32 attributes | Bits[31:0] reserved. 
must be zero | ++------------------+-----------------------------------------------------------+ +|uint32 state | Current state of the LM | ++------------------+-----------------------------------------------------------+ +|uint32 errStatus | Last error status recorded | ++------------------+-----------------------------------------------------------+ +|char name[16] | A NULL terminated ASCII string with the LM name, of up | +| | to 16 bytes | ++------------------+-----------------------------------------------------------+ + +LMM_BOOT +~~~~~~~~ + +message_id: 0x4 +protocol_id: 0x80 +This command is mandatory. + ++------------------+-----------------------------------------------------------+ +|Parameters | ++------------------+-----------------------------------------------------------+ +|Name |Description | ++------------------+-----------------------------------------------------------+ +|uint32 lmid |ID of the Logical Machine | ++------------------+-----------------------------------------------------------+ +|Return values | ++------------------+-----------------------------------------------------------+ +|Name |Description | ++------------------+-----------------------------------------------------------+ +|int32 status |SUCCESS: if LM boots successfully started. | +| |NOT_FOUND: if lmid not points to a valid logical machine. | +| |INVALID_PARAMETERS: if lmid is same as the caller. | +| |DENIED: if the agent does not have permission to manage the| +| |the LM specified by lmid. | ++------------------+-----------------------------------------------------------+ + +LMM_RESET +~~~~~~~~~ + +message_id: 0x5 +protocol_id: 0x80 +This command is mandatory. + ++------------------+-----------------------------------------------------------+ +|Parameters | ++------------------+-----------------------------------------------------------+ +|Name |Description | ++------------------+-----------------------------------------------------------+ +|uint32 lmid |ID of the Logical Machine | ++------------------+-----------------------------------------------------------+ +|uint32 flags |Reset flags: | +| |Bits[31:1] Reserved, must be zero. | +| |Bit[0] Graceful request: | +| |Set to 1 if the request is a graceful request. | +| |Set to 0 if the request is a forceful request. | ++------------------+-----------------------------------------------------------+ +|Return values | ++------------------+-----------------------------------------------------------+ +|Name |Description | ++------------------+-----------------------------------------------------------+ +|int32 status |SUCCESS: The LMM RESET command finished successfully in | +| |graceful reset or LM successfully resets in forceful reset.| +| |NOT_FOUND: if lmid not points to a valid logical machine. | +| |INVALID_PARAMETERS: if lmid is same as the caller. | +| |DENIED: if the agent does not have permission to manage the| +| |the LM specified by lmid. | ++------------------+-----------------------------------------------------------+ + +LMM_SHUTDOWN +~~~~~~~~~~~~ + +message_id: 0x6 +protocol_id: 0x80 +This command is mandatory. 
+ ++------------------+-----------------------------------------------------------+ +|Parameters | ++------------------+-----------------------------------------------------------+ +|Name |Description | ++------------------+-----------------------------------------------------------+ +|uint32 lmid |ID of the Logical Machine | ++------------------+-----------------------------------------------------------+ +|uint32 flags |Reset flags: | +| |Bits[31:1] Reserved, must be zero. | +| |Bit[0] Graceful request: | +| |Set to 1 if the request is a graceful request. | +| |Set to 0 if the request is a forceful request. | ++------------------+-----------------------------------------------------------+ +|Return values | ++------------------+-----------------------------------------------------------+ +|Name |Description | ++------------------+-----------------------------------------------------------+ +|int32 status |SUCCESS: The LMM shutdown command finished successfully in | +| |graceful request or LM successfully shutdown in forceful | +| |request. | +| |NOT_FOUND: if lmid not points to a valid logical machine. | +| |INVALID_PARAMETERS: if lmid is same as the caller. | +| |DENIED: if the agent does not have permission to manage the| +| |the LM specified by lmid. | ++------------------+-----------------------------------------------------------+ + +LMM_WAKE +~~~~~~~~ + +message_id: 0x7 +protocol_id: 0x80 +This command is mandatory. + ++------------------+-----------------------------------------------------------+ +|Parameters | ++------------------+-----------------------------------------------------------+ +|Name |Description | ++------------------+-----------------------------------------------------------+ +|uint32 lmid |ID of the Logical Machine | ++------------------+-----------------------------------------------------------+ +|Return values | ++------------------+-----------------------------------------------------------+ +|Name |Description | ++------------------+-----------------------------------------------------------+ +|int32 status |SUCCESS: if LM wake command successfully returns. | +| |NOT_FOUND: if lmid not points to a valid logical machine. | +| |INVALID_PARAMETERS: if lmid is same as the caller. | +| |DENIED: if the agent does not have permission to manage the| +| |the LM specified by lmid. | ++------------------+-----------------------------------------------------------+ + +LMM_SUSPEND +~~~~~~~~~~~ + +message_id: 0x8 +protocol_id: 0x80 +This command is mandatory. + ++------------------+-----------------------------------------------------------+ +|Parameters | ++------------------+-----------------------------------------------------------+ +|Name |Description | ++------------------+-----------------------------------------------------------+ +|uint32 lmid |ID of the Logical Machine | ++------------------+-----------------------------------------------------------+ +|Return values | ++------------------+-----------------------------------------------------------+ +|Name |Description | ++------------------+-----------------------------------------------------------+ +|int32 status |SUCCESS: if LM suspend command successfully returns. | +| |NOT_FOUND: if lmid not points to a valid logical machine. | +| |INVALID_PARAMETERS: if lmid is same as the caller. | +| |DENIED: if the agent does not have permission to manage the| +| |the LM specified by lmid. 
| ++------------------+-----------------------------------------------------------+ + +LMM_NOTIFY +~~~~~~~~~~ + +message_id: 0x9 +protocol_id: 0x80 +This command is mandatory. + ++------------------+-----------------------------------------------------------+ +|Parameters | ++------------------+-----------------------------------------------------------+ +|Name |Description | ++------------------+-----------------------------------------------------------+ +|uint32 lmid |ID of the Logical Machine | ++------------------+-----------------------------------------------------------+ +|uint32 flags |Notification flags: | +| |Bits[31:3] Reserved, must be zero. | +| |Bit[3] Wake (resume) notification: | +| |Set to 1 to send notification. | +| |Set to 0 if no notification. | +| |Bit[2] Suspend (sleep) notification: | +| |Set to 1 to send notification. | +| |Set to 0 if no notification. | +| |Bit[1] Shutdown (off) notification: | +| |Set to 1 to send notification. | +| |Set to 0 if no notification. | +| |Bit[0] Boot (on) notification: | +| |Set to 1 to send notification. | +| |Set to 0 if no notification | ++------------------+-----------------------------------------------------------+ +|Return values | ++------------------+-----------------------------------------------------------+ +|Name |Description | ++------------------+-----------------------------------------------------------+ +|int32 status |SUCCESS: if the notification state successfully updated. | +| |NOT_FOUND: if lmid not points to a valid logical machine. | +| |INVALID_PARAMETERS: if input attributes flag specifies | +| |unsupported or invalid configurations. | +| |DENIED: if the agent does not have permission to request | +| |the notification. | ++------------------+-----------------------------------------------------------+ + +LMM_RESET_REASON +~~~~~~~~~~~~~~~~ + +message_id: 0xA +protocol_id: 0x80 +This command is mandatory. + +This command is to return the reset reason that caused the last reset, such as +POR, WDOG, JTAG and etc. + ++---------------------+--------------------------------------------------------+ +|Parameters | ++---------------------+--------------------------------------------------------+ +|Name |Description | ++---------------------+--------------------------------------------------------+ +|uint32 lmid |ID of the Logical Machine | ++---------------------+--------------------------------------------------------+ +|Return values | ++---------------------+--------------------------------------------------------+ +|Name |Description | ++---------------------+--------------------------------------------------------+ +|int32 status |SUCCESS: if the reset reason of the LM successfully | +| |updated. | +| |NOT_FOUND: if lmid not points to a valid logical machine| +| |DENIED: if the agent does not have permission to request| +| |the reset reason. | ++---------------------+--------------------------------------------------------+ +|uint32 bootflags |Boot reason flags. This parameter has the format: | +| |Bits[31] Valid. | +| |Set to 1 if the entire reason is valid. | +| |Set to 0 if the entire reason is not valid. | +| |Bits[30:29] Reserved, must be zero. | +| |Bit[28] Valid origin: | +| |Set to 1 if the origin field is valid. | +| |Set to 0 if the origin field is not valid. | +| |Bits[27:24] Origin. | +| |Logical Machine(LM) ID that causes the BOOT of this LM | +| |Bit[23] Valid err ID: | +| |Set to 1 if the error ID field is valid. | +| |Set to 0 if the error ID field is not valid. 
| +| |Bits[22:8] Error ID(Agent ID of the system). | +| |Bit[7:0] Reason(WDOG, POR, FCCU and etc): | +| |See the SRESR register description in the System | +| |Reset Controller (SRC) section in SoC reference mannual | +| |One reason maps to BIT(reason) in SRESR | ++---------------------+--------------------------------------------------------+ +|uint32 shutdownflags |Shutdown reason flags. This parameter has the format: | +| |Bits[31] Valid. | +| |Set to 1 if the entire reason is valid. | +| |Set to 0 if the entire reason is not valid. | +| |Bits[30:29] Number of valid extended info words. | +| |Bit[28] Valid origin: | +| |Set to 1 if the origin field is valid. | +| |Set to 0 if the origin field is not valid. | +| |Bits[27:24] Origin. | +| |Logical Machine(LM) ID that causes the BOOT of this LM | +| |Bit[23] Valid err ID: | +| |Set to 1 if the error ID field is valid. | +| |Set to 0 if the error ID field is not valid. | +| |Bits[22:8] Error ID(Agent ID of the System). | +| |Bit[7:0] Reason | +| |See the SRESR register description in the System | +| |Reset Controller (SRC) section in SoC reference mannual | +| |One reason maps to BIT(reason) in SRESR | ++---------------------+--------------------------------------------------------+ +|uint32 extinfo[3] |Array of extended info words(e.g. fault pc) | ++---------------------+--------------------------------------------------------+ + +LMM_POWER_ON +~~~~~~~~~~~~ + +message_id: 0xB +protocol_id: 0x80 +This command is mandatory. + ++------------------+-----------------------------------------------------------+ +|Parameters | ++------------------+-----------------------------------------------------------+ +|Name |Description | ++------------------+-----------------------------------------------------------+ +|uint32 lmid |ID of the Logical Machine | ++------------------+-----------------------------------------------------------+ +|Return values | ++------------------+-----------------------------------------------------------+ +|Name |Description | ++------------------+-----------------------------------------------------------+ +|int32 status |SUCCESS: if LM successfully powers on. | +| |NOT_FOUND: if lmid not points to a valid logical machine. | +| |INVALID_PARAMETERS: if lmid is same as the caller. | +| |DENIED: if the agent does not have permission to manage the| +| |the LM specified by lmid. | ++------------------+-----------------------------------------------------------+ + +LMM_RESET_VECTOR_SET +~~~~~~~~~~~~~~~~~~~~ + +message_id: 0xC +protocol_id: 0x80 +This command is mandatory. + ++-----------------------+------------------------------------------------------+ +|Parameters | ++-----------------------+------------------------------------------------------+ +|Name |Description | ++-----------------------+------------------------------------------------------+ +|uint32 lmid |ID of the Logical Machine | ++-----------------------+------------------------------------------------------+ +|uint32 cpuid |ID of the CPU inside the LM | ++-----------------------+------------------------------------------------------+ +|uint32 flags |Reset vector flags | +| |Bits[31:0] Reserved, must be zero. 
| ++-----------------------+------------------------------------------------------+ +|uint32 resetVectorLow |Lower vector | ++-----------------------+------------------------------------------------------+ +|uint32 resetVectorHigh |Higher vector | ++-----------------------+------------------------------------------------------+ +|Return values | ++-----------------------+------------------------------------------------------+ +|Name |Description | ++-----------------------+------------------------------------------------------+ +|int32 status |SUCCESS: If reset vector is set successfully. | +| |NOT_FOUND: if lmid not points to a valid logical | +| |machine, or cpuId is not valid. | +| |INVALID_PARAMETERS: if reset vector is invalid. | +| |DENIED: if the agent does not have permission to set | +| |the reset vector for the CPU in the LM. | ++-----------------------+------------------------------------------------------+ + +NEGOTIATE_PROTOCOL_VERSION +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +message_id: 0x10 +protocol_id: 0x80 +This command is mandatory. + ++--------------------+---------------------------------------------------------+ +|Parameters | ++--------------------+---------------------------------------------------------+ +|Name |Description | ++--------------------+---------------------------------------------------------+ +|uint32 version |The negotiated protocol version the agent intends to use | ++--------------------+---------------------------------------------------------+ +|Return values | ++--------------------+---------------------------------------------------------+ +|Name |Description | ++--------------------+---------------------------------------------------------+ +|int32 status |SUCCESS: if the negotiated protocol version is supported | +| |by the platform. All commands, responses, and | +| |notifications post successful return of this command must| +| |comply with the negotiated version. | +| |NOT_SUPPORTED: if the protocol version is not supported. | ++--------------------+---------------------------------------------------------+ + +Notifications +_____________ + +LMM_EVENT +~~~~~~~~~ + +message_id: 0x0 +protocol_id: 0x80 + ++------------------+-----------------------------------------------------------+ +|Parameters | ++------------------+-----------------------------------------------------------+ +|Name |Description | ++------------------+-----------------------------------------------------------+ +|uint32 lmid |Identifier for the LM that caused the transition. | ++------------------+-----------------------------------------------------------+ +|uint32 eventlm |Identifier of the LM this event refers to. | ++------------------+-----------------------------------------------------------+ +|uint32 flags |LM events: | +| |Bits[31:3] Reserved, must be zero. | +| |Bit[3] Wake (resume) event: | +| |1 LM has awakened. | +| |0 not a wake event. | +| |Bit[2] Suspend (sleep) event: | +| |1 LM has suspended. | +| |0 not a suspend event. | +| |Bit[1] Shutdown (off) event: | +| |1 LM has shutdown. | +| |0 not a shutdown event. | +| |Bit[0] Boot (on) event: | +| |1 LM has booted. | +| |0 not a boot event. | ++------------------+-----------------------------------------------------------+ + SCMI_BBM: System Control and Management BBM Vendor Protocol ============================================================== @@ -436,6 +948,322 @@ protocol_id: 0x81 | |0 no button change detected. 
| +------------------+-----------------------------------------------------------+ +System Control and Management CPU Vendor Protocol +================================================= + +This protocol allows an agent to start or stop a CPU. It is used to manage +auxiliary CPUs in a target LM (e.g. additional cores in an AP cluster or +Cortex-M cores). +Note: + - For cores in the AP cluster, PSCI should be used, and the PSCI firmware +   will use the CPU protocol to handle them. For cores in a non-AP cluster, +   the operating system (e.g. Linux) can use the CPU protocol to control +   Cortex-M7 cores. + - CPU here means the core and its auxiliary peripherals (e.g. TCM) inside +   the i.MX SoC. + +There are cases where giving an agent full control of a CPU via the CPU +protocol is not desired. The LMM protocol is restricted to just boot, +shutdown, etc., so an agent can boot another logical machine without being +able to directly alter the state of its CPUs. This is also the reason there is +an LMM power-on command, even though that could have been done through the +power protocol. + +The CPU protocol provides commands to: + +- Describe the protocol version. +- Discover implementation attributes. +- Discover the CPUs defined in the system. +- Start a CPU. +- Stop a CPU. +- Set the boot and resume addresses for a CPU. +- Set the sleep mode of a CPU. +- Configure wake-up sources for a CPU. +- Configure power domain reactions (LPM mode and retention mask) for a CPU. +- The CPU IDs can be found in the CPU section of the SoC DEVICE: SM Device +  Interface. They can also be found in the SoC RM. See the CPU Mode Control +  (CMC) list in the General Power Controller (GPC) section. + +CPU settings are not aggregated, and setting their state is normally exclusive +to one client. + +Commands: +_________ + +PROTOCOL_VERSION +~~~~~~~~~~~~~~~~ + +message_id: 0x0 +protocol_id: 0x82 +This command is mandatory. + ++---------------+--------------------------------------------------------------+ +|Return values                                                                 | ++---------------+--------------------------------------------------------------+ +|Name           |Description                                                   | ++---------------+--------------------------------------------------------------+ +|int32 status   | See ARM SCMI Specification for status code definitions.      | ++---------------+--------------------------------------------------------------+ +|uint32 version | For this revision of the specification, this value must be   | +|               | 0x10000.                                                     | ++---------------+--------------------------------------------------------------+ + +PROTOCOL_ATTRIBUTES +~~~~~~~~~~~~~~~~~~~ + +message_id: 0x1 +protocol_id: 0x82 +This command is mandatory. + ++---------------+--------------------------------------------------------------+ +|Return values                                                                 | ++------------------+-----------------------------------------------------------+ +|Name              |Description                                                | ++------------------+-----------------------------------------------------------+ +|int32 status      | See ARM SCMI Specification for status code definitions.   | ++------------------+-----------------------------------------------------------+ +|uint32 attributes |Protocol attributes:                                       | +|                  |Bits[31:16] Reserved, must be zero.                        | +|                  |Bits[15:0] Number of CPUs                                  | ++------------------+-----------------------------------------------------------+ + +PROTOCOL_MESSAGE_ATTRIBUTES +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +message_id: 0x2 +protocol_id: 0x82 +This command is mandatory. 
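The start/stop and reset-vector commands described in this section are exposed to other kernel code by the sm-cpu.c wrapper added later in this series (scmi_imx_cpu_reset_vector_set(), scmi_imx_cpu_start(), scmi_imx_cpu_started()). Below is a minimal, hypothetical consumer that brings up a Cortex-M7 core; the CPU ID and load address are placeholders, and the boolean arguments are assumed (from the wrapper's prototype) to select the boot-time vector:

#include <linux/errno.h>
#include <linux/firmware/imx/sm.h>	/* scmi_imx_cpu_*() wrappers from sm-cpu.c */
#include <linux/types.h>

#define EXAMPLE_M7_CPUID	1	/* placeholder CPU ID, see the SoC RM */
#define EXAMPLE_M7_BOOT_ADDR	0x0U	/* placeholder boot address (e.g. TCM) */

static int example_boot_m7(void)
{
	bool started = false;
	int ret;

	/* Program the boot reset vector (assumption: boot=true, resume=false). */
	ret = scmi_imx_cpu_reset_vector_set(EXAMPLE_M7_CPUID, EXAMPLE_M7_BOOT_ADDR,
					    false, true, false);
	if (ret)
		return ret;

	/* CPU_START */
	ret = scmi_imx_cpu_start(EXAMPLE_M7_CPUID, true);
	if (ret)
		return ret;

	/* Cross-check that the SM reports the core as running. */
	ret = scmi_imx_cpu_started(EXAMPLE_M7_CPUID, &started);
	if (ret)
		return ret;

	return started ? 0 : -ENODEV;
}

Note that the wrappers return -EPROBE_DEFER until the underlying SCMI CPU protocol device has probed, so callers must be prepared to defer.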
+ ++---------------+--------------------------------------------------------------+ +|Return values | ++------------------+-----------------------------------------------------------+ +|Name |Description | ++------------------+-----------------------------------------------------------+ +|int32 status |SUCCESS: in case the message is implemented and available | +| |to use. | +| |NOT_FOUND: if the message identified by message_id is | +| |invalid or not implemented | ++------------------+-----------------------------------------------------------+ +|uint32 attributes |Flags that are associated with a specific command in the | +| |protocol. For all commands in this protocol, this | +| |parameter has a value of 0 | ++------------------+-----------------------------------------------------------+ + +CPU_ATTRIBUTES +~~~~~~~~~~~~~~ + +message_id: 0x4 +protocol_id: 0x82 +This command is mandatory. + ++------------------+-----------------------------------------------------------+ +|Parameters | ++------------------+-----------------------------------------------------------+ +|Name |Description | ++------------------+-----------------------------------------------------------+ +|uint32 cpuid |Identifier for the CPU | ++------------------+-----------------------------------------------------------+ +|Return values | ++------------------+-----------------------------------------------------------+ +|Name |Description | ++------------------+-----------------------------------------------------------+ +|int32 status |SUCCESS: if valid attributes are returned successfully. | +| |NOT_FOUND: if the cpuid is not valid. | ++------------------+-----------------------------------------------------------+ +|uint32 attributes |Bits[31:0] Reserved, must be zero | ++------------------+-----------------------------------------------------------+ +|char name[16] |NULL terminated ASCII string with CPU name up to 16 bytes | ++------------------+-----------------------------------------------------------+ + +CPU_START +~~~~~~~~~ + +message_id: 0x4 +protocol_id: 0x82 +This command is mandatory. + ++------------------+-----------------------------------------------------------+ +|Parameters | ++------------------+-----------------------------------------------------------+ +|Name |Description | ++------------------+-----------------------------------------------------------+ +|uint32 cpuid |Identifier for the CPU | ++------------------+-----------------------------------------------------------+ +|Return values | ++------------------+-----------------------------------------------------------+ +|Name |Description | ++------------------+-----------------------------------------------------------+ +|int32 status |SUCCESS: if the cpu is started successfully. | +| |NOT_FOUND: if cpuid is not valid. | +| |DENIED: the calling agent is not allowed to start this CPU.| ++------------------+-----------------------------------------------------------+ + +CPU_STOP +~~~~~~~~ + +message_id: 0x5 +protocol_id: 0x82 +This command is mandatory. 
+ ++------------------+-----------------------------------------------------------+ +|Parameters | ++------------------+-----------------------------------------------------------+ +|Name |Description | ++------------------+-----------------------------------------------------------+ +|uint32 cpuid |Identifier for the CPU | ++------------------+-----------------------------------------------------------+ +|Return values | ++------------------+-----------------------------------------------------------+ +|Name |Description | ++------------------+-----------------------------------------------------------+ +|int32 status |SUCCESS: if the cpu is started successfully. | +| |NOT_FOUND: if cpuid is not valid. | +| |DENIED: the calling agent is not allowed to stop this CPU. | ++------------------+-----------------------------------------------------------+ + +CPU_RESET_VECTOR_SET +~~~~~~~~~~~~~~~~~~~~ + +message_id: 0x6 +protocol_id: 0x82 +This command is mandatory. + ++----------------------+-------------------------------------------------------+ +|Parameters | ++----------------------+-------------------------------------------------------+ +|Name |Description | ++----------------------+-------------------------------------------------------+ +|uint32 cpuid |Identifier for the CPU | ++----------------------+-------------------------------------------------------+ +|uint32 flags |Reset vector flags: | +| |Bit[31] Resume flag. | +| |Set to 1 to update the reset vector used on resume. | +| |Bit[30] Boot flag. | +| |Set to 1 to update the reset vector used for boot. | +| |Bits[29:1] Reserved, must be zero. | +| |Bit[0] Table flag. | +| |Set to 1 if vector is the vector table base address. | ++----------------------+-------------------------------------------------------+ +|uint32 resetVectorLow |Lower vector: | +| |If bit[0] of flags is 0, the lower 32 bits of the | +| |physical address where the CPU should execute from on | +| |reset. If bit[0] of flags is 1, the lower 32 bits of | +| |the vector table base address | ++----------------------+-------------------------------------------------------+ +|uint32 resetVectorhigh|Upper vector: | +| |If bit[0] of flags is 0, the upper 32 bits of the | +| |physical address where the CPU should execute from on | +| |reset. If bit[0] of flags is 1, the upper 32 bits of | +| |the vector table base address | ++----------------------+-------------------------------------------------------+ +|Return values | ++----------------------+-------------------------------------------------------+ +|Name |Description | ++----------------------+-------------------------------------------------------+ +|int32 status |SUCCESS: if the CPU reset vector is set successfully. | +| |NOT_FOUND: if cpuId does not point to a valid CPU. | +| |INVALID_PARAMETERS: the requested vector type is not | +| |supported by this CPU. | +| |DENIED: the calling agent is not allowed to set the | +| |reset vector of this CPU | ++----------------------+-------------------------------------------------------+ + +CPU_SLEEP_MODE_SET +~~~~~~~~~~~~~~~~~~ + +message_id: 0x7 +protocol_id: 0x82 +This command is mandatory. 
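For reference, the flag layout documented in the CPU_RESET_VECTOR_SET table above can be restated as C macros. These definitions are illustrative only (they are not taken from the kernel headers); they simply encode the documented bit positions:

#include <linux/bits.h>

/* Illustrative only: bit positions from the CPU_RESET_VECTOR_SET table. */
#define EXAMPLE_CPU_VEC_FLAG_RESUME	BIT(31)	/* update the vector used on resume */
#define EXAMPLE_CPU_VEC_FLAG_BOOT	BIT(30)	/* update the vector used for boot */
#define EXAMPLE_CPU_VEC_FLAG_TABLE	BIT(0)	/* vector is a vector table base address */

/*
 * e.g. to program a plain boot entry point:
 *   flags           = EXAMPLE_CPU_VEC_FLAG_BOOT;
 *   resetVectorLow  = lower_32_bits(addr);
 *   resetVectorHigh = upper_32_bits(addr);
 */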
+ ++----------------------+-------------------------------------------------------+ +|Parameters                                                                    | ++----------------------+-------------------------------------------------------+ +|Name                  |Description                                            | ++----------------------+-------------------------------------------------------+ +|uint32 cpuid          |Identifier for the CPU                                 | ++----------------------+-------------------------------------------------------+ +|uint32 flags          |Sleep mode flags:                                      | +|                      |Bits[31:1] Reserved, must be zero.                     | +|                      |Bit[0] IRQ mux:                                        | +|                      |If set to 1 the wakeup mux source is the GIC, else if 0| +|                      |then the GPC                                           | ++----------------------+-------------------------------------------------------+ +|uint32 sleepmode      |Target sleep mode. When the CPU runs into WFI, the GPC | +|                      |mode will be triggered to enter one of the modes below:| +|                      |RUN: (0)                                               | +|                      |WAIT: (1)                                              | +|                      |STOP: (2)                                              | +|                      |SUSPEND: (3)                                           | ++----------------------+-------------------------------------------------------+ +|Return values                                                                 | ++----------------------+-------------------------------------------------------+ +|Name                  |Description                                            | ++----------------------+-------------------------------------------------------+ +|int32 status          |SUCCESS: if the CPU sleep mode is set successfully.    | +|                      |NOT_FOUND: if cpuId does not point to a valid CPU.     | +|                      |INVALID_PARAMETERS: the sleepmode or flags is invalid. | +|                      |DENIED: the calling agent is not allowed to configure  | +|                      |the CPU                                                | ++----------------------+-------------------------------------------------------+ + +CPU_INFO_GET +~~~~~~~~~~~~ + +message_id: 0xC +protocol_id: 0x82 +This command is mandatory. + ++----------------------+-------------------------------------------------------+ +|Parameters                                                                    | ++----------------------+-------------------------------------------------------+ +|Name                  |Description                                            | ++----------------------+-------------------------------------------------------+ +|uint32 cpuid          |Identifier for the CPU                                 | ++----------------------+-------------------------------------------------------+ +|Return values                                                                 | ++----------------------+-------------------------------------------------------+ +|Name                  |Description                                            | ++----------------------+-------------------------------------------------------+ +|int32 status          |SUCCESS: if valid attributes are returned successfully.| +|                      |NOT_FOUND: if the cpuid is not valid.                  | ++----------------------+-------------------------------------------------------+ +|uint32 runmode        |Run mode for the CPU                                   | +|                      |RUN(0):cpu started                                     | +|                      |HOLD(1):cpu powered up and reset asserted              | +|                      |STOP(2):cpu held in reset                              | +|                      |SUSPEND(3):in cpuidle state                            | ++----------------------+-------------------------------------------------------+ +|uint32 sleepmode      |Sleep mode for the CPU, see CPU_SLEEP_MODE_SET         | ++----------------------+-------------------------------------------------------+ +|uint32 resetvectorlow |Reset vector low 32 bits for the CPU                   | ++----------------------+-------------------------------------------------------+ +|uint32 resetvectorhigh|Reset vector high 32 bits for the CPU                  | ++----------------------+-------------------------------------------------------+ + +NEGOTIATE_PROTOCOL_VERSION +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +message_id: 0x10 +protocol_id: 0x82 +This command is mandatory. 
+ ++--------------------+---------------------------------------------------------+ +|Parameters | ++--------------------+---------------------------------------------------------+ +|Name |Description | ++--------------------+---------------------------------------------------------+ +|uint32 version |The negotiated protocol version the agent intends to use | ++--------------------+---------------------------------------------------------+ +|Return values | ++--------------------+---------------------------------------------------------+ +|Name |Description | ++--------------------+---------------------------------------------------------+ +|int32 status |SUCCESS: if the negotiated protocol version is supported | +| |by the platform. All commands, responses, and | +| |notifications post successful return of this command must| +| |comply with the negotiated version. | +| |NOT_SUPPORTED: if the protocol version is not supported. | ++--------------------+---------------------------------------------------------+ + SCMI_MISC: System Control and Management MISC Vendor Protocol ================================================================ diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig index 5fe61b9ab5f9..db8c5c03d3a2 100644 --- a/drivers/firmware/efi/Kconfig +++ b/drivers/firmware/efi/Kconfig @@ -281,6 +281,30 @@ config EFI_EMBEDDED_FIRMWARE bool select CRYPTO_LIB_SHA256 +config EFI_SBAT + def_bool y if EFI_SBAT_FILE!="" + +config EFI_SBAT_FILE + string "Embedded SBAT section file path" + depends on EFI_ZBOOT + help + SBAT section provides a way to improve SecureBoot revocations of UEFI + binaries by introducing a generation-based mechanism. With SBAT, older + UEFI binaries can be prevented from booting by bumping the minimal + required generation for the specific component in the bootloader. + + Note: SBAT information is distribution specific, i.e. the owner of the + signing SecureBoot certificate must define the SBAT policy. Linux + kernel upstream does not define SBAT components and their generations. + + See https://github.com/rhboot/shim/blob/main/SBAT.md for the additional + details. + + Specify a file with SBAT data which is going to be embedded as '.sbat' + section into the kernel. + + If unsure, leave blank. 
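For illustration only, an SBAT file is a small CSV as described in the shim documentation referenced in the help text above; the component name, generation and URL in the second line below are made up, since the actual values are defined by whoever signs the image:

    sbat,1,SBAT Version,sbat,1,https://github.com/rhboot/shim/blob/main/SBAT.md
    linux,1,Example Distro,linux,6.16-1.example,https://example.org/linux

Pointing CONFIG_EFI_SBAT_FILE at such a file causes it to be embedded verbatim as the '.sbat' PE section, as wired up in Makefile.zboot and zboot-header.S below.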
+ endmenu config UEFI_CPER diff --git a/drivers/firmware/efi/libstub/Makefile.zboot b/drivers/firmware/efi/libstub/Makefile.zboot index 48842b5c106b..92e3c73502ba 100644 --- a/drivers/firmware/efi/libstub/Makefile.zboot +++ b/drivers/firmware/efi/libstub/Makefile.zboot @@ -44,6 +44,10 @@ AFLAGS_zboot-header.o += -DMACHINE_TYPE=IMAGE_FILE_MACHINE_$(EFI_ZBOOT_MACH_TYPE $(obj)/zboot-header.o: $(srctree)/drivers/firmware/efi/libstub/zboot-header.S FORCE $(call if_changed_rule,as_o_S) +ifneq ($(CONFIG_EFI_SBAT_FILE),) +$(obj)/zboot-header.o: $(CONFIG_EFI_SBAT_FILE) +endif + ZBOOT_DEPS := $(obj)/zboot-header.o $(objtree)/drivers/firmware/efi/libstub/lib.a LDFLAGS_vmlinuz.efi.elf := -T $(srctree)/drivers/firmware/efi/libstub/zboot.lds diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c index fd6dc790c5a8..7aa2f9ad2935 100644 --- a/drivers/firmware/efi/libstub/efi-stub-helper.c +++ b/drivers/firmware/efi/libstub/efi-stub-helper.c @@ -601,6 +601,7 @@ efi_status_t efi_load_initrd_cmdline(efi_loaded_image_t *image, * @image: EFI loaded image protocol * @soft_limit: preferred address for loading the initrd * @hard_limit: upper limit address for loading the initrd + * @out: pointer to store the address of the initrd table * * Return: status code */ diff --git a/drivers/firmware/efi/libstub/zboot-header.S b/drivers/firmware/efi/libstub/zboot-header.S index fb676ded47fa..b6431edd0fc9 100644 --- a/drivers/firmware/efi/libstub/zboot-header.S +++ b/drivers/firmware/efi/libstub/zboot-header.S @@ -4,17 +4,17 @@ #ifdef CONFIG_64BIT .set .Lextra_characteristics, 0x0 - .set .Lpe_opt_magic, PE_OPT_MAGIC_PE32PLUS + .set .Lpe_opt_magic, IMAGE_NT_OPTIONAL_HDR64_MAGIC #else .set .Lextra_characteristics, IMAGE_FILE_32BIT_MACHINE - .set .Lpe_opt_magic, PE_OPT_MAGIC_PE32 + .set .Lpe_opt_magic, IMAGE_NT_OPTIONAL_HDR32_MAGIC #endif .section ".head", "a" .globl __efistub_efi_zboot_header __efistub_efi_zboot_header: .Ldoshdr: - .long MZ_MAGIC + .long IMAGE_DOS_SIGNATURE .ascii "zimg" // image type .long __efistub__gzdata_start - .Ldoshdr // payload offset .long __efistub__gzdata_size - ZBOOT_SIZE_LEN // payload size @@ -25,7 +25,7 @@ __efistub_efi_zboot_header: .long .Lpehdr - .Ldoshdr // PE header offset .Lpehdr: - .long PE_MAGIC + .long IMAGE_NT_SIGNATURE .short MACHINE_TYPE .short .Lsection_count .long 0 @@ -63,7 +63,7 @@ __efistub_efi_zboot_header: .long .Lefi_header_end - .Ldoshdr .long 0 .short IMAGE_SUBSYSTEM_EFI_APPLICATION - .short IMAGE_DLL_CHARACTERISTICS_NX_COMPAT + .short IMAGE_DLLCHARACTERISTICS_NX_COMPAT #ifdef CONFIG_64BIT .quad 0, 0, 0, 0 #else @@ -123,11 +123,29 @@ __efistub_efi_zboot_header: IMAGE_SCN_MEM_READ | \ IMAGE_SCN_MEM_EXECUTE +#ifdef CONFIG_EFI_SBAT + .ascii ".sbat\0\0\0" + .long __sbat_size + .long _sbat - .Ldoshdr + .long __sbat_size + .long _sbat - .Ldoshdr + + .long 0, 0 + .short 0, 0 + .long IMAGE_SCN_CNT_INITIALIZED_DATA | \ + IMAGE_SCN_MEM_READ | \ + IMAGE_SCN_MEM_DISCARDABLE + + .pushsection ".sbat", "a", @progbits + .incbin CONFIG_EFI_SBAT_FILE + .popsection +#endif + .ascii ".data\0\0\0" .long __data_size - .long _etext - .Ldoshdr + .long _data - .Ldoshdr .long __data_rawsize - .long _etext - .Ldoshdr + .long _data - .Ldoshdr .long 0, 0 .short 0, 0 diff --git a/drivers/firmware/efi/libstub/zboot.lds b/drivers/firmware/efi/libstub/zboot.lds index 9ecc57ff5b45..c3a166675450 100644 --- a/drivers/firmware/efi/libstub/zboot.lds +++ b/drivers/firmware/efi/libstub/zboot.lds @@ -29,7 +29,17 @@ SECTIONS . 
= _etext; } +#ifdef CONFIG_EFI_SBAT + .sbat : ALIGN(4096) { + _sbat = .; + *(.sbat) + _esbat = ALIGN(4096); + . = _esbat; + } +#endif + .data : ALIGN(4096) { + _data = .; *(.data* .init.data*) _edata = ALIGN(512); . = _edata; @@ -52,3 +62,4 @@ PROVIDE(__efistub__gzdata_size = PROVIDE(__data_rawsize = ABSOLUTE(_edata - _etext)); PROVIDE(__data_size = ABSOLUTE(_end - _etext)); +PROVIDE(__sbat_size = ABSOLUTE(_esbat - _sbat)); diff --git a/drivers/firmware/efi/memmap.c b/drivers/firmware/efi/memmap.c index 34109fd86c55..f1c04d7cfd71 100644 --- a/drivers/firmware/efi/memmap.c +++ b/drivers/firmware/efi/memmap.c @@ -43,7 +43,8 @@ int __init __efi_memmap_init(struct efi_memory_map_data *data) map.map = early_memremap(phys_map, data->size); if (!map.map) { - pr_err("Could not map the memory map!\n"); + pr_err("Could not map the memory map! phys_map=%pa, size=0x%lx\n", + &phys_map, data->size); return -ENOMEM; } diff --git a/drivers/firmware/efi/test/efi_test.c b/drivers/firmware/efi/test/efi_test.c index 9e2628728aad..77b5f7ac3e20 100644 --- a/drivers/firmware/efi/test/efi_test.c +++ b/drivers/firmware/efi/test/efi_test.c @@ -361,6 +361,10 @@ static long efi_runtime_get_waketime(unsigned long arg) getwakeuptime.enabled)) return -EFAULT; + if (getwakeuptime.pending && put_user(pending, + getwakeuptime.pending)) + return -EFAULT; + if (getwakeuptime.time) { if (copy_to_user(getwakeuptime.time, &efi_time, sizeof(efi_time_t))) diff --git a/drivers/firmware/imx/Kconfig b/drivers/firmware/imx/Kconfig index c964f4924359..127ad752acf8 100644 --- a/drivers/firmware/imx/Kconfig +++ b/drivers/firmware/imx/Kconfig @@ -23,6 +23,28 @@ config IMX_SCU This driver manages the IPC interface between host CPU and the SCU firmware running on M4. +config IMX_SCMI_CPU_DRV + tristate "IMX SCMI CPU Protocol driver" + depends on ARCH_MXC || COMPILE_TEST + default y if ARCH_MXC + help + The System Controller Management Interface firmware (SCMI FW) is + a low-level system function which runs on a dedicated Cortex-M + core that could provide cpu management features. + + This driver can also be built as a module. + +config IMX_SCMI_LMM_DRV + tristate "IMX SCMI LMM Protocol driver" + depends on ARCH_MXC || COMPILE_TEST + default y if ARCH_MXC + help + The System Controller Management Interface firmware (SCMI FW) is + a low-level system function which runs on a dedicated Cortex-M + core that could provide Logical Machine management features. + + This driver can also be built as a module. 
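The LMM operations are exported to other kernel code by the sm-lmm.c wrapper added below (scmi_imx_lmm_info(), scmi_imx_lmm_operation(), scmi_imx_lmm_reset_vector_set()). The following is a minimal sketch of a consumer querying, booting and then gracefully shutting down another logical machine; it assumes the prototypes and the scmi_imx_lmm_op values come from <linux/firmware/imx/sm.h>, and the LM ID is a placeholder:

#include <linux/bits.h>
#include <linux/firmware/imx/sm.h>	/* scmi_imx_lmm_*() wrappers from sm-lmm.c */
#include <linux/printk.h>

#define EXAMPLE_LM_ID	1	/* placeholder logical machine ID */

static int example_cycle_lm(void)
{
	struct scmi_imx_lmm_info info;
	int ret;

	/* LMM_ATTRIBUTES; returns -EPROBE_DEFER until the SCMI device probes. */
	ret = scmi_imx_lmm_info(EXAMPLE_LM_ID, &info);
	if (ret)
		return ret;

	pr_info("LM%u (%s): state %u\n", info.lmid, info.name, info.state);

	/* LMM_BOOT */
	ret = scmi_imx_lmm_operation(EXAMPLE_LM_ID, SCMI_IMX_LMM_BOOT, 0);
	if (ret)
		return ret;

	/* LMM_SHUTDOWN with Bit[0] set, i.e. a graceful (asynchronous) request. */
	return scmi_imx_lmm_operation(EXAMPLE_LM_ID, SCMI_IMX_LMM_SHUTDOWN, BIT(0));
}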
+ config IMX_SCMI_MISC_DRV tristate "IMX SCMI MISC Protocol driver" depends on ARCH_MXC || COMPILE_TEST diff --git a/drivers/firmware/imx/Makefile b/drivers/firmware/imx/Makefile index 8d046c341be8..3bbaffa6e347 100644 --- a/drivers/firmware/imx/Makefile +++ b/drivers/firmware/imx/Makefile @@ -1,4 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 obj-$(CONFIG_IMX_DSP) += imx-dsp.o obj-$(CONFIG_IMX_SCU) += imx-scu.o misc.o imx-scu-irq.o rm.o imx-scu-soc.o +obj-${CONFIG_IMX_SCMI_CPU_DRV} += sm-cpu.o obj-${CONFIG_IMX_SCMI_MISC_DRV} += sm-misc.o +obj-${CONFIG_IMX_SCMI_LMM_DRV} += sm-lmm.o diff --git a/drivers/firmware/imx/sm-cpu.c b/drivers/firmware/imx/sm-cpu.c new file mode 100644 index 000000000000..091b014f739f --- /dev/null +++ b/drivers/firmware/imx/sm-cpu.c @@ -0,0 +1,85 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright 2025 NXP + */ + +#include <linux/firmware/imx/sm.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/scmi_protocol.h> +#include <linux/scmi_imx_protocol.h> + +static const struct scmi_imx_cpu_proto_ops *imx_cpu_ops; +static struct scmi_protocol_handle *ph; + +int scmi_imx_cpu_reset_vector_set(u32 cpuid, u64 vector, bool start, bool boot, + bool resume) +{ + if (!ph) + return -EPROBE_DEFER; + + return imx_cpu_ops->cpu_reset_vector_set(ph, cpuid, vector, start, + boot, resume); +} +EXPORT_SYMBOL(scmi_imx_cpu_reset_vector_set); + +int scmi_imx_cpu_start(u32 cpuid, bool start) +{ + if (!ph) + return -EPROBE_DEFER; + + if (start) + return imx_cpu_ops->cpu_start(ph, cpuid, true); + + return imx_cpu_ops->cpu_start(ph, cpuid, false); +}; +EXPORT_SYMBOL(scmi_imx_cpu_start); + +int scmi_imx_cpu_started(u32 cpuid, bool *started) +{ + if (!ph) + return -EPROBE_DEFER; + + if (!started) + return -EINVAL; + + return imx_cpu_ops->cpu_started(ph, cpuid, started); +}; +EXPORT_SYMBOL(scmi_imx_cpu_started); + +static int scmi_imx_cpu_probe(struct scmi_device *sdev) +{ + const struct scmi_handle *handle = sdev->handle; + + if (!handle) + return -ENODEV; + + if (imx_cpu_ops) { + dev_err(&sdev->dev, "sm cpu already initialized\n"); + return -EEXIST; + } + + imx_cpu_ops = handle->devm_protocol_get(sdev, SCMI_PROTOCOL_IMX_CPU, &ph); + if (IS_ERR(imx_cpu_ops)) + return PTR_ERR(imx_cpu_ops); + + return 0; +} + +static const struct scmi_device_id scmi_id_table[] = { + { SCMI_PROTOCOL_IMX_CPU, "imx-cpu" }, + { }, +}; +MODULE_DEVICE_TABLE(scmi, scmi_id_table); + +static struct scmi_driver scmi_imx_cpu_driver = { + .name = "scmi-imx-cpu", + .probe = scmi_imx_cpu_probe, + .id_table = scmi_id_table, +}; +module_scmi_driver(scmi_imx_cpu_driver); + +MODULE_AUTHOR("Peng Fan <peng.fan@nxp.com>"); +MODULE_DESCRIPTION("IMX SM CPU driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/firmware/imx/sm-lmm.c b/drivers/firmware/imx/sm-lmm.c new file mode 100644 index 000000000000..6807bf563c03 --- /dev/null +++ b/drivers/firmware/imx/sm-lmm.c @@ -0,0 +1,91 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright 2025 NXP + */ + +#include <linux/firmware/imx/sm.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/scmi_protocol.h> +#include <linux/scmi_imx_protocol.h> + +static const struct scmi_imx_lmm_proto_ops *imx_lmm_ops; +static struct scmi_protocol_handle *ph; + +int scmi_imx_lmm_info(u32 lmid, struct scmi_imx_lmm_info *info) +{ + if (!ph) + return -EPROBE_DEFER; + + if (!info) + return -EINVAL; + + return imx_lmm_ops->lmm_info(ph, lmid, info); +}; +EXPORT_SYMBOL(scmi_imx_lmm_info); + +int 
scmi_imx_lmm_reset_vector_set(u32 lmid, u32 cpuid, u32 flags, u64 vector) +{ + if (!ph) + return -EPROBE_DEFER; + + return imx_lmm_ops->lmm_reset_vector_set(ph, lmid, cpuid, flags, vector); +} +EXPORT_SYMBOL(scmi_imx_lmm_reset_vector_set); + +int scmi_imx_lmm_operation(u32 lmid, enum scmi_imx_lmm_op op, u32 flags) +{ + if (!ph) + return -EPROBE_DEFER; + + switch (op) { + case SCMI_IMX_LMM_BOOT: + return imx_lmm_ops->lmm_power_boot(ph, lmid, true); + case SCMI_IMX_LMM_POWER_ON: + return imx_lmm_ops->lmm_power_boot(ph, lmid, false); + case SCMI_IMX_LMM_SHUTDOWN: + return imx_lmm_ops->lmm_shutdown(ph, lmid, flags); + default: + break; + } + + return -EINVAL; +} +EXPORT_SYMBOL(scmi_imx_lmm_operation); + +static int scmi_imx_lmm_probe(struct scmi_device *sdev) +{ + const struct scmi_handle *handle = sdev->handle; + + if (!handle) + return -ENODEV; + + if (imx_lmm_ops) { + dev_err(&sdev->dev, "lmm already initialized\n"); + return -EEXIST; + } + + imx_lmm_ops = handle->devm_protocol_get(sdev, SCMI_PROTOCOL_IMX_LMM, &ph); + if (IS_ERR(imx_lmm_ops)) + return PTR_ERR(imx_lmm_ops); + + return 0; +} + +static const struct scmi_device_id scmi_id_table[] = { + { SCMI_PROTOCOL_IMX_LMM, "imx-lmm" }, + { }, +}; +MODULE_DEVICE_TABLE(scmi, scmi_id_table); + +static struct scmi_driver scmi_imx_lmm_driver = { + .name = "scmi-imx-lmm", + .probe = scmi_imx_lmm_probe, + .id_table = scmi_id_table, +}; +module_scmi_driver(scmi_imx_lmm_driver); + +MODULE_AUTHOR("Peng Fan <peng.fan@nxp.com>"); +MODULE_DESCRIPTION("IMX SM LMM driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/firmware/qcom/qcom_scm.c b/drivers/firmware/qcom/qcom_scm.c index fc4d67e4c4a6..f63b716be5b0 100644 --- a/drivers/firmware/qcom/qcom_scm.c +++ b/drivers/firmware/qcom/qcom_scm.c @@ -1986,7 +1986,10 @@ EXPORT_SYMBOL_GPL(qcom_scm_qseecom_app_send); */ static const struct of_device_id qcom_scm_qseecom_allowlist[] __maybe_unused = { { .compatible = "asus,vivobook-s15" }, + { .compatible = "asus,zenbook-a14-ux3407qa" }, + { .compatible = "asus,zenbook-a14-ux3407ra" }, { .compatible = "dell,xps13-9345" }, + { .compatible = "hp,elitebook-ultra-g1q" }, { .compatible = "hp,omnibook-x14" }, { .compatible = "huawei,gaokun3" }, { .compatible = "lenovo,flex-5g" }, diff --git a/drivers/firmware/qcom/qcom_scm.h b/drivers/firmware/qcom/qcom_scm.h index 097369d38b84..3133d826f5fa 100644 --- a/drivers/firmware/qcom/qcom_scm.h +++ b/drivers/firmware/qcom/qcom_scm.h @@ -44,8 +44,11 @@ enum qcom_scm_arg_types { /** * struct qcom_scm_desc + * @svc: Service identifier + * @cmd: Command identifier * @arginfo: Metadata describing the arguments in args[] * @args: The array of arguments for the secure syscall + * @owner: Owner identifier */ struct qcom_scm_desc { u32 svc; diff --git a/drivers/firmware/qcom/qcom_tzmem.c b/drivers/firmware/qcom/qcom_tzmem.c index 92b365178235..94196ad87105 100644 --- a/drivers/firmware/qcom/qcom_tzmem.c +++ b/drivers/firmware/qcom/qcom_tzmem.c @@ -79,6 +79,7 @@ static const char *const qcom_tzmem_blacklist[] = { "qcom,sc8180x", "qcom,sdm670", /* failure in GPU firmware loading */ "qcom,sdm845", /* reset in rmtfs memory assignment */ + "qcom,sm7150", /* reset in rmtfs memory assignment */ "qcom,sm8150", /* reset in rmtfs memory assignment */ NULL }; diff --git a/drivers/firmware/samsung/exynos-acpm-pmic.c b/drivers/firmware/samsung/exynos-acpm-pmic.c index 85e90d236da2..39b33a356ebd 100644 --- a/drivers/firmware/samsung/exynos-acpm-pmic.c +++ b/drivers/firmware/samsung/exynos-acpm-pmic.c @@ -43,13 +43,13 @@ static inline u32 
acpm_pmic_get_bulk(u32 data, unsigned int i) return (data >> (ACPM_PMIC_BULK_SHIFT * i)) & ACPM_PMIC_BULK_MASK; } -static void acpm_pmic_set_xfer(struct acpm_xfer *xfer, u32 *cmd, +static void acpm_pmic_set_xfer(struct acpm_xfer *xfer, u32 *cmd, size_t cmdlen, unsigned int acpm_chan_id) { xfer->txd = cmd; xfer->rxd = cmd; - xfer->txlen = sizeof(cmd); - xfer->rxlen = sizeof(cmd); + xfer->txlen = cmdlen; + xfer->rxlen = cmdlen; xfer->acpm_chan_id = acpm_chan_id; } @@ -71,7 +71,7 @@ int acpm_pmic_read_reg(const struct acpm_handle *handle, int ret; acpm_pmic_init_read_cmd(cmd, type, reg, chan); - acpm_pmic_set_xfer(&xfer, cmd, acpm_chan_id); + acpm_pmic_set_xfer(&xfer, cmd, sizeof(cmd), acpm_chan_id); ret = acpm_do_xfer(handle, &xfer); if (ret) @@ -104,7 +104,7 @@ int acpm_pmic_bulk_read(const struct acpm_handle *handle, return -EINVAL; acpm_pmic_init_bulk_read_cmd(cmd, type, reg, chan, count); - acpm_pmic_set_xfer(&xfer, cmd, acpm_chan_id); + acpm_pmic_set_xfer(&xfer, cmd, sizeof(cmd), acpm_chan_id); ret = acpm_do_xfer(handle, &xfer); if (ret) @@ -144,7 +144,7 @@ int acpm_pmic_write_reg(const struct acpm_handle *handle, int ret; acpm_pmic_init_write_cmd(cmd, type, reg, chan, value); - acpm_pmic_set_xfer(&xfer, cmd, acpm_chan_id); + acpm_pmic_set_xfer(&xfer, cmd, sizeof(cmd), acpm_chan_id); ret = acpm_do_xfer(handle, &xfer); if (ret) @@ -184,7 +184,7 @@ int acpm_pmic_bulk_write(const struct acpm_handle *handle, return -EINVAL; acpm_pmic_init_bulk_write_cmd(cmd, type, reg, chan, count, buf); - acpm_pmic_set_xfer(&xfer, cmd, acpm_chan_id); + acpm_pmic_set_xfer(&xfer, cmd, sizeof(cmd), acpm_chan_id); ret = acpm_do_xfer(handle, &xfer); if (ret) @@ -214,7 +214,7 @@ int acpm_pmic_update_reg(const struct acpm_handle *handle, int ret; acpm_pmic_init_update_cmd(cmd, type, reg, chan, value, mask); - acpm_pmic_set_xfer(&xfer, cmd, acpm_chan_id); + acpm_pmic_set_xfer(&xfer, cmd, sizeof(cmd), acpm_chan_id); ret = acpm_do_xfer(handle, &xfer); if (ret) diff --git a/drivers/firmware/samsung/exynos-acpm.c b/drivers/firmware/samsung/exynos-acpm.c index 15e991b99f5a..e02f14f4bd7c 100644 --- a/drivers/firmware/samsung/exynos-acpm.c +++ b/drivers/firmware/samsung/exynos-acpm.c @@ -15,6 +15,7 @@ #include <linux/firmware/samsung/exynos-acpm-protocol.h> #include <linux/io.h> #include <linux/iopoll.h> +#include <linux/ktime.h> #include <linux/mailbox/exynos-message.h> #include <linux/mailbox_client.h> #include <linux/module.h> @@ -32,8 +33,7 @@ #define ACPM_PROTOCOL_SEQNUM GENMASK(21, 16) -/* The unit of counter is 20 us. 5000 * 20 = 100 ms */ -#define ACPM_POLL_TIMEOUT 5000 +#define ACPM_POLL_TIMEOUT_US (100 * USEC_PER_MSEC) #define ACPM_TX_TIMEOUT_US 500000 #define ACPM_GS101_INITDATA_BASE 0xa000 @@ -300,12 +300,13 @@ static int acpm_dequeue_by_polling(struct acpm_chan *achan, const struct acpm_xfer *xfer) { struct device *dev = achan->acpm->dev; - unsigned int cnt_20us = 0; + ktime_t timeout; u32 seqnum; int ret; seqnum = FIELD_GET(ACPM_PROTOCOL_SEQNUM, xfer->txd[0]); + timeout = ktime_add_us(ktime_get(), ACPM_POLL_TIMEOUT_US); do { ret = acpm_get_rx(achan, xfer); if (ret) @@ -315,12 +316,11 @@ static int acpm_dequeue_by_polling(struct acpm_chan *achan, return 0; /* Determined experimentally. */ - usleep_range(20, 30); - cnt_20us++; - } while (cnt_20us < ACPM_POLL_TIMEOUT); + udelay(20); + } while (ktime_before(ktime_get(), timeout)); - dev_err(dev, "Timeout! ch:%u s:%u bitmap:%lx, cnt_20us = %d.\n", - achan->id, seqnum, achan->bitmap_seqnum[0], cnt_20us); + dev_err(dev, "Timeout! 
ch:%u s:%u bitmap:%lx.\n", + achan->id, seqnum, achan->bitmap_seqnum[0]); return -ETIME; } @@ -649,7 +649,7 @@ static int acpm_probe(struct platform_device *pdev) platform_set_drvdata(pdev, acpm); - return 0; + return devm_of_platform_populate(dev); } /** @@ -677,43 +677,30 @@ static void devm_acpm_release(struct device *dev, void *res) } /** - * acpm_get_by_phandle() - get the ACPM handle using DT phandle. - * @dev: device pointer requesting ACPM handle. - * @property: property name containing phandle on ACPM node. + * acpm_get_by_node() - get the ACPM handle using node pointer. + * @dev: device pointer requesting ACPM handle. + * @np: ACPM device tree node. * * Return: pointer to handle on success, ERR_PTR(-errno) otherwise. */ -static const struct acpm_handle *acpm_get_by_phandle(struct device *dev, - const char *property) +static const struct acpm_handle *acpm_get_by_node(struct device *dev, + struct device_node *np) { struct platform_device *pdev; - struct device_node *acpm_np; struct device_link *link; struct acpm_info *acpm; - acpm_np = of_parse_phandle(dev->of_node, property, 0); - if (!acpm_np) - return ERR_PTR(-ENODEV); - - pdev = of_find_device_by_node(acpm_np); - if (!pdev) { - dev_err(dev, "Cannot find device node %s\n", acpm_np->name); - of_node_put(acpm_np); + pdev = of_find_device_by_node(np); + if (!pdev) return ERR_PTR(-EPROBE_DEFER); - } - - of_node_put(acpm_np); acpm = platform_get_drvdata(pdev); if (!acpm) { - dev_err(dev, "Cannot get drvdata from %s\n", - dev_name(&pdev->dev)); platform_device_put(pdev); return ERR_PTR(-EPROBE_DEFER); } if (!try_module_get(pdev->dev.driver->owner)) { - dev_err(dev, "Cannot get module reference.\n"); platform_device_put(pdev); return ERR_PTR(-EPROBE_DEFER); } @@ -732,14 +719,14 @@ static const struct acpm_handle *acpm_get_by_phandle(struct device *dev, } /** - * devm_acpm_get_by_phandle() - managed get handle using phandle. - * @dev: device pointer requesting ACPM handle. - * @property: property name containing phandle on ACPM node. + * devm_acpm_get_by_node() - managed get handle using node pointer. + * @dev: device pointer requesting ACPM handle. + * @np: ACPM device tree node. * * Return: pointer to handle on success, ERR_PTR(-errno) otherwise. 
*/ -const struct acpm_handle *devm_acpm_get_by_phandle(struct device *dev, - const char *property) +const struct acpm_handle *devm_acpm_get_by_node(struct device *dev, + struct device_node *np) { const struct acpm_handle **ptr, *handle; @@ -747,7 +734,7 @@ const struct acpm_handle *devm_acpm_get_by_phandle(struct device *dev, if (!ptr) return ERR_PTR(-ENOMEM); - handle = acpm_get_by_phandle(dev, property); + handle = acpm_get_by_node(dev, np); if (!IS_ERR(handle)) { *ptr = handle; devres_add(dev, ptr); @@ -757,6 +744,7 @@ const struct acpm_handle *devm_acpm_get_by_phandle(struct device *dev, return handle; } +EXPORT_SYMBOL_GPL(devm_acpm_get_by_node); static const struct acpm_match_data acpm_gs101 = { .initdata_base = ACPM_GS101_INITDATA_BASE, diff --git a/drivers/firmware/ti_sci.c b/drivers/firmware/ti_sci.c index 806a975fff22..ae5fd1936ad3 100644 --- a/drivers/firmware/ti_sci.c +++ b/drivers/firmware/ti_sci.c @@ -2,7 +2,7 @@ /* * Texas Instruments System Control Interface Protocol Driver * - * Copyright (C) 2015-2024 Texas Instruments Incorporated - https://www.ti.com/ + * Copyright (C) 2015-2025 Texas Instruments Incorporated - https://www.ti.com/ * Nishanth Menon */ @@ -3670,6 +3670,7 @@ static int __maybe_unused ti_sci_suspend(struct device *dev) struct ti_sci_info *info = dev_get_drvdata(dev); struct device *cpu_dev, *cpu_dev_max = NULL; s32 val, cpu_lat = 0; + u16 cpu_lat_ms; int i, ret; if (info->fw_caps & MSG_FLAG_CAPS_LPM_DM_MANAGED) { @@ -3682,9 +3683,16 @@ static int __maybe_unused ti_sci_suspend(struct device *dev) } } if (cpu_dev_max) { - dev_dbg(cpu_dev_max, "%s: sending max CPU latency=%u\n", __func__, cpu_lat); + /* + * PM QoS latency unit is usecs, device manager uses msecs. + * Convert to msecs and round down for device manager. + */ + cpu_lat_ms = cpu_lat / USEC_PER_MSEC; + dev_dbg(cpu_dev_max, "%s: sending max CPU latency=%u ms\n", __func__, + cpu_lat_ms); ret = ti_sci_cmd_set_latency_constraint(&info->handle, - cpu_lat, TISCI_MSG_CONSTRAINT_SET); + cpu_lat_ms, + TISCI_MSG_CONSTRAINT_SET); if (ret) return ret; } diff --git a/drivers/firmware/turris-mox-rwtm.c b/drivers/firmware/turris-mox-rwtm.c index 47fe6261f5a3..1eac9948148f 100644 --- a/drivers/firmware/turris-mox-rwtm.c +++ b/drivers/firmware/turris-mox-rwtm.c @@ -2,29 +2,31 @@ /* * Turris Mox rWTM firmware driver * - * Copyright (C) 2019, 2024 Marek Behún <kabel@kernel.org> + * Copyright (C) 2019, 2024, 2025 Marek Behún <kabel@kernel.org> */ #include <crypto/sha2.h> #include <linux/align.h> #include <linux/armada-37xx-rwtm-mailbox.h> +#include <linux/cleanup.h> #include <linux/completion.h> #include <linux/container_of.h> -#include <linux/debugfs.h> #include <linux/device.h> #include <linux/dma-mapping.h> #include <linux/err.h> -#include <linux/fs.h> #include <linux/hw_random.h> #include <linux/if_ether.h> +#include <linux/key.h> #include <linux/kobject.h> #include <linux/mailbox_client.h> +#include <linux/math.h> #include <linux/minmax.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/platform_device.h> #include <linux/sizes.h> #include <linux/sysfs.h> +#include <linux/turris-signing-key.h> #include <linux/types.h> #define DRIVER_NAME "turris-mox-rwtm" @@ -37,10 +39,13 @@ * https://gitlab.labs.nic.cz/turris/mox-boot-builder/tree/master/wtmi. 
*/ -#define MOX_ECC_NUMBER_WORDS 17 -#define MOX_ECC_NUMBER_LEN (MOX_ECC_NUMBER_WORDS * sizeof(u32)) - -#define MOX_ECC_SIGNATURE_WORDS (2 * MOX_ECC_NUMBER_WORDS) +enum { + MOX_ECC_NUM_BITS = 521, + MOX_ECC_NUM_LEN = DIV_ROUND_UP(MOX_ECC_NUM_BITS, 8), + MOX_ECC_NUM_WORDS = DIV_ROUND_UP(MOX_ECC_NUM_BITS, 32), + MOX_ECC_SIG_LEN = 2 * MOX_ECC_NUM_LEN, + MOX_ECC_PUBKEY_LEN = 1 + MOX_ECC_NUM_LEN, +}; #define MBOX_STS_SUCCESS (0 << 30) #define MBOX_STS_FAIL (1 << 30) @@ -77,10 +82,7 @@ enum mbox_cmd { * @ram_size: RAM size of the device * @mac_address1: first MAC address of the device * @mac_address2: second MAC address of the device - * @has_pubkey: whether board ECDSA public key is present * @pubkey: board ECDSA public key - * @last_sig: last ECDSA signature generated with board ECDSA private key - * @last_sig_done: whether the last ECDSA signing is complete */ struct mox_rwtm { struct mbox_client mbox_client; @@ -100,18 +102,8 @@ struct mox_rwtm { int board_version, ram_size; u8 mac_address1[ETH_ALEN], mac_address2[ETH_ALEN]; - bool has_pubkey; - u8 pubkey[135]; - -#ifdef CONFIG_DEBUG_FS - /* - * Signature process. This is currently done via debugfs, because it - * does not conform to the sysfs standard "one file per attribute". - * It should be rewritten via crypto API once akcipher API is available - * from userspace. - */ - u32 last_sig[MOX_ECC_SIGNATURE_WORDS]; - bool last_sig_done; +#ifdef CONFIG_TURRIS_MOX_RWTM_KEYCTL + u8 pubkey[MOX_ECC_PUBKEY_LEN]; #endif }; @@ -120,24 +112,23 @@ static inline struct device *rwtm_dev(struct mox_rwtm *rwtm) return rwtm->mbox_client.dev; } -#define MOX_ATTR_RO(name, format, cat) \ +#define MOX_ATTR_RO(name, format) \ static ssize_t \ name##_show(struct device *dev, struct device_attribute *a, \ char *buf) \ { \ struct mox_rwtm *rwtm = dev_get_drvdata(dev); \ - if (!rwtm->has_##cat) \ + if (!rwtm->has_board_info) \ return -ENODATA; \ return sysfs_emit(buf, format, rwtm->name); \ } \ static DEVICE_ATTR_RO(name) -MOX_ATTR_RO(serial_number, "%016llX\n", board_info); -MOX_ATTR_RO(board_version, "%i\n", board_info); -MOX_ATTR_RO(ram_size, "%i\n", board_info); -MOX_ATTR_RO(mac_address1, "%pM\n", board_info); -MOX_ATTR_RO(mac_address2, "%pM\n", board_info); -MOX_ATTR_RO(pubkey, "%s\n", pubkey); +MOX_ATTR_RO(serial_number, "%016llX\n"); +MOX_ATTR_RO(board_version, "%i\n"); +MOX_ATTR_RO(ram_size, "%i\n"); +MOX_ATTR_RO(mac_address1, "%pM\n"); +MOX_ATTR_RO(mac_address2, "%pM\n"); static struct attribute *turris_mox_rwtm_attrs[] = { &dev_attr_serial_number.attr, @@ -145,7 +136,6 @@ static struct attribute *turris_mox_rwtm_attrs[] = { &dev_attr_ram_size.attr, &dev_attr_mac_address1.attr, &dev_attr_mac_address2.attr, - &dev_attr_pubkey.attr, NULL }; ATTRIBUTE_GROUPS(turris_mox_rwtm); @@ -247,24 +237,6 @@ static int mox_get_board_info(struct mox_rwtm *rwtm) pr_info(" burned RAM size %i MiB\n", rwtm->ram_size); } - ret = mox_rwtm_exec(rwtm, MBOX_CMD_ECDSA_PUB_KEY, NULL, false); - if (ret == -ENODATA) { - dev_warn(dev, "Board has no public key burned!\n"); - } else if (ret == -EOPNOTSUPP) { - dev_notice(dev, - "Firmware does not support the ECDSA_PUB_KEY command\n"); - } else if (ret < 0) { - return ret; - } else { - u32 *s = reply->status; - - rwtm->has_pubkey = true; - sprintf(rwtm->pubkey, - "%06x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x", - ret, s[0], s[1], s[2], s[3], s[4], s[5], s[6], s[7], - s[8], s[9], s[10], s[11], s[12], s[13], s[14], s[15]); - } - return 0; } @@ -306,127 +278,139 @@ unlock_mutex: return ret; } -#ifdef CONFIG_DEBUG_FS 
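
The enum introduced above derives every buffer size in the turris-mox-rwtm driver from the 521-bit curve order instead of hard-coding 17-word constants. A quick standalone check of that arithmetic (plain userspace C, not kernel code; DIV_ROUND_UP is redefined locally to mirror the kernel macro, and the exact meaning of the extra public-key prefix byte is left to the firmware documentation):

#include <assert.h>
#include <stdio.h>

/* Local stand-in for the kernel's DIV_ROUND_UP() macro. */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	const int num_bits   = 521;                        /* P-521 scalar width   */
	const int num_len    = DIV_ROUND_UP(num_bits, 8);  /* bytes per number     */
	const int num_words  = DIV_ROUND_UP(num_bits, 32); /* u32 words per number */
	const int sig_len    = 2 * num_len;                /* R followed by S      */
	const int pubkey_len = 1 + num_len;                /* prefix byte + number */

	printf("num_len=%d num_words=%d sig_len=%d pubkey_len=%d\n",
	       num_len, num_words, sig_len, pubkey_len);

	/* Matches MOX_ECC_NUM_LEN, MOX_ECC_NUM_WORDS, MOX_ECC_SIG_LEN and
	 * MOX_ECC_PUBKEY_LEN above: 66, 17, 132 and 67. */
	assert(num_len == 66 && num_words == 17);
	assert(sig_len == 132 && pubkey_len == 67);
	return 0;
}
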
-static int rwtm_debug_open(struct inode *inode, struct file *file) -{ - file->private_data = inode->i_private; +#ifdef CONFIG_TURRIS_MOX_RWTM_KEYCTL - return nonseekable_open(inode, file); -} - -static ssize_t do_sign_read(struct file *file, char __user *buf, size_t len, - loff_t *ppos) +static void mox_ecc_number_to_bin(void *dst, const u32 *src) { - struct mox_rwtm *rwtm = file->private_data; - ssize_t ret; + __be32 tmp[MOX_ECC_NUM_WORDS]; - /* only allow one read, of whole signature, from position 0 */ - if (*ppos != 0) - return 0; + cpu_to_be32_array(tmp, src, MOX_ECC_NUM_WORDS); - if (len < sizeof(rwtm->last_sig)) - return -EINVAL; + memcpy(dst, (void *)tmp + 2, MOX_ECC_NUM_LEN); +} - if (!rwtm->last_sig_done) - return -ENODATA; +static void mox_ecc_public_key_to_bin(void *dst, u32 src_first, + const u32 *src_rest) +{ + __be32 tmp[MOX_ECC_NUM_WORDS - 1]; + u8 *p = dst; - ret = simple_read_from_buffer(buf, len, ppos, rwtm->last_sig, - sizeof(rwtm->last_sig)); - rwtm->last_sig_done = false; + /* take 3 bytes from the first word */ + *p++ = src_first >> 16; + *p++ = src_first >> 8; + *p++ = src_first; - return ret; + /* take the rest of the words */ + cpu_to_be32_array(tmp, src_rest, MOX_ECC_NUM_WORDS - 1); + memcpy(p, tmp, sizeof(tmp)); } -static ssize_t do_sign_write(struct file *file, const char __user *buf, - size_t len, loff_t *ppos) +static int mox_rwtm_sign(const struct key *key, const void *data, void *signature) { - struct mox_rwtm *rwtm = file->private_data; - struct armada_37xx_rwtm_tx_msg msg; - loff_t dummy = 0; - ssize_t ret; - - if (len != SHA512_DIGEST_SIZE) - return -EINVAL; - - /* if last result is not zero user has not read that information yet */ - if (rwtm->last_sig_done) - return -EBUSY; + struct mox_rwtm *rwtm = dev_get_drvdata(turris_signing_key_get_dev(key)); + struct armada_37xx_rwtm_tx_msg msg = {}; + u32 offset_r, offset_s; + int ret; - if (!mutex_trylock(&rwtm->busy)) - return -EBUSY; + guard(mutex)(&rwtm->busy); /* - * Here we have to send: - * 1. Address of the input to sign. - * The input is an array of 17 32-bit words, the first (most - * significat) is 0, the rest 16 words are copied from the SHA-512 - * hash given by the user and converted from BE to LE. - * 2. Address of the buffer where ECDSA signature value R shall be - * stored by the rWTM firmware. - * 3. Address of the buffer where ECDSA signature value S shall be - * stored by the rWTM firmware. + * For MBOX_CMD_SIGN command: + * args[0] - must be 1 + * args[1] - address of message M to sign; message is a 521-bit number + * args[2] - address where the R part of the signature will be stored + * args[3] - address where the S part of the signature will be stored + * + * M, R and S are 521-bit numbers encoded as seventeen 32-bit words, + * most significat word first. + * Since the message in @data is a sha512 digest, the most significat + * word is always zero. 
*/ + + offset_r = MOX_ECC_NUM_WORDS * sizeof(u32); + offset_s = 2 * MOX_ECC_NUM_WORDS * sizeof(u32); + memset(rwtm->buf, 0, sizeof(u32)); - ret = simple_write_to_buffer(rwtm->buf + sizeof(u32), - SHA512_DIGEST_SIZE, &dummy, buf, len); - if (ret < 0) - goto unlock_mutex; - be32_to_cpu_array(rwtm->buf, rwtm->buf, MOX_ECC_NUMBER_WORDS); + memcpy(rwtm->buf + sizeof(u32), data, SHA512_DIGEST_SIZE); + be32_to_cpu_array(rwtm->buf, rwtm->buf, MOX_ECC_NUM_WORDS); msg.args[0] = 1; msg.args[1] = rwtm->buf_phys; - msg.args[2] = rwtm->buf_phys + MOX_ECC_NUMBER_LEN; - msg.args[3] = rwtm->buf_phys + 2 * MOX_ECC_NUMBER_LEN; + msg.args[2] = rwtm->buf_phys + offset_r; + msg.args[3] = rwtm->buf_phys + offset_s; ret = mox_rwtm_exec(rwtm, MBOX_CMD_SIGN, &msg, true); if (ret < 0) - goto unlock_mutex; + return ret; - /* - * Here we read the R and S values of the ECDSA signature - * computed by the rWTM firmware and convert their words from - * LE to BE. - */ - memcpy(rwtm->last_sig, rwtm->buf + MOX_ECC_NUMBER_LEN, - sizeof(rwtm->last_sig)); - cpu_to_be32_array(rwtm->last_sig, rwtm->last_sig, - MOX_ECC_SIGNATURE_WORDS); - rwtm->last_sig_done = true; + /* convert R and S parts of the signature */ + mox_ecc_number_to_bin(signature, rwtm->buf + offset_r); + mox_ecc_number_to_bin(signature + MOX_ECC_NUM_LEN, rwtm->buf + offset_s); - mutex_unlock(&rwtm->busy); - return len; -unlock_mutex: - mutex_unlock(&rwtm->busy); - return ret; + return 0; } -static const struct file_operations do_sign_fops = { - .owner = THIS_MODULE, - .open = rwtm_debug_open, - .read = do_sign_read, - .write = do_sign_write, -}; - -static void rwtm_debugfs_release(void *root) +static const void *mox_rwtm_get_public_key(const struct key *key) { - debugfs_remove_recursive(root); + struct mox_rwtm *rwtm = dev_get_drvdata(turris_signing_key_get_dev(key)); + + return rwtm->pubkey; } -static void rwtm_register_debugfs(struct mox_rwtm *rwtm) +static const struct turris_signing_key_subtype mox_signing_key_subtype = { + .key_size = MOX_ECC_NUM_BITS, + .data_size = SHA512_DIGEST_SIZE, + .sig_size = MOX_ECC_SIG_LEN, + .public_key_size = MOX_ECC_PUBKEY_LEN, + .hash_algo = "sha512", + .get_public_key = mox_rwtm_get_public_key, + .sign = mox_rwtm_sign, +}; + +static int mox_register_signing_key(struct mox_rwtm *rwtm) { - struct dentry *root; + struct armada_37xx_rwtm_rx_msg *reply = &rwtm->reply; + struct device *dev = rwtm_dev(rwtm); + int ret; - root = debugfs_create_dir("turris-mox-rwtm", NULL); + ret = mox_rwtm_exec(rwtm, MBOX_CMD_ECDSA_PUB_KEY, NULL, false); + if (ret == -ENODATA) { + dev_warn(dev, "Board has no public key burned!\n"); + } else if (ret == -EOPNOTSUPP) { + dev_notice(dev, + "Firmware does not support the ECDSA_PUB_KEY command\n"); + } else if (ret < 0) { + return ret; + } else { + char sn[17] = "unknown"; + char desc[46]; + + if (rwtm->has_board_info) + sprintf(sn, "%016llX", rwtm->serial_number); + + sprintf(desc, "Turris MOX SN %s rWTM ECDSA key", sn); - debugfs_create_file_unsafe("do_sign", 0600, root, rwtm, &do_sign_fops); + mox_ecc_public_key_to_bin(rwtm->pubkey, ret, reply->status); - devm_add_action_or_reset(rwtm_dev(rwtm), rwtm_debugfs_release, root); + ret = devm_turris_signing_key_create(dev, + &mox_signing_key_subtype, + desc); + if (ret) + return dev_err_probe(dev, ret, + "Cannot create signing key\n"); + } + + return 0; } -#else -static inline void rwtm_register_debugfs(struct mox_rwtm *rwtm) + +#else /* CONFIG_TURRIS_MOX_RWTM_KEYCTL */ + +static inline int mox_register_signing_key(struct mox_rwtm *rwtm) { + return 0; } 
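
The helpers added above shuttle 521-bit numbers between the firmware's seventeen-word representation and the 66-byte big-endian form handed to the signing-key subtype. A rough userspace sketch of that packing, assuming host-order input words with the most significant word first and using htonl() as a stand-in for cpu_to_be32(); the word count and the two skipped top bytes mirror the driver, but this is only an illustration, not the kernel code:

#include <arpa/inet.h>   /* htonl() stands in for cpu_to_be32() */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NUM_WORDS 17     /* seventeen 32-bit words hold a 521-bit number */
#define NUM_LEN   66     /* DIV_ROUND_UP(521, 8) bytes                   */

/* Pack 17 words (most significant word first) into a 66-byte big-endian
 * buffer, dropping the two unused top bytes of the first word. */
static void ecc_number_to_bin(uint8_t *dst, const uint32_t *src)
{
	uint32_t tmp[NUM_WORDS];

	for (int i = 0; i < NUM_WORDS; i++)
		tmp[i] = htonl(src[i]);

	memcpy(dst, (const uint8_t *)tmp + 2, NUM_LEN);
}

int main(void)
{
	uint32_t words[NUM_WORDS] = { 0x00000123, 0x456789ab }; /* rest zero */
	uint8_t bin[NUM_LEN];

	ecc_number_to_bin(bin, words);
	printf("%02x %02x %02x %02x %02x %02x ...\n",
	       bin[0], bin[1], bin[2], bin[3], bin[4], bin[5]);
	/* prints: 01 23 45 67 89 ab ... */
	return 0;
}
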
-#endif + +#endif /* !CONFIG_TURRIS_MOX_RWTM_KEYCTL */ static void rwtm_devm_mbox_release(void *mbox) { @@ -477,6 +461,10 @@ static int turris_mox_rwtm_probe(struct platform_device *pdev) if (ret < 0) dev_warn(dev, "Cannot read board information: %i\n", ret); + ret = mox_register_signing_key(rwtm); + if (ret < 0) + return ret; + ret = check_get_random_support(rwtm); if (ret < 0) { dev_notice(dev, @@ -491,8 +479,6 @@ static int turris_mox_rwtm_probe(struct platform_device *pdev) if (ret) return dev_err_probe(dev, ret, "Cannot register HWRNG!\n"); - rwtm_register_debugfs(rwtm); - dev_info(dev, "HWRNG successfully registered\n"); /* diff --git a/drivers/gpu/drm/i915/i915_mm.c b/drivers/gpu/drm/i915/i915_mm.c index 76e2801619f0..c33bd3d83069 100644 --- a/drivers/gpu/drm/i915/i915_mm.c +++ b/drivers/gpu/drm/i915/i915_mm.c @@ -100,7 +100,7 @@ int remap_io_mapping(struct vm_area_struct *vma, GEM_BUG_ON((vma->vm_flags & EXPECTED_FLAGS) != EXPECTED_FLAGS); - /* We rely on prevalidation of the io-mapping to skip track_pfn(). */ + /* We rely on prevalidation of the io-mapping to skip pfnmap tracking. */ r.mm = vma->vm_mm; r.pfn = pfn; r.prot = __pgprot((pgprot_val(iomap->prot) & _PAGE_CACHE_MASK) | @@ -140,7 +140,7 @@ int remap_io_sg(struct vm_area_struct *vma, }; int err; - /* We rely on prevalidation of the io-mapping to skip track_pfn(). */ + /* We rely on prevalidation of the io-mapping to skip pfnmap tracking. */ GEM_BUG_ON((vma->vm_flags & EXPECTED_FLAGS) != EXPECTED_FLAGS); while (offset >= r.sgt.max >> PAGE_SHIFT) { diff --git a/drivers/gpu/drm/panel/panel-samsung-sofef00.c b/drivers/gpu/drm/panel/panel-samsung-sofef00.c index 210a25afe82b..d92ae6b6100f 100644 --- a/drivers/gpu/drm/panel/panel-samsung-sofef00.c +++ b/drivers/gpu/drm/panel/panel-samsung-sofef00.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only -/* Copyright (c) 2020 Caleb Connolly <caleb@connolly.tech> +/* Copyright (c) 2020 Casey Connolly <casey.connolly@linaro.org> * Generated with linux-mdss-dsi-panel-driver-generator from vendor device tree: * Copyright (c) 2020, The Linux Foundation. All rights reserved. 
*/ @@ -260,6 +260,6 @@ static struct mipi_dsi_driver sofef00_panel_driver = { module_mipi_dsi_driver(sofef00_panel_driver); -MODULE_AUTHOR("Caleb Connolly <caleb@connolly.tech>"); +MODULE_AUTHOR("Casey Connolly <casey.connolly@linaro.org>"); MODULE_DESCRIPTION("DRM driver for Samsung AMOLED DSI panels found in OnePlus 6/6T phones"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c b/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c index 4dbf8b88f264..11d460d2ea19 100644 --- a/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c +++ b/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c @@ -86,11 +86,7 @@ struct td028ttec1_panel { #define to_td028ttec1_device(p) container_of(p, struct td028ttec1_panel, panel) -/* - * noinline_for_stack so we don't get multiple copies of tx_buf - * on the stack in case of gcc-plugin-structleak - */ -static int noinline_for_stack +static int jbt_ret_write_0(struct td028ttec1_panel *lcd, u8 reg, int *err) { struct spi_device *spi = lcd->spi; diff --git a/drivers/i2c/algos/i2c-algo-pcf.c b/drivers/i2c/algos/i2c-algo-pcf.c index 7a01f2687b4c..740066ceaea3 100644 --- a/drivers/i2c/algos/i2c-algo-pcf.c +++ b/drivers/i2c/algos/i2c-algo-pcf.c @@ -19,6 +19,7 @@ #include <linux/errno.h> #include <linux/i2c.h> #include <linux/i2c-algo-pcf.h> +#include <linux/string_choices.h> #include "i2c-algo-pcf.h" @@ -316,7 +317,7 @@ static int pcf_xfer(struct i2c_adapter *i2c_adap, pmsg = &msgs[i]; DEB2(printk(KERN_DEBUG "i2c-algo-pcf.o: Doing %s %d bytes to 0x%02x - %d of %d messages\n", - pmsg->flags & I2C_M_RD ? "read" : "write", + str_read_write(pmsg->flags & I2C_M_RD), pmsg->len, pmsg->addr, i + 1, num);) ret = pcf_doAddress(adap, pmsg); diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index bbbd6240fa6e..48c5ab832009 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig @@ -592,6 +592,17 @@ config I2C_DESIGNWARE_PLATFORM This driver can also be built as a module. If so, the module will be called i2c-designware-platform. +config I2C_DESIGNWARE_AMDISP + tristate "Synopsys DesignWare Platform for AMDISP" + depends on DRM_AMD_ISP || COMPILE_TEST + depends on I2C_DESIGNWARE_CORE + help + If you say yes to this option, support will be included for the + AMDISP Synopsys DesignWare I2C adapter. + + This driver can also be built as a module. If so, the module + will be called amd_isp_i2c_designware. 
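
Several hunks in this section (i2c-algo-pcf above, i2c-at91 and i2c-mlxbf below) swap open-coded "read"/"write" ternaries for str_read_write() from <linux/string_choices.h>. A minimal userspace approximation of that helper, shown only to make the substitution concrete; the in-kernel header carries a whole family of such string helpers:

#include <stdbool.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's str_read_write() helper. */
static inline const char *str_read_write(bool v)
{
	return v ? "read" : "write";
}

int main(void)
{
	const unsigned int I2C_M_RD = 0x0001;   /* illustrative value only */
	unsigned int flags = I2C_M_RD;

	printf("transfer: %s 16 bytes\n", str_read_write(flags & I2C_M_RD));
	return 0;
}
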
+ config I2C_DESIGNWARE_AMDPSP bool "AMD PSP I2C semaphore support" depends on ACPI @@ -845,7 +856,7 @@ config I2C_LS2X config I2C_MLXBF tristate "Mellanox BlueField I2C controller" - depends on MELLANOX_PLATFORM && ARM64 + depends on (MELLANOX_PLATFORM && ARM64) || COMPILE_TEST depends on ACPI select I2C_SLAVE help diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile index c1252e2b779e..04db855fdfd6 100644 --- a/drivers/i2c/busses/Makefile +++ b/drivers/i2c/busses/Makefile @@ -58,6 +58,7 @@ obj-$(CONFIG_I2C_DESIGNWARE_PLATFORM) += i2c-designware-platform.o i2c-designware-platform-y := i2c-designware-platdrv.o i2c-designware-platform-$(CONFIG_I2C_DESIGNWARE_AMDPSP) += i2c-designware-amdpsp.o i2c-designware-platform-$(CONFIG_I2C_DESIGNWARE_BAYTRAIL) += i2c-designware-baytrail.o +obj-$(CONFIG_I2C_DESIGNWARE_AMDISP) += i2c-designware-amdisp.o obj-$(CONFIG_I2C_DESIGNWARE_PCI) += i2c-designware-pci.o i2c-designware-pci-y := i2c-designware-pcidrv.o obj-$(CONFIG_I2C_DIGICOLOR) += i2c-digicolor.o diff --git a/drivers/i2c/busses/i2c-at91-master.c b/drivers/i2c/busses/i2c-at91-master.c index ee3b469ddfb9..374fc50bb205 100644 --- a/drivers/i2c/busses/i2c-at91-master.c +++ b/drivers/i2c/busses/i2c-at91-master.c @@ -26,6 +26,7 @@ #include <linux/pinctrl/consumer.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> +#include <linux/string_choices.h> #include "i2c-at91.h" @@ -523,7 +524,7 @@ static int at91_do_twi_transfer(struct at91_twi_dev *dev) */ dev_dbg(dev->dev, "transfer: %s %zu bytes.\n", - (dev->msg->flags & I2C_M_RD) ? "read" : "write", dev->buf_len); + str_read_write(dev->msg->flags & I2C_M_RD), dev->buf_len); reinit_completion(&dev->cmd_complete); dev->transfer_status = 0; diff --git a/drivers/i2c/busses/i2c-bcm-iproc.c b/drivers/i2c/busses/i2c-bcm-iproc.c index 332a0fcca28d..63bc3c8f49d3 100644 --- a/drivers/i2c/busses/i2c-bcm-iproc.c +++ b/drivers/i2c/busses/i2c-bcm-iproc.c @@ -224,11 +224,6 @@ static void slave_rx_tasklet_fn(unsigned long); | BIT(IS_S_TX_UNDERRUN_SHIFT) | BIT(IS_S_RX_FIFO_FULL_SHIFT)\ | BIT(IS_S_RX_THLD_SHIFT)) -static int bcm_iproc_i2c_reg_slave(struct i2c_client *slave); -static int bcm_iproc_i2c_unreg_slave(struct i2c_client *slave); -static void bcm_iproc_i2c_enable_disable(struct bcm_iproc_i2c_dev *iproc_i2c, - bool enable); - static inline u32 iproc_i2c_rd_reg(struct bcm_iproc_i2c_dev *iproc_i2c, u32 offset) { @@ -264,8 +259,8 @@ static inline void iproc_i2c_wr_reg(struct bcm_iproc_i2c_dev *iproc_i2c, } } -static void bcm_iproc_i2c_slave_init( - struct bcm_iproc_i2c_dev *iproc_i2c, bool need_reset) +static void bcm_iproc_i2c_slave_init(struct bcm_iproc_i2c_dev *iproc_i2c, + bool need_reset) { u32 val; @@ -276,8 +271,8 @@ static void bcm_iproc_i2c_slave_init( val |= BIT(CFG_RESET_SHIFT); iproc_i2c_wr_reg(iproc_i2c, CFG_OFFSET, val); - /* wait 100 usec per spec */ - udelay(100); + /* wait approximately 100 usec as per spec */ + usleep_range(100, 200); /* bring controller out of reset */ val &= ~(BIT(CFG_RESET_SHIFT)); @@ -316,6 +311,19 @@ static void bcm_iproc_i2c_slave_init( iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, val); } +static void bcm_iproc_i2c_enable_disable(struct bcm_iproc_i2c_dev *iproc_i2c, + bool enable) +{ + u32 val; + + val = iproc_i2c_rd_reg(iproc_i2c, CFG_OFFSET); + if (enable) + val |= BIT(CFG_EN_SHIFT); + else + val &= ~BIT(CFG_EN_SHIFT); + iproc_i2c_wr_reg(iproc_i2c, CFG_OFFSET, val); +} + static bool bcm_iproc_i2c_check_slave_status (struct bcm_iproc_i2c_dev *iproc_i2c, u32 status) { @@ -438,7 +446,6 @@ static bool 
bcm_iproc_i2c_slave_isr(struct bcm_iproc_i2c_dev *iproc_i2c, u32 val; u8 value; - if (status & BIT(IS_S_TX_UNDERRUN_SHIFT)) { iproc_i2c->tx_underrun++; if (iproc_i2c->tx_underrun == 1) @@ -542,7 +549,7 @@ static bool bcm_iproc_i2c_slave_isr(struct bcm_iproc_i2c_dev *iproc_i2c, static void bcm_iproc_i2c_read_valid_bytes(struct bcm_iproc_i2c_dev *iproc_i2c) { struct i2c_msg *msg = iproc_i2c->msg; - uint32_t val; + u32 val; /* Read valid data from RX FIFO */ while (iproc_i2c->rx_bytes < msg->len) { @@ -688,8 +695,8 @@ static void bcm_iproc_i2c_init(struct bcm_iproc_i2c_dev *iproc_i2c) val &= ~(BIT(CFG_EN_SHIFT)); iproc_i2c_wr_reg(iproc_i2c, CFG_OFFSET, val); - /* wait 100 usec per spec */ - udelay(100); + /* wait approximately 100 usec as per spec */ + usleep_range(100, 200); /* bring controller out of reset */ val &= ~(BIT(CFG_RESET_SHIFT)); @@ -708,19 +715,6 @@ static void bcm_iproc_i2c_init(struct bcm_iproc_i2c_dev *iproc_i2c) iproc_i2c_wr_reg(iproc_i2c, IS_OFFSET, 0xffffffff); } -static void bcm_iproc_i2c_enable_disable(struct bcm_iproc_i2c_dev *iproc_i2c, - bool enable) -{ - u32 val; - - val = iproc_i2c_rd_reg(iproc_i2c, CFG_OFFSET); - if (enable) - val |= BIT(CFG_EN_SHIFT); - else - val &= ~BIT(CFG_EN_SHIFT); - iproc_i2c_wr_reg(iproc_i2c, CFG_OFFSET, val); -} - static int bcm_iproc_i2c_check_status(struct bcm_iproc_i2c_dev *iproc_i2c, struct i2c_msg *msg) { @@ -734,31 +728,31 @@ static int bcm_iproc_i2c_check_status(struct bcm_iproc_i2c_dev *iproc_i2c, return 0; case M_CMD_STATUS_LOST_ARB: - dev_dbg(iproc_i2c->device, "lost bus arbitration\n"); + dev_err(iproc_i2c->device, "lost bus arbitration\n"); return -EAGAIN; case M_CMD_STATUS_NACK_ADDR: - dev_dbg(iproc_i2c->device, "NAK addr:0x%02x\n", msg->addr); + dev_err(iproc_i2c->device, "NAK addr:0x%02x\n", msg->addr); return -ENXIO; case M_CMD_STATUS_NACK_DATA: - dev_dbg(iproc_i2c->device, "NAK data\n"); + dev_err(iproc_i2c->device, "NAK data\n"); return -ENXIO; case M_CMD_STATUS_TIMEOUT: - dev_dbg(iproc_i2c->device, "bus timeout\n"); + dev_err(iproc_i2c->device, "bus timeout\n"); return -ETIMEDOUT; case M_CMD_STATUS_FIFO_UNDERRUN: - dev_dbg(iproc_i2c->device, "FIFO under-run\n"); + dev_err(iproc_i2c->device, "FIFO under-run\n"); return -ENXIO; case M_CMD_STATUS_RX_FIFO_FULL: - dev_dbg(iproc_i2c->device, "RX FIFO full\n"); + dev_err(iproc_i2c->device, "RX FIFO full\n"); return -ETIMEDOUT; default: - dev_dbg(iproc_i2c->device, "unknown error code=%d\n", val); + dev_err(iproc_i2c->device, "unknown error code=%d\n", val); /* re-initialize i2c for recovery */ bcm_iproc_i2c_enable_disable(iproc_i2c, false); @@ -833,7 +827,7 @@ static int bcm_iproc_i2c_xfer_wait(struct bcm_iproc_i2c_dev *iproc_i2c, * The i2c quirks are set to enforce this rule. 
*/ static int bcm_iproc_i2c_xfer_internal(struct bcm_iproc_i2c_dev *iproc_i2c, - struct i2c_msg *msgs, bool process_call) + struct i2c_msg *msgs, bool process_call) { int i; u8 addr; @@ -842,8 +836,8 @@ static int bcm_iproc_i2c_xfer_internal(struct bcm_iproc_i2c_dev *iproc_i2c, struct i2c_msg *msg = &msgs[0]; /* check if bus is busy */ - if (!!(iproc_i2c_rd_reg(iproc_i2c, - M_CMD_OFFSET) & BIT(M_CMD_START_BUSY_SHIFT))) { + if (iproc_i2c_rd_reg(iproc_i2c, + M_CMD_OFFSET) & BIT(M_CMD_START_BUSY_SHIFT)) { dev_warn(iproc_i2c->device, "bus is busy\n"); return -EBUSY; } @@ -970,14 +964,14 @@ static int bcm_iproc_i2c_xfer(struct i2c_adapter *adapter, ret = bcm_iproc_i2c_xfer_internal(iproc_i2c, msgs, process_call); if (ret) { - dev_dbg(iproc_i2c->device, "xfer failed\n"); + dev_err(iproc_i2c->device, "xfer failed\n"); return ret; } return num; } -static uint32_t bcm_iproc_i2c_functionality(struct i2c_adapter *adap) +static u32 bcm_iproc_i2c_functionality(struct i2c_adapter *adap) { u32 val; @@ -989,6 +983,63 @@ static uint32_t bcm_iproc_i2c_functionality(struct i2c_adapter *adap) return val; } +static int bcm_iproc_i2c_reg_slave(struct i2c_client *slave) +{ + struct bcm_iproc_i2c_dev *iproc_i2c = i2c_get_adapdata(slave->adapter); + + if (iproc_i2c->slave) + return -EBUSY; + + if (slave->flags & I2C_CLIENT_TEN) + return -EAFNOSUPPORT; + + iproc_i2c->slave = slave; + + tasklet_init(&iproc_i2c->slave_rx_tasklet, slave_rx_tasklet_fn, + (unsigned long)iproc_i2c); + + bcm_iproc_i2c_slave_init(iproc_i2c, false); + + return 0; +} + +static int bcm_iproc_i2c_unreg_slave(struct i2c_client *slave) +{ + struct bcm_iproc_i2c_dev *iproc_i2c = i2c_get_adapdata(slave->adapter); + u32 tmp; + + if (!iproc_i2c->slave) + return -EINVAL; + + disable_irq(iproc_i2c->irq); + + tasklet_kill(&iproc_i2c->slave_rx_tasklet); + + /* disable all slave interrupts */ + tmp = iproc_i2c_rd_reg(iproc_i2c, IE_OFFSET); + tmp &= ~(IE_S_ALL_INTERRUPT_MASK << + IE_S_ALL_INTERRUPT_SHIFT); + iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, tmp); + + /* Erase the slave address programmed */ + tmp = iproc_i2c_rd_reg(iproc_i2c, S_CFG_SMBUS_ADDR_OFFSET); + tmp &= ~BIT(S_CFG_EN_NIC_SMB_ADDR3_SHIFT); + iproc_i2c_wr_reg(iproc_i2c, S_CFG_SMBUS_ADDR_OFFSET, tmp); + + /* flush TX/RX FIFOs */ + tmp = (BIT(S_FIFO_RX_FLUSH_SHIFT) | BIT(S_FIFO_TX_FLUSH_SHIFT)); + iproc_i2c_wr_reg(iproc_i2c, S_FIFO_CTRL_OFFSET, tmp); + + /* clear all pending slave interrupts */ + iproc_i2c_wr_reg(iproc_i2c, IS_OFFSET, ISR_MASK_SLAVE); + + iproc_i2c->slave = NULL; + + enable_irq(iproc_i2c->irq); + + return 0; +} + static struct i2c_algorithm bcm_iproc_algo = { .master_xfer = bcm_iproc_i2c_xfer, .functionality = bcm_iproc_i2c_functionality, @@ -1010,21 +1061,18 @@ static int bcm_iproc_i2c_cfg_speed(struct bcm_iproc_i2c_dev *iproc_i2c) "clock-frequency", &bus_speed); if (ret < 0) { dev_info(iproc_i2c->device, - "unable to interpret clock-frequency DT property\n"); + "unable to interpret clock-frequency DT property\n"); bus_speed = I2C_MAX_STANDARD_MODE_FREQ; } - if (bus_speed < I2C_MAX_STANDARD_MODE_FREQ) { - dev_err(iproc_i2c->device, "%d Hz bus speed not supported\n", - bus_speed); - dev_err(iproc_i2c->device, - "valid speeds are 100khz and 400khz\n"); - return -EINVAL; - } else if (bus_speed < I2C_MAX_FAST_MODE_FREQ) { + if (bus_speed < I2C_MAX_STANDARD_MODE_FREQ) + return dev_err_probe(iproc_i2c->device, -EINVAL, + "%d Hz not supported (out of 100-400 kHz range)\n", + bus_speed); + else if (bus_speed < I2C_MAX_FAST_MODE_FREQ) bus_speed = I2C_MAX_STANDARD_MODE_FREQ; - } else { + 
else bus_speed = I2C_MAX_FAST_MODE_FREQ; - } iproc_i2c->bus_speed = bus_speed; val = iproc_i2c_rd_reg(iproc_i2c, TIM_CFG_OFFSET); @@ -1039,9 +1087,9 @@ static int bcm_iproc_i2c_cfg_speed(struct bcm_iproc_i2c_dev *iproc_i2c) static int bcm_iproc_i2c_probe(struct platform_device *pdev) { - int irq, ret = 0; struct bcm_iproc_i2c_dev *iproc_i2c; struct i2c_adapter *adap; + int irq, ret; iproc_i2c = devm_kzalloc(&pdev->dev, sizeof(*iproc_i2c), GFP_KERNEL); @@ -1066,11 +1114,9 @@ static int bcm_iproc_i2c_probe(struct platform_device *pdev) ret = of_property_read_u32(iproc_i2c->device->of_node, "brcm,ape-hsls-addr-mask", &iproc_i2c->ape_addr_mask); - if (ret < 0) { - dev_err(iproc_i2c->device, - "'brcm,ape-hsls-addr-mask' missing\n"); - return -EINVAL; - } + if (ret < 0) + return dev_err_probe(iproc_i2c->device, ret, + "'brcm,ape-hsls-addr-mask' missing\n"); spin_lock_init(&iproc_i2c->idm_lock); @@ -1090,11 +1136,9 @@ static int bcm_iproc_i2c_probe(struct platform_device *pdev) ret = devm_request_irq(iproc_i2c->device, irq, bcm_iproc_i2c_isr, 0, pdev->name, iproc_i2c); - if (ret < 0) { - dev_err(iproc_i2c->device, - "unable to request irq %i\n", irq); - return ret; - } + if (ret < 0) + return dev_err_probe(iproc_i2c->device, ret, + "unable to request irq %i\n", irq); iproc_i2c->irq = irq; } else { @@ -1106,9 +1150,8 @@ static int bcm_iproc_i2c_probe(struct platform_device *pdev) adap = &iproc_i2c->adapter; i2c_set_adapdata(adap, iproc_i2c); - snprintf(adap->name, sizeof(adap->name), - "Broadcom iProc (%s)", - of_node_full_name(iproc_i2c->device->of_node)); + snprintf(adap->name, sizeof(adap->name), "Broadcom iProc (%s)", + of_node_full_name(iproc_i2c->device->of_node)); adap->algo = &bcm_iproc_algo; adap->quirks = &bcm_iproc_i2c_quirks; adap->dev.parent = &pdev->dev; @@ -1182,62 +1225,6 @@ static const struct dev_pm_ops bcm_iproc_i2c_pm_ops = { .resume_early = &bcm_iproc_i2c_resume }; -static int bcm_iproc_i2c_reg_slave(struct i2c_client *slave) -{ - struct bcm_iproc_i2c_dev *iproc_i2c = i2c_get_adapdata(slave->adapter); - - if (iproc_i2c->slave) - return -EBUSY; - - if (slave->flags & I2C_CLIENT_TEN) - return -EAFNOSUPPORT; - - iproc_i2c->slave = slave; - - tasklet_init(&iproc_i2c->slave_rx_tasklet, slave_rx_tasklet_fn, - (unsigned long)iproc_i2c); - - bcm_iproc_i2c_slave_init(iproc_i2c, false); - return 0; -} - -static int bcm_iproc_i2c_unreg_slave(struct i2c_client *slave) -{ - u32 tmp; - struct bcm_iproc_i2c_dev *iproc_i2c = i2c_get_adapdata(slave->adapter); - - if (!iproc_i2c->slave) - return -EINVAL; - - disable_irq(iproc_i2c->irq); - - tasklet_kill(&iproc_i2c->slave_rx_tasklet); - - /* disable all slave interrupts */ - tmp = iproc_i2c_rd_reg(iproc_i2c, IE_OFFSET); - tmp &= ~(IE_S_ALL_INTERRUPT_MASK << - IE_S_ALL_INTERRUPT_SHIFT); - iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, tmp); - - /* Erase the slave address programmed */ - tmp = iproc_i2c_rd_reg(iproc_i2c, S_CFG_SMBUS_ADDR_OFFSET); - tmp &= ~BIT(S_CFG_EN_NIC_SMB_ADDR3_SHIFT); - iproc_i2c_wr_reg(iproc_i2c, S_CFG_SMBUS_ADDR_OFFSET, tmp); - - /* flush TX/RX FIFOs */ - tmp = (BIT(S_FIFO_RX_FLUSH_SHIFT) | BIT(S_FIFO_TX_FLUSH_SHIFT)); - iproc_i2c_wr_reg(iproc_i2c, S_FIFO_CTRL_OFFSET, tmp); - - /* clear all pending slave interrupts */ - iproc_i2c_wr_reg(iproc_i2c, IS_OFFSET, ISR_MASK_SLAVE); - - iproc_i2c->slave = NULL; - - enable_irq(iproc_i2c->irq); - - return 0; -} - static const struct of_device_id bcm_iproc_i2c_of_match[] = { { .compatible = "brcm,iproc-i2c", diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c 
index 6a909d339681..6a3d4e9e07f4 100644 --- a/drivers/i2c/busses/i2c-davinci.c +++ b/drivers/i2c/busses/i2c-davinci.c @@ -551,7 +551,8 @@ out: static u32 i2c_davinci_func(struct i2c_adapter *adap) { - return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; + return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | + I2C_FUNC_PROTOCOL_MANGLING; } static void terminate_read(struct davinci_i2c_dev *dev) diff --git a/drivers/i2c/busses/i2c-designware-amdisp.c b/drivers/i2c/busses/i2c-designware-amdisp.c new file mode 100644 index 000000000000..ad6f08338124 --- /dev/null +++ b/drivers/i2c/busses/i2c-designware-amdisp.c @@ -0,0 +1,205 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Based on Synopsys DesignWare I2C adapter driver. + * + * Copyright (C) 2025 Advanced Micro Devices, Inc. + */ + +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> + +#include "i2c-designware-core.h" + +#define DRV_NAME "amd_isp_i2c_designware" +#define AMD_ISP_I2C_INPUT_CLK 100 /* Mhz */ + +static void amd_isp_dw_i2c_plat_pm_cleanup(struct dw_i2c_dev *i2c_dev) +{ + pm_runtime_disable(i2c_dev->dev); + + if (i2c_dev->shared_with_punit) + pm_runtime_put_noidle(i2c_dev->dev); +} + +static inline u32 amd_isp_dw_i2c_get_clk_rate(struct dw_i2c_dev *i2c_dev) +{ + return AMD_ISP_I2C_INPUT_CLK * 1000; +} + +static int amd_isp_dw_i2c_plat_probe(struct platform_device *pdev) +{ + struct dw_i2c_dev *isp_i2c_dev; + struct i2c_adapter *adap; + int ret; + + isp_i2c_dev = devm_kzalloc(&pdev->dev, sizeof(*isp_i2c_dev), GFP_KERNEL); + if (!isp_i2c_dev) + return -ENOMEM; + isp_i2c_dev->dev = &pdev->dev; + + pdev->dev.init_name = DRV_NAME; + + /* + * Use the polling mode to send/receive the data, because + * no IRQ connection from ISP I2C + */ + isp_i2c_dev->flags |= ACCESS_POLLING; + platform_set_drvdata(pdev, isp_i2c_dev); + + isp_i2c_dev->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(isp_i2c_dev->base)) + return dev_err_probe(&pdev->dev, PTR_ERR(isp_i2c_dev->base), + "failed to get IOMEM resource\n"); + + isp_i2c_dev->get_clk_rate_khz = amd_isp_dw_i2c_get_clk_rate; + ret = i2c_dw_fw_parse_and_configure(isp_i2c_dev); + if (ret) + return dev_err_probe(&pdev->dev, ret, + "failed to parse i2c dw fwnode and configure\n"); + + i2c_dw_configure(isp_i2c_dev); + + adap = &isp_i2c_dev->adapter; + adap->owner = THIS_MODULE; + ACPI_COMPANION_SET(&adap->dev, ACPI_COMPANION(&pdev->dev)); + adap->dev.of_node = pdev->dev.of_node; + /* use dynamically allocated adapter id */ + adap->nr = -1; + + if (isp_i2c_dev->flags & ACCESS_NO_IRQ_SUSPEND) + dev_pm_set_driver_flags(&pdev->dev, + DPM_FLAG_SMART_PREPARE); + else + dev_pm_set_driver_flags(&pdev->dev, + DPM_FLAG_SMART_PREPARE | + DPM_FLAG_SMART_SUSPEND); + + device_enable_async_suspend(&pdev->dev); + + if (isp_i2c_dev->shared_with_punit) + pm_runtime_get_noresume(&pdev->dev); + + pm_runtime_enable(&pdev->dev); + pm_runtime_get_sync(&pdev->dev); + + ret = i2c_dw_probe(isp_i2c_dev); + if (ret) { + dev_err_probe(&pdev->dev, ret, "i2c_dw_probe failed\n"); + goto error_release_rpm; + } + + pm_runtime_put_sync(&pdev->dev); + + return 0; + +error_release_rpm: + amd_isp_dw_i2c_plat_pm_cleanup(isp_i2c_dev); + pm_runtime_put_sync(&pdev->dev); + return ret; +} + +static void amd_isp_dw_i2c_plat_remove(struct platform_device *pdev) +{ + struct dw_i2c_dev *isp_i2c_dev = platform_get_drvdata(pdev); + + pm_runtime_get_sync(&pdev->dev); + + i2c_del_adapter(&isp_i2c_dev->adapter); + + i2c_dw_disable(isp_i2c_dev); + + pm_runtime_put_sync(&pdev->dev); + 
amd_isp_dw_i2c_plat_pm_cleanup(isp_i2c_dev); +} + +static int amd_isp_dw_i2c_plat_prepare(struct device *dev) +{ + /* + * If the ACPI companion device object is present for this device, it + * may be accessed during suspend and resume of other devices via I2C + * operation regions, so tell the PM core and middle layers to avoid + * skipping system suspend/resume callbacks for it in that case. + */ + return !has_acpi_companion(dev); +} + +static int amd_isp_dw_i2c_plat_runtime_suspend(struct device *dev) +{ + struct dw_i2c_dev *i_dev = dev_get_drvdata(dev); + + if (i_dev->shared_with_punit) + return 0; + + i2c_dw_disable(i_dev); + i2c_dw_prepare_clk(i_dev, false); + + return 0; +} + +static int amd_isp_dw_i2c_plat_suspend(struct device *dev) +{ + struct dw_i2c_dev *i_dev = dev_get_drvdata(dev); + int ret; + + if (!i_dev) + return -ENODEV; + + ret = amd_isp_dw_i2c_plat_runtime_suspend(dev); + if (!ret) + i2c_mark_adapter_suspended(&i_dev->adapter); + + return ret; +} + +static int amd_isp_dw_i2c_plat_runtime_resume(struct device *dev) +{ + struct dw_i2c_dev *i_dev = dev_get_drvdata(dev); + + if (!i_dev) + return -ENODEV; + + if (!i_dev->shared_with_punit) + i2c_dw_prepare_clk(i_dev, true); + if (i_dev->init) + i_dev->init(i_dev); + + return 0; +} + +static int amd_isp_dw_i2c_plat_resume(struct device *dev) +{ + struct dw_i2c_dev *i_dev = dev_get_drvdata(dev); + + amd_isp_dw_i2c_plat_runtime_resume(dev); + i2c_mark_adapter_resumed(&i_dev->adapter); + + return 0; +} + +static const struct dev_pm_ops amd_isp_dw_i2c_dev_pm_ops = { + .prepare = pm_sleep_ptr(amd_isp_dw_i2c_plat_prepare), + LATE_SYSTEM_SLEEP_PM_OPS(amd_isp_dw_i2c_plat_suspend, amd_isp_dw_i2c_plat_resume) + RUNTIME_PM_OPS(amd_isp_dw_i2c_plat_runtime_suspend, amd_isp_dw_i2c_plat_runtime_resume, NULL) +}; + +/* Work with hotplug and coldplug */ +MODULE_ALIAS("platform:amd_isp_i2c_designware"); + +static struct platform_driver amd_isp_dw_i2c_driver = { + .probe = amd_isp_dw_i2c_plat_probe, + .remove = amd_isp_dw_i2c_plat_remove, + .driver = { + .name = DRV_NAME, + .pm = pm_ptr(&amd_isp_dw_i2c_dev_pm_ops), + }, +}; +module_platform_driver(amd_isp_dw_i2c_driver); + +MODULE_DESCRIPTION("Synopsys DesignWare I2C bus adapter in AMD ISP"); +MODULE_IMPORT_NS("I2C_DW"); +MODULE_IMPORT_NS("I2C_DW_COMMON"); +MODULE_AUTHOR("Venkata Narendra Kumar Gutta <vengutta@amd.com>"); +MODULE_AUTHOR("Pratap Nirujogi <pratap.nirujogi@amd.com>"); +MODULE_AUTHOR("Bin Du <bin.du@amd.com>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/i2c/busses/i2c-designware-common.c b/drivers/i2c/busses/i2c-designware-common.c index 8eb7bd640f8d..5b1e8f74c4ac 100644 --- a/drivers/i2c/busses/i2c-designware-common.c +++ b/drivers/i2c/busses/i2c-designware-common.c @@ -572,8 +572,10 @@ u32 i2c_dw_clk_rate(struct dw_i2c_dev *dev) * Clock is not necessary if we got LCNT/HCNT values directly from * the platform code. 
*/ - if (WARN_ON_ONCE(!dev->get_clk_rate_khz)) + if (!dev->get_clk_rate_khz) { + dev_dbg_once(dev->dev, "Callback get_clk_rate_khz() is not defined\n"); return 0; + } return dev->get_clk_rate_khz(dev); } diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c index d6e1ee935399..879719e91df2 100644 --- a/drivers/i2c/busses/i2c-designware-platdrv.c +++ b/drivers/i2c/busses/i2c-designware-platdrv.c @@ -34,7 +34,7 @@ static u32 i2c_dw_get_clk_rate_khz(struct dw_i2c_dev *dev) { - return clk_get_rate(dev->clk) / KILO; + return clk_get_rate(dev->clk) / HZ_PER_KHZ; } #ifdef CONFIG_OF diff --git a/drivers/i2c/busses/i2c-designware-slave.c b/drivers/i2c/busses/i2c-designware-slave.c index 5cd4a5f7a472..b936a240db0a 100644 --- a/drivers/i2c/busses/i2c-designware-slave.c +++ b/drivers/i2c/busses/i2c-designware-slave.c @@ -96,7 +96,7 @@ static int i2c_dw_unreg_slave(struct i2c_client *slave) i2c_dw_disable(dev); synchronize_irq(dev->irq); dev->slave = NULL; - pm_runtime_put(dev->dev); + pm_runtime_put_sync_suspend(dev->dev); return 0; } diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c index 48e1af544b75..a7f89946dad4 100644 --- a/drivers/i2c/busses/i2c-i801.c +++ b/drivers/i2c/busses/i2c-i801.c @@ -1180,7 +1180,7 @@ static void i801_probe_optional_targets(struct i801_priv *priv) #ifdef CONFIG_I2C_I801_MUX if (!priv->mux_pdev) #endif - i2c_register_spd(&priv->adapter); + i2c_register_spd_write_enable(&priv->adapter); } #else static void __init input_apanel_init(void) {} @@ -1283,7 +1283,7 @@ static int i801_notifier_call(struct notifier_block *nb, unsigned long action, return NOTIFY_DONE; /* Call i2c_register_spd for muxed child segments */ - i2c_register_spd(to_i2c_adapter(dev)); + i2c_register_spd_write_enable(to_i2c_adapter(dev)); return NOTIFY_OK; } diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c index 9e5d454d8318..de01dfecb16e 100644 --- a/drivers/i2c/busses/i2c-imx.c +++ b/drivers/i2c/busses/i2c-imx.c @@ -1711,11 +1711,11 @@ static int i2c_imx_probe(struct platform_device *pdev) irq = platform_get_irq(pdev, 0); if (irq < 0) - return irq; + return dev_err_probe(&pdev->dev, irq, "can't get IRQ\n"); base = devm_platform_get_and_ioremap_resource(pdev, 0, &res); if (IS_ERR(base)) - return PTR_ERR(base); + return dev_err_probe(&pdev->dev, PTR_ERR(base), "can't get IO memory\n"); phy_addr = (dma_addr_t)res->start; i2c_imx = devm_kzalloc(&pdev->dev, sizeof(*i2c_imx), GFP_KERNEL); @@ -1810,13 +1810,15 @@ static int i2c_imx_probe(struct platform_device *pdev) */ ret = i2c_imx_dma_request(i2c_imx, phy_addr); if (ret) { - if (ret == -EPROBE_DEFER) + if (ret == -EPROBE_DEFER) { + dev_err_probe(&pdev->dev, ret, "can't get DMA channels\n"); goto clk_notifier_unregister; - else if (ret == -ENODEV) + } else if (ret == -ENODEV) { dev_dbg(&pdev->dev, "Only use PIO mode\n"); - else + } else { dev_warn(&pdev->dev, "Failed to setup DMA (%pe), only use PIO mode\n", ERR_PTR(ret)); + } } /* Add I2C adapter */ diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c index c93c02aa6ac8..7aaefb21416a 100644 --- a/drivers/i2c/busses/i2c-ismt.c +++ b/drivers/i2c/busses/i2c-ismt.c @@ -933,7 +933,7 @@ ismt_probe(struct pci_dev *pdev, const struct pci_device_id *id) return err; } - err = pci_request_region(pdev, SMBBAR, ismt_driver.name); + err = pcim_request_region(pdev, SMBBAR, ismt_driver.name); if (err) { dev_err(&pdev->dev, "Failed to request SMBus region 0x%lx-0x%lx\n", diff --git 
a/drivers/i2c/busses/i2c-lpc2k.c b/drivers/i2c/busses/i2c-lpc2k.c index 6943a0de860a..ccd13c4fb83e 100644 --- a/drivers/i2c/busses/i2c-lpc2k.c +++ b/drivers/i2c/busses/i2c-lpc2k.c @@ -442,8 +442,13 @@ static int i2c_lpc2k_suspend(struct device *dev) static int i2c_lpc2k_resume(struct device *dev) { struct lpc2k_i2c *i2c = dev_get_drvdata(dev); + int ret; - clk_enable(i2c->clk); + ret = clk_enable(i2c->clk); + if (ret) { + dev_err(dev, "failed to enable clock.\n"); + return ret; + } i2c_lpc2k_reset(i2c); return 0; diff --git a/drivers/i2c/busses/i2c-microchip-corei2c.c b/drivers/i2c/busses/i2c-microchip-corei2c.c index 5db73429125c..492bf4c34722 100644 --- a/drivers/i2c/busses/i2c-microchip-corei2c.c +++ b/drivers/i2c/busses/i2c-microchip-corei2c.c @@ -76,6 +76,8 @@ #define CORE_I2C_FREQ (0x14) #define CORE_I2C_GLITCHREG (0x18) #define CORE_I2C_SLAVE1_ADDR (0x1c) +#define CORE_I2C_SMBUS_MSG_WR (0x0) +#define CORE_I2C_SMBUS_MSG_RD (0x1) #define PCLK_DIV_960 (CTRL_CR2) #define PCLK_DIV_256 (0) @@ -424,9 +426,109 @@ static u32 mchp_corei2c_func(struct i2c_adapter *adap) return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; } +static int mchp_corei2c_smbus_xfer(struct i2c_adapter *adap, u16 addr, unsigned short flags, + char read_write, u8 command, + int size, union i2c_smbus_data *data) +{ + struct i2c_msg msgs[2]; + struct mchp_corei2c_dev *idev = i2c_get_adapdata(adap); + u8 tx_buf[I2C_SMBUS_BLOCK_MAX + 2]; + u8 rx_buf[I2C_SMBUS_BLOCK_MAX + 1]; + int num_msgs = 1; + + msgs[CORE_I2C_SMBUS_MSG_WR].addr = addr; + msgs[CORE_I2C_SMBUS_MSG_WR].flags = 0; + + if (read_write == I2C_SMBUS_READ && size <= I2C_SMBUS_BYTE) + msgs[CORE_I2C_SMBUS_MSG_WR].flags = I2C_M_RD; + + if (read_write == I2C_SMBUS_WRITE && size <= I2C_SMBUS_WORD_DATA) + msgs[CORE_I2C_SMBUS_MSG_WR].len = size; + + if (read_write == I2C_SMBUS_WRITE && size > I2C_SMBUS_BYTE) { + msgs[CORE_I2C_SMBUS_MSG_WR].buf = tx_buf; + msgs[CORE_I2C_SMBUS_MSG_WR].buf[0] = command; + } + + if (read_write == I2C_SMBUS_READ && size >= I2C_SMBUS_BYTE_DATA) { + msgs[CORE_I2C_SMBUS_MSG_WR].buf = tx_buf; + msgs[CORE_I2C_SMBUS_MSG_WR].buf[0] = command; + msgs[CORE_I2C_SMBUS_MSG_RD].addr = addr; + msgs[CORE_I2C_SMBUS_MSG_RD].flags = I2C_M_RD; + num_msgs = 2; + } + + if (read_write == I2C_SMBUS_READ && size > I2C_SMBUS_QUICK) + msgs[CORE_I2C_SMBUS_MSG_WR].len = 1; + + switch (size) { + case I2C_SMBUS_QUICK: + msgs[CORE_I2C_SMBUS_MSG_WR].buf = NULL; + return 0; + case I2C_SMBUS_BYTE: + if (read_write == I2C_SMBUS_WRITE) + msgs[CORE_I2C_SMBUS_MSG_WR].buf = &command; + else + msgs[CORE_I2C_SMBUS_MSG_WR].buf = &data->byte; + break; + case I2C_SMBUS_BYTE_DATA: + if (read_write == I2C_SMBUS_WRITE) { + msgs[CORE_I2C_SMBUS_MSG_WR].buf[1] = data->byte; + } else { + msgs[CORE_I2C_SMBUS_MSG_RD].len = size - 1; + msgs[CORE_I2C_SMBUS_MSG_RD].buf = &data->byte; + } + break; + case I2C_SMBUS_WORD_DATA: + if (read_write == I2C_SMBUS_WRITE) { + msgs[CORE_I2C_SMBUS_MSG_WR].buf[1] = data->word & 0xFF; + msgs[CORE_I2C_SMBUS_MSG_WR].buf[2] = (data->word >> 8) & 0xFF; + } else { + msgs[CORE_I2C_SMBUS_MSG_RD].len = size - 1; + msgs[CORE_I2C_SMBUS_MSG_RD].buf = rx_buf; + } + break; + case I2C_SMBUS_BLOCK_DATA: + if (read_write == I2C_SMBUS_WRITE) { + int data_len; + + data_len = data->block[0]; + msgs[CORE_I2C_SMBUS_MSG_WR].len = data_len + 2; + for (int i = 0; i <= data_len; i++) + msgs[CORE_I2C_SMBUS_MSG_WR].buf[i + 1] = data->block[i]; + } else { + msgs[CORE_I2C_SMBUS_MSG_RD].len = I2C_SMBUS_BLOCK_MAX + 1; + msgs[CORE_I2C_SMBUS_MSG_RD].buf = rx_buf; + } + break; + default: + return 
-EOPNOTSUPP; + } + + mchp_corei2c_xfer(&idev->adapter, msgs, num_msgs); + if (read_write == I2C_SMBUS_WRITE || size <= I2C_SMBUS_BYTE_DATA) + return 0; + + switch (size) { + case I2C_SMBUS_WORD_DATA: + data->word = (rx_buf[0] | (rx_buf[1] << 8)); + break; + case I2C_SMBUS_BLOCK_DATA: + if (rx_buf[0] > I2C_SMBUS_BLOCK_MAX) + rx_buf[0] = I2C_SMBUS_BLOCK_MAX; + /* As per protocol first member of block is size of the block. */ + for (int i = 0; i <= rx_buf[0]; i++) + data->block[i] = rx_buf[i]; + break; + } + + return 0; +} + static const struct i2c_algorithm mchp_corei2c_algo = { .master_xfer = mchp_corei2c_xfer, .functionality = mchp_corei2c_func, + .smbus_xfer = mchp_corei2c_smbus_xfer, }; static int mchp_corei2c_probe(struct platform_device *pdev) diff --git a/drivers/i2c/busses/i2c-mlxbf.c b/drivers/i2c/busses/i2c-mlxbf.c index 280dde53d7f3..8345f7e6385d 100644 --- a/drivers/i2c/busses/i2c-mlxbf.c +++ b/drivers/i2c/busses/i2c-mlxbf.c @@ -19,6 +19,7 @@ #include <linux/of.h> #include <linux/platform_device.h> #include <linux/string.h> +#include <linux/string_choices.h> /* Defines what functionality is present. */ #define MLXBF_I2C_FUNC_SMBUS_BLOCK \ @@ -197,6 +198,7 @@ #define MLXBF_I2C_MASK_8 GENMASK(7, 0) #define MLXBF_I2C_MASK_16 GENMASK(15, 0) +#define MLXBF_I2C_MASK_32 GENMASK(31, 0) #define MLXBF_I2C_MST_ADDR_OFFSET 0x200 @@ -223,7 +225,7 @@ #define MLXBF_I2C_MASTER_ENABLE \ (MLXBF_I2C_MASTER_LOCK_BIT | MLXBF_I2C_MASTER_BUSY_BIT | \ - MLXBF_I2C_MASTER_START_BIT | MLXBF_I2C_MASTER_STOP_BIT) + MLXBF_I2C_MASTER_START_BIT) #define MLXBF_I2C_MASTER_ENABLE_WRITE \ (MLXBF_I2C_MASTER_ENABLE | MLXBF_I2C_MASTER_CTL_WRITE_BIT) @@ -337,6 +339,7 @@ enum { MLXBF_I2C_F_SMBUS_BLOCK = BIT(5), MLXBF_I2C_F_SMBUS_PEC = BIT(6), MLXBF_I2C_F_SMBUS_PROCESS_CALL = BIT(7), + MLXBF_I2C_F_WRITE_WITHOUT_STOP = BIT(8), }; /* Mellanox BlueField chip type. */ @@ -637,16 +640,19 @@ static void mlxbf_i2c_smbus_read_data(struct mlxbf_i2c_priv *priv, } static int mlxbf_i2c_smbus_enable(struct mlxbf_i2c_priv *priv, u8 slave, - u8 len, u8 block_en, u8 pec_en, bool read) + u8 len, u8 block_en, u8 pec_en, bool read, + bool stop) { - u32 command; + u32 command = 0; /* Set Master GW control word. */ + if (stop) + command |= MLXBF_I2C_MASTER_STOP_BIT; if (read) { - command = MLXBF_I2C_MASTER_ENABLE_READ; + command |= MLXBF_I2C_MASTER_ENABLE_READ; command |= rol32(len, MLXBF_I2C_MASTER_READ_SHIFT); } else { - command = MLXBF_I2C_MASTER_ENABLE_WRITE; + command |= MLXBF_I2C_MASTER_ENABLE_WRITE; command |= rol32(len, MLXBF_I2C_MASTER_WRITE_SHIFT); } command |= rol32(slave, MLXBF_I2C_MASTER_SLV_ADDR_SHIFT); @@ -681,8 +687,10 @@ mlxbf_i2c_smbus_start_transaction(struct mlxbf_i2c_priv *priv, u8 op_idx, data_idx, data_len, write_len, read_len; struct mlxbf_i2c_smbus_operation *operation; u8 read_en, write_en, block_en, pec_en; - u8 slave, flags, addr; + bool stop_after_write = true; + u8 slave, addr; u8 *read_buf; + u32 flags; u32 bits; int ret; @@ -754,7 +762,16 @@ mlxbf_i2c_smbus_start_transaction(struct mlxbf_i2c_priv *priv, memcpy(data_desc + data_idx, operation->buffer, operation->length); data_idx += operation->length; + + /* + * The stop condition can be skipped when writing on the bus + * to implement a repeated start condition on the next read + * as required for several SMBus and I2C operations. + */ + if (flags & MLXBF_I2C_F_WRITE_WITHOUT_STOP) + stop_after_write = false; } + /* * We assume that read operations are performed only once per * SMBus transaction. 
*TBD* protect this statement so it won't @@ -780,7 +797,7 @@ mlxbf_i2c_smbus_start_transaction(struct mlxbf_i2c_priv *priv, if (write_en) { ret = mlxbf_i2c_smbus_enable(priv, slave, write_len, block_en, - pec_en, 0); + pec_en, 0, stop_after_write); if (ret) goto out_unlock; } @@ -790,7 +807,7 @@ mlxbf_i2c_smbus_start_transaction(struct mlxbf_i2c_priv *priv, mlxbf_i2c_smbus_write_data(priv, (const u8 *)&addr, 1, MLXBF_I2C_MASTER_DATA_DESC_ADDR, true); ret = mlxbf_i2c_smbus_enable(priv, slave, read_len, block_en, - pec_en, 1); + pec_en, 1, true); if (!ret) { /* Get Master GW data descriptor. */ mlxbf_i2c_smbus_read_data(priv, data_desc, read_len + 1, @@ -896,6 +913,9 @@ mlxbf_i2c_smbus_i2c_block_func(struct mlxbf_i2c_smbus_request *request, request->operation[0].flags |= pec_check ? MLXBF_I2C_F_SMBUS_PEC : 0; request->operation[0].buffer = command; + if (read) + request->operation[0].flags |= MLXBF_I2C_F_WRITE_WITHOUT_STOP; + /* * As specified in the standard, the max number of bytes to read/write * per block operation is 32 bytes. In Golan code, the controller can @@ -1063,7 +1083,7 @@ static u32 mlxbf_i2c_get_ticks(struct mlxbf_i2c_priv *priv, u64 nanoseconds, * Frequency */ frequency = priv->frequency; - ticks = (nanoseconds * frequency) / MLXBF_I2C_FREQUENCY_1GHZ; + ticks = div_u64(nanoseconds * frequency, MLXBF_I2C_FREQUENCY_1GHZ); /* * The number of ticks is rounded down and if minimum is equal to 1 * then add one tick. @@ -1130,7 +1150,8 @@ static void mlxbf_i2c_set_timings(struct mlxbf_i2c_priv *priv, MLXBF_I2C_MASK_16, MLXBF_I2C_SHIFT_16); writel(timer, priv->timer->io + MLXBF_I2C_SMBUS_THIGH_MAX_TBUF); - timer = timings->timeout; + timer = mlxbf_i2c_set_timer(priv, timings->timeout, false, + MLXBF_I2C_MASK_32, MLXBF_I2C_SHIFT_0); writel(timer, priv->timer->io + MLXBF_I2C_SMBUS_SCL_LOW_TIMEOUT); } @@ -1140,11 +1161,7 @@ enum mlxbf_i2c_timings_config { MLXBF_I2C_TIMING_CONFIG_1000KHZ, }; -/* - * Note that the mlxbf_i2c_timings->timeout value is not related to the - * bus frequency, it is impacted by the time it takes the driver to - * complete data transmission before transaction abort. - */ +/* Timing values are in nanoseconds */ static const struct mlxbf_i2c_timings mlxbf_i2c_timings[] = { [MLXBF_I2C_TIMING_CONFIG_100KHZ] = { .scl_high = 4810, @@ -1159,8 +1176,8 @@ static const struct mlxbf_i2c_timings mlxbf_i2c_timings[] = { .scl_fall = 50, .hold_data = 300, .buf = 20000, - .thigh_max = 5000, - .timeout = 106500 + .thigh_max = 50000, + .timeout = 35000000 }, [MLXBF_I2C_TIMING_CONFIG_400KHZ] = { .scl_high = 1011, @@ -1175,24 +1192,24 @@ static const struct mlxbf_i2c_timings mlxbf_i2c_timings[] = { .scl_fall = 50, .hold_data = 300, .buf = 20000, - .thigh_max = 5000, - .timeout = 106500 + .thigh_max = 50000, + .timeout = 35000000 }, [MLXBF_I2C_TIMING_CONFIG_1000KHZ] = { - .scl_high = 600, - .scl_low = 1300, + .scl_high = 383, + .scl_low = 460, .hold_start = 600, - .setup_start = 600, - .setup_stop = 600, - .setup_data = 100, + .setup_start = 260, + .setup_stop = 260, + .setup_data = 50, .sda_rise = 50, .sda_fall = 50, .scl_rise = 50, .scl_fall = 50, .hold_data = 300, - .buf = 20000, - .thigh_max = 5000, - .timeout = 106500 + .buf = 500, + .thigh_max = 50000, + .timeout = 35000000 } }; @@ -1443,9 +1460,8 @@ static u64 mlxbf_i2c_calculate_freq_from_tyu(struct mlxbf_i2c_resource *corepll_ * and PadFrequency, respectively. 
*/ core_frequency = MLXBF_I2C_PLL_IN_FREQ * (++core_f); - core_frequency /= (++core_r) * (++core_od); - return core_frequency; + return div_u64(core_frequency, (++core_r) * (++core_od)); } static u64 mlxbf_i2c_calculate_freq_from_yu(struct mlxbf_i2c_resource *corepll_res) @@ -1474,9 +1490,8 @@ static u64 mlxbf_i2c_calculate_freq_from_yu(struct mlxbf_i2c_resource *corepll_r * and PadFrequency, respectively. */ corepll_frequency = (MLXBF_I2C_PLL_IN_FREQ * core_f) / MLNXBF_I2C_COREPLL_CONST; - corepll_frequency /= (++core_r) * (++core_od); - return corepll_frequency; + return div_u64(corepll_frequency, (++core_r) * (++core_od)); } static int mlxbf_i2c_calculate_corepll_freq(struct platform_device *pdev, @@ -2038,21 +2053,21 @@ static s32 mlxbf_i2c_smbus_xfer(struct i2c_adapter *adap, u16 addr, read ? &data->byte : &command, read, pec); dev_dbg(&adap->dev, "smbus %s byte, slave 0x%02x.\n", - read ? "read" : "write", addr); + str_read_write(read), addr); break; case I2C_SMBUS_BYTE_DATA: mlxbf_i2c_smbus_data_byte_func(&request, &command, &data->byte, read, pec); dev_dbg(&adap->dev, "smbus %s byte data at 0x%02x, slave 0x%02x.\n", - read ? "read" : "write", command, addr); + str_read_write(read), command, addr); break; case I2C_SMBUS_WORD_DATA: mlxbf_i2c_smbus_data_word_func(&request, &command, (u8 *)&data->word, read, pec); dev_dbg(&adap->dev, "smbus %s word data at 0x%02x, slave 0x%02x.\n", - read ? "read" : "write", command, addr); + str_read_write(read), command, addr); break; case I2C_SMBUS_I2C_BLOCK_DATA: @@ -2060,7 +2075,7 @@ static s32 mlxbf_i2c_smbus_xfer(struct i2c_adapter *adap, u16 addr, mlxbf_i2c_smbus_i2c_block_func(&request, &command, data->block, &byte_cnt, read, pec); dev_dbg(&adap->dev, "i2c %s block data, %d bytes at 0x%02x, slave 0x%02x.\n", - read ? "read" : "write", byte_cnt, command, addr); + str_read_write(read), byte_cnt, command, addr); break; case I2C_SMBUS_BLOCK_DATA: @@ -2068,7 +2083,7 @@ static s32 mlxbf_i2c_smbus_xfer(struct i2c_adapter *adap, u16 addr, mlxbf_i2c_smbus_block_func(&request, &command, data->block, &byte_cnt, read, pec); dev_dbg(&adap->dev, "smbus %s block data, %d bytes at 0x%02x, slave 0x%02x.\n", - read ? "read" : "write", byte_cnt, command, addr); + str_read_write(read), byte_cnt, command, addr); break; case I2C_FUNC_SMBUS_PROC_CALL: diff --git a/drivers/i2c/busses/i2c-npcm7xx.c b/drivers/i2c/busses/i2c-npcm7xx.c index de713b5747fe..892e2d2988a7 100644 --- a/drivers/i2c/busses/i2c-npcm7xx.c +++ b/drivers/i2c/busses/i2c-npcm7xx.c @@ -1115,14 +1115,10 @@ static void npcm_i2c_master_abort(struct npcm_i2c *bus) #if IS_ENABLED(CONFIG_I2C_SLAVE) static u8 npcm_i2c_get_slave_addr(struct npcm_i2c *bus, enum i2c_addr addr_type) { - u8 slave_add; - if (addr_type > I2C_SLAVE_ADDR2 && addr_type <= I2C_SLAVE_ADDR10) dev_err(bus->dev, "get slave: try to use more than 2 SA not supported\n"); - slave_add = ioread8(bus->reg + npcm_i2caddr[(int)addr_type]); - - return slave_add; + return ioread8(bus->reg + npcm_i2caddr[addr_type]); } static int npcm_i2c_remove_slave_addr(struct npcm_i2c *bus, u8 slave_add) @@ -2178,10 +2174,14 @@ static int npcm_i2c_init_module(struct npcm_i2c *bus, enum i2c_mode mode, /* Check HW is OK: SDA and SCL should be high at this point. 
*/ if ((npcm_i2c_get_SDA(&bus->adap) == 0) || (npcm_i2c_get_SCL(&bus->adap) == 0)) { - dev_err(bus->dev, "I2C%d init fail: lines are low\n", bus->num); - dev_err(bus->dev, "SDA=%d SCL=%d\n", npcm_i2c_get_SDA(&bus->adap), - npcm_i2c_get_SCL(&bus->adap)); - return -ENXIO; + dev_warn(bus->dev, " I2C%d SDA=%d SCL=%d, attempting to recover\n", bus->num, + npcm_i2c_get_SDA(&bus->adap), npcm_i2c_get_SCL(&bus->adap)); + if (npcm_i2c_recovery_tgclk(&bus->adap)) { + dev_err(bus->dev, "I2C%d init fail: SDA=%d SCL=%d\n", + bus->num, npcm_i2c_get_SDA(&bus->adap), + npcm_i2c_get_SCL(&bus->adap)); + return -ENXIO; + } } npcm_i2c_int_enable(bus, true); diff --git a/drivers/i2c/busses/i2c-octeon-core.c b/drivers/i2c/busses/i2c-octeon-core.c index baf6b27f3752..93a49e4637ec 100644 --- a/drivers/i2c/busses/i2c-octeon-core.c +++ b/drivers/i2c/busses/i2c-octeon-core.c @@ -135,6 +135,32 @@ static void octeon_i2c_hlc_disable(struct octeon_i2c *i2c) octeon_i2c_ctl_write(i2c, TWSI_CTL_ENAB); } +static void octeon_i2c_block_enable(struct octeon_i2c *i2c) +{ + u64 mode; + + if (i2c->block_enabled || !OCTEON_REG_BLOCK_CTL(i2c)) + return; + + i2c->block_enabled = true; + mode = __raw_readq(i2c->twsi_base + OCTEON_REG_MODE(i2c)); + mode |= TWSX_MODE_BLOCK_MODE; + octeon_i2c_writeq_flush(mode, i2c->twsi_base + OCTEON_REG_MODE(i2c)); +} + +static void octeon_i2c_block_disable(struct octeon_i2c *i2c) +{ + u64 mode; + + if (!i2c->block_enabled || !OCTEON_REG_BLOCK_CTL(i2c)) + return; + + i2c->block_enabled = false; + mode = __raw_readq(i2c->twsi_base + OCTEON_REG_MODE(i2c)); + mode &= ~TWSX_MODE_BLOCK_MODE; + octeon_i2c_writeq_flush(mode, i2c->twsi_base + OCTEON_REG_MODE(i2c)); +} + /** * octeon_i2c_hlc_wait - wait for an HLC operation to complete * @i2c: The struct octeon_i2c @@ -281,6 +307,7 @@ static int octeon_i2c_start(struct octeon_i2c *i2c) u8 stat; octeon_i2c_hlc_disable(i2c); + octeon_i2c_block_disable(i2c); octeon_i2c_ctl_write(i2c, TWSI_CTL_ENAB | TWSI_CTL_STA); ret = octeon_i2c_wait(i2c); @@ -605,6 +632,125 @@ err: } /** + * octeon_i2c_hlc_block_comp_read - high-level-controller composite block read + * @i2c: The struct octeon_i2c + * @msgs: msg[0] contains address, place read data into msg[1] + * + * i2c core command is constructed and written into the SW_TWSI register. + * The execution of the command will result in requested data being + * placed into a FIFO buffer, ready to be read. + * Used in the case where the i2c xfer is for greater than 8 bytes of read data. + * + * Returns: 0 on success, otherwise a negative errno. 
+ */ +static int octeon_i2c_hlc_block_comp_read(struct octeon_i2c *i2c, struct i2c_msg *msgs) +{ + int ret; + u16 len, i; + u64 cmd; + + octeon_i2c_hlc_enable(i2c); + octeon_i2c_block_enable(i2c); + + /* Write (size - 1) into block control register */ + len = msgs[1].len - 1; + octeon_i2c_writeq_flush((u64)len, i2c->twsi_base + OCTEON_REG_BLOCK_CTL(i2c)); + + /* Prepare core command */ + cmd = SW_TWSI_V | SW_TWSI_R | SW_TWSI_SOVR | SW_TWSI_OP_7_IA; + cmd |= (u64)(msgs[0].addr & 0x7full) << SW_TWSI_ADDR_SHIFT; + + /* Send core command */ + ret = octeon_i2c_hlc_read_cmd(i2c, msgs[0], cmd); + if (ret) + goto err; + + cmd = __raw_readq(i2c->twsi_base + OCTEON_REG_SW_TWSI(i2c)); + if ((cmd & SW_TWSI_R) == 0) { + octeon_i2c_block_disable(i2c); + return octeon_i2c_check_status(i2c, false); + } + + /* read data in FIFO */ + octeon_i2c_writeq_flush(TWSX_BLOCK_STS_RESET_PTR, + i2c->twsi_base + OCTEON_REG_BLOCK_STS(i2c)); + for (i = 0; i <= len; i += 8) { + /* Byte-swap FIFO data and copy into msg buffer */ + __be64 rd = cpu_to_be64(__raw_readq(i2c->twsi_base + OCTEON_REG_BLOCK_FIFO(i2c))); + + memcpy(&msgs[1].buf[i], &rd, min(8, msgs[1].len - i)); + } + +err: + octeon_i2c_block_disable(i2c); + return ret; +} + +/** + * octeon_i2c_hlc_block_comp_write - high-level-controller composite block write + * @i2c: The struct octeon_i2c + * @msgs: msg[0] contains address, msg[1] contains data to be written + * + * i2c core command is constructed and write data is written into the FIFO buffer. + * The execution of the command will result in HW write, using the data in FIFO. + * Used in the case where the i2c xfer is for greater than 8 bytes of write data. + * + * Returns: 0 on success, otherwise a negative errno. + */ +static int octeon_i2c_hlc_block_comp_write(struct octeon_i2c *i2c, struct i2c_msg *msgs) +{ + bool set_ext; + int ret; + u16 len, i; + u64 cmd, ext = 0; + + octeon_i2c_hlc_enable(i2c); + octeon_i2c_block_enable(i2c); + + /* Write (size - 1) into block control register */ + len = msgs[1].len - 1; + octeon_i2c_writeq_flush((u64)len, i2c->twsi_base + OCTEON_REG_BLOCK_CTL(i2c)); + + /* Prepare core command */ + cmd = SW_TWSI_V | SW_TWSI_SOVR | SW_TWSI_OP_7_IA; + cmd |= (u64)(msgs[0].addr & 0x7full) << SW_TWSI_ADDR_SHIFT; + + /* Set parameters for extended message (if required) */ + set_ext = octeon_i2c_hlc_ext(i2c, msgs[0], &cmd, &ext); + + /* Write msg into FIFO buffer */ + octeon_i2c_writeq_flush(TWSX_BLOCK_STS_RESET_PTR, + i2c->twsi_base + OCTEON_REG_BLOCK_STS(i2c)); + for (i = 0; i <= len; i += 8) { + __be64 buf = 0; + + /* Copy 8 bytes or remaining bytes from message buffer */ + memcpy(&buf, &msgs[1].buf[i], min(8, msgs[1].len - i)); + + /* Byte-swap message data and write into FIFO */ + buf = cpu_to_be64(buf); + octeon_i2c_writeq_flush((u64)buf, i2c->twsi_base + OCTEON_REG_BLOCK_FIFO(i2c)); + } + if (set_ext) + octeon_i2c_writeq_flush(ext, i2c->twsi_base + OCTEON_REG_SW_TWSI_EXT(i2c)); + + /* Send command to core (send data in FIFO) */ + ret = octeon_i2c_hlc_cmd_send(i2c, cmd); + if (ret) + goto err; + + cmd = __raw_readq(i2c->twsi_base + OCTEON_REG_SW_TWSI(i2c)); + if ((cmd & SW_TWSI_R) == 0) { + octeon_i2c_block_disable(i2c); + return octeon_i2c_check_status(i2c, false); + } + +err: + octeon_i2c_block_disable(i2c); + return ret; +} + +/** * octeon_i2c_xfer - The driver's xfer function * @adap: Pointer to the i2c_adapter structure * @msgs: Pointer to the messages to be processed @@ -630,13 +776,21 @@ int octeon_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) if 
((msgs[0].flags & I2C_M_RD) == 0 && (msgs[1].flags & I2C_M_RECV_LEN) == 0 && msgs[0].len > 0 && msgs[0].len <= 2 && - msgs[1].len > 0 && msgs[1].len <= 8 && + msgs[1].len > 0 && msgs[0].addr == msgs[1].addr) { - if (msgs[1].flags & I2C_M_RD) - ret = octeon_i2c_hlc_comp_read(i2c, msgs); - else - ret = octeon_i2c_hlc_comp_write(i2c, msgs); - goto out; + if (msgs[1].len <= 8) { + if (msgs[1].flags & I2C_M_RD) + ret = octeon_i2c_hlc_comp_read(i2c, msgs); + else + ret = octeon_i2c_hlc_comp_write(i2c, msgs); + goto out; + } else if (msgs[1].len <= 1024 && OCTEON_REG_BLOCK_CTL(i2c)) { + if (msgs[1].flags & I2C_M_RD) + ret = octeon_i2c_hlc_block_comp_read(i2c, msgs); + else + ret = octeon_i2c_hlc_block_comp_write(i2c, msgs); + goto out; + } } } } diff --git a/drivers/i2c/busses/i2c-octeon-core.h b/drivers/i2c/busses/i2c-octeon-core.h index b265e21189a1..32a44f2d6274 100644 --- a/drivers/i2c/busses/i2c-octeon-core.h +++ b/drivers/i2c/busses/i2c-octeon-core.h @@ -96,18 +96,28 @@ struct octeon_i2c_reg_offset { unsigned int twsi_int; unsigned int sw_twsi_ext; unsigned int mode; + unsigned int block_ctl; + unsigned int block_sts; + unsigned int block_fifo; }; #define OCTEON_REG_SW_TWSI(x) ((x)->roff.sw_twsi) #define OCTEON_REG_TWSI_INT(x) ((x)->roff.twsi_int) #define OCTEON_REG_SW_TWSI_EXT(x) ((x)->roff.sw_twsi_ext) #define OCTEON_REG_MODE(x) ((x)->roff.mode) +#define OCTEON_REG_BLOCK_CTL(x) ((x)->roff.block_ctl) +#define OCTEON_REG_BLOCK_STS(x) ((x)->roff.block_sts) +#define OCTEON_REG_BLOCK_FIFO(x) ((x)->roff.block_fifo) -/* Set REFCLK_SRC and HS_MODE in TWSX_MODE register */ +/* TWSX_MODE register */ #define TWSX_MODE_REFCLK_SRC BIT(4) +#define TWSX_MODE_BLOCK_MODE BIT(2) #define TWSX_MODE_HS_MODE BIT(0) #define TWSX_MODE_HS_MASK (TWSX_MODE_REFCLK_SRC | TWSX_MODE_HS_MODE) +/* TWSX_BLOCK_STS register */ +#define TWSX_BLOCK_STS_RESET_PTR BIT(0) + /* Set BUS_MON_RST to reset bus monitor */ #define BUS_MON_RST_MASK BIT(3) @@ -123,6 +133,7 @@ struct octeon_i2c { void __iomem *twsi_base; struct device *dev; bool hlc_enabled; + bool block_enabled; bool broken_irq_mode; bool broken_irq_check; void (*int_enable)(struct octeon_i2c *); diff --git a/drivers/i2c/busses/i2c-pasemi-core.c b/drivers/i2c/busses/i2c-pasemi-core.c index bd128ab2e2eb..f4eca44ed183 100644 --- a/drivers/i2c/busses/i2c-pasemi-core.c +++ b/drivers/i2c/busses/i2c-pasemi-core.c @@ -5,22 +5,24 @@ * SMBus host driver for PA Semi PWRficient */ -#include <linux/bitfield.h> +#include <linux/bits.h> +#include <linux/delay.h> +#include <linux/i2c.h> +#include <linux/io.h> +#include <linux/iopoll.h> +#include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> -#include <linux/kernel.h> -#include <linux/stddef.h> #include <linux/sched.h> -#include <linux/i2c.h> -#include <linux/delay.h> #include <linux/slab.h> -#include <linux/io.h> +#include <linux/stddef.h> #include "i2c-pasemi-core.h" /* Register offsets */ #define REG_MTXFIFO 0x00 #define REG_MRXFIFO 0x04 +#define REG_XFSTA 0x0c #define REG_SMSTA 0x14 #define REG_IMASK 0x18 #define REG_CTL 0x1c @@ -52,6 +54,12 @@ #define CTL_UJM BIT(8) #define CTL_CLK_M GENMASK(7, 0) +/* + * The hardware (supposedly) has a 25ms timeout for clock stretching, thus + * use 100ms here which should be plenty. 
+ */ +#define PASEMI_TRANSFER_TIMEOUT_MS 100 + static inline void reg_write(struct pasemi_smbus *smbus, int reg, int val) { dev_dbg(smbus->dev, "smbus write reg %x val %08x\n", reg, val); @@ -71,7 +79,7 @@ static inline int reg_read(struct pasemi_smbus *smbus, int reg) static void pasemi_reset(struct pasemi_smbus *smbus) { - u32 val = (CTL_MTR | CTL_MRR | (smbus->clk_div & CTL_CLK_M)); + u32 val = (CTL_MTR | CTL_MRR | CTL_UJM | (smbus->clk_div & CTL_CLK_M)); if (smbus->hw_rev >= 6) val |= CTL_EN; @@ -80,43 +88,102 @@ static void pasemi_reset(struct pasemi_smbus *smbus) reinit_completion(&smbus->irq_completion); } -static void pasemi_smb_clear(struct pasemi_smbus *smbus) +static int pasemi_smb_clear(struct pasemi_smbus *smbus) { unsigned int status; + int ret; + + /* First wait for the bus to go idle */ + ret = readx_poll_timeout(ioread32, smbus->ioaddr + REG_SMSTA, + status, !(status & (SMSTA_XIP | SMSTA_JAM)), + USEC_PER_MSEC, + USEC_PER_MSEC * PASEMI_TRANSFER_TIMEOUT_MS); + + if (ret < 0) { + dev_err(smbus->dev, "Bus is still stuck (status 0x%08x xfstatus 0x%08x)\n", + status, reg_read(smbus, REG_XFSTA)); + return -EIO; + } + + /* If any badness happened or there is data in the FIFOs, reset the FIFOs */ + if ((status & (SMSTA_MRNE | SMSTA_JMD | SMSTA_MTO | SMSTA_TOM | SMSTA_MTN | SMSTA_MTA)) || + !(status & SMSTA_MTE)) { + dev_warn(smbus->dev, "Issuing reset due to status 0x%08x (xfstatus 0x%08x)\n", + status, reg_read(smbus, REG_XFSTA)); + pasemi_reset(smbus); + } - status = reg_read(smbus, REG_SMSTA); + /* Clear the flags */ reg_write(smbus, REG_SMSTA, status); + + return 0; } static int pasemi_smb_waitready(struct pasemi_smbus *smbus) { - int timeout = 100; unsigned int status; if (smbus->use_irq) { reinit_completion(&smbus->irq_completion); reg_write(smbus, REG_IMASK, SMSTA_XEN | SMSTA_MTN); - wait_for_completion_timeout(&smbus->irq_completion, msecs_to_jiffies(100)); + int ret = wait_for_completion_timeout( + &smbus->irq_completion, + msecs_to_jiffies(PASEMI_TRANSFER_TIMEOUT_MS)); reg_write(smbus, REG_IMASK, 0); status = reg_read(smbus, REG_SMSTA); + + if (ret < 0) { + dev_err(smbus->dev, + "Completion wait failed with %d, status 0x%08x\n", + ret, status); + return ret; + } else if (ret == 0) { + dev_err(smbus->dev, "Timeout, status 0x%08x\n", status); + return -ETIME; + } } else { - status = reg_read(smbus, REG_SMSTA); - while (!(status & SMSTA_XEN) && timeout--) { - msleep(1); - status = reg_read(smbus, REG_SMSTA); + int ret = readx_poll_timeout( + ioread32, smbus->ioaddr + REG_SMSTA, + status, status & SMSTA_XEN, + USEC_PER_MSEC, + USEC_PER_MSEC * PASEMI_TRANSFER_TIMEOUT_MS); + + if (ret < 0) { + dev_err(smbus->dev, "Timeout, status 0x%08x\n", status); + return -ETIME; } } - /* Got NACK? */ - if (status & SMSTA_MTN) - return -ENXIO; + /* Controller timeout? */ + if (status & SMSTA_TOM) { + dev_err(smbus->dev, "Controller timeout, status 0x%08x\n", status); + return -EIO; + } - if (timeout < 0) { - dev_warn(smbus->dev, "Timeout, status 0x%08x\n", status); - reg_write(smbus, REG_SMSTA, status); + /* Peripheral timeout? */ + if (status & SMSTA_MTO) { + dev_err(smbus->dev, "Peripheral timeout, status 0x%08x\n", status); return -ETIME; } + /* Still stuck in a transaction? */ + if (status & SMSTA_XIP) { + dev_err(smbus->dev, "Bus stuck, status 0x%08x\n", status); + return -EIO; + } + + /* Arbitration loss? */ + if (status & SMSTA_MTA) { + dev_err(smbus->dev, "Arbitration loss, status 0x%08x\n", status); + return -EBUSY; + } + + /* Got NACK? 
*/ + if (status & SMSTA_MTN) { + dev_err(smbus->dev, "NACK, status 0x%08x\n", status); + return -ENXIO; + } + /* Clear XEN */ reg_write(smbus, REG_SMSTA, SMSTA_XEN); @@ -177,9 +244,9 @@ static int pasemi_i2c_xfer(struct i2c_adapter *adapter, struct pasemi_smbus *smbus = adapter->algo_data; int ret, i; - pasemi_smb_clear(smbus); - - ret = 0; + ret = pasemi_smb_clear(smbus); + if (ret) + return ret; for (i = 0; i < num && !ret; i++) ret = pasemi_i2c_xfer_msg(adapter, &msgs[i], (i == (num - 1))); @@ -200,7 +267,9 @@ static int pasemi_smb_xfer(struct i2c_adapter *adapter, addr <<= 1; read_flag = read_write == I2C_SMBUS_READ; - pasemi_smb_clear(smbus); + err = pasemi_smb_clear(smbus); + if (err) + return err; switch (size) { case I2C_SMBUS_QUICK: diff --git a/drivers/i2c/busses/i2c-pasemi-pci.c b/drivers/i2c/busses/i2c-pasemi-pci.c index 77f90c7436ed..b9ccb54ec77e 100644 --- a/drivers/i2c/busses/i2c-pasemi-pci.c +++ b/drivers/i2c/busses/i2c-pasemi-pci.c @@ -5,15 +5,15 @@ * SMBus host driver for PA Semi PWRficient */ +#include <linux/delay.h> +#include <linux/i2c.h> +#include <linux/io.h> +#include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> -#include <linux/kernel.h> -#include <linux/stddef.h> #include <linux/sched.h> -#include <linux/i2c.h> -#include <linux/delay.h> #include <linux/slab.h> -#include <linux/io.h> +#include <linux/stddef.h> #include "i2c-pasemi-core.h" diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c index 59ecaa990bce..9d3a4dc2bd60 100644 --- a/drivers/i2c/busses/i2c-piix4.c +++ b/drivers/i2c/busses/i2c-piix4.c @@ -971,7 +971,7 @@ static int piix4_add_adapter(struct pci_dev *dev, unsigned short smba, * This would allow the ee1004 to be probed incorrectly. */ if (port == 0) - i2c_register_spd(adap); + i2c_register_spd_write_enable(adap); *padap = adap; return 0; diff --git a/drivers/i2c/busses/i2c-powermac.c b/drivers/i2c/busses/i2c-powermac.c index 9a867c817db0..f99a2cc721a8 100644 --- a/drivers/i2c/busses/i2c-powermac.c +++ b/drivers/i2c/busses/i2c-powermac.c @@ -349,7 +349,7 @@ static void i2c_powermac_register_devices(struct i2c_adapter *adap, /* Fill out the rest of the info structure */ info.addr = addr; info.irq = irq_of_parse_and_map(node, 0); - info.of_node = of_node_get(node); + info.fwnode = of_fwnode_handle(of_node_get(node)); newdev = i2c_new_client_device(adap, &info); if (IS_ERR(newdev)) { diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c index 515a784c951c..ccea575fb783 100644 --- a/drivers/i2c/busses/i2c-qcom-geni.c +++ b/drivers/i2c/busses/i2c-qcom-geni.c @@ -71,7 +71,6 @@ enum geni_i2c_err_code { << 5) #define I2C_AUTO_SUSPEND_DELAY 250 -#define KHZ(freq) (1000 * freq) #define PACKING_BYTES_PW 4 #define ABORT_TIMEOUT HZ @@ -148,18 +147,18 @@ struct geni_i2c_clk_fld { * source_clock = 19.2 MHz */ static const struct geni_i2c_clk_fld geni_i2c_clk_map_19p2mhz[] = { - {KHZ(100), 7, 10, 12, 26}, - {KHZ(400), 2, 5, 11, 22}, - {KHZ(1000), 1, 2, 8, 18}, - {}, + { I2C_MAX_STANDARD_MODE_FREQ, 7, 10, 12, 26 }, + { I2C_MAX_FAST_MODE_FREQ, 2, 5, 11, 22 }, + { I2C_MAX_FAST_MODE_PLUS_FREQ, 1, 2, 8, 18 }, + {} }; /* source_clock = 32 MHz */ static const struct geni_i2c_clk_fld geni_i2c_clk_map_32mhz[] = { - {KHZ(100), 8, 14, 18, 40}, - {KHZ(400), 4, 3, 11, 20}, - {KHZ(1000), 2, 3, 6, 15}, - {}, + { I2C_MAX_STANDARD_MODE_FREQ, 8, 14, 18, 40 }, + { I2C_MAX_FAST_MODE_FREQ, 4, 3, 11, 20 }, + { I2C_MAX_FAST_MODE_PLUS_FREQ, 2, 3, 6, 15 }, + {} }; static int geni_i2c_clk_map_idx(struct geni_i2c_dev 
*gi2c) @@ -812,7 +811,7 @@ static int geni_i2c_probe(struct platform_device *pdev) &gi2c->clk_freq_out); if (ret) { dev_info(dev, "Bus frequency not specified, default to 100kHz.\n"); - gi2c->clk_freq_out = KHZ(100); + gi2c->clk_freq_out = I2C_MAX_STANDARD_MODE_FREQ; } if (has_acpi_companion(dev)) diff --git a/drivers/i2c/busses/i2c-riic.c b/drivers/i2c/busses/i2c-riic.c index d7dddd6c296a..23375f7fe3ad 100644 --- a/drivers/i2c/busses/i2c-riic.c +++ b/drivers/i2c/busses/i2c-riic.c @@ -52,6 +52,8 @@ #define ICCR1_ICE BIT(7) #define ICCR1_IICRST BIT(6) #define ICCR1_SOWP BIT(4) +#define ICCR1_SCLO BIT(3) +#define ICCR1_SDAO BIT(2) #define ICCR1_SCLI BIT(1) #define ICCR1_SDAI BIT(0) @@ -151,11 +153,11 @@ static int riic_bus_barrier(struct riic_dev *riic) ret = readb_poll_timeout(riic->base + riic->info->regs[RIIC_ICCR2], val, !(val & ICCR2_BBSY), 10, riic->adapter.timeout); if (ret) - return ret; + return i2c_recover_bus(&riic->adapter); if ((riic_readb(riic, RIIC_ICCR1) & (ICCR1_SDAI | ICCR1_SCLI)) != (ICCR1_SDAI | ICCR1_SCLI)) - return -EBUSY; + return i2c_recover_bus(&riic->adapter); return 0; } @@ -439,6 +441,52 @@ static int riic_init_hw(struct riic_dev *riic) return 0; } +static int riic_get_scl(struct i2c_adapter *adap) +{ + struct riic_dev *riic = i2c_get_adapdata(adap); + + return !!(riic_readb(riic, RIIC_ICCR1) & ICCR1_SCLI); +} + +static int riic_get_sda(struct i2c_adapter *adap) +{ + struct riic_dev *riic = i2c_get_adapdata(adap); + + return !!(riic_readb(riic, RIIC_ICCR1) & ICCR1_SDAI); +} + +static void riic_set_scl(struct i2c_adapter *adap, int val) +{ + struct riic_dev *riic = i2c_get_adapdata(adap); + + if (val) + riic_clear_set_bit(riic, ICCR1_SOWP, ICCR1_SCLO, RIIC_ICCR1); + else + riic_clear_set_bit(riic, ICCR1_SOWP | ICCR1_SCLO, 0, RIIC_ICCR1); + + riic_clear_set_bit(riic, 0, ICCR1_SOWP, RIIC_ICCR1); +} + +static void riic_set_sda(struct i2c_adapter *adap, int val) +{ + struct riic_dev *riic = i2c_get_adapdata(adap); + + if (val) + riic_clear_set_bit(riic, ICCR1_SOWP, ICCR1_SDAO, RIIC_ICCR1); + else + riic_clear_set_bit(riic, ICCR1_SOWP | ICCR1_SDAO, 0, RIIC_ICCR1); + + riic_clear_set_bit(riic, 0, ICCR1_SOWP, RIIC_ICCR1); +} + +static struct i2c_bus_recovery_info riic_bri = { + .recover_bus = i2c_generic_scl_recovery, + .get_scl = riic_get_scl, + .set_scl = riic_set_scl, + .get_sda = riic_get_sda, + .set_sda = riic_set_sda, +}; + static const struct riic_irq_desc riic_irqs[] = { { .res_num = 0, .isr = riic_tend_isr, .name = "riic-tend" }, { .res_num = 1, .isr = riic_rdrf_isr, .name = "riic-rdrf" }, @@ -495,6 +543,7 @@ static int riic_i2c_probe(struct platform_device *pdev) adap->algo = &riic_algo; adap->dev.parent = dev; adap->dev.of_node = dev->of_node; + adap->bus_recovery_info = &riic_bri; init_completion(&riic->msg_done); diff --git a/drivers/i2c/busses/i2c-rzv2m.c b/drivers/i2c/busses/i2c-rzv2m.c index 53762cc56d28..b0e9c0b62429 100644 --- a/drivers/i2c/busses/i2c-rzv2m.c +++ b/drivers/i2c/busses/i2c-rzv2m.c @@ -402,7 +402,7 @@ static const struct i2c_adapter_quirks rzv2m_i2c_quirks = { .flags = I2C_AQ_NO_ZERO_LEN, }; -static struct i2c_algorithm rzv2m_i2c_algo = { +static const struct i2c_algorithm rzv2m_i2c_algo = { .xfer = rzv2m_i2c_xfer, .functionality = rzv2m_i2c_func, }; diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c index efe29621b8d7..adfcee6c9fdc 100644 --- a/drivers/i2c/busses/i2c-sh_mobile.c +++ b/drivers/i2c/busses/i2c-sh_mobile.c @@ -24,6 +24,7 @@ #include <linux/platform_device.h> #include <linux/pm_runtime.h> 
#include <linux/slab.h> +#include <linux/string_choices.h> /* Transmit operation: */ /* */ @@ -409,7 +410,7 @@ static irqreturn_t sh_mobile_i2c_isr(int irq, void *dev_id) pd->sr |= sr; /* remember state */ dev_dbg(pd->dev, "i2c_isr 0x%02x 0x%02x %s %d %d!\n", sr, pd->sr, - (pd->msg->flags & I2C_M_RD) ? "read" : "write", + str_read_write(pd->msg->flags & I2C_M_RD), pd->pos, pd->msg->len); /* Kick off TxDMA after preface was done */ diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c index 87976e99e6d0..049b4d154c23 100644 --- a/drivers/i2c/busses/i2c-tegra.c +++ b/drivers/i2c/busses/i2c-tegra.c @@ -1395,6 +1395,11 @@ static int tegra_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], ret = tegra_i2c_xfer_msg(i2c_dev, &msgs[i], MSG_END_CONTINUE); if (ret) break; + + /* Validate message length before proceeding */ + if (msgs[i].buf[0] == 0 || msgs[i].buf[0] > I2C_SMBUS_BLOCK_MAX) + break; + /* Set the msg length from first byte */ msgs[i].len += msgs[i].buf[0]; dev_dbg(i2c_dev->dev, "reading %d bytes\n", msgs[i].len); diff --git a/drivers/i2c/busses/i2c-thunderx-pcidrv.c b/drivers/i2c/busses/i2c-thunderx-pcidrv.c index 143d012fa43e..3959f23fc440 100644 --- a/drivers/i2c/busses/i2c-thunderx-pcidrv.c +++ b/drivers/i2c/busses/i2c-thunderx-pcidrv.c @@ -168,6 +168,9 @@ static int thunder_i2c_probe_pci(struct pci_dev *pdev, i2c->roff.twsi_int = 0x1010; i2c->roff.sw_twsi_ext = 0x1018; i2c->roff.mode = 0x1038; + i2c->roff.block_ctl = 0x1048; + i2c->roff.block_sts = 0x1050; + i2c->roff.block_fifo = 0x1058; i2c->dev = dev; pci_set_drvdata(pdev, i2c); @@ -175,7 +178,7 @@ static int thunder_i2c_probe_pci(struct pci_dev *pdev, if (ret) return ret; - ret = pci_request_regions(pdev, DRV_NAME); + ret = pcim_request_all_regions(pdev, DRV_NAME); if (ret) return ret; diff --git a/drivers/i2c/busses/i2c-tiny-usb.c b/drivers/i2c/busses/i2c-tiny-usb.c index 0f2ed181b266..a18eab0992a1 100644 --- a/drivers/i2c/busses/i2c-tiny-usb.c +++ b/drivers/i2c/busses/i2c-tiny-usb.c @@ -10,6 +10,7 @@ #include <linux/errno.h> #include <linux/module.h> #include <linux/slab.h> +#include <linux/string_choices.h> #include <linux/types.h> /* include interfaces to usb layer */ @@ -71,7 +72,7 @@ static int usb_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num) dev_dbg(&adapter->dev, " %d: %s (flags %d) %d bytes to 0x%02x\n", - i, pmsg->flags & I2C_M_RD ? 
"read" : "write", + i, str_read_write(pmsg->flags & I2C_M_RD), pmsg->flags, pmsg->len, pmsg->addr); /* and directly send the message */ diff --git a/drivers/i2c/busses/i2c-uniphier-f.c b/drivers/i2c/busses/i2c-uniphier-f.c index d877f5a1f579..ca0358e8f928 100644 --- a/drivers/i2c/busses/i2c-uniphier-f.c +++ b/drivers/i2c/busses/i2c-uniphier-f.c @@ -532,22 +532,16 @@ static int uniphier_fi2c_probe(struct platform_device *pdev) if (of_property_read_u32(dev->of_node, "clock-frequency", &bus_speed)) bus_speed = I2C_MAX_STANDARD_MODE_FREQ; - if (!bus_speed || bus_speed > I2C_MAX_FAST_MODE_FREQ) { - dev_err(dev, "invalid clock-frequency %d\n", bus_speed); - return -EINVAL; - } + if (!bus_speed || bus_speed > I2C_MAX_FAST_MODE_FREQ) + return dev_err_probe(dev, -EINVAL, "invalid clock-frequency %d\n", bus_speed); priv->clk = devm_clk_get_enabled(dev, NULL); - if (IS_ERR(priv->clk)) { - dev_err(dev, "failed to enable clock\n"); - return PTR_ERR(priv->clk); - } + if (IS_ERR(priv->clk)) + return dev_err_probe(dev, PTR_ERR(priv->clk), "failed to enable clock\n"); clk_rate = clk_get_rate(priv->clk); - if (!clk_rate) { - dev_err(dev, "input clock rate should not be zero\n"); - return -EINVAL; - } + if (!clk_rate) + return dev_err_probe(dev, -EINVAL, "input clock rate should not be zero\n"); priv->clk_cycle = clk_rate / bus_speed; init_completion(&priv->comp); @@ -565,10 +559,8 @@ static int uniphier_fi2c_probe(struct platform_device *pdev) ret = devm_request_irq(dev, irq, uniphier_fi2c_interrupt, 0, pdev->name, priv); - if (ret) { - dev_err(dev, "failed to request irq %d\n", irq); - return ret; - } + if (ret) + return dev_err_probe(dev, ret, "failed to request irq %d\n", irq); return i2c_add_adapter(&priv->adap); } diff --git a/drivers/i2c/busses/i2c-uniphier.c b/drivers/i2c/busses/i2c-uniphier.c index b95d50d4d7db..9d49a3d5d612 100644 --- a/drivers/i2c/busses/i2c-uniphier.c +++ b/drivers/i2c/busses/i2c-uniphier.c @@ -327,22 +327,16 @@ static int uniphier_i2c_probe(struct platform_device *pdev) if (of_property_read_u32(dev->of_node, "clock-frequency", &bus_speed)) bus_speed = I2C_MAX_STANDARD_MODE_FREQ; - if (!bus_speed || bus_speed > I2C_MAX_FAST_MODE_FREQ) { - dev_err(dev, "invalid clock-frequency %d\n", bus_speed); - return -EINVAL; - } + if (!bus_speed || bus_speed > I2C_MAX_FAST_MODE_FREQ) + return dev_err_probe(dev, -EINVAL, "invalid clock-frequency %d\n", bus_speed); priv->clk = devm_clk_get_enabled(dev, NULL); - if (IS_ERR(priv->clk)) { - dev_err(dev, "failed to enable clock\n"); - return PTR_ERR(priv->clk); - } + if (IS_ERR(priv->clk)) + return dev_err_probe(dev, PTR_ERR(priv->clk), "failed to enable clock\n"); clk_rate = clk_get_rate(priv->clk); - if (!clk_rate) { - dev_err(dev, "input clock rate should not be zero\n"); - return -EINVAL; - } + if (!clk_rate) + return dev_err_probe(dev, -EINVAL, "input clock rate should not be zero\n"); priv->clk_cycle = clk_rate / bus_speed; init_completion(&priv->comp); @@ -359,10 +353,8 @@ static int uniphier_i2c_probe(struct platform_device *pdev) ret = devm_request_irq(dev, irq, uniphier_i2c_interrupt, 0, pdev->name, priv); - if (ret) { - dev_err(dev, "failed to request irq %d\n", irq); - return ret; - } + if (ret) + return dev_err_probe(dev, ret, "failed to request irq %d\n", irq); return i2c_add_adapter(&priv->adap); } diff --git a/drivers/i2c/busses/i2c-via.c b/drivers/i2c/busses/i2c-via.c index 7ed29992a97f..2c26a57883f2 100644 --- a/drivers/i2c/busses/i2c-via.c +++ b/drivers/i2c/busses/i2c-via.c @@ -89,10 +89,9 @@ static int vt586b_probe(struct 
pci_dev *dev, const struct pci_device_id *id) u8 rev; int res; - if (pm_io_base) { - dev_err(&dev->dev, "i2c-via: Will only support one host\n"); - return -ENODEV; - } + if (pm_io_base) + return dev_err_probe(&dev->dev, -ENODEV, + "Will only support one host\n"); pci_read_config_byte(dev, PM_CFG_REVID, &rev); @@ -113,10 +112,10 @@ static int vt586b_probe(struct pci_dev *dev, const struct pci_device_id *id) pci_read_config_word(dev, base, &pm_io_base); pm_io_base &= (0xff << 8); - if (!request_region(I2C_DIR, IOSPACE, vt586b_driver.name)) { - dev_err(&dev->dev, "IO 0x%x-0x%x already in use\n", I2C_DIR, I2C_DIR + IOSPACE); - return -ENODEV; - } + if (!request_region(I2C_DIR, IOSPACE, vt586b_driver.name)) + return dev_err_probe(&dev->dev, -ENODEV, + "IO 0x%x-0x%x already in use\n", + I2C_DIR, I2C_DIR + IOSPACE); outb(inb(I2C_DIR) & ~(I2C_SDA | I2C_SCL), I2C_DIR); outb(inb(I2C_OUT) & ~(I2C_SDA | I2C_SCL), I2C_OUT); diff --git a/drivers/i2c/busses/i2c-viai2c-wmt.c b/drivers/i2c/busses/i2c-viai2c-wmt.c index 4eb740faf268..2cf3cc0165fb 100644 --- a/drivers/i2c/busses/i2c-viai2c-wmt.c +++ b/drivers/i2c/busses/i2c-viai2c-wmt.c @@ -44,16 +44,13 @@ static int wmt_i2c_reset_hardware(struct viai2c *i2c) int err; err = clk_prepare_enable(i2c->clk); - if (err) { - dev_err(i2c->dev, "failed to enable clock\n"); - return err; - } + if (err) + return dev_err_probe(i2c->dev, err, "failed to enable clock\n"); err = clk_set_rate(i2c->clk, 20000000); if (err) { - dev_err(i2c->dev, "failed to set clock = 20Mhz\n"); clk_disable_unprepare(i2c->clk); - return err; + return dev_err_probe(i2c->dev, err, "failed to set clock = 20Mhz\n"); } writew(0, i2c->base + VIAI2C_REG_CR); @@ -121,10 +118,9 @@ static int wmt_i2c_probe(struct platform_device *pdev) "failed to request irq %i\n", i2c->irq); i2c->clk = of_clk_get(np, 0); - if (IS_ERR(i2c->clk)) { - dev_err(&pdev->dev, "unable to request clock\n"); - return PTR_ERR(i2c->clk); - } + if (IS_ERR(i2c->clk)) + return dev_err_probe(&pdev->dev, PTR_ERR(i2c->clk), + "unable to request clock\n"); err = of_property_read_u32(np, "clock-frequency", &clk_rate); if (!err && clk_rate == I2C_MAX_FAST_MODE_FREQ) @@ -139,10 +135,8 @@ static int wmt_i2c_probe(struct platform_device *pdev) adap->dev.of_node = pdev->dev.of_node; err = wmt_i2c_reset_hardware(i2c); - if (err) { - dev_err(&pdev->dev, "error initializing hardware\n"); + if (err) return err; - } err = i2c_add_adapter(adap); if (err) diff --git a/drivers/i2c/busses/i2c-viapro.c b/drivers/i2c/busses/i2c-viapro.c index 2cc7bba3b8bf..c58843609107 100644 --- a/drivers/i2c/busses/i2c-viapro.c +++ b/drivers/i2c/busses/i2c-viapro.c @@ -330,30 +330,27 @@ static int vt596_probe(struct pci_dev *pdev, SMBHSTCFG = 0x84; } else { /* no matches at all */ - dev_err(&pdev->dev, "Cannot configure " - "SMBus I/O Base address\n"); - return -ENODEV; + return dev_err_probe(&pdev->dev, -ENODEV, + "Cannot configure " + "SMBus I/O Base address\n"); } } vt596_smba &= 0xfff0; - if (vt596_smba == 0) { - dev_err(&pdev->dev, "SMBus base address " - "uninitialized - upgrade BIOS or use " - "force_addr=0xaddr\n"); - return -ENODEV; - } + if (vt596_smba == 0) + return dev_err_probe(&pdev->dev, -ENODEV, "SMBus base address " + "uninitialized - upgrade BIOS or use " + "force_addr=0xaddr\n"); found: error = acpi_check_region(vt596_smba, 8, vt596_driver.name); if (error) return -ENODEV; - if (!request_region(vt596_smba, 8, vt596_driver.name)) { - dev_err(&pdev->dev, "SMBus region 0x%x already in use!\n", - vt596_smba); - return -ENODEV; - } + if 
(!request_region(vt596_smba, 8, vt596_driver.name)) + return dev_err_probe(&pdev->dev, -ENODEV, + "SMBus region 0x%x already in use!\n", + vt596_smba); pci_read_config_byte(pdev, SMBHSTCFG, &temp); /* If force_addr is set, we program the new address here. Just to make @@ -375,10 +372,10 @@ found: pci_write_config_byte(pdev, SMBHSTCFG, temp | 0x01); dev_info(&pdev->dev, "Enabling SMBus device\n"); } else { - dev_err(&pdev->dev, "SMBUS: Error: Host SMBus " - "controller not enabled! - upgrade BIOS or " - "use force=1\n"); - error = -ENODEV; + error = dev_err_probe(&pdev->dev, -ENODEV, + "SMBUS: Error: Host SMBus " + "controller not enabled! - " + "upgrade BIOS or use force=1\n"); goto release_region; } } diff --git a/drivers/i2c/busses/i2c-viperboard.c b/drivers/i2c/busses/i2c-viperboard.c index 503e2f4d6f84..1bd602852e35 100644 --- a/drivers/i2c/busses/i2c-viperboard.c +++ b/drivers/i2c/busses/i2c-viperboard.c @@ -11,6 +11,7 @@ #include <linux/errno.h> #include <linux/module.h> #include <linux/slab.h> +#include <linux/string_choices.h> #include <linux/types.h> #include <linux/mutex.h> #include <linux/platform_device.h> @@ -278,7 +279,7 @@ static int vprbrd_i2c_xfer(struct i2c_adapter *i2c, struct i2c_msg *msgs, dev_dbg(&i2c->dev, " %d: %s (flags %d) %d bytes to 0x%02x\n", - i, pmsg->flags & I2C_M_RD ? "read" : "write", + i, str_read_write(pmsg->flags & I2C_M_RD), pmsg->flags, pmsg->len, pmsg->addr); mutex_lock(&vb->lock); @@ -384,15 +385,13 @@ static int vprbrd_i2c_probe(struct platform_device *pdev) VPRBRD_USB_REQUEST_I2C_FREQ, VPRBRD_USB_TYPE_OUT, 0x0000, 0x0000, &vb_i2c->bus_freq_param, 1, VPRBRD_USB_TIMEOUT_MS); - if (ret != 1) { - dev_err(&pdev->dev, "failure setting i2c_bus_freq to %d\n", - i2c_bus_freq); - return -EIO; - } + if (ret != 1) + return dev_err_probe(&pdev->dev, -EIO, + "failure setting i2c_bus_freq to %d\n", + i2c_bus_freq); } else { - dev_err(&pdev->dev, - "invalid i2c_bus_freq setting:%d\n", i2c_bus_freq); - return -EIO; + return dev_err_probe(&pdev->dev, -EIO, + "invalid i2c_bus_freq setting:%d\n", i2c_bus_freq); } vb_i2c->i2c.dev.parent = &pdev->dev; diff --git a/drivers/i2c/busses/i2c-virtio.c b/drivers/i2c/busses/i2c-virtio.c index 2a351f961b89..9b05ff53d3d7 100644 --- a/drivers/i2c/busses/i2c-virtio.c +++ b/drivers/i2c/busses/i2c-virtio.c @@ -192,10 +192,9 @@ static int virtio_i2c_probe(struct virtio_device *vdev) struct virtio_i2c *vi; int ret; - if (!virtio_has_feature(vdev, VIRTIO_I2C_F_ZERO_LENGTH_REQUEST)) { - dev_err(&vdev->dev, "Zero-length request feature is mandatory\n"); - return -EINVAL; - } + if (!virtio_has_feature(vdev, VIRTIO_I2C_F_ZERO_LENGTH_REQUEST)) + return dev_err_probe(&vdev->dev, -EINVAL, + "Zero-length request feature is mandatory\n"); vi = devm_kzalloc(&vdev->dev, sizeof(*vi), GFP_KERNEL); if (!vi) diff --git a/drivers/i2c/busses/i2c-xgene-slimpro.c b/drivers/i2c/busses/i2c-xgene-slimpro.c index 663fe5604dd6..b29dec66b2c3 100644 --- a/drivers/i2c/busses/i2c-xgene-slimpro.c +++ b/drivers/i2c/busses/i2c-xgene-slimpro.c @@ -101,8 +101,6 @@ struct slimpro_i2c_dev { struct completion rd_complete; u8 dma_buffer[I2C_SMBUS_BLOCK_MAX + 1]; /* dma_buffer[0] is used for length */ u32 *resp_msg; - phys_addr_t comm_base_addr; - void *pcc_comm_addr; }; #define to_slimpro_i2c_dev(cl) \ @@ -148,7 +146,8 @@ static void slimpro_i2c_rx_cb(struct mbox_client *cl, void *mssg) static void slimpro_i2c_pcc_rx_cb(struct mbox_client *cl, void *msg) { struct slimpro_i2c_dev *ctx = to_slimpro_i2c_dev(cl); - struct acpi_pcct_shared_memory *generic_comm_base = 
ctx->pcc_comm_addr; + struct acpi_pcct_shared_memory __iomem *generic_comm_base = + ctx->pcc_chan->shmem; /* Check if platform sends interrupt */ if (!xgene_word_tst_and_clr(&generic_comm_base->status, @@ -169,7 +168,8 @@ static void slimpro_i2c_pcc_rx_cb(struct mbox_client *cl, void *msg) static void slimpro_i2c_pcc_tx_prepare(struct slimpro_i2c_dev *ctx, u32 *msg) { - struct acpi_pcct_shared_memory *generic_comm_base = ctx->pcc_comm_addr; + struct acpi_pcct_shared_memory __iomem *generic_comm_base = + ctx->pcc_chan->shmem; u32 *ptr = (void *)(generic_comm_base + 1); u16 status; int i; @@ -457,22 +457,18 @@ static int xgene_slimpro_i2c_probe(struct platform_device *pdev) cl->tx_block = true; cl->rx_callback = slimpro_i2c_rx_cb; ctx->mbox_chan = mbox_request_channel(cl, MAILBOX_I2C_INDEX); - if (IS_ERR(ctx->mbox_chan)) { - dev_err(&pdev->dev, "i2c mailbox channel request failed\n"); - return PTR_ERR(ctx->mbox_chan); - } + if (IS_ERR(ctx->mbox_chan)) + return dev_err_probe(&pdev->dev, PTR_ERR(ctx->mbox_chan), + "i2c mailbox channel request failed\n"); } else { struct pcc_mbox_chan *pcc_chan; const struct acpi_device_id *acpi_id; - int version = XGENE_SLIMPRO_I2C_V1; acpi_id = acpi_match_device(pdev->dev.driver->acpi_match_table, &pdev->dev); if (!acpi_id) return -EINVAL; - version = (int)acpi_id->driver_data; - if (device_property_read_u32(&pdev->dev, "pcc-channel", &ctx->mbox_idx)) ctx->mbox_idx = MAILBOX_I2C_INDEX; @@ -480,48 +476,19 @@ static int xgene_slimpro_i2c_probe(struct platform_device *pdev) cl->tx_block = false; cl->rx_callback = slimpro_i2c_pcc_rx_cb; pcc_chan = pcc_mbox_request_channel(cl, ctx->mbox_idx); - if (IS_ERR(pcc_chan)) { - dev_err(&pdev->dev, "PCC mailbox channel request failed\n"); - return PTR_ERR(pcc_chan); - } + if (IS_ERR(pcc_chan)) + return dev_err_probe(&pdev->dev, PTR_ERR(pcc_chan), + "PCC mailbox channel request failed\n"); ctx->pcc_chan = pcc_chan; ctx->mbox_chan = pcc_chan->mchan; if (!ctx->mbox_chan->mbox->txdone_irq) { - dev_err(&pdev->dev, "PCC IRQ not supported\n"); - rc = -ENOENT; + rc = dev_err_probe(&pdev->dev, -ENOENT, + "PCC IRQ not supported\n"); goto mbox_err; } - /* - * This is the shared communication region - * for the OS and Platform to communicate over. 
- */ - ctx->comm_base_addr = pcc_chan->shmem_base_addr; - if (ctx->comm_base_addr) { - if (version == XGENE_SLIMPRO_I2C_V2) - ctx->pcc_comm_addr = memremap( - ctx->comm_base_addr, - pcc_chan->shmem_size, - MEMREMAP_WT); - else - ctx->pcc_comm_addr = memremap( - ctx->comm_base_addr, - pcc_chan->shmem_size, - MEMREMAP_WB); - } else { - dev_err(&pdev->dev, "Failed to get PCC comm region\n"); - rc = -ENOENT; - goto mbox_err; - } - - if (!ctx->pcc_comm_addr) { - dev_err(&pdev->dev, - "Failed to ioremap PCC comm region\n"); - rc = -ENOMEM; - goto mbox_err; - } } rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); if (rc) diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c index dc1e46d834dc..6bc1575cea6c 100644 --- a/drivers/i2c/busses/i2c-xiic.c +++ b/drivers/i2c/busses/i2c-xiic.c @@ -1489,7 +1489,7 @@ static int xiic_i2c_probe(struct platform_device *pdev) pdev->name, i2c); if (ret < 0) { - dev_err(&pdev->dev, "Cannot claim IRQ\n"); + dev_err_probe(&pdev->dev, ret, "Cannot claim IRQ\n"); goto err_pm_disable; } @@ -1510,7 +1510,7 @@ static int xiic_i2c_probe(struct platform_device *pdev) ret = xiic_reinit(i2c); if (ret < 0) { - dev_err(&pdev->dev, "Cannot xiic_reinit\n"); + dev_err_probe(&pdev->dev, ret, "Cannot xiic_reinit\n"); goto err_pm_disable; } diff --git a/drivers/i2c/busses/scx200_acb.c b/drivers/i2c/busses/scx200_acb.c index 4d6abd7e92ce..06cf221557f2 100644 --- a/drivers/i2c/busses/scx200_acb.c +++ b/drivers/i2c/busses/scx200_acb.c @@ -500,10 +500,8 @@ static int scx200_probe(struct platform_device *pdev) struct resource *res; res = platform_get_resource(pdev, IORESOURCE_IO, 0); - if (!res) { - dev_err(&pdev->dev, "can't fetch device resource info\n"); - return -ENODEV; - } + if (!res) + return dev_err_probe(&pdev->dev, -ENODEV, "can't fetch device resource info\n"); iface = scx200_create_dev("CS5535", res->start, 0, &pdev->dev); if (!iface) diff --git a/drivers/i2c/i2c-atr.c b/drivers/i2c/i2c-atr.c index 783fb8df2ebe..be7d6d41e0b2 100644 --- a/drivers/i2c/i2c-atr.c +++ b/drivers/i2c/i2c-atr.c @@ -16,32 +16,65 @@ #include <linux/property.h> #include <linux/slab.h> #include <linux/spinlock.h> +#include <linux/lockdep.h> #define ATR_MAX_ADAPTERS 100 /* Just a sanity limit */ #define ATR_MAX_SYMLINK_LEN 11 /* Longest name is 10 chars: "channel-99" */ /** - * struct i2c_atr_alias_pair - Holds the alias assigned to a client. + * struct i2c_atr_alias_pair - Holds the alias assigned to a client address. * @node: List node - * @client: Pointer to the client on the child bus + * @addr: Address of the client on the child bus. * @alias: I2C alias address assigned by the driver. * This is the address that will be used to issue I2C transactions * on the parent (physical) bus. + * @fixed: Alias pair cannot be replaced during dynamic address attachment. + * This flag is necessary for situations where a single I2C transaction + * contains more distinct target addresses than the ATR channel can handle. + * It marks addresses that have already been attached to an alias so + * that their alias pair is not evicted by a subsequent address in the same + * transaction. + * */ struct i2c_atr_alias_pair { struct list_head node; - const struct i2c_client *client; + bool fixed; + u16 addr; u16 alias; }; /** + * struct i2c_atr_alias_pool - Pool of client aliases available for an ATR. 
+ * @size: Total number of aliases + * @shared: Indicates if this alias pool is shared by multiple channels + * + * @lock: Lock protecting @aliases and @use_mask + * @aliases: Array of aliases, must hold exactly @size elements + * @use_mask: Mask of used aliases + */ +struct i2c_atr_alias_pool { + size_t size; + bool shared; + + /* Protects aliases and use_mask */ + spinlock_t lock; + u16 *aliases; + unsigned long *use_mask; +}; + +/** * struct i2c_atr_chan - Data for a channel. * @adap: The &struct i2c_adapter for the channel * @atr: The parent I2C ATR * @chan_id: The ID of this channel - * @alias_list: List of @struct i2c_atr_alias_pair containing the + * @alias_pairs_lock: Mutex protecting @alias_pairs + * @alias_pairs_lock_key: Lock key for @alias_pairs_lock + * @alias_pairs: List of @struct i2c_atr_alias_pair containing the * assigned aliases + * @alias_pool: Pool of available client aliases + * * @orig_addrs_lock: Mutex protecting @orig_addrs + * @orig_addrs_lock_key: Lock key for @orig_addrs_lock * @orig_addrs: Buffer used to store the original addresses during transmit * @orig_addrs_size: Size of @orig_addrs */ @@ -50,10 +83,15 @@ struct i2c_atr_chan { struct i2c_atr *atr; u32 chan_id; - struct list_head alias_list; + /* Lock alias_pairs during attach/detach */ + struct mutex alias_pairs_lock; + struct lock_class_key alias_pairs_lock_key; + struct list_head alias_pairs; + struct i2c_atr_alias_pool *alias_pool; /* Lock orig_addrs during xfer */ struct mutex orig_addrs_lock; + struct lock_class_key orig_addrs_lock_key; u16 *orig_addrs; unsigned int orig_addrs_size; }; @@ -66,11 +104,10 @@ struct i2c_atr_chan { * @priv: Private driver data, set with i2c_atr_set_driver_data() * @algo: The &struct i2c_algorithm for adapters * @lock: Lock for the I2C bus segment (see &struct i2c_lock_operations) + * @lock_key: Lock key for @lock * @max_adapters: Maximum number of adapters this I2C ATR can have - * @num_aliases: Number of aliases in the aliases array - * @aliases: The aliases array - * @alias_mask_lock: Lock protecting alias_use_mask - * @alias_use_mask: Bitmask for used aliases in aliases array + * @flags: Flags for ATR + * @alias_pool: Optional common pool of available client aliases * @i2c_nb: Notifier for remote client add & del events * @adapter: Array of adapters */ @@ -84,27 +121,135 @@ struct i2c_atr { struct i2c_algorithm algo; /* lock for the I2C bus segment (see struct i2c_lock_operations) */ struct mutex lock; + struct lock_class_key lock_key; int max_adapters; + u32 flags; - size_t num_aliases; - const u16 *aliases; - /* Protects alias_use_mask */ - spinlock_t alias_mask_lock; - unsigned long *alias_use_mask; + struct i2c_atr_alias_pool *alias_pool; struct notifier_block i2c_nb; struct i2c_adapter *adapter[] __counted_by(max_adapters); }; +static struct i2c_atr_alias_pool *i2c_atr_alloc_alias_pool(size_t num_aliases, bool shared) +{ + struct i2c_atr_alias_pool *alias_pool; + int ret; + + alias_pool = kzalloc(sizeof(*alias_pool), GFP_KERNEL); + if (!alias_pool) + return ERR_PTR(-ENOMEM); + + alias_pool->size = num_aliases; + + alias_pool->aliases = kcalloc(num_aliases, sizeof(*alias_pool->aliases), GFP_KERNEL); + if (!alias_pool->aliases) { + ret = -ENOMEM; + goto err_free_alias_pool; + } + + alias_pool->use_mask = bitmap_zalloc(num_aliases, GFP_KERNEL); + if (!alias_pool->use_mask) { + ret = -ENOMEM; + goto err_free_aliases; + } + + alias_pool->shared = shared; + + spin_lock_init(&alias_pool->lock); + + return alias_pool; + +err_free_aliases: + kfree(alias_pool->aliases); 
+err_free_alias_pool: + kfree(alias_pool); + return ERR_PTR(ret); +} + +static void i2c_atr_free_alias_pool(struct i2c_atr_alias_pool *alias_pool) +{ + bitmap_free(alias_pool->use_mask); + kfree(alias_pool->aliases); + kfree(alias_pool); +} + +/* Must be called with alias_pairs_lock held */ +static struct i2c_atr_alias_pair *i2c_atr_create_c2a(struct i2c_atr_chan *chan, + u16 alias, u16 addr) +{ + struct i2c_atr_alias_pair *c2a; + + lockdep_assert_held(&chan->alias_pairs_lock); + + c2a = kzalloc(sizeof(*c2a), GFP_KERNEL); + if (!c2a) + return NULL; + + c2a->addr = addr; + c2a->alias = alias; + + list_add(&c2a->node, &chan->alias_pairs); + + return c2a; +} + +/* Must be called with alias_pairs_lock held */ +static void i2c_atr_destroy_c2a(struct i2c_atr_alias_pair **pc2a) +{ + list_del(&(*pc2a)->node); + kfree(*pc2a); + *pc2a = NULL; +} + +static int i2c_atr_reserve_alias(struct i2c_atr_alias_pool *alias_pool) +{ + unsigned long idx; + u16 alias; + + spin_lock(&alias_pool->lock); + + idx = find_first_zero_bit(alias_pool->use_mask, alias_pool->size); + if (idx >= alias_pool->size) { + spin_unlock(&alias_pool->lock); + return -EBUSY; + } + + set_bit(idx, alias_pool->use_mask); + + alias = alias_pool->aliases[idx]; + + spin_unlock(&alias_pool->lock); + return alias; +} + +static void i2c_atr_release_alias(struct i2c_atr_alias_pool *alias_pool, u16 alias) +{ + unsigned int idx; + + spin_lock(&alias_pool->lock); + + for (idx = 0; idx < alias_pool->size; ++idx) { + if (alias_pool->aliases[idx] == alias) { + clear_bit(idx, alias_pool->use_mask); + spin_unlock(&alias_pool->lock); + return; + } + } + + spin_unlock(&alias_pool->lock); +} + static struct i2c_atr_alias_pair * -i2c_atr_find_mapping_by_client(const struct list_head *list, - const struct i2c_client *client) +i2c_atr_find_mapping_by_addr(struct i2c_atr_chan *chan, u16 addr) { struct i2c_atr_alias_pair *c2a; - list_for_each_entry(c2a, list, node) { - if (c2a->client == client) + lockdep_assert_held(&chan->alias_pairs_lock); + + list_for_each_entry(c2a, &chan->alias_pairs, node) { + if (c2a->addr == addr) return c2a; } @@ -112,18 +257,107 @@ i2c_atr_find_mapping_by_client(const struct list_head *list, } static struct i2c_atr_alias_pair * -i2c_atr_find_mapping_by_addr(const struct list_head *list, u16 phys_addr) +i2c_atr_create_mapping_by_addr(struct i2c_atr_chan *chan, u16 addr) { + struct i2c_atr *atr = chan->atr; struct i2c_atr_alias_pair *c2a; + u16 alias; + int ret; - list_for_each_entry(c2a, list, node) { - if (c2a->client->addr == phys_addr) - return c2a; + lockdep_assert_held(&chan->alias_pairs_lock); + + ret = i2c_atr_reserve_alias(chan->alias_pool); + if (ret < 0) + return NULL; + + alias = ret; + + c2a = i2c_atr_create_c2a(chan, alias, addr); + if (!c2a) + goto err_release_alias; + + ret = atr->ops->attach_addr(atr, chan->chan_id, c2a->addr, c2a->alias); + if (ret) { + dev_err(atr->dev, "failed to attach 0x%02x on channel %d: err %d\n", + addr, chan->chan_id, ret); + goto err_del_c2a; } + return c2a; + +err_del_c2a: + i2c_atr_destroy_c2a(&c2a); +err_release_alias: + i2c_atr_release_alias(chan->alias_pool, alias); return NULL; } +static struct i2c_atr_alias_pair * +i2c_atr_replace_mapping_by_addr(struct i2c_atr_chan *chan, u16 addr) +{ + struct i2c_atr *atr = chan->atr; + struct i2c_atr_alias_pair *c2a; + struct list_head *alias_pairs; + bool found = false; + u16 alias; + int ret; + + lockdep_assert_held(&chan->alias_pairs_lock); + + alias_pairs = &chan->alias_pairs; + + if (unlikely(list_empty(alias_pairs))) + return NULL; + + 
list_for_each_entry_reverse(c2a, alias_pairs, node) { + if (!c2a->fixed) { + found = true; + break; + } + } + + if (!found) + return NULL; + + atr->ops->detach_addr(atr, chan->chan_id, c2a->addr); + c2a->addr = addr; + + list_move(&c2a->node, alias_pairs); + + alias = c2a->alias; + + ret = atr->ops->attach_addr(atr, chan->chan_id, c2a->addr, c2a->alias); + if (ret) { + dev_err(atr->dev, "failed to attach 0x%02x on channel %d: err %d\n", + addr, chan->chan_id, ret); + i2c_atr_destroy_c2a(&c2a); + i2c_atr_release_alias(chan->alias_pool, alias); + return NULL; + } + + return c2a; +} + +static struct i2c_atr_alias_pair * +i2c_atr_get_mapping_by_addr(struct i2c_atr_chan *chan, u16 addr) +{ + struct i2c_atr *atr = chan->atr; + struct i2c_atr_alias_pair *c2a; + + c2a = i2c_atr_find_mapping_by_addr(chan, addr); + if (c2a) + return c2a; + + if (atr->flags & I2C_ATR_F_STATIC) + return NULL; + + c2a = i2c_atr_create_mapping_by_addr(chan, addr); + if (c2a) + return c2a; + + return i2c_atr_replace_mapping_by_addr(chan, addr); +} + /* * Replace all message addresses with their aliases, saving the original * addresses. @@ -136,7 +370,7 @@ static int i2c_atr_map_msgs(struct i2c_atr_chan *chan, struct i2c_msg *msgs, { struct i2c_atr *atr = chan->atr; static struct i2c_atr_alias_pair *c2a; - int i; + int i, ret = 0; /* Ensure we have enough room to save the original addresses */ if (unlikely(chan->orig_addrs_size < num)) { @@ -152,25 +386,36 @@ static int i2c_atr_map_msgs(struct i2c_atr_chan *chan, struct i2c_msg *msgs, chan->orig_addrs_size = num; } + mutex_lock(&chan->alias_pairs_lock); + for (i = 0; i < num; i++) { chan->orig_addrs[i] = msgs[i].addr; - c2a = i2c_atr_find_mapping_by_addr(&chan->alias_list, - msgs[i].addr); + c2a = i2c_atr_get_mapping_by_addr(chan, msgs[i].addr); + if (!c2a) { + if (atr->flags & I2C_ATR_F_PASSTHROUGH) + continue; + dev_err(atr->dev, "client 0x%02x not mapped!\n", msgs[i].addr); while (i--) msgs[i].addr = chan->orig_addrs[i]; - return -ENXIO; + ret = -ENXIO; + goto out_unlock; } + // Prevent c2a from being overwritten by another client in this transaction + c2a->fixed = true; + msgs[i].addr = c2a->alias; } - return 0; +out_unlock: + mutex_unlock(&chan->alias_pairs_lock); + return ret; } /* @@ -183,10 +428,24 @@ static int i2c_atr_map_msgs(struct i2c_atr_chan *chan, struct i2c_msg *msgs, static void i2c_atr_unmap_msgs(struct i2c_atr_chan *chan, struct i2c_msg *msgs, int num) { + struct i2c_atr_alias_pair *c2a; int i; for (i = 0; i < num; i++) msgs[i].addr = chan->orig_addrs[i]; + + mutex_lock(&chan->alias_pairs_lock); + + if (unlikely(list_empty(&chan->alias_pairs))) + goto out_unlock; + + // unfix c2a entries so that subsequent transfers can reuse their aliases + list_for_each_entry(c2a, &chan->alias_pairs, node) { + c2a->fixed = false; + } + +out_unlock: + mutex_unlock(&chan->alias_pairs_lock); } static int i2c_atr_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, @@ -224,14 +483,23 @@ static int i2c_atr_smbus_xfer(struct i2c_adapter *adap, u16 addr, struct i2c_atr *atr = chan->atr; struct i2c_adapter *parent = atr->parent; struct i2c_atr_alias_pair *c2a; + u16 alias; - c2a = i2c_atr_find_mapping_by_addr(&chan->alias_list, addr); - if (!c2a) { + mutex_lock(&chan->alias_pairs_lock); + + c2a = i2c_atr_get_mapping_by_addr(chan, addr); + + if (!c2a && !(atr->flags & I2C_ATR_F_PASSTHROUGH)) { dev_err(atr->dev, "client 0x%02x not mapped!\n", addr); + mutex_unlock(&chan->alias_pairs_lock); return -ENXIO; } - return i2c_smbus_xfer(parent, c2a->alias, flags, read_write, 
command, + alias = c2a ? c2a->alias : addr; + + mutex_unlock(&chan->alias_pairs_lock); + + return i2c_smbus_xfer(parent, alias, flags, read_write, command, size, data); } @@ -273,112 +541,60 @@ static const struct i2c_lock_operations i2c_atr_lock_ops = { .unlock_bus = i2c_atr_unlock_bus, }; -static int i2c_atr_reserve_alias(struct i2c_atr *atr) -{ - unsigned long idx; - - spin_lock(&atr->alias_mask_lock); - - idx = find_first_zero_bit(atr->alias_use_mask, atr->num_aliases); - if (idx >= atr->num_aliases) { - spin_unlock(&atr->alias_mask_lock); - dev_err(atr->dev, "failed to find a free alias\n"); - return -EBUSY; - } - - set_bit(idx, atr->alias_use_mask); - - spin_unlock(&atr->alias_mask_lock); - - return atr->aliases[idx]; -} - -static void i2c_atr_release_alias(struct i2c_atr *atr, u16 alias) -{ - unsigned int idx; - - spin_lock(&atr->alias_mask_lock); - - for (idx = 0; idx < atr->num_aliases; ++idx) { - if (atr->aliases[idx] == alias) { - clear_bit(idx, atr->alias_use_mask); - spin_unlock(&atr->alias_mask_lock); - return; - } - } - - spin_unlock(&atr->alias_mask_lock); - - /* This should never happen */ - dev_warn(atr->dev, "Unable to find mapped alias\n"); -} - -static int i2c_atr_attach_client(struct i2c_adapter *adapter, - const struct i2c_client *client) +static int i2c_atr_attach_addr(struct i2c_adapter *adapter, + u16 addr) { struct i2c_atr_chan *chan = adapter->algo_data; struct i2c_atr *atr = chan->atr; struct i2c_atr_alias_pair *c2a; - u16 alias; - int ret; + int ret = 0; - ret = i2c_atr_reserve_alias(atr); - if (ret < 0) - return ret; + mutex_lock(&chan->alias_pairs_lock); - alias = ret; + c2a = i2c_atr_create_mapping_by_addr(chan, addr); + if (!c2a && !(atr->flags & I2C_ATR_F_STATIC)) + c2a = i2c_atr_replace_mapping_by_addr(chan, addr); - c2a = kzalloc(sizeof(*c2a), GFP_KERNEL); if (!c2a) { - ret = -ENOMEM; - goto err_release_alias; + dev_err(atr->dev, "failed to find a free alias\n"); + ret = -EBUSY; + goto out_unlock; } - ret = atr->ops->attach_client(atr, chan->chan_id, client, alias); - if (ret) - goto err_free; - - dev_dbg(atr->dev, "chan%u: client 0x%02x mapped at alias 0x%02x (%s)\n", - chan->chan_id, client->addr, alias, client->name); - - c2a->client = client; - c2a->alias = alias; - list_add(&c2a->node, &chan->alias_list); - - return 0; - -err_free: - kfree(c2a); -err_release_alias: - i2c_atr_release_alias(atr, alias); + dev_dbg(atr->dev, "chan%u: using alias 0x%02x for addr 0x%02x\n", + chan->chan_id, c2a->alias, addr); +out_unlock: + mutex_unlock(&chan->alias_pairs_lock); return ret; } -static void i2c_atr_detach_client(struct i2c_adapter *adapter, - const struct i2c_client *client) +static void i2c_atr_detach_addr(struct i2c_adapter *adapter, + u16 addr) { struct i2c_atr_chan *chan = adapter->algo_data; struct i2c_atr *atr = chan->atr; struct i2c_atr_alias_pair *c2a; - atr->ops->detach_client(atr, chan->chan_id, client); + atr->ops->detach_addr(atr, chan->chan_id, addr); + + mutex_lock(&chan->alias_pairs_lock); - c2a = i2c_atr_find_mapping_by_client(&chan->alias_list, client); + c2a = i2c_atr_find_mapping_by_addr(chan, addr); if (!c2a) { - /* This should never happen */ - dev_warn(atr->dev, "Unable to find address mapping\n"); + mutex_unlock(&chan->alias_pairs_lock); return; } - i2c_atr_release_alias(atr, c2a->alias); + i2c_atr_release_alias(chan->alias_pool, c2a->alias); dev_dbg(atr->dev, - "chan%u: client 0x%02x unmapped from alias 0x%02x (%s)\n", - chan->chan_id, client->addr, c2a->alias, client->name); + "chan%u: detached alias 0x%02x from addr 0x%02x\n", + 
chan->chan_id, c2a->alias, addr); - list_del(&c2a->node); - kfree(c2a); + i2c_atr_destroy_c2a(&c2a); + + mutex_unlock(&chan->alias_pairs_lock); } static int i2c_atr_bus_notifier_call(struct notifier_block *nb, @@ -405,7 +621,7 @@ static int i2c_atr_bus_notifier_call(struct notifier_block *nb, switch (event) { case BUS_NOTIFY_ADD_DEVICE: - ret = i2c_atr_attach_client(client->adapter, client); + ret = i2c_atr_attach_addr(client->adapter, client->addr); if (ret) dev_err(atr->dev, "Failed to attach remote client '%s': %d\n", @@ -413,7 +629,7 @@ static int i2c_atr_bus_notifier_call(struct notifier_block *nb, break; case BUS_NOTIFY_REMOVED_DEVICE: - i2c_atr_detach_client(client->adapter, client); + i2c_atr_detach_addr(client->adapter, client->addr); break; default: @@ -425,29 +641,43 @@ static int i2c_atr_bus_notifier_call(struct notifier_block *nb, static int i2c_atr_parse_alias_pool(struct i2c_atr *atr) { + struct i2c_atr_alias_pool *alias_pool; struct device *dev = atr->dev; - unsigned long *alias_use_mask; size_t num_aliases; unsigned int i; u32 *aliases32; - u16 *aliases16; int ret; - ret = fwnode_property_count_u32(dev_fwnode(dev), "i2c-alias-pool"); - if (ret < 0) { - dev_err(dev, "Failed to count 'i2c-alias-pool' property: %d\n", - ret); + if (!fwnode_property_present(dev_fwnode(dev), "i2c-alias-pool")) { + num_aliases = 0; + } else { + ret = fwnode_property_count_u32(dev_fwnode(dev), "i2c-alias-pool"); + if (ret < 0) { + dev_err(dev, "Failed to count 'i2c-alias-pool' property: %d\n", + ret); + return ret; + } + + num_aliases = ret; + } + + alias_pool = i2c_atr_alloc_alias_pool(num_aliases, true); + if (IS_ERR(alias_pool)) { + ret = PTR_ERR(alias_pool); + dev_err(dev, "Failed to allocate alias pool, err %d\n", ret); return ret; } - num_aliases = ret; + atr->alias_pool = alias_pool; - if (!num_aliases) + if (!alias_pool->size) return 0; aliases32 = kcalloc(num_aliases, sizeof(*aliases32), GFP_KERNEL); - if (!aliases32) - return -ENOMEM; + if (!aliases32) { + ret = -ENOMEM; + goto err_free_alias_pool; + } ret = fwnode_property_read_u32_array(dev_fwnode(dev), "i2c-alias-pool", aliases32, num_aliases); @@ -457,48 +687,33 @@ static int i2c_atr_parse_alias_pool(struct i2c_atr *atr) goto err_free_aliases32; } - aliases16 = kcalloc(num_aliases, sizeof(*aliases16), GFP_KERNEL); - if (!aliases16) { - ret = -ENOMEM; - goto err_free_aliases32; - } - for (i = 0; i < num_aliases; i++) { if (!(aliases32[i] & 0xffff0000)) { - aliases16[i] = aliases32[i]; + alias_pool->aliases[i] = aliases32[i]; continue; } dev_err(dev, "Failed to parse 'i2c-alias-pool' property: I2C flags are not supported\n"); ret = -EINVAL; - goto err_free_aliases16; - } - - alias_use_mask = bitmap_zalloc(num_aliases, GFP_KERNEL); - if (!alias_use_mask) { - ret = -ENOMEM; - goto err_free_aliases16; + goto err_free_aliases32; } kfree(aliases32); - atr->num_aliases = num_aliases; - atr->aliases = aliases16; - atr->alias_use_mask = alias_use_mask; - - dev_dbg(dev, "i2c-alias-pool has %zu aliases", atr->num_aliases); + dev_dbg(dev, "i2c-alias-pool has %zu aliases\n", alias_pool->size); return 0; -err_free_aliases16: - kfree(aliases16); err_free_aliases32: kfree(aliases32); +err_free_alias_pool: + i2c_atr_free_alias_pool(alias_pool); return ret; } struct i2c_atr *i2c_atr_new(struct i2c_adapter *parent, struct device *dev, - const struct i2c_atr_ops *ops, int max_adapters) + const struct i2c_atr_ops *ops, int max_adapters, + u32 flags) { struct i2c_atr *atr; int ret; @@ -506,20 +721,21 @@ struct i2c_atr *i2c_atr_new(struct i2c_adapter 
*parent, struct device *dev, if (max_adapters > ATR_MAX_ADAPTERS) return ERR_PTR(-EINVAL); - if (!ops || !ops->attach_client || !ops->detach_client) + if (!ops || !ops->attach_addr || !ops->detach_addr) return ERR_PTR(-EINVAL); atr = kzalloc(struct_size(atr, adapter, max_adapters), GFP_KERNEL); if (!atr) return ERR_PTR(-ENOMEM); - mutex_init(&atr->lock); - spin_lock_init(&atr->alias_mask_lock); + lockdep_register_key(&atr->lock_key); + mutex_init_with_key(&atr->lock, &atr->lock_key); atr->parent = parent; atr->dev = dev; atr->ops = ops; atr->max_adapters = max_adapters; + atr->flags = flags; if (parent->algo->master_xfer) atr->algo.master_xfer = i2c_atr_master_xfer; @@ -534,15 +750,15 @@ struct i2c_atr *i2c_atr_new(struct i2c_adapter *parent, struct device *dev, atr->i2c_nb.notifier_call = i2c_atr_bus_notifier_call; ret = bus_register_notifier(&i2c_bus_type, &atr->i2c_nb); if (ret) - goto err_free_aliases; + goto err_free_alias_pool; return atr; -err_free_aliases: - bitmap_free(atr->alias_use_mask); - kfree(atr->aliases); +err_free_alias_pool: + i2c_atr_free_alias_pool(atr->alias_pool); err_destroy_mutex: mutex_destroy(&atr->lock); + lockdep_unregister_key(&atr->lock_key); kfree(atr); return ERR_PTR(ret); @@ -557,22 +773,22 @@ void i2c_atr_delete(struct i2c_atr *atr) WARN_ON(atr->adapter[i]); bus_unregister_notifier(&i2c_bus_type, &atr->i2c_nb); - bitmap_free(atr->alias_use_mask); - kfree(atr->aliases); + i2c_atr_free_alias_pool(atr->alias_pool); mutex_destroy(&atr->lock); + lockdep_unregister_key(&atr->lock_key); kfree(atr); } EXPORT_SYMBOL_NS_GPL(i2c_atr_delete, "I2C_ATR"); -int i2c_atr_add_adapter(struct i2c_atr *atr, u32 chan_id, - struct device *adapter_parent, - struct fwnode_handle *bus_handle) +int i2c_atr_add_adapter(struct i2c_atr *atr, struct i2c_atr_adap_desc *desc) { + struct fwnode_handle *bus_handle = desc->bus_handle; struct i2c_adapter *parent = atr->parent; + char symlink_name[ATR_MAX_SYMLINK_LEN]; struct device *dev = atr->dev; + u32 chan_id = desc->chan_id; struct i2c_atr_chan *chan; - char symlink_name[ATR_MAX_SYMLINK_LEN]; - int ret; + int ret, idx; if (chan_id >= atr->max_adapters) { dev_err(dev, "No room for more i2c-atr adapters\n"); @@ -588,20 +804,23 @@ int i2c_atr_add_adapter(struct i2c_atr *atr, u32 chan_id, if (!chan) return -ENOMEM; - if (!adapter_parent) - adapter_parent = dev; + if (!desc->parent) + desc->parent = dev; chan->atr = atr; chan->chan_id = chan_id; - INIT_LIST_HEAD(&chan->alias_list); - mutex_init(&chan->orig_addrs_lock); + INIT_LIST_HEAD(&chan->alias_pairs); + lockdep_register_key(&chan->alias_pairs_lock_key); + lockdep_register_key(&chan->orig_addrs_lock_key); + mutex_init_with_key(&chan->alias_pairs_lock, &chan->alias_pairs_lock_key); + mutex_init_with_key(&chan->orig_addrs_lock, &chan->orig_addrs_lock_key); snprintf(chan->adap.name, sizeof(chan->adap.name), "i2c-%d-atr-%d", i2c_adapter_id(parent), chan_id); chan->adap.owner = THIS_MODULE; chan->adap.algo = &atr->algo; chan->adap.algo_data = chan; - chan->adap.dev.parent = adapter_parent; + chan->adap.dev.parent = desc->parent; chan->adap.retries = parent->retries; chan->adap.timeout = parent->timeout; chan->adap.quirks = parent->quirks; @@ -628,13 +847,26 @@ int i2c_atr_add_adapter(struct i2c_atr *atr, u32 chan_id, fwnode_handle_put(atr_node); } + if (desc->num_aliases > 0) { + chan->alias_pool = i2c_atr_alloc_alias_pool(desc->num_aliases, false); + if (IS_ERR(chan->alias_pool)) { + ret = PTR_ERR(chan->alias_pool); + goto err_fwnode_put; + } + + for (idx = 0; idx < desc->num_aliases; idx++) 
+ chan->alias_pool->aliases[idx] = desc->aliases[idx]; + } else { + chan->alias_pool = atr->alias_pool; + } + atr->adapter[chan_id] = &chan->adap; ret = i2c_add_adapter(&chan->adap); if (ret) { dev_err(dev, "failed to add atr-adapter %u (error=%d)\n", chan_id, ret); - goto err_fwnode_put; + goto err_free_alias_pool; } snprintf(symlink_name, sizeof(symlink_name), "channel-%u", @@ -651,9 +883,15 @@ int i2c_atr_add_adapter(struct i2c_atr *atr, u32 chan_id, return 0; +err_free_alias_pool: + if (!chan->alias_pool->shared) + i2c_atr_free_alias_pool(chan->alias_pool); err_fwnode_put: fwnode_handle_put(dev_fwnode(&chan->adap.dev)); mutex_destroy(&chan->orig_addrs_lock); + mutex_destroy(&chan->alias_pairs_lock); + lockdep_unregister_key(&chan->orig_addrs_lock_key); + lockdep_unregister_key(&chan->alias_pairs_lock_key); kfree(chan); return ret; } @@ -683,10 +921,16 @@ void i2c_atr_del_adapter(struct i2c_atr *atr, u32 chan_id) i2c_del_adapter(adap); + if (!chan->alias_pool->shared) + i2c_atr_free_alias_pool(chan->alias_pool); + atr->adapter[chan_id] = NULL; fwnode_handle_put(fwnode); mutex_destroy(&chan->orig_addrs_lock); + mutex_destroy(&chan->alias_pairs_lock); + lockdep_unregister_key(&chan->orig_addrs_lock_key); + lockdep_unregister_key(&chan->alias_pairs_lock_key); kfree(chan->orig_addrs); kfree(chan); } diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c index 7ad1ad5c8c3f..2ad2b1838f0f 100644 --- a/drivers/i2c/i2c-core-base.c +++ b/drivers/i2c/i2c-core-base.c @@ -26,14 +26,13 @@ #include <linux/idr.h> #include <linux/init.h> #include <linux/interrupt.h> -#include <linux/irqflags.h> +#include <linux/irq.h> #include <linux/jump_label.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/of_device.h> #include <linux/of.h> -#include <linux/of_irq.h> #include <linux/pinctrl/consumer.h> #include <linux/pinctrl/devinfo.h> #include <linux/pm_domain.h> @@ -42,6 +41,7 @@ #include <linux/property.h> #include <linux/rwsem.h> #include <linux/slab.h> +#include <linux/string_choices.h> #include "i2c-core.h" @@ -490,6 +490,7 @@ static int i2c_smbus_host_notify_to_irq(const struct i2c_client *client) static int i2c_device_probe(struct device *dev) { + struct fwnode_handle *fwnode = dev_fwnode(dev); struct i2c_client *client = i2c_verify_client(dev); struct i2c_driver *driver; bool do_power_on; @@ -508,11 +509,11 @@ static int i2c_device_probe(struct device *dev) /* Keep adapter active when Host Notify is required */ pm_runtime_get_sync(&client->adapter->dev); irq = i2c_smbus_host_notify_to_irq(client); - } else if (dev->of_node) { - irq = of_irq_get_byname(dev->of_node, "irq"); + } else if (is_of_node(fwnode)) { + irq = fwnode_irq_get_byname(fwnode, "irq"); if (irq == -EINVAL || irq == -ENODATA) - irq = of_irq_get(dev->of_node, 0); - } else if (ACPI_COMPANION(dev)) { + irq = fwnode_irq_get(fwnode, 0); + } else if (is_acpi_device_node(fwnode)) { bool wake_capable; irq = i2c_acpi_get_irq(client, &wake_capable); @@ -520,7 +521,7 @@ static int i2c_device_probe(struct device *dev) client->flags |= I2C_CLIENT_WAKE; } if (irq == -EPROBE_DEFER) { - status = irq; + status = dev_err_probe(dev, irq, "can't get irq\n"); goto put_sync_adapter; } @@ -546,9 +547,9 @@ static int i2c_device_probe(struct device *dev) if (client->flags & I2C_CLIENT_WAKE) { int wakeirq; - wakeirq = of_irq_get_byname(dev->of_node, "wakeup"); + wakeirq = fwnode_irq_get_byname(fwnode, "wakeup"); if (wakeirq == -EPROBE_DEFER) { - status = wakeirq; + status = dev_err_probe(dev, wakeirq, 
"can't get wakeirq\n"); goto put_sync_adapter; } @@ -567,7 +568,7 @@ static int i2c_device_probe(struct device *dev) dev_dbg(dev, "probe\n"); - status = of_clk_set_defaults(dev->of_node, false); + status = of_clk_set_defaults(to_of_node(fwnode), false); if (status < 0) goto err_clear_wakeup_irq; @@ -961,6 +962,7 @@ static void i2c_unlock_addr(struct i2c_adapter *adap, unsigned short addr, struct i2c_client * i2c_new_client_device(struct i2c_adapter *adap, struct i2c_board_info const *info) { + struct fwnode_handle *fwnode = info->fwnode; struct i2c_client *client; bool need_put = false; int status; @@ -1001,18 +1003,18 @@ i2c_new_client_device(struct i2c_adapter *adap, struct i2c_board_info const *inf client->dev.parent = &client->adapter->dev; client->dev.bus = &i2c_bus_type; client->dev.type = &i2c_client_type; - client->dev.of_node = of_node_get(info->of_node); - client->dev.fwnode = info->fwnode; device_enable_async_suspend(&client->dev); + device_set_node(&client->dev, fwnode_handle_get(fwnode)); + if (info->swnode) { status = device_add_software_node(&client->dev, info->swnode); if (status) { dev_err(&adap->dev, "Failed to add software node to client %s: %d\n", client->name, status); - goto out_err_put_of_node; + goto out_err_put_fwnode; } } @@ -1031,8 +1033,8 @@ i2c_new_client_device(struct i2c_adapter *adap, struct i2c_board_info const *inf out_remove_swnode: device_remove_software_node(&client->dev); need_put = true; -out_err_put_of_node: - of_node_put(info->of_node); +out_err_put_fwnode: + fwnode_handle_put(fwnode); out_err: dev_err(&adap->dev, "Failed to register i2c client %s at 0x%02x (%d)\n", @@ -1054,16 +1056,17 @@ EXPORT_SYMBOL_GPL(i2c_new_client_device); */ void i2c_unregister_device(struct i2c_client *client) { + struct fwnode_handle *fwnode; + if (IS_ERR_OR_NULL(client)) return; - if (client->dev.of_node) { - of_node_clear_flag(client->dev.of_node, OF_POPULATED); - of_node_put(client->dev.of_node); - } - - if (ACPI_COMPANION(&client->dev)) - acpi_device_clear_enumerated(ACPI_COMPANION(&client->dev)); + fwnode = dev_fwnode(&client->dev); + if (is_of_node(fwnode)) + of_node_clear_flag(to_of_node(fwnode), OF_POPULATED); + else if (is_acpi_device_node(fwnode)) + acpi_device_clear_enumerated(to_acpi_device_node(fwnode)); + fwnode_handle_put(fwnode); device_remove_software_node(&client->dev); device_unregister(&client->dev); @@ -1209,11 +1212,9 @@ struct i2c_client *i2c_new_ancillary_device(struct i2c_client *client, u32 addr = default_addr; int i; - if (np) { - i = of_property_match_string(np, "reg-names", name); - if (i >= 0) - of_property_read_u32_index(np, "reg", i, &addr); - } + i = of_property_match_string(np, "reg-names", name); + if (i >= 0) + of_property_read_u32_index(np, "reg", i, &addr); dev_dbg(&client->adapter->dev, "Address for %s : 0x%x\n", name, addr); return i2c_new_dummy_device(client->adapter, addr); @@ -1651,12 +1652,10 @@ int i2c_add_adapter(struct i2c_adapter *adapter) struct device *dev = &adapter->dev; int id; - if (dev->of_node) { - id = of_alias_get_id(dev->of_node, "i2c"); - if (id >= 0) { - adapter->nr = id; - return __i2c_add_numbered_adapter(adapter); - } + id = of_alias_get_id(dev->of_node, "i2c"); + if (id >= 0) { + adapter->nr = id; + return __i2c_add_numbered_adapter(adapter); } mutex_lock(&core_lock); @@ -2146,7 +2145,7 @@ static int i2c_quirk_error(struct i2c_adapter *adap, struct i2c_msg *msg, char * { dev_err_ratelimited(&adap->dev, "adapter quirk: %s (addr 0x%04x, size %u, %s)\n", err_msg, msg->addr, msg->len, - msg->flags & I2C_M_RD ? 
"read" : "write"); + str_read_write(msg->flags & I2C_M_RD)); return -EOPNOTSUPP; } diff --git a/drivers/i2c/i2c-core-of.c b/drivers/i2c/i2c-core-of.c index 02feee6c9ba9..eb7fb202355f 100644 --- a/drivers/i2c/i2c-core-of.c +++ b/drivers/i2c/i2c-core-of.c @@ -49,7 +49,6 @@ int of_i2c_get_board_info(struct device *dev, struct device_node *node, } info->addr = addr; - info->of_node = node; info->fwnode = of_fwnode_handle(node); if (of_property_read_bool(node, "host-notify")) diff --git a/drivers/i2c/i2c-core-slave.c b/drivers/i2c/i2c-core-slave.c index faefe1dfa8e5..7ee6b992b835 100644 --- a/drivers/i2c/i2c-core-slave.c +++ b/drivers/i2c/i2c-core-slave.c @@ -11,6 +11,7 @@ #include <linux/err.h> #include <linux/i2c.h> #include <linux/of.h> +#include <linux/property.h> #include "i2c-core.h" @@ -108,15 +109,18 @@ EXPORT_SYMBOL_GPL(i2c_slave_event); */ bool i2c_detect_slave_mode(struct device *dev) { - if (IS_BUILTIN(CONFIG_OF) && dev->of_node) { + struct fwnode_handle *fwnode = dev_fwnode(dev); + + if (is_of_node(fwnode)) { + struct fwnode_handle *child __free(fwnode_handle) = NULL; u32 reg; - for_each_child_of_node_scoped(dev->of_node, child) { - of_property_read_u32(child, "reg", ®); + fwnode_for_each_child_node(fwnode, child) { + fwnode_property_read_u32(child, "reg", ®); if (reg & I2C_OWN_SLAVE_ADDRESS) return true; } - } else if (IS_BUILTIN(CONFIG_ACPI) && ACPI_HANDLE(dev)) { + } else if (is_acpi_device_node(fwnode)) { dev_dbg(dev, "ACPI slave is not supported yet\n"); } return false; diff --git a/drivers/i2c/i2c-core-smbus.c b/drivers/i2c/i2c-core-smbus.c index e73afbefe222..71eb1ef56f0c 100644 --- a/drivers/i2c/i2c-core-smbus.c +++ b/drivers/i2c/i2c-core-smbus.c @@ -16,6 +16,7 @@ #include <linux/i2c-smbus.h> #include <linux/property.h> #include <linux/slab.h> +#include <linux/string_choices.h> #include "i2c-core.h" @@ -433,7 +434,7 @@ static s32 i2c_smbus_xfer_emulated(struct i2c_adapter *adapter, u16 addr, case I2C_SMBUS_I2C_BLOCK_DATA: if (data->block[0] > I2C_SMBUS_BLOCK_MAX) { dev_err(&adapter->dev, "Invalid block %s size %d\n", - read_write == I2C_SMBUS_READ ? 
"read" : "write", + str_read_write(read_write == I2C_SMBUS_READ), data->block[0]); return -EINVAL; } diff --git a/drivers/i2c/i2c-smbus.c b/drivers/i2c/i2c-smbus.c index 7d40e7aa3799..0316b347f9e7 100644 --- a/drivers/i2c/i2c-smbus.c +++ b/drivers/i2c/i2c-smbus.c @@ -372,12 +372,13 @@ EXPORT_SYMBOL_GPL(i2c_free_slave_host_notify_device); * - Only works on systems with 1 to 8 memory slots */ #if IS_ENABLED(CONFIG_DMI) -void i2c_register_spd(struct i2c_adapter *adap) +static void i2c_register_spd(struct i2c_adapter *adap, bool write_disabled) { int n, slot_count = 0, dimm_count = 0; u16 handle; u8 common_mem_type = 0x0, mem_type; u64 mem_size; + bool instantiate = true; const char *name; while ((handle = dmi_memdev_handle(slot_count)) != 0xffff) { @@ -438,6 +439,7 @@ void i2c_register_spd(struct i2c_adapter *adap) case 0x22: /* DDR5 */ case 0x23: /* LPDDR5 */ name = "spd5118"; + instantiate = !write_disabled; break; default: dev_info(&adap->dev, @@ -461,6 +463,9 @@ void i2c_register_spd(struct i2c_adapter *adap) addr_list[0] = 0x50 + n; addr_list[1] = I2C_CLIENT_END; + if (!instantiate) + continue; + if (!IS_ERR(i2c_new_scanned_device(adap, &info, addr_list, NULL))) { dev_info(&adap->dev, "Successfully instantiated SPD at 0x%hx\n", @@ -469,7 +474,19 @@ void i2c_register_spd(struct i2c_adapter *adap) } } } -EXPORT_SYMBOL_GPL(i2c_register_spd); + +void i2c_register_spd_write_disable(struct i2c_adapter *adap) +{ + i2c_register_spd(adap, true); +} +EXPORT_SYMBOL_GPL(i2c_register_spd_write_disable); + +void i2c_register_spd_write_enable(struct i2c_adapter *adap) +{ + i2c_register_spd(adap, false); +} +EXPORT_SYMBOL_GPL(i2c_register_spd_write_enable); + #endif MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>"); diff --git a/drivers/i2c/muxes/i2c-mux-ltc4306.c b/drivers/i2c/muxes/i2c-mux-ltc4306.c index 8a87f19bf5d5..c688af270a11 100644 --- a/drivers/i2c/muxes/i2c-mux-ltc4306.c +++ b/drivers/i2c/muxes/i2c-mux-ltc4306.c @@ -85,13 +85,13 @@ static int ltc4306_gpio_get(struct gpio_chip *chip, unsigned int offset) return !!(val & BIT(1 - offset)); } -static void ltc4306_gpio_set(struct gpio_chip *chip, unsigned int offset, - int value) +static int ltc4306_gpio_set(struct gpio_chip *chip, unsigned int offset, + int value) { struct ltc4306 *data = gpiochip_get_data(chip); - regmap_update_bits(data->regmap, LTC_REG_CONFIG, BIT(5 - offset), - value ? BIT(5 - offset) : 0); + return regmap_update_bits(data->regmap, LTC_REG_CONFIG, + BIT(5 - offset), value ? 
BIT(5 - offset) : 0); } static int ltc4306_gpio_get_direction(struct gpio_chip *chip, @@ -164,7 +164,7 @@ static int ltc4306_gpio_init(struct ltc4306 *data) data->gpiochip.direction_input = ltc4306_gpio_direction_input; data->gpiochip.direction_output = ltc4306_gpio_direction_output; data->gpiochip.get = ltc4306_gpio_get; - data->gpiochip.set = ltc4306_gpio_set; + data->gpiochip.set_rv = ltc4306_gpio_set; data->gpiochip.set_config = ltc4306_gpio_set_config; data->gpiochip.owner = THIS_MODULE; diff --git a/drivers/i3c/master/Kconfig b/drivers/i3c/master/Kconfig index 77da199c7413..7b30db3253af 100644 --- a/drivers/i3c/master/Kconfig +++ b/drivers/i3c/master/Kconfig @@ -1,7 +1,6 @@ # SPDX-License-Identifier: GPL-2.0-only config CDNS_I3C_MASTER tristate "Cadence I3C master driver" - depends on I3C depends on HAS_IOMEM depends on !(ALPHA || PARISC) help @@ -9,7 +8,6 @@ config CDNS_I3C_MASTER config DW_I3C_MASTER tristate "Synospsys DesignWare I3C master driver" - depends on I3C depends on HAS_IOMEM depends on !(ALPHA || PARISC) # ALPHA and PARISC needs {read,write}sl() @@ -38,7 +36,6 @@ config AST2600_I3C_MASTER config SVC_I3C_MASTER tristate "Silvaco I3C Dual-Role Master driver" - depends on I3C depends on HAS_IOMEM depends on !(ALPHA || PARISC) help @@ -46,7 +43,6 @@ config SVC_I3C_MASTER config MIPI_I3C_HCI tristate "MIPI I3C Host Controller Interface driver (EXPERIMENTAL)" - depends on I3C depends on HAS_IOMEM help Support for hardware following the MIPI Aliance's I3C Host Controller diff --git a/drivers/i3c/master/mipi-i3c-hci/core.c b/drivers/i3c/master/mipi-i3c-hci/core.c index a71226d7ca59..bc4538694540 100644 --- a/drivers/i3c/master/mipi-i3c-hci/core.c +++ b/drivers/i3c/master/mipi-i3c-hci/core.c @@ -78,7 +78,7 @@ #define INTR_SIGNAL_ENABLE 0x28 #define INTR_FORCE 0x2c #define INTR_HC_CMD_SEQ_UFLOW_STAT BIT(12) /* Cmd Sequence Underflow */ -#define INTR_HC_RESET_CANCEL BIT(11) /* HC Cancelled Reset */ +#define INTR_HC_SEQ_CANCEL BIT(11) /* HC Cancelled Transaction Sequence */ #define INTR_HC_INTERNAL_ERR BIT(10) /* HC Internal Error */ #define DAT_SECTION 0x30 /* Device Address Table */ @@ -590,26 +590,27 @@ static irqreturn_t i3c_hci_irq_handler(int irq, void *dev_id) u32 val; val = reg_read(INTR_STATUS); + reg_write(INTR_STATUS, val); DBG("INTR_STATUS = %#x", val); - if (val) { - reg_write(INTR_STATUS, val); - } + if (val) + result = IRQ_HANDLED; - if (val & INTR_HC_RESET_CANCEL) { - DBG("cancelled reset"); - val &= ~INTR_HC_RESET_CANCEL; + if (val & INTR_HC_SEQ_CANCEL) { + dev_dbg(&hci->master.dev, + "Host Controller Cancelled Transaction Sequence\n"); + val &= ~INTR_HC_SEQ_CANCEL; } if (val & INTR_HC_INTERNAL_ERR) { dev_err(&hci->master.dev, "Host Controller Internal Error\n"); val &= ~INTR_HC_INTERNAL_ERR; } - hci->io->irq_handler(hci); - if (val) - dev_err(&hci->master.dev, "unexpected INTR_STATUS %#x\n", val); - else + dev_warn_once(&hci->master.dev, + "unexpected INTR_STATUS %#x\n", val); + + if (hci->io->irq_handler(hci)) result = IRQ_HANDLED; return result; @@ -699,9 +700,14 @@ static int i3c_hci_init(struct i3c_hci *hci) if (ret) return -ENXIO; - /* Disable all interrupts and allow all signal updates */ + /* Disable all interrupts */ reg_write(INTR_SIGNAL_ENABLE, 0x0); - reg_write(INTR_STATUS_ENABLE, 0xffffffff); + /* + * Only allow bit 31:10 signal updates because + * Bit 0:9 are reserved in IP version >= 0.8 + * Bit 0:5 are defined in IP version < 0.8 but not handled by PIO code + */ + reg_write(INTR_STATUS_ENABLE, GENMASK(31, 10)); /* Make sure our data ordering fits 
the host's */ regval = reg_read(HC_CONTROL); diff --git a/drivers/i3c/master/svc-i3c-master.c b/drivers/i3c/master/svc-i3c-master.c index 85e16de208d3..7e1a7cb94b43 100644 --- a/drivers/i3c/master/svc-i3c-master.c +++ b/drivers/i3c/master/svc-i3c-master.c @@ -201,11 +201,10 @@ struct svc_i3c_drvdata { * @addrs: Array containing the dynamic addresses of each attached device * @descs: Array of descriptors, one per attached device * @hj_work: Hot-join work - * @ibi_work: IBI work * @irq: Main interrupt - * @pclk: System clock + * @num_clks: I3C clock number * @fclk: Fast clock (bus) - * @sclk: Slow clock (other events) + * @clks: I3C clock array * @xferqueue: Transfer queue structure * @xferqueue.list: List member * @xferqueue.cur: Current ongoing transfer @@ -229,11 +228,10 @@ struct svc_i3c_master { u8 addrs[SVC_I3C_MAX_DEVS]; struct i3c_dev_desc *descs[SVC_I3C_MAX_DEVS]; struct work_struct hj_work; - struct work_struct ibi_work; int irq; - struct clk *pclk; + int num_clks; struct clk *fclk; - struct clk *sclk; + struct clk_bulk_data *clks; struct { struct list_head list; struct svc_i3c_xfer *cur; @@ -487,9 +485,8 @@ static int svc_i3c_master_handle_ibi_won(struct svc_i3c_master *master, u32 msta return ret; } -static void svc_i3c_master_ibi_work(struct work_struct *work) +static void svc_i3c_master_ibi_isr(struct svc_i3c_master *master) { - struct svc_i3c_master *master = container_of(work, struct svc_i3c_master, ibi_work); struct svc_i3c_i2c_dev_data *data; unsigned int ibitype, ibiaddr; struct i3c_dev_desc *dev; @@ -504,7 +501,7 @@ static void svc_i3c_master_ibi_work(struct work_struct *work) * schedule during the whole I3C transaction, otherwise, the I3C bus timeout may happen if * any irq or schedule happen during transaction. */ - guard(spinlock_irqsave)(&master->xferqueue.lock); + guard(spinlock)(&master->xferqueue.lock); /* * IBIWON may be set before SVC_I3C_MCTRL_REQUEST_AUTO_IBI, causing @@ -530,7 +527,7 @@ static void svc_i3c_master_ibi_work(struct work_struct *work) if (ret) { dev_err(master->dev, "Timeout when polling for IBIWON\n"); svc_i3c_master_emit_stop(master); - goto reenable_ibis; + return; } status = readl(master->regs + SVC_I3C_MSTATUS); @@ -574,17 +571,17 @@ static void svc_i3c_master_ibi_work(struct work_struct *work) svc_i3c_master_emit_stop(master); - goto reenable_ibis; + return; } /* Handle the non critical tasks */ switch (ibitype) { case SVC_I3C_MSTATUS_IBITYPE_IBI: + svc_i3c_master_emit_stop(master); if (dev) { i3c_master_queue_ibi(dev, master->ibi.tbq_slot); master->ibi.tbq_slot = NULL; } - svc_i3c_master_emit_stop(master); break; case SVC_I3C_MSTATUS_IBITYPE_HOT_JOIN: svc_i3c_master_emit_stop(master); @@ -597,9 +594,6 @@ static void svc_i3c_master_ibi_work(struct work_struct *work) default: break; } - -reenable_ibis: - svc_i3c_master_enable_interrupts(master, SVC_I3C_MINT_SLVSTART); } static irqreturn_t svc_i3c_master_irq_handler(int irq, void *dev_id) @@ -618,10 +612,12 @@ static irqreturn_t svc_i3c_master_irq_handler(int irq, void *dev_id) !SVC_I3C_MSTATUS_STATE_SLVREQ(active)) return IRQ_HANDLED; - svc_i3c_master_disable_interrupts(master); - - /* Handle the interrupt in a non atomic context */ - queue_work(master->base.wq, &master->ibi_work); + /* + * The SDA line remains low until the request is processed. + * Receive the request in the interrupt context to respond promptly + * and restore the bus to idle state. 
+ */ + svc_i3c_master_ibi_isr(master); return IRQ_HANDLED; } @@ -1281,9 +1277,9 @@ static int svc_i3c_master_write(struct svc_i3c_master *master, static int svc_i3c_master_xfer(struct svc_i3c_master *master, bool rnw, unsigned int xfer_type, u8 addr, u8 *in, const u8 *out, unsigned int xfer_len, - unsigned int *actual_len, bool continued) + unsigned int *actual_len, bool continued, bool repeat_start) { - int retry = 2; + int retry = repeat_start ? 1 : 2; u32 reg; int ret; @@ -1468,7 +1464,7 @@ static void svc_i3c_master_start_xfer_locked(struct svc_i3c_master *master) ret = svc_i3c_master_xfer(master, cmd->rnw, xfer->type, cmd->addr, cmd->in, cmd->out, cmd->len, &cmd->actual_len, - cmd->continued); + cmd->continued, i > 0); /* cmd->xfer is NULL if I2C or CCC transfer */ if (cmd->xfer) cmd->xfer->actual_len = cmd->actual_len; @@ -1875,42 +1871,11 @@ static const struct i3c_master_controller_ops svc_i3c_master_ops = { .set_speed = svc_i3c_master_set_speed, }; -static int svc_i3c_master_prepare_clks(struct svc_i3c_master *master) -{ - int ret = 0; - - ret = clk_prepare_enable(master->pclk); - if (ret) - return ret; - - ret = clk_prepare_enable(master->fclk); - if (ret) { - clk_disable_unprepare(master->pclk); - return ret; - } - - ret = clk_prepare_enable(master->sclk); - if (ret) { - clk_disable_unprepare(master->pclk); - clk_disable_unprepare(master->fclk); - return ret; - } - - return 0; -} - -static void svc_i3c_master_unprepare_clks(struct svc_i3c_master *master) -{ - clk_disable_unprepare(master->pclk); - clk_disable_unprepare(master->fclk); - clk_disable_unprepare(master->sclk); -} - static int svc_i3c_master_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct svc_i3c_master *master; - int ret; + int ret, i; master = devm_kzalloc(dev, sizeof(*master), GFP_KERNEL); if (!master) @@ -1924,30 +1889,33 @@ static int svc_i3c_master_probe(struct platform_device *pdev) if (IS_ERR(master->regs)) return PTR_ERR(master->regs); - master->pclk = devm_clk_get(dev, "pclk"); - if (IS_ERR(master->pclk)) - return PTR_ERR(master->pclk); + master->num_clks = devm_clk_bulk_get_all(dev, &master->clks); + if (master->num_clks < 0) + return dev_err_probe(dev, -EINVAL, "can't get I3C clocks\n"); - master->fclk = devm_clk_get(dev, "fast_clk"); + for (i = 0; i < master->num_clks; i++) { + if (!strcmp(master->clks[i].id, "fast_clk")) + break; + } + + if (i == master->num_clks) + return dev_err_probe(dev, -EINVAL, + "can't get I3C peripheral clock\n"); + + master->fclk = master->clks[i].clk; if (IS_ERR(master->fclk)) return PTR_ERR(master->fclk); - master->sclk = devm_clk_get(dev, "slow_clk"); - if (IS_ERR(master->sclk)) - return PTR_ERR(master->sclk); - master->irq = platform_get_irq(pdev, 0); if (master->irq < 0) return master->irq; master->dev = dev; - - ret = svc_i3c_master_prepare_clks(master); + ret = clk_bulk_prepare_enable(master->num_clks, master->clks); if (ret) - return ret; + return dev_err_probe(dev, ret, "can't enable I3C clocks\n"); INIT_WORK(&master->hj_work, svc_i3c_master_hj_work); - INIT_WORK(&master->ibi_work, svc_i3c_master_ibi_work); mutex_init(&master->lock); ret = devm_request_irq(dev, master->irq, svc_i3c_master_irq_handler, @@ -1998,7 +1966,7 @@ rpm_disable: pm_runtime_set_suspended(&pdev->dev); err_disable_clks: - svc_i3c_master_unprepare_clks(master); + clk_bulk_disable_unprepare(master->num_clks, master->clks); return ret; } @@ -2036,7 +2004,7 @@ static int __maybe_unused svc_i3c_runtime_suspend(struct device *dev) struct svc_i3c_master *master = 
dev_get_drvdata(dev); svc_i3c_save_regs(master); - svc_i3c_master_unprepare_clks(master); + clk_bulk_disable_unprepare(master->num_clks, master->clks); pinctrl_pm_select_sleep_state(dev); return 0; @@ -2045,9 +2013,12 @@ static int __maybe_unused svc_i3c_runtime_suspend(struct device *dev) static int __maybe_unused svc_i3c_runtime_resume(struct device *dev) { struct svc_i3c_master *master = dev_get_drvdata(dev); + int ret; pinctrl_pm_select_default_state(dev); - svc_i3c_master_prepare_clks(master); + ret = clk_bulk_prepare_enable(master->num_clks, master->clks); + if (ret) + return ret; svc_i3c_restore_regs(master); diff --git a/drivers/iio/adc/qcom-spmi-rradc.c b/drivers/iio/adc/qcom-spmi-rradc.c index 63ebaf13ef19..f61ad0510f04 100644 --- a/drivers/iio/adc/qcom-spmi-rradc.c +++ b/drivers/iio/adc/qcom-spmi-rradc.c @@ -2,7 +2,7 @@ /* * Copyright (c) 2016-2017, 2019, The Linux Foundation. All rights reserved. * Copyright (c) 2022 Linaro Limited. - * Author: Caleb Connolly <caleb.connolly@linaro.org> + * Author: Casey Connolly <casey.connolly@linaro.org> * * This driver is for the Round Robin ADC found in the pmi8998 and pm660 PMICs. */ @@ -1016,5 +1016,5 @@ static struct platform_driver rradc_driver = { module_platform_driver(rradc_driver); MODULE_DESCRIPTION("QCOM SPMI PMIC RR ADC driver"); -MODULE_AUTHOR("Caleb Connolly <caleb.connolly@linaro.org>"); +MODULE_AUTHOR("Casey Connolly <casey.connolly@linaro.org>"); MODULE_LICENSE("GPL"); diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c index 142170473e75..8670e58675c6 100644 --- a/drivers/infiniband/core/cm.c +++ b/drivers/infiniband/core/cm.c @@ -36,6 +36,7 @@ MODULE_LICENSE("Dual BSD/GPL"); #define CM_DESTROY_ID_WAIT_TIMEOUT 10000 /* msecs */ #define CM_DIRECT_RETRY_CTX ((void *) 1UL) +#define CM_MRA_SETTING 24 /* 4.096us * 2^24 = ~68.7 seconds */ static const char * const ibcm_rej_reason_strs[] = { [IB_CM_REJ_NO_QP] = "no QP", @@ -167,7 +168,7 @@ struct cm_port { struct cm_device { struct kref kref; struct list_head list; - spinlock_t mad_agent_lock; + rwlock_t mad_agent_lock; struct ib_device *ib_device; u8 ack_delay; int going_down; @@ -241,7 +242,6 @@ struct cm_id_private { u8 initiator_depth; u8 retry_count; u8 rnr_retry_count; - u8 service_timeout; u8 target_ack_delay; struct list_head work_list; @@ -285,7 +285,7 @@ static struct ib_mad_send_buf *cm_alloc_msg(struct cm_id_private *cm_id_priv) if (!cm_id_priv->av.port) return ERR_PTR(-EINVAL); - spin_lock(&cm_id_priv->av.port->cm_dev->mad_agent_lock); + read_lock(&cm_id_priv->av.port->cm_dev->mad_agent_lock); mad_agent = cm_id_priv->av.port->mad_agent; if (!mad_agent) { m = ERR_PTR(-EINVAL); @@ -311,7 +311,7 @@ static struct ib_mad_send_buf *cm_alloc_msg(struct cm_id_private *cm_id_priv) m->ah = ah; out: - spin_unlock(&cm_id_priv->av.port->cm_dev->mad_agent_lock); + read_unlock(&cm_id_priv->av.port->cm_dev->mad_agent_lock); return m; } @@ -1297,10 +1297,10 @@ static __be64 cm_form_tid(struct cm_id_private *cm_id_priv) if (!cm_id_priv->av.port) return cpu_to_be64(low_tid); - spin_lock(&cm_id_priv->av.port->cm_dev->mad_agent_lock); + read_lock(&cm_id_priv->av.port->cm_dev->mad_agent_lock); if (cm_id_priv->av.port->mad_agent) hi_tid = ((u64)cm_id_priv->av.port->mad_agent->hi_tid) << 32; - spin_unlock(&cm_id_priv->av.port->cm_dev->mad_agent_lock); + read_unlock(&cm_id_priv->av.port->cm_dev->mad_agent_lock); return cpu_to_be64(hi_tid | low_tid); } @@ -1872,7 +1872,7 @@ static void cm_process_work(struct cm_id_private *cm_id_priv, static void cm_format_mra(struct 
cm_mra_msg *mra_msg, struct cm_id_private *cm_id_priv, - enum cm_msg_response msg_mraed, u8 service_timeout, + enum cm_msg_response msg_mraed, const void *private_data, u8 private_data_len) { cm_format_mad_hdr(&mra_msg->hdr, CM_MRA_ATTR_ID, cm_id_priv->tid); @@ -1881,7 +1881,7 @@ static void cm_format_mra(struct cm_mra_msg *mra_msg, be32_to_cpu(cm_id_priv->id.local_id)); IBA_SET(CM_MRA_REMOTE_COMM_ID, mra_msg, be32_to_cpu(cm_id_priv->id.remote_id)); - IBA_SET(CM_MRA_SERVICE_TIMEOUT, mra_msg, service_timeout); + IBA_SET(CM_MRA_SERVICE_TIMEOUT, mra_msg, CM_MRA_SETTING); if (private_data && private_data_len) IBA_SET_MEM(CM_MRA_PRIVATE_DATA, mra_msg, private_data, @@ -1960,7 +1960,7 @@ static void cm_dup_req_handler(struct cm_work *work, switch (cm_id_priv->id.state) { case IB_CM_MRA_REQ_SENT: cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv, - CM_MSG_RESPONSE_REQ, cm_id_priv->service_timeout, + CM_MSG_RESPONSE_REQ, cm_id_priv->private_data, cm_id_priv->private_data_len); break; @@ -2454,7 +2454,7 @@ static void cm_dup_rep_handler(struct cm_work *work) cm_id_priv->private_data_len); else if (cm_id_priv->id.state == IB_CM_MRA_REP_SENT) cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv, - CM_MSG_RESPONSE_REP, cm_id_priv->service_timeout, + CM_MSG_RESPONSE_REP, cm_id_priv->private_data, cm_id_priv->private_data_len); else @@ -3094,26 +3094,13 @@ out: return -EINVAL; } -int ib_send_cm_mra(struct ib_cm_id *cm_id, - u8 service_timeout, - const void *private_data, - u8 private_data_len) +int ib_prepare_cm_mra(struct ib_cm_id *cm_id) { struct cm_id_private *cm_id_priv; - struct ib_mad_send_buf *msg; enum ib_cm_state cm_state; enum ib_cm_lap_state lap_state; - enum cm_msg_response msg_response; - void *data; unsigned long flags; - int ret; - - if (private_data && private_data_len > IB_CM_MRA_PRIVATE_DATA_SIZE) - return -EINVAL; - - data = cm_copy_private_data(private_data, private_data_len); - if (IS_ERR(data)) - return PTR_ERR(data); + int ret = 0; cm_id_priv = container_of(cm_id, struct cm_id_private, id); @@ -3122,58 +3109,33 @@ int ib_send_cm_mra(struct ib_cm_id *cm_id, case IB_CM_REQ_RCVD: cm_state = IB_CM_MRA_REQ_SENT; lap_state = cm_id->lap_state; - msg_response = CM_MSG_RESPONSE_REQ; break; case IB_CM_REP_RCVD: cm_state = IB_CM_MRA_REP_SENT; lap_state = cm_id->lap_state; - msg_response = CM_MSG_RESPONSE_REP; break; case IB_CM_ESTABLISHED: if (cm_id->lap_state == IB_CM_LAP_RCVD) { cm_state = cm_id->state; lap_state = IB_CM_MRA_LAP_SENT; - msg_response = CM_MSG_RESPONSE_OTHER; break; } fallthrough; default: - trace_icm_send_mra_unknown_err(&cm_id_priv->id); + trace_icm_prepare_mra_unknown_err(&cm_id_priv->id); ret = -EINVAL; goto error_unlock; } - if (!(service_timeout & IB_CM_MRA_FLAG_DELAY)) { - msg = cm_alloc_msg(cm_id_priv); - if (IS_ERR(msg)) { - ret = PTR_ERR(msg); - goto error_unlock; - } - - cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv, - msg_response, service_timeout, - private_data, private_data_len); - trace_icm_send_mra(cm_id); - ret = ib_post_send_mad(msg, NULL); - if (ret) - goto error_free_msg; - } - cm_id->state = cm_state; cm_id->lap_state = lap_state; - cm_id_priv->service_timeout = service_timeout; - cm_set_private_data(cm_id_priv, data, private_data_len); - spin_unlock_irqrestore(&cm_id_priv->lock, flags); - return 0; + cm_set_private_data(cm_id_priv, NULL, 0); -error_free_msg: - cm_free_msg(msg); error_unlock: spin_unlock_irqrestore(&cm_id_priv->lock, flags); - kfree(data); return ret; } -EXPORT_SYMBOL(ib_send_cm_mra); 
+EXPORT_SYMBOL(ib_prepare_cm_mra); static struct cm_id_private *cm_acquire_mraed_id(struct cm_mra_msg *mra_msg) { @@ -3377,7 +3339,6 @@ static int cm_lap_handler(struct cm_work *work) cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv, CM_MSG_RESPONSE_OTHER, - cm_id_priv->service_timeout, cm_id_priv->private_data, cm_id_priv->private_data_len); spin_unlock_irq(&cm_id_priv->lock); @@ -3786,7 +3747,8 @@ static void cm_process_send_error(struct cm_id_private *cm_id_priv, spin_lock_irq(&cm_id_priv->lock); if (msg != cm_id_priv->msg) { spin_unlock_irq(&cm_id_priv->lock); - cm_free_priv_msg(msg); + cm_free_msg(msg); + cm_deref_id(cm_id_priv); return; } cm_free_priv_msg(msg); @@ -4378,7 +4340,7 @@ static int cm_add_one(struct ib_device *ib_device) return -ENOMEM; kref_init(&cm_dev->kref); - spin_lock_init(&cm_dev->mad_agent_lock); + rwlock_init(&cm_dev->mad_agent_lock); cm_dev->ib_device = ib_device; cm_dev->ack_delay = ib_device->attrs.local_ca_ack_delay; cm_dev->going_down = 0; @@ -4494,9 +4456,9 @@ static void cm_remove_one(struct ib_device *ib_device, void *client_data) * The above ensures no call paths from the work are running, * the remaining paths all take the mad_agent_lock. */ - spin_lock(&cm_dev->mad_agent_lock); + write_lock(&cm_dev->mad_agent_lock); port->mad_agent = NULL; - spin_unlock(&cm_dev->mad_agent_lock); + write_unlock(&cm_dev->mad_agent_lock); ib_unregister_mad_agent(mad_agent); ib_port_unregister_client_groups(ib_device, i, cm_counter_groups); diff --git a/drivers/infiniband/core/cm_trace.h b/drivers/infiniband/core/cm_trace.h index 944d9071245d..4a4987da69d4 100644 --- a/drivers/infiniband/core/cm_trace.h +++ b/drivers/infiniband/core/cm_trace.h @@ -229,7 +229,7 @@ DEFINE_CM_ERR_EVENT(send_drep); DEFINE_CM_ERR_EVENT(dreq_unknown); DEFINE_CM_ERR_EVENT(send_unknown_rej); DEFINE_CM_ERR_EVENT(rej_unknown); -DEFINE_CM_ERR_EVENT(send_mra_unknown); +DEFINE_CM_ERR_EVENT(prepare_mra_unknown); DEFINE_CM_ERR_EVENT(mra_unknown); DEFINE_CM_ERR_EVENT(qp_init); DEFINE_CM_ERR_EVENT(qp_rtr); diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index ab31eefa916b..9b471548e7ae 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -46,7 +46,6 @@ MODULE_LICENSE("Dual BSD/GPL"); #define CMA_CM_RESPONSE_TIMEOUT 20 #define CMA_MAX_CM_RETRIES 15 -#define CMA_CM_MRA_SETTING (IB_CM_MRA_FLAG_DELAY | 24) #define CMA_IBOE_PACKET_LIFETIME 16 #define CMA_PREFERRED_ROCE_GID_TYPE IB_GID_TYPE_ROCE_UDP_ENCAP @@ -146,19 +145,6 @@ struct iw_cm_id *rdma_iw_cm_id(struct rdma_cm_id *id) } EXPORT_SYMBOL(rdma_iw_cm_id); -/** - * rdma_res_to_id() - return the rdma_cm_id pointer for this restrack. 
- * @res: rdma resource tracking entry pointer - */ -struct rdma_cm_id *rdma_res_to_id(struct rdma_restrack_entry *res) -{ - struct rdma_id_private *id_priv = - container_of(res, struct rdma_id_private, res); - - return &id_priv->id; -} -EXPORT_SYMBOL(rdma_res_to_id); - static int cma_add_one(struct ib_device *device); static void cma_remove_one(struct ib_device *device, void *client_data); @@ -2214,8 +2200,8 @@ static int cma_ib_handler(struct ib_cm_id *cm_id, case IB_CM_REP_RECEIVED: if (state == RDMA_CM_CONNECT && (id_priv->id.qp_type != IB_QPT_UD)) { - trace_cm_send_mra(id_priv); - ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0); + trace_cm_prepare_mra(id_priv); + ib_prepare_cm_mra(cm_id); } if (id_priv->id.qp) { event.status = cma_rep_recv(id_priv); @@ -2476,8 +2462,8 @@ static int cma_ib_req_handler(struct ib_cm_id *cm_id, if (READ_ONCE(conn_id->state) == RDMA_CM_CONNECT && conn_id->id.qp_type != IB_QPT_UD) { - trace_cm_send_mra(cm_id->context); - ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0); + trace_cm_prepare_mra(cm_id->context); + ib_prepare_cm_mra(cm_id); } mutex_unlock(&conn_id->handler_mutex); @@ -5245,7 +5231,8 @@ static int cma_netevent_callback(struct notifier_block *self, neigh->ha, ETH_ALEN)) continue; cma_id_get(current_id); - queue_work(cma_wq, &current_id->id.net_work); + if (!queue_work(cma_wq, &current_id->id.net_work)) + cma_id_put(current_id); } out: spin_unlock_irqrestore(&id_table_lock, flags); diff --git a/drivers/infiniband/core/cma_trace.h b/drivers/infiniband/core/cma_trace.h index dc622f3778be..3456d5f3aa47 100644 --- a/drivers/infiniband/core/cma_trace.h +++ b/drivers/infiniband/core/cma_trace.h @@ -55,7 +55,7 @@ DECLARE_EVENT_CLASS(cma_fsm_class, DEFINE_CMA_FSM_EVENT(send_rtu); DEFINE_CMA_FSM_EVENT(send_rej); -DEFINE_CMA_FSM_EVENT(send_mra); +DEFINE_CMA_FSM_EVENT(prepare_mra); DEFINE_CMA_FSM_EVENT(send_sidr_req); DEFINE_CMA_FSM_EVENT(send_sidr_rep); DEFINE_CMA_FSM_EVENT(disconnect); diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c index f4486cbd8f45..62410578dec3 100644 --- a/drivers/infiniband/core/iwcm.c +++ b/drivers/infiniband/core/iwcm.c @@ -368,12 +368,9 @@ EXPORT_SYMBOL(iw_cm_disconnect); /* * CM_ID <-- DESTROYING * - * Clean up all resources associated with the connection and release - * the initial reference taken by iw_create_cm_id. - * - * Returns true if and only if the last cm_id_priv reference has been dropped. + * Clean up all resources associated with the connection. */ -static bool destroy_cm_id(struct iw_cm_id *cm_id) +static void destroy_cm_id(struct iw_cm_id *cm_id) { struct iwcm_id_private *cm_id_priv; struct ib_qp *qp; @@ -442,20 +439,22 @@ static bool destroy_cm_id(struct iw_cm_id *cm_id) iwpm_remove_mapinfo(&cm_id->local_addr, &cm_id->m_local_addr); iwpm_remove_mapping(&cm_id->local_addr, RDMA_NL_IWCM); } - - return iwcm_deref_id(cm_id_priv); } /* - * This function is only called by the application thread and cannot - * be called by the event thread. The function will wait for all - * references to be released on the cm_id and then kfree the cm_id - * object. + * Destroy cm_id. If the cm_id still has other references, wait for all + * references to be released on the cm_id and then release the initial + * reference taken by iw_create_cm_id. 
*/ void iw_destroy_cm_id(struct iw_cm_id *cm_id) { - if (!destroy_cm_id(cm_id)) + struct iwcm_id_private *cm_id_priv; + + cm_id_priv = container_of(cm_id, struct iwcm_id_private, id); + destroy_cm_id(cm_id); + if (refcount_read(&cm_id_priv->refcount) > 1) flush_workqueue(iwcm_wq); + iwcm_deref_id(cm_id_priv); } EXPORT_SYMBOL(iw_destroy_cm_id); @@ -1035,8 +1034,10 @@ static void cm_work_handler(struct work_struct *_work) if (!test_bit(IWCM_F_DROP_EVENTS, &cm_id_priv->flags)) { ret = process_event(cm_id_priv, &levent); - if (ret) - WARN_ON_ONCE(destroy_cm_id(&cm_id_priv->id)); + if (ret) { + destroy_cm_id(&cm_id_priv->id); + WARN_ON_ONCE(iwcm_deref_id(cm_id_priv)); + } } else pr_debug("dropping event %d\n", levent.event); if (iwcm_deref_id(cm_id_priv)) diff --git a/drivers/infiniband/core/mad_rmpp.c b/drivers/infiniband/core/mad_rmpp.c index 8af0619a39cd..b4b10e8a6495 100644 --- a/drivers/infiniband/core/mad_rmpp.c +++ b/drivers/infiniband/core/mad_rmpp.c @@ -158,7 +158,7 @@ static struct ib_mad_send_buf *alloc_response_msg(struct ib_mad_agent *agent, ah = ib_create_ah_from_wc(agent->qp->pd, recv_wc->wc, recv_wc->recv_buf.grh, agent->port_num); if (IS_ERR(ah)) - return (void *) ah; + return ERR_CAST(ah); hdr_len = ib_get_mad_data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class); msg = ib_create_send_mad(agent, recv_wc->wc->src_qp, diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c index c48ef6083020..c752ae9fad6c 100644 --- a/drivers/infiniband/core/umem_odp.c +++ b/drivers/infiniband/core/umem_odp.c @@ -41,67 +41,72 @@ #include <linux/hugetlb.h> #include <linux/interval_tree.h> #include <linux/hmm.h> +#include <linux/hmm-dma.h> #include <linux/pagemap.h> #include <rdma/ib_umem_odp.h> #include "uverbs.h" -static inline int ib_init_umem_odp(struct ib_umem_odp *umem_odp, - const struct mmu_interval_notifier_ops *ops) +static void ib_init_umem_implicit_odp(struct ib_umem_odp *umem_odp) { - int ret; + umem_odp->is_implicit_odp = 1; + umem_odp->umem.is_odp = 1; + mutex_init(&umem_odp->umem_mutex); +} + +static int ib_init_umem_odp(struct ib_umem_odp *umem_odp, + const struct mmu_interval_notifier_ops *ops) +{ + struct ib_device *dev = umem_odp->umem.ibdev; + size_t page_size = 1UL << umem_odp->page_shift; + struct hmm_dma_map *map; + unsigned long start; + unsigned long end; + size_t nr_entries; + int ret = 0; umem_odp->umem.is_odp = 1; mutex_init(&umem_odp->umem_mutex); - if (!umem_odp->is_implicit_odp) { - size_t page_size = 1UL << umem_odp->page_shift; - unsigned long start; - unsigned long end; - size_t ndmas, npfns; - - start = ALIGN_DOWN(umem_odp->umem.address, page_size); - if (check_add_overflow(umem_odp->umem.address, - (unsigned long)umem_odp->umem.length, - &end)) - return -EOVERFLOW; - end = ALIGN(end, page_size); - if (unlikely(end < page_size)) - return -EOVERFLOW; - - ndmas = (end - start) >> umem_odp->page_shift; - if (!ndmas) - return -EINVAL; - - npfns = (end - start) >> PAGE_SHIFT; - umem_odp->pfn_list = kvcalloc( - npfns, sizeof(*umem_odp->pfn_list), - GFP_KERNEL | __GFP_NOWARN); - if (!umem_odp->pfn_list) - return -ENOMEM; - - umem_odp->dma_list = kvcalloc( - ndmas, sizeof(*umem_odp->dma_list), - GFP_KERNEL | __GFP_NOWARN); - if (!umem_odp->dma_list) { + start = ALIGN_DOWN(umem_odp->umem.address, page_size); + if (check_add_overflow(umem_odp->umem.address, + (unsigned long)umem_odp->umem.length, &end)) + return -EOVERFLOW; + end = ALIGN(end, page_size); + if (unlikely(end < page_size)) + return -EOVERFLOW; + + nr_entries = (end - start) >> 
PAGE_SHIFT; + if (!(nr_entries * PAGE_SIZE / page_size)) + return -EINVAL; + + map = &umem_odp->map; + if (ib_uses_virt_dma(dev)) { + map->pfn_list = kvcalloc(nr_entries, sizeof(*map->pfn_list), + GFP_KERNEL | __GFP_NOWARN); + if (!map->pfn_list) ret = -ENOMEM; - goto out_pfn_list; - } + } else + ret = hmm_dma_map_alloc(dev->dma_device, map, + (end - start) >> PAGE_SHIFT, + 1 << umem_odp->page_shift); + if (ret) + return ret; - ret = mmu_interval_notifier_insert(&umem_odp->notifier, - umem_odp->umem.owning_mm, - start, end - start, ops); - if (ret) - goto out_dma_list; - } + ret = mmu_interval_notifier_insert(&umem_odp->notifier, + umem_odp->umem.owning_mm, start, + end - start, ops); + if (ret) + goto out_free_map; return 0; -out_dma_list: - kvfree(umem_odp->dma_list); -out_pfn_list: - kvfree(umem_odp->pfn_list); +out_free_map: + if (ib_uses_virt_dma(dev)) + kfree(map->pfn_list); + else + hmm_dma_map_free(dev->dma_device, map); return ret; } @@ -120,7 +125,6 @@ struct ib_umem_odp *ib_umem_odp_alloc_implicit(struct ib_device *device, { struct ib_umem *umem; struct ib_umem_odp *umem_odp; - int ret; if (access & IB_ACCESS_HUGETLB) return ERR_PTR(-EINVAL); @@ -132,16 +136,10 @@ struct ib_umem_odp *ib_umem_odp_alloc_implicit(struct ib_device *device, umem->ibdev = device; umem->writable = ib_access_writable(access); umem->owning_mm = current->mm; - umem_odp->is_implicit_odp = 1; umem_odp->page_shift = PAGE_SHIFT; umem_odp->tgid = get_task_pid(current->group_leader, PIDTYPE_PID); - ret = ib_init_umem_odp(umem_odp, NULL); - if (ret) { - put_pid(umem_odp->tgid); - kfree(umem_odp); - return ERR_PTR(ret); - } + ib_init_umem_implicit_odp(umem_odp); return umem_odp; } EXPORT_SYMBOL(ib_umem_odp_alloc_implicit); @@ -262,74 +260,41 @@ err_put_pid: } EXPORT_SYMBOL(ib_umem_odp_get); -void ib_umem_odp_release(struct ib_umem_odp *umem_odp) +static void ib_umem_odp_free(struct ib_umem_odp *umem_odp) { + struct ib_device *dev = umem_odp->umem.ibdev; + /* * Ensure that no more pages are mapped in the umem. * * It is the driver's responsibility to ensure, before calling us, * that the hardware will not attempt to access the MR any more. */ - if (!umem_odp->is_implicit_odp) { - mutex_lock(&umem_odp->umem_mutex); - ib_umem_odp_unmap_dma_pages(umem_odp, ib_umem_start(umem_odp), - ib_umem_end(umem_odp)); - mutex_unlock(&umem_odp->umem_mutex); - mmu_interval_notifier_remove(&umem_odp->notifier); - kvfree(umem_odp->dma_list); - kvfree(umem_odp->pfn_list); - } - put_pid(umem_odp->tgid); - kfree(umem_odp); + mutex_lock(&umem_odp->umem_mutex); + ib_umem_odp_unmap_dma_pages(umem_odp, ib_umem_start(umem_odp), + ib_umem_end(umem_odp)); + mutex_unlock(&umem_odp->umem_mutex); + mmu_interval_notifier_remove(&umem_odp->notifier); + if (ib_uses_virt_dma(dev)) + kfree(umem_odp->map.pfn_list); + else + hmm_dma_map_free(dev->dma_device, &umem_odp->map); } -EXPORT_SYMBOL(ib_umem_odp_release); -/* - * Map for DMA and insert a single page into the on-demand paging page tables. - * - * @umem: the umem to insert the page to. - * @dma_index: index in the umem to add the dma to. - * @page: the page struct to map and add. - * @access_mask: access permissions needed for this page. - * - * The function returns -EFAULT if the DMA mapping operation fails. 
- * - */ -static int ib_umem_odp_map_dma_single_page( - struct ib_umem_odp *umem_odp, - unsigned int dma_index, - struct page *page, - u64 access_mask) +void ib_umem_odp_release(struct ib_umem_odp *umem_odp) { - struct ib_device *dev = umem_odp->umem.ibdev; - dma_addr_t *dma_addr = &umem_odp->dma_list[dma_index]; - - if (*dma_addr) { - /* - * If the page is already dma mapped it means it went through - * a non-invalidating trasition, like read-only to writable. - * Resync the flags. - */ - *dma_addr = (*dma_addr & ODP_DMA_ADDR_MASK) | access_mask; - return 0; - } + if (!umem_odp->is_implicit_odp) + ib_umem_odp_free(umem_odp); - *dma_addr = ib_dma_map_page(dev, page, 0, 1 << umem_odp->page_shift, - DMA_BIDIRECTIONAL); - if (ib_dma_mapping_error(dev, *dma_addr)) { - *dma_addr = 0; - return -EFAULT; - } - umem_odp->npages++; - *dma_addr |= access_mask; - return 0; + put_pid(umem_odp->tgid); + kfree(umem_odp); } +EXPORT_SYMBOL(ib_umem_odp_release); /** * ib_umem_odp_map_dma_and_lock - DMA map userspace memory in an ODP MR and lock it. * * Maps the range passed in the argument to DMA addresses. - * The DMA addresses of the mapped pages is updated in umem_odp->dma_list. * Upon success the ODP MR will be locked to let caller complete its device * page table update. * @@ -357,9 +322,6 @@ int ib_umem_odp_map_dma_and_lock(struct ib_umem_odp *umem_odp, u64 user_virt, struct hmm_range range = {}; unsigned long timeout; - if (access_mask == 0) - return -EINVAL; - if (user_virt < ib_umem_start(umem_odp) || user_virt + bcnt > ib_umem_end(umem_odp)) return -EFAULT; @@ -385,11 +347,11 @@ int ib_umem_odp_map_dma_and_lock(struct ib_umem_odp *umem_odp, u64 user_virt, if (fault) { range.default_flags = HMM_PFN_REQ_FAULT; - if (access_mask & ODP_WRITE_ALLOWED_BIT) + if (access_mask & HMM_PFN_WRITE) range.default_flags |= HMM_PFN_REQ_WRITE; } - range.hmm_pfns = &(umem_odp->pfn_list[pfn_start_idx]); + range.hmm_pfns = &(umem_odp->map.pfn_list[pfn_start_idx]); timeout = jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT); retry: @@ -417,22 +379,17 @@ retry: for (pfn_index = 0; pfn_index < num_pfns; pfn_index += 1 << (page_shift - PAGE_SHIFT), dma_index++) { - if (fault) { - /* - * Since we asked for hmm_range_fault() to populate - * pages it shouldn't return an error entry on success. - */ - WARN_ON(range.hmm_pfns[pfn_index] & HMM_PFN_ERROR); - WARN_ON(!(range.hmm_pfns[pfn_index] & HMM_PFN_VALID)); - } else { - if (!(range.hmm_pfns[pfn_index] & HMM_PFN_VALID)) { - WARN_ON(umem_odp->dma_list[dma_index]); - continue; - } - access_mask = ODP_READ_ALLOWED_BIT; - if (range.hmm_pfns[pfn_index] & HMM_PFN_WRITE) - access_mask |= ODP_WRITE_ALLOWED_BIT; - } + /* + * Since we asked for hmm_range_fault() to populate + * pages it shouldn't return an error entry on success. 
+ */ + WARN_ON(fault && range.hmm_pfns[pfn_index] & HMM_PFN_ERROR); + WARN_ON(fault && !(range.hmm_pfns[pfn_index] & HMM_PFN_VALID)); + if (!(range.hmm_pfns[pfn_index] & HMM_PFN_VALID)) + continue; + + if (range.hmm_pfns[pfn_index] & HMM_PFN_DMA_MAPPED) + continue; hmm_order = hmm_pfn_to_map_order(range.hmm_pfns[pfn_index]); /* If a hugepage was detected and ODP wasn't set for, the umem @@ -445,15 +402,6 @@ retry: __func__, hmm_order, page_shift); break; } - - ret = ib_umem_odp_map_dma_single_page( - umem_odp, dma_index, hmm_pfn_to_page(range.hmm_pfns[pfn_index]), - access_mask); - if (ret < 0) { - ibdev_dbg(umem_odp->umem.ibdev, - "ib_umem_odp_map_dma_single_page failed with error %d\n", ret); - break; - } } /* upon success lock should stay on hold for the callee */ if (!ret) @@ -473,45 +421,38 @@ EXPORT_SYMBOL(ib_umem_odp_map_dma_and_lock); void ib_umem_odp_unmap_dma_pages(struct ib_umem_odp *umem_odp, u64 virt, u64 bound) { - dma_addr_t dma_addr; - dma_addr_t dma; - int idx; - u64 addr; struct ib_device *dev = umem_odp->umem.ibdev; + u64 addr; lockdep_assert_held(&umem_odp->umem_mutex); virt = max_t(u64, virt, ib_umem_start(umem_odp)); bound = min_t(u64, bound, ib_umem_end(umem_odp)); for (addr = virt; addr < bound; addr += BIT(umem_odp->page_shift)) { - idx = (addr - ib_umem_start(umem_odp)) >> umem_odp->page_shift; - dma = umem_odp->dma_list[idx]; - - /* The access flags guaranteed a valid DMA address in case was NULL */ - if (dma) { - unsigned long pfn_idx = (addr - ib_umem_start(umem_odp)) >> PAGE_SHIFT; - struct page *page = hmm_pfn_to_page(umem_odp->pfn_list[pfn_idx]); - - dma_addr = dma & ODP_DMA_ADDR_MASK; - ib_dma_unmap_page(dev, dma_addr, - BIT(umem_odp->page_shift), - DMA_BIDIRECTIONAL); - if (dma & ODP_WRITE_ALLOWED_BIT) { - struct page *head_page = compound_head(page); - /* - * set_page_dirty prefers being called with - * the page lock. However, MMU notifiers are - * called sometimes with and sometimes without - * the lock. We rely on the umem_mutex instead - * to prevent other mmu notifiers from - * continuing and allowing the page mapping to - * be removed. - */ - set_page_dirty(head_page); - } - umem_odp->dma_list[idx] = 0; - umem_odp->npages--; + u64 offset = addr - ib_umem_start(umem_odp); + size_t idx = offset >> umem_odp->page_shift; + unsigned long pfn = umem_odp->map.pfn_list[idx]; + + if (!hmm_dma_unmap_pfn(dev->dma_device, &umem_odp->map, idx)) + goto clear; + + if (pfn & HMM_PFN_WRITE) { + struct page *page = hmm_pfn_to_page(pfn); + struct page *head_page = compound_head(page); + /* + * set_page_dirty prefers being called with + * the page lock. However, MMU notifiers are + * called sometimes with and sometimes without + * the lock. We rely on the umem_mutex instead + * to prevent other mmu notifiers from + * continuing and allowing the page mapping to + * be removed. 
+ */ + set_page_dirty(head_page); } + umem_odp->npages--; +clear: + umem_odp->map.pfn_list[idx] &= ~HMM_PFN_FLAGS; } } EXPORT_SYMBOL(ib_umem_odp_unmap_dma_pages); diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index 3c3bb670c805..bc9fe3ceca4d 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c @@ -193,7 +193,7 @@ _ib_uverbs_lookup_comp_file(s32 fd, struct uverbs_attr_bundle *attrs) fd, attrs); if (IS_ERR(uobj)) - return (void *)uobj; + return ERR_CAST(uobj); uverbs_uobject_get(uobj); uobj_put_read(uobj); diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c index c5e78bbefbd0..75fde0fe9989 100644 --- a/drivers/infiniband/core/verbs.c +++ b/drivers/infiniband/core/verbs.c @@ -572,7 +572,7 @@ struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr, GFP_KERNEL : GFP_ATOMIC); if (IS_ERR(slave)) { rdma_unfill_sgid_attr(ah_attr, old_sgid_attr); - return (void *)slave; + return ERR_CAST(slave); } ah = _rdma_create_ah(pd, ah_attr, flags, NULL, slave); rdma_lag_put_ah_roce_slave(slave); diff --git a/drivers/infiniband/hw/bnxt_re/debugfs.c b/drivers/infiniband/hw/bnxt_re/debugfs.c index af91d16c3c77..e632f1661b92 100644 --- a/drivers/infiniband/hw/bnxt_re/debugfs.c +++ b/drivers/infiniband/hw/bnxt_re/debugfs.c @@ -170,6 +170,9 @@ static int map_cc_config_offset_gen0_ext0(u32 offset, struct bnxt_qplib_cc_param case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_TCP_CP: *val = ccparam->tcp_cp; break; + case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_INACTIVITY_CP: + *val = ccparam->inact_th; + break; default: return -EINVAL; } @@ -203,7 +206,7 @@ static ssize_t bnxt_re_cc_config_get(struct file *filp, char __user *buffer, return simple_read_from_buffer(buffer, usr_buf_len, ppos, (u8 *)(buf), rc); } -static void bnxt_re_fill_gen0_ext0(struct bnxt_qplib_cc_param *ccparam, u32 offset, u32 val) +static int bnxt_re_fill_gen0_ext0(struct bnxt_qplib_cc_param *ccparam, u32 offset, u32 val) { u32 modify_mask; @@ -247,7 +250,9 @@ static void bnxt_re_fill_gen0_ext0(struct bnxt_qplib_cc_param *ccparam, u32 offs ccparam->tcp_cp = val; break; case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_TX_QUEUE: + return -EOPNOTSUPP; case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_INACTIVITY_CP: + ccparam->inact_th = val; break; case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_TIME_PER_PHASE: ccparam->time_pph = val; @@ -258,17 +263,20 @@ static void bnxt_re_fill_gen0_ext0(struct bnxt_qplib_cc_param *ccparam, u32 offs } ccparam->mask = modify_mask; + return 0; } static int bnxt_re_configure_cc(struct bnxt_re_dev *rdev, u32 gen_ext, u32 offset, u32 val) { struct bnxt_qplib_cc_param ccparam = { }; + int rc; - /* Supporting only Gen 0 now */ - if (gen_ext == CC_CONFIG_GEN0_EXT0) - bnxt_re_fill_gen0_ext0(&ccparam, offset, val); - else - return -EINVAL; + if (gen_ext != CC_CONFIG_GEN0_EXT0) + return -EOPNOTSUPP; + + rc = bnxt_re_fill_gen0_ext0(&ccparam, offset, val); + if (rc) + return rc; bnxt_qplib_modify_cc(&rdev->qplib_res, &ccparam); return 0; diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c index 457eecb99f96..be34c605d516 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c @@ -1113,7 +1113,7 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FORCE_COMPLETION; if (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE) qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_VARIABLE_SIZED_WQE_ENABLED; - if 
(_is_ext_stats_supported(res->dattr->dev_cap_flags) && !res->is_vf) + if (bnxt_ext_stats_supported(res->cctx, res->dattr->dev_cap_flags, res->is_vf)) qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_EXT_STATS_ENABLED; req.qp_flags = cpu_to_le32(qp_flags); diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c index f231e886ad9d..9efd32a3dc55 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c @@ -846,7 +846,12 @@ int bnxt_qplib_qext_stat(struct bnxt_qplib_rcfw *rcfw, u32 fid, req.resp_size = sbuf.size / BNXT_QPLIB_CMDQE_UNITS; req.resp_addr = cpu_to_le64(sbuf.dma_addr); - req.function_id = cpu_to_le32(fid); + if (bnxt_qplib_is_chip_gen_p7(rcfw->res->cctx) && rcfw->res->is_vf) + req.function_id = + cpu_to_le32(CMDQ_QUERY_ROCE_STATS_EXT_VF_VALID | + (fid << CMDQ_QUERY_ROCE_STATS_EXT_VF_NUM_SFT)); + else + req.function_id = cpu_to_le32(fid); req.flags = cpu_to_le16(CMDQ_QUERY_ROCE_STATS_EXT_FLAGS_FUNCTION_ID); bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req), diff --git a/drivers/infiniband/hw/hfi1/mad.h b/drivers/infiniband/hw/hfi1/mad.h index b6e3141253c4..d6dde762921a 100644 --- a/drivers/infiniband/hw/hfi1/mad.h +++ b/drivers/infiniband/hw/hfi1/mad.h @@ -124,7 +124,6 @@ struct opa_mad_notice_attr { } __packed ntc_2048; }; - u8 class_data[]; }; #define IB_VLARB_LOWPRI_0_31 1 diff --git a/drivers/infiniband/hw/hfi1/pio.c b/drivers/infiniband/hw/hfi1/pio.c index 5a91cbda4aee..764286da2ce8 100644 --- a/drivers/infiniband/hw/hfi1/pio.c +++ b/drivers/infiniband/hw/hfi1/pio.c @@ -1361,16 +1361,6 @@ void sc_flush(struct send_context *sc) sc_wait_for_packet_egress(sc, 1); } -/* drop all packets on the context, no waiting until they are sent */ -void sc_drop(struct send_context *sc) -{ - if (!sc) - return; - - dd_dev_info(sc->dd, "%s: context %u(%u) - not implemented\n", - __func__, sc->sw_index, sc->hw_context); -} - /* * Start the software reaction to a context halt or SPC freeze: * - mark the context as halted or frozen diff --git a/drivers/infiniband/hw/hfi1/pio.h b/drivers/infiniband/hw/hfi1/pio.h index d07cc6ea7c63..ab0f9a3a8d12 100644 --- a/drivers/infiniband/hw/hfi1/pio.h +++ b/drivers/infiniband/hw/hfi1/pio.h @@ -246,7 +246,6 @@ void sc_disable(struct send_context *sc); int sc_restart(struct send_context *sc); void sc_return_credits(struct send_context *sc); void sc_flush(struct send_context *sc); -void sc_drop(struct send_context *sc); void sc_stop(struct send_context *sc, int bit); struct pio_buf *sc_buffer_alloc(struct send_context *sc, u32 dw_len, pio_release_cb cb, void *arg); diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c index 0d2b39b7c8b5..16a749d16ee9 100644 --- a/drivers/infiniband/hw/hfi1/sdma.c +++ b/drivers/infiniband/hw/hfi1/sdma.c @@ -1521,24 +1521,6 @@ void sdma_all_running(struct hfi1_devdata *dd) } /** - * sdma_all_idle() - called when the link goes down - * @dd: hfi1_devdata - * - * This routine moves all engines to the idle state. 
- */ -void sdma_all_idle(struct hfi1_devdata *dd) -{ - struct sdma_engine *sde; - unsigned int i; - - /* idle all engines */ - for (i = 0; i < dd->num_sdma; ++i) { - sde = &dd->per_sdma[i]; - sdma_process_event(sde, sdma_event_e70_go_idle); - } -} - -/** * sdma_start() - called to kick off state processing for all engines * @dd: hfi1_devdata * diff --git a/drivers/infiniband/hw/hfi1/sdma.h b/drivers/infiniband/hw/hfi1/sdma.h index d77246b48434..91dfd5d0c419 100644 --- a/drivers/infiniband/hw/hfi1/sdma.h +++ b/drivers/infiniband/hw/hfi1/sdma.h @@ -373,7 +373,6 @@ void sdma_start(struct hfi1_devdata *dd); void sdma_exit(struct hfi1_devdata *dd); void sdma_clean(struct hfi1_devdata *dd, size_t num_engines); void sdma_all_running(struct hfi1_devdata *dd); -void sdma_all_idle(struct hfi1_devdata *dd); void sdma_freeze_notify(struct hfi1_devdata *dd, int go_idle); void sdma_freeze(struct hfi1_devdata *dd); void sdma_unfreeze(struct hfi1_devdata *dd); diff --git a/drivers/infiniband/hw/hfi1/user_exp_rcv.c b/drivers/infiniband/hw/hfi1/user_exp_rcv.c index cf2d29098406..62b4f16dab27 100644 --- a/drivers/infiniband/hw/hfi1/user_exp_rcv.c +++ b/drivers/infiniband/hw/hfi1/user_exp_rcv.c @@ -53,7 +53,7 @@ int hfi1_user_exp_rcv_init(struct hfi1_filedata *fd, int ret = 0; fd->entry_to_rb = kcalloc(uctxt->expected_count, - sizeof(struct rb_node *), + sizeof(*fd->entry_to_rb), GFP_KERNEL); if (!fd->entry_to_rb) return -ENOMEM; diff --git a/drivers/infiniband/hw/hns/Makefile b/drivers/infiniband/hw/hns/Makefile index 7917af8e6380..baf592e6f21b 100644 --- a/drivers/infiniband/hw/hns/Makefile +++ b/drivers/infiniband/hw/hns/Makefile @@ -4,6 +4,7 @@ # ccflags-y := -I $(srctree)/drivers/net/ethernet/hisilicon/hns3 +ccflags-y += -I $(src) hns-roce-hw-v2-objs := hns_roce_main.o hns_roce_cmd.o hns_roce_pd.o \ hns_roce_ah.o hns_roce_hem.o hns_roce_mr.o hns_roce_qp.o \ diff --git a/drivers/infiniband/hw/hns/hns_roce_ah.c b/drivers/infiniband/hw/hns/hns_roce_ah.c index 4fc5b9d5fea8..307c35888b30 100644 --- a/drivers/infiniband/hw/hns/hns_roce_ah.c +++ b/drivers/infiniband/hw/hns/hns_roce_ah.c @@ -33,7 +33,6 @@ #include <linux/pci.h> #include <rdma/ib_addr.h> #include <rdma/ib_cache.h> -#include "hnae3.h" #include "hns_roce_device.h" #include "hns_roce_hw_v2.h" diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h index 560a1d9de408..1dcc9cbb4678 100644 --- a/drivers/infiniband/hw/hns/hns_roce_device.h +++ b/drivers/infiniband/hw/hns/hns_roce_device.h @@ -1027,6 +1027,26 @@ struct hns_roce_dev { atomic64_t *dfx_cnt; }; +enum hns_roce_trace_type { + TRACE_SQ, + TRACE_RQ, + TRACE_SRQ, +}; + +static inline const char *trace_type_to_str(enum hns_roce_trace_type type) +{ + switch (type) { + case TRACE_SQ: + return "SQ"; + case TRACE_RQ: + return "RQ"; + case TRACE_SRQ: + return "SRQ"; + default: + return "UNKNOWN"; + } +} + static inline struct hns_roce_dev *to_hr_dev(struct ib_device *ib_dev) { return container_of(ib_dev, struct hns_roce_dev, ib_dev); diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index 160e8927d364..fa8747656f25 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -43,13 +43,15 @@ #include <rdma/ib_umem.h> #include <rdma/uverbs_ioctl.h> -#include "hnae3.h" #include "hns_roce_common.h" #include "hns_roce_device.h" #include "hns_roce_cmd.h" #include "hns_roce_hem.h" #include "hns_roce_hw_v2.h" +#define CREATE_TRACE_POINTS +#include "hns_roce_trace.h" 
+ enum { CMD_RST_PRC_OTHERS, CMD_RST_PRC_SUCCESS, @@ -738,6 +740,8 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, else ret = set_ud_wqe(qp, wr, wqe, &sge_idx, owner_bit); + trace_hns_sq_wqe(qp->qpn, wqe_idx, wqe, 1 << qp->sq.wqe_shift, + wr->wr_id, TRACE_SQ); if (unlikely(ret)) { *bad_wr = wr; goto out; @@ -807,6 +811,9 @@ static void fill_rq_wqe(struct hns_roce_qp *hr_qp, const struct ib_recv_wr *wr, wqe = hns_roce_get_recv_wqe(hr_qp, wqe_idx); fill_recv_sge_to_wqe(wr, wqe, max_sge, hr_qp->rq.rsv_sge); + + trace_hns_rq_wqe(hr_qp->qpn, wqe_idx, wqe, 1 << hr_qp->rq.wqe_shift, + wr->wr_id, TRACE_RQ); } static int hns_roce_v2_post_recv(struct ib_qp *ibqp, @@ -943,7 +950,7 @@ static void fill_wqe_idx(struct hns_roce_srq *srq, unsigned int wqe_idx) static void update_srq_db(struct hns_roce_srq *srq) { struct hns_roce_dev *hr_dev = to_hr_dev(srq->ibsrq.device); - struct hns_roce_v2_db db; + struct hns_roce_v2_db db = {}; hr_reg_write(&db, DB_TAG, srq->srqn); hr_reg_write(&db, DB_CMD, HNS_ROCE_V2_SRQ_DB); @@ -984,6 +991,9 @@ static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq, fill_recv_sge_to_wqe(wr, wqe, max_sge, srq->rsv_sge); fill_wqe_idx(srq, wqe_idx); srq->wrid[wqe_idx] = wr->wr_id; + + trace_hns_srq_wqe(srq->srqn, wqe_idx, wqe, 1 << srq->wqe_shift, + wr->wr_id, TRACE_SRQ); } if (likely(nreq)) { @@ -1311,6 +1321,8 @@ static int __hns_roce_cmq_send_one(struct hns_roce_dev *hr_dev, tail = csq->head; for (i = 0; i < num; i++) { + trace_hns_cmdq_req(hr_dev, &desc[i]); + csq->desc[csq->head++] = desc[i]; if (csq->head == csq->desc_num) csq->head = 0; @@ -1325,6 +1337,8 @@ static int __hns_roce_cmq_send_one(struct hns_roce_dev *hr_dev, if (hns_roce_cmq_csq_done(hr_dev)) { ret = 0; for (i = 0; i < num; i++) { + trace_hns_cmdq_resp(hr_dev, &csq->desc[tail]); + /* check the result of hardware write back */ desc_ret = le16_to_cpu(csq->desc[tail++].retval); if (tail == csq->desc_num) @@ -4302,8 +4316,7 @@ static inline int get_pdn(struct ib_pd *ib_pd) } static void modify_qp_reset_to_init(struct ib_qp *ibqp, - struct hns_roce_v2_qp_context *context, - struct hns_roce_v2_qp_context *qpc_mask) + struct hns_roce_v2_qp_context *context) { struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); @@ -5122,7 +5135,7 @@ static int hns_roce_v2_set_abs_fields(struct ib_qp *ibqp, if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) { memset(qpc_mask, 0, hr_dev->caps.qpc_sz); - modify_qp_reset_to_init(ibqp, context, qpc_mask); + modify_qp_reset_to_init(ibqp, context); } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) { modify_qp_init_to_init(ibqp, context, qpc_mask); } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) { @@ -5313,6 +5326,7 @@ static void v2_set_flushed_fields(struct ib_qp *ibqp, return; spin_lock_irqsave(&hr_qp->sq.lock, sq_flag); + trace_hns_sq_flush_cqe(hr_qp->qpn, hr_qp->sq.head, TRACE_SQ); hr_reg_write(context, QPC_SQ_PRODUCER_IDX, hr_qp->sq.head); hr_reg_clear(qpc_mask, QPC_SQ_PRODUCER_IDX); hr_qp->state = IB_QPS_ERR; @@ -5322,6 +5336,7 @@ static void v2_set_flushed_fields(struct ib_qp *ibqp, return; spin_lock_irqsave(&hr_qp->rq.lock, rq_flag); + trace_hns_rq_flush_cqe(hr_qp->qpn, hr_qp->rq.head, TRACE_RQ); hr_reg_write(context, QPC_RQ_PRODUCER_IDX, hr_qp->rq.head); hr_reg_clear(qpc_mask, QPC_RQ_PRODUCER_IDX); spin_unlock_irqrestore(&hr_qp->rq.lock, rq_flag); @@ -6248,6 +6263,7 @@ static irqreturn_t hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev, eq->sub_type = sub_type; ++eq->cons_index; aeqe_found = 
IRQ_HANDLED; + trace_hns_ae_info(event_type, aeqe, eq->eqe_size); atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_AEQE_CNT]); diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h index 91a5665465ff..bc7466830eaf 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h @@ -34,6 +34,7 @@ #define _HNS_ROCE_HW_V2_H #include <linux/bitops.h> +#include "hnae3.h" #define HNS_ROCE_V2_MAX_RC_INL_INN_SZ 32 #define HNS_ROCE_V2_MTT_ENTRY_SZ 64 diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c index 8d0b63d4b50a..e7a497cc125c 100644 --- a/drivers/infiniband/hw/hns/hns_roce_main.c +++ b/drivers/infiniband/hw/hns/hns_roce_main.c @@ -37,7 +37,6 @@ #include <rdma/ib_smi.h> #include <rdma/ib_user_verbs.h> #include <rdma/ib_cache.h> -#include "hnae3.h" #include "hns_roce_common.h" #include "hns_roce_device.h" #include "hns_roce_hem.h" diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c index 09da3496843b..93a48b41955b 100644 --- a/drivers/infiniband/hw/hns/hns_roce_mr.c +++ b/drivers/infiniband/hw/hns/hns_roce_mr.c @@ -38,6 +38,7 @@ #include "hns_roce_device.h" #include "hns_roce_cmd.h" #include "hns_roce_hem.h" +#include "hns_roce_trace.h" static u32 hw_index_to_key(int ind) { @@ -159,6 +160,7 @@ static int hns_roce_mr_enable(struct hns_roce_dev *hr_dev, if (IS_ERR(mailbox)) return PTR_ERR(mailbox); + trace_hns_mr(mr); if (mr->type != MR_TYPE_FRMR) ret = hr_dev->hw->write_mtpt(hr_dev, mailbox->buf, mr); else @@ -1146,6 +1148,7 @@ int hns_roce_mtr_create(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr, struct ib_device *ibdev = &hr_dev->ib_dev; int ret; + trace_hns_buf_attr(buf_attr); /* The caller has its own buffer list and invokes the hns_roce_mtr_map() * to finish the MTT configuration. */ diff --git a/drivers/infiniband/hw/hns/hns_roce_restrack.c b/drivers/infiniband/hw/hns/hns_roce_restrack.c index 356d98816949..f637b73b946e 100644 --- a/drivers/infiniband/hw/hns/hns_roce_restrack.c +++ b/drivers/infiniband/hw/hns/hns_roce_restrack.c @@ -4,7 +4,6 @@ #include <rdma/rdma_cm.h> #include <rdma/restrack.h> #include <uapi/rdma/rdma_netlink.h> -#include "hnae3.h" #include "hns_roce_common.h" #include "hns_roce_device.h" #include "hns_roce_hw_v2.h" diff --git a/drivers/infiniband/hw/hns/hns_roce_trace.h b/drivers/infiniband/hw/hns/hns_roce_trace.h new file mode 100644 index 000000000000..59ceb591b3a1 --- /dev/null +++ b/drivers/infiniband/hw/hns/hns_roce_trace.h @@ -0,0 +1,216 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright (c) 2025 Hisilicon Limited. 
+ */ + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM hns_roce + +#if !defined(__HNS_ROCE_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) +#define __HNS_ROCE_TRACE_H + +#include <linux/tracepoint.h> +#include <linux/string_choices.h> +#include "hns_roce_device.h" +#include "hns_roce_hw_v2.h" + +DECLARE_EVENT_CLASS(flush_head_template, + TP_PROTO(unsigned long qpn, u32 pi, + enum hns_roce_trace_type type), + TP_ARGS(qpn, pi, type), + + TP_STRUCT__entry(__field(unsigned long, qpn) + __field(u32, pi) + __field(enum hns_roce_trace_type, type) + ), + + TP_fast_assign(__entry->qpn = qpn; + __entry->pi = pi; + __entry->type = type; + ), + + TP_printk("%s 0x%lx flush head 0x%x.", + trace_type_to_str(__entry->type), + __entry->qpn, __entry->pi) +); + +DEFINE_EVENT(flush_head_template, hns_sq_flush_cqe, + TP_PROTO(unsigned long qpn, u32 pi, + enum hns_roce_trace_type type), + TP_ARGS(qpn, pi, type)); +DEFINE_EVENT(flush_head_template, hns_rq_flush_cqe, + TP_PROTO(unsigned long qpn, u32 pi, + enum hns_roce_trace_type type), + TP_ARGS(qpn, pi, type)); + +#define MAX_SGE_PER_WQE 64 +#define MAX_WQE_SIZE (MAX_SGE_PER_WQE * HNS_ROCE_SGE_SIZE) +DECLARE_EVENT_CLASS(wqe_template, + TP_PROTO(unsigned long qpn, u32 idx, void *wqe, u32 len, + u64 id, enum hns_roce_trace_type type), + TP_ARGS(qpn, idx, wqe, len, id, type), + + TP_STRUCT__entry(__field(unsigned long, qpn) + __field(u32, idx) + __array(u32, wqe, + MAX_WQE_SIZE / sizeof(__le32)) + __field(u32, len) + __field(u64, id) + __field(enum hns_roce_trace_type, type) + ), + + TP_fast_assign(__entry->qpn = qpn; + __entry->idx = idx; + __entry->id = id; + __entry->len = len / sizeof(__le32); + __entry->type = type; + for (int i = 0; i < __entry->len; i++) + __entry->wqe[i] = le32_to_cpu(((__le32 *)wqe)[i]); + ), + + TP_printk("%s 0x%lx wqe(0x%x/0x%llx): %s", + trace_type_to_str(__entry->type), + __entry->qpn, __entry->idx, __entry->id, + __print_array(__entry->wqe, __entry->len, + sizeof(__le32))) +); + +DEFINE_EVENT(wqe_template, hns_sq_wqe, + TP_PROTO(unsigned long qpn, u32 idx, void *wqe, u32 len, u64 id, + enum hns_roce_trace_type type), + TP_ARGS(qpn, idx, wqe, len, id, type)); +DEFINE_EVENT(wqe_template, hns_rq_wqe, + TP_PROTO(unsigned long qpn, u32 idx, void *wqe, u32 len, u64 id, + enum hns_roce_trace_type type), + TP_ARGS(qpn, idx, wqe, len, id, type)); +DEFINE_EVENT(wqe_template, hns_srq_wqe, + TP_PROTO(unsigned long qpn, u32 idx, void *wqe, u32 len, u64 id, + enum hns_roce_trace_type type), + TP_ARGS(qpn, idx, wqe, len, id, type)); + +TRACE_EVENT(hns_ae_info, + TP_PROTO(int event_type, void *aeqe, unsigned int len), + TP_ARGS(event_type, aeqe, len), + + TP_STRUCT__entry(__field(int, event_type) + __array(u32, aeqe, + HNS_ROCE_V3_EQE_SIZE / sizeof(__le32)) + __field(u32, len) + ), + + TP_fast_assign(__entry->event_type = event_type; + __entry->len = len / sizeof(__le32); + for (int i = 0; i < __entry->len; i++) + __entry->aeqe[i] = le32_to_cpu(((__le32 *)aeqe)[i]); + ), + + TP_printk("event %2d aeqe: %s", __entry->event_type, + __print_array(__entry->aeqe, __entry->len, sizeof(__le32))) +); + +TRACE_EVENT(hns_mr, + TP_PROTO(struct hns_roce_mr *mr), + TP_ARGS(mr), + + TP_STRUCT__entry(__field(u64, iova) + __field(u64, size) + __field(u32, key) + __field(u32, pd) + __field(u32, pbl_hop_num) + __field(u32, npages) + __field(int, type) + __field(int, enabled) + ), + + TP_fast_assign(__entry->iova = mr->iova; + __entry->size = mr->size; + __entry->key = mr->key; + __entry->pd = mr->pd; + __entry->pbl_hop_num = mr->pbl_hop_num; + __entry->npages = mr->npages; + 
__entry->type = mr->type; + __entry->enabled = mr->enabled; + ), + + TP_printk("iova:0x%llx, size:%llu, key:%u, pd:%u, pbl_hop:%u, npages:%u, type:%d, status:%d", + __entry->iova, __entry->size, __entry->key, + __entry->pd, __entry->pbl_hop_num, __entry->npages, + __entry->type, __entry->enabled) +); + +TRACE_EVENT(hns_buf_attr, + TP_PROTO(struct hns_roce_buf_attr *attr), + TP_ARGS(attr), + + TP_STRUCT__entry(__field(unsigned int, region_count) + __field(unsigned int, region0_size) + __field(int, region0_hopnum) + __field(unsigned int, region1_size) + __field(int, region1_hopnum) + __field(unsigned int, region2_size) + __field(int, region2_hopnum) + __field(unsigned int, page_shift) + __field(bool, mtt_only) + ), + + TP_fast_assign(__entry->region_count = attr->region_count; + __entry->region0_size = attr->region[0].size; + __entry->region0_hopnum = attr->region[0].hopnum; + __entry->region1_size = attr->region[1].size; + __entry->region1_hopnum = attr->region[1].hopnum; + __entry->region2_size = attr->region[2].size; + __entry->region2_hopnum = attr->region[2].hopnum; + __entry->page_shift = attr->page_shift; + __entry->mtt_only = attr->mtt_only; + ), + + TP_printk("rg cnt:%u, pg_sft:0x%x, mtt_only:%s, rg 0 (sz:%u, hop:%u), rg 1 (sz:%u, hop:%u), rg 2 (sz:%u, hop:%u)\n", + __entry->region_count, __entry->page_shift, + str_yes_no(__entry->mtt_only), + __entry->region0_size, __entry->region0_hopnum, + __entry->region1_size, __entry->region1_hopnum, + __entry->region2_size, __entry->region2_hopnum) +); + +DECLARE_EVENT_CLASS(cmdq, + TP_PROTO(struct hns_roce_dev *hr_dev, + struct hns_roce_cmq_desc *desc), + TP_ARGS(hr_dev, desc), + + TP_STRUCT__entry(__string(dev_name, dev_name(hr_dev->dev)) + __field(u16, opcode) + __field(u16, flag) + __field(u16, retval) + __array(u32, data, 6) + ), + + TP_fast_assign(__assign_str(dev_name); + __entry->opcode = le16_to_cpu(desc->opcode); + __entry->flag = le16_to_cpu(desc->flag); + __entry->retval = le16_to_cpu(desc->retval); + for (int i = 0; i < 6; i++) + __entry->data[i] = le32_to_cpu(desc->data[i]); + ), + + TP_printk("%s cmdq opcode:0x%x, flag:0x%x, retval:0x%x, data:%s\n", + __get_str(dev_name), __entry->opcode, + __entry->flag, __entry->retval, + __print_array(__entry->data, 6, sizeof(__le32))) +); + +DEFINE_EVENT(cmdq, hns_cmdq_req, + TP_PROTO(struct hns_roce_dev *hr_dev, + struct hns_roce_cmq_desc *desc), + TP_ARGS(hr_dev, desc)); +DEFINE_EVENT(cmdq, hns_cmdq_resp, + TP_PROTO(struct hns_roce_dev *hr_dev, + struct hns_roce_cmq_desc *desc), + TP_ARGS(hr_dev, desc)); + +#endif /* __HNS_ROCE_TRACE_H */ + +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE hns_roce_trace +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . 
+#include <trace/define_trace.h> diff --git a/drivers/infiniband/hw/irdma/ctrl.c b/drivers/infiniband/hw/irdma/ctrl.c index 6aed6169c07d..99a7f1a6c0b5 100644 --- a/drivers/infiniband/hw/irdma/ctrl.c +++ b/drivers/infiniband/hw/irdma/ctrl.c @@ -3131,7 +3131,7 @@ int irdma_sc_cqp_init(struct irdma_sc_cqp *cqp, writel(0, cqp->dev->hw_regs[IRDMA_CCQPSTATUS]); ibdev_dbg(to_ibdev(cqp->dev), - "WQE: sq_size[%04d] hw_sq_size[%04d] sq_base[%p] sq_pa[%pK] cqp[%p] polarity[x%04x]\n", + "WQE: sq_size[%04d] hw_sq_size[%04d] sq_base[%p] sq_pa[%p] cqp[%p] polarity[x%04x]\n", cqp->sq_size, cqp->hw_sq_size, cqp->sq_base, (u64 *)(uintptr_t)cqp->sq_pa, cqp, cqp->polarity); return 0; diff --git a/drivers/infiniband/hw/irdma/pble.c b/drivers/infiniband/hw/irdma/pble.c index e7ce6840755f..37ce35cb10e7 100644 --- a/drivers/infiniband/hw/irdma/pble.c +++ b/drivers/infiniband/hw/irdma/pble.c @@ -108,7 +108,7 @@ static int add_sd_direct(struct irdma_hmc_pble_rsrc *pble_rsrc, chunk->vaddr = sd_entry->u.bp.addr.va + offset; chunk->fpm_addr = pble_rsrc->next_fpm_addr; ibdev_dbg(to_ibdev(dev), - "PBLE: chunk_size[%lld] = 0x%llx vaddr=0x%pK fpm_addr = %llx\n", + "PBLE: chunk_size[%lld] = 0x%llx vaddr=0x%p fpm_addr = %llx\n", chunk->size, chunk->size, chunk->vaddr, chunk->fpm_addr); return 0; diff --git a/drivers/infiniband/hw/mana/cq.c b/drivers/infiniband/hw/mana/cq.c index 0fc4e2679218..28e154bbb50f 100644 --- a/drivers/infiniband/hw/mana/cq.c +++ b/drivers/infiniband/hw/mana/cq.c @@ -15,14 +15,12 @@ int mana_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, struct ib_device *ibdev = ibcq->device; struct mana_ib_create_cq ucmd = {}; struct mana_ib_dev *mdev; - struct gdma_context *gc; bool is_rnic_cq; u32 doorbell; u32 buf_size; int err; mdev = container_of(ibdev, struct mana_ib_dev, ib_dev); - gc = mdev_to_gc(mdev); cq->comp_vector = attr->comp_vector % ibdev->num_comp_vectors; cq->cq_handle = INVALID_MANA_HANDLE; @@ -65,7 +63,7 @@ int mana_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, ibdev_dbg(ibdev, "Failed to create kernel queue for create cq, %d\n", err); return err; } - doorbell = gc->mana_ib.doorbell; + doorbell = mdev->gdma_dev->doorbell; } if (is_rnic_cq) { diff --git a/drivers/infiniband/hw/mana/device.c b/drivers/infiniband/hw/mana/device.c index b31089320aa5..165c0a1e67d1 100644 --- a/drivers/infiniband/hw/mana/device.c +++ b/drivers/infiniband/hw/mana/device.c @@ -101,103 +101,95 @@ static int mana_ib_probe(struct auxiliary_device *adev, const struct auxiliary_device_id *id) { struct mana_adev *madev = container_of(adev, struct mana_adev, adev); + struct gdma_context *gc = madev->mdev->gdma_context; + struct mana_context *mc = gc->mana.driver_data; struct gdma_dev *mdev = madev->mdev; struct net_device *ndev; - struct mana_context *mc; struct mana_ib_dev *dev; u8 mac_addr[ETH_ALEN]; int ret; - mc = mdev->driver_data; - dev = ib_alloc_device(mana_ib_dev, ib_dev); if (!dev) return -ENOMEM; ib_set_device_ops(&dev->ib_dev, &mana_ib_dev_ops); - - dev->ib_dev.phys_port_cnt = mc->num_ports; - - ibdev_dbg(&dev->ib_dev, "mdev=%p id=%d num_ports=%d\n", mdev, - mdev->dev_id.as_uint32, dev->ib_dev.phys_port_cnt); - dev->ib_dev.node_type = RDMA_NODE_IB_CA; - - /* - * num_comp_vectors needs to set to the max MSIX index - * when interrupts and event queues are implemented - */ - dev->ib_dev.num_comp_vectors = mdev->gdma_context->max_num_queues; - dev->ib_dev.dev.parent = mdev->gdma_context->dev; - - ndev = mana_get_primary_netdev(mc, 0, &dev->dev_tracker); - if (!ndev) { - ret 
= -ENODEV; - ibdev_err(&dev->ib_dev, "Failed to get netdev for IB port 1"); - goto free_ib_device; - } - ether_addr_copy(mac_addr, ndev->dev_addr); - addrconf_addr_eui48((u8 *)&dev->ib_dev.node_guid, ndev->dev_addr); - ret = ib_device_set_netdev(&dev->ib_dev, ndev, 1); - /* mana_get_primary_netdev() returns ndev with refcount held */ - netdev_put(ndev, &dev->dev_tracker); - if (ret) { - ibdev_err(&dev->ib_dev, "Failed to set ib netdev, ret %d", ret); - goto free_ib_device; - } - - ret = mana_gd_register_device(&mdev->gdma_context->mana_ib); - if (ret) { - ibdev_err(&dev->ib_dev, "Failed to register device, ret %d", - ret); - goto free_ib_device; - } - dev->gdma_dev = &mdev->gdma_context->mana_ib; - - dev->nb.notifier_call = mana_ib_netdev_event; - ret = register_netdevice_notifier(&dev->nb); - if (ret) { - ibdev_err(&dev->ib_dev, "Failed to register net notifier, %d", - ret); - goto deregister_device; - } - - ret = mana_ib_gd_query_adapter_caps(dev); - if (ret) { - ibdev_err(&dev->ib_dev, "Failed to query device caps, ret %d", - ret); - goto deregister_net_notifier; - } - - ib_set_device_ops(&dev->ib_dev, &mana_ib_stats_ops); - - ret = mana_ib_create_eqs(dev); - if (ret) { - ibdev_err(&dev->ib_dev, "Failed to create EQs, ret %d", ret); - goto deregister_net_notifier; - } - - ret = mana_ib_gd_create_rnic_adapter(dev); - if (ret) - goto destroy_eqs; - + dev->ib_dev.num_comp_vectors = gc->max_num_queues; + dev->ib_dev.dev.parent = gc->dev; + dev->gdma_dev = mdev; xa_init_flags(&dev->qp_table_wq, XA_FLAGS_LOCK_IRQ); - ret = mana_ib_gd_config_mac(dev, ADDR_OP_ADD, mac_addr); - if (ret) { - ibdev_err(&dev->ib_dev, "Failed to add Mac address, ret %d", - ret); - goto destroy_rnic; + + if (mana_ib_is_rnic(dev)) { + dev->ib_dev.phys_port_cnt = 1; + ndev = mana_get_primary_netdev(mc, 0, &dev->dev_tracker); + if (!ndev) { + ret = -ENODEV; + ibdev_err(&dev->ib_dev, "Failed to get netdev for IB port 1"); + goto free_ib_device; + } + ether_addr_copy(mac_addr, ndev->dev_addr); + addrconf_addr_eui48((u8 *)&dev->ib_dev.node_guid, ndev->dev_addr); + ret = ib_device_set_netdev(&dev->ib_dev, ndev, 1); + /* mana_get_primary_netdev() returns ndev with refcount held */ + netdev_put(ndev, &dev->dev_tracker); + if (ret) { + ibdev_err(&dev->ib_dev, "Failed to set ib netdev, ret %d", ret); + goto free_ib_device; + } + + dev->nb.notifier_call = mana_ib_netdev_event; + ret = register_netdevice_notifier(&dev->nb); + if (ret) { + ibdev_err(&dev->ib_dev, "Failed to register net notifier, %d", + ret); + goto free_ib_device; + } + + ret = mana_ib_gd_query_adapter_caps(dev); + if (ret) { + ibdev_err(&dev->ib_dev, "Failed to query device caps, ret %d", ret); + goto deregister_net_notifier; + } + + ib_set_device_ops(&dev->ib_dev, &mana_ib_stats_ops); + + ret = mana_ib_create_eqs(dev); + if (ret) { + ibdev_err(&dev->ib_dev, "Failed to create EQs, ret %d", ret); + goto deregister_net_notifier; + } + + ret = mana_ib_gd_create_rnic_adapter(dev); + if (ret) + goto destroy_eqs; + + ret = mana_ib_gd_config_mac(dev, ADDR_OP_ADD, mac_addr); + if (ret) { + ibdev_err(&dev->ib_dev, "Failed to add Mac address, ret %d", ret); + goto destroy_rnic; + } + } else { + dev->ib_dev.phys_port_cnt = mc->num_ports; + ret = mana_eth_query_adapter_caps(dev); + if (ret) { + ibdev_err(&dev->ib_dev, "Failed to query ETH device caps, ret %d", ret); + goto free_ib_device; + } } - dev->av_pool = dma_pool_create("mana_ib_av", mdev->gdma_context->dev, - MANA_AV_BUFFER_SIZE, MANA_AV_BUFFER_SIZE, 0); + dev->av_pool = dma_pool_create("mana_ib_av", gc->dev, 
MANA_AV_BUFFER_SIZE, + MANA_AV_BUFFER_SIZE, 0); if (!dev->av_pool) { ret = -ENOMEM; goto destroy_rnic; } - ret = ib_register_device(&dev->ib_dev, "mana_%d", - mdev->gdma_context->dev); + ibdev_dbg(&dev->ib_dev, "mdev=%p id=%d num_ports=%d\n", mdev, + mdev->dev_id.as_uint32, dev->ib_dev.phys_port_cnt); + + ret = ib_register_device(&dev->ib_dev, mana_ib_is_rnic(dev) ? "mana_%d" : "manae_%d", + gc->dev); if (ret) goto deallocate_pool; @@ -208,15 +200,16 @@ static int mana_ib_probe(struct auxiliary_device *adev, deallocate_pool: dma_pool_destroy(dev->av_pool); destroy_rnic: - xa_destroy(&dev->qp_table_wq); - mana_ib_gd_destroy_rnic_adapter(dev); + if (mana_ib_is_rnic(dev)) + mana_ib_gd_destroy_rnic_adapter(dev); destroy_eqs: - mana_ib_destroy_eqs(dev); + if (mana_ib_is_rnic(dev)) + mana_ib_destroy_eqs(dev); deregister_net_notifier: - unregister_netdevice_notifier(&dev->nb); -deregister_device: - mana_gd_deregister_device(dev->gdma_dev); + if (mana_ib_is_rnic(dev)) + unregister_netdevice_notifier(&dev->nb); free_ib_device: + xa_destroy(&dev->qp_table_wq); ib_dealloc_device(&dev->ib_dev); return ret; } @@ -227,25 +220,24 @@ static void mana_ib_remove(struct auxiliary_device *adev) ib_unregister_device(&dev->ib_dev); dma_pool_destroy(dev->av_pool); + if (mana_ib_is_rnic(dev)) { + mana_ib_gd_destroy_rnic_adapter(dev); + mana_ib_destroy_eqs(dev); + unregister_netdevice_notifier(&dev->nb); + } xa_destroy(&dev->qp_table_wq); - mana_ib_gd_destroy_rnic_adapter(dev); - mana_ib_destroy_eqs(dev); - unregister_netdevice_notifier(&dev->nb); - mana_gd_deregister_device(dev->gdma_dev); ib_dealloc_device(&dev->ib_dev); } static const struct auxiliary_device_id mana_id_table[] = { - { - .name = "mana.rdma", - }, + { .name = "mana.rdma", }, + { .name = "mana.eth", }, {}, }; MODULE_DEVICE_TABLE(auxiliary, mana_id_table); static struct auxiliary_driver mana_driver = { - .name = "rdma", .probe = mana_ib_probe, .remove = mana_ib_remove, .id_table = mana_id_table, diff --git a/drivers/infiniband/hw/mana/main.c b/drivers/infiniband/hw/mana/main.c index eda9c5b971de..41a24a186f9d 100644 --- a/drivers/infiniband/hw/mana/main.c +++ b/drivers/infiniband/hw/mana/main.c @@ -4,6 +4,7 @@ */ #include "mana_ib.h" +#include "linux/pci.h" void mana_ib_uncfg_vport(struct mana_ib_dev *dev, struct mana_ib_pd *pd, u32 port) @@ -243,7 +244,6 @@ void mana_ib_dealloc_ucontext(struct ib_ucontext *ibcontext) int mana_ib_create_kernel_queue(struct mana_ib_dev *mdev, u32 size, enum gdma_queue_type type, struct mana_ib_queue *queue) { - struct gdma_context *gc = mdev_to_gc(mdev); struct gdma_queue_spec spec = {}; int err; @@ -252,7 +252,7 @@ int mana_ib_create_kernel_queue(struct mana_ib_dev *mdev, u32 size, enum gdma_qu spec.type = type; spec.monitor_avl_buf = false; spec.queue_size = size; - err = mana_gd_create_mana_wq_cq(&gc->mana_ib, &spec, &queue->kmem); + err = mana_gd_create_mana_wq_cq(mdev->gdma_dev, &spec, &queue->kmem); if (err) return err; /* take ownership into mana_ib from mana */ @@ -479,7 +479,7 @@ int mana_ib_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem, { unsigned long page_sz; - page_sz = ib_umem_find_best_pgsz(umem, PAGE_SZ_BM, virt); + page_sz = ib_umem_find_best_pgsz(umem, dev->adapter_caps.page_size_cap, virt); if (!page_sz) { ibdev_dbg(&dev->ib_dev, "Failed to find page size.\n"); return -EINVAL; @@ -494,7 +494,7 @@ int mana_ib_create_zero_offset_dma_region(struct mana_ib_dev *dev, struct ib_ume unsigned long page_sz; /* Hardware requires dma region to align to chosen page size */ - page_sz = 
ib_umem_find_best_pgoff(umem, PAGE_SZ_BM, 0); + page_sz = ib_umem_find_best_pgoff(umem, dev->adapter_caps.page_size_cap, 0); if (!page_sz) { ibdev_dbg(&dev->ib_dev, "Failed to find page size.\n"); return -EINVAL; @@ -551,6 +551,7 @@ int mana_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma) int mana_ib_get_port_immutable(struct ib_device *ibdev, u32 port_num, struct ib_port_immutable *immutable) { + struct mana_ib_dev *dev = container_of(ibdev, struct mana_ib_dev, ib_dev); struct ib_port_attr attr; int err; @@ -560,10 +561,12 @@ int mana_ib_get_port_immutable(struct ib_device *ibdev, u32 port_num, immutable->pkey_tbl_len = attr.pkey_tbl_len; immutable->gid_tbl_len = attr.gid_tbl_len; - immutable->core_cap_flags = RDMA_CORE_PORT_RAW_PACKET; - if (port_num == 1) { - immutable->core_cap_flags |= RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP; + + if (mana_ib_is_rnic(dev)) { + immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP; immutable->max_mad_size = IB_MGMT_MAD_SIZE; + } else { + immutable->core_cap_flags = RDMA_CORE_PORT_RAW_PACKET; } return 0; @@ -572,12 +575,14 @@ int mana_ib_get_port_immutable(struct ib_device *ibdev, u32 port_num, int mana_ib_query_device(struct ib_device *ibdev, struct ib_device_attr *props, struct ib_udata *uhw) { - struct mana_ib_dev *dev = container_of(ibdev, - struct mana_ib_dev, ib_dev); + struct mana_ib_dev *dev = container_of(ibdev, struct mana_ib_dev, ib_dev); + struct pci_dev *pdev = to_pci_dev(mdev_to_gc(dev)->dev); memset(props, 0, sizeof(*props)); + props->vendor_id = pdev->vendor; + props->vendor_part_id = dev->gdma_dev->dev_id.type; props->max_mr_size = MANA_IB_MAX_MR_SIZE; - props->page_size_cap = PAGE_SZ_BM; + props->page_size_cap = dev->adapter_caps.page_size_cap; props->max_qp = dev->adapter_caps.max_qp_count; props->max_qp_wr = dev->adapter_caps.max_qp_wr; props->device_cap_flags = IB_DEVICE_RC_RNR_NAK_GEN; @@ -596,6 +601,8 @@ int mana_ib_query_device(struct ib_device *ibdev, struct ib_device_attr *props, props->max_ah = INT_MAX; props->max_pkeys = 1; props->local_ca_ack_delay = MANA_CA_ACK_DELAY; + if (!mana_ib_is_rnic(dev)) + props->raw_packet_caps = IB_RAW_PACKET_CAP_IP_CSUM; return 0; } @@ -603,6 +610,7 @@ int mana_ib_query_device(struct ib_device *ibdev, struct ib_device_attr *props, int mana_ib_query_port(struct ib_device *ibdev, u32 port, struct ib_port_attr *props) { + struct mana_ib_dev *dev = container_of(ibdev, struct mana_ib_dev, ib_dev); struct net_device *ndev = mana_ib_get_netdev(ibdev, port); if (!ndev) @@ -623,7 +631,7 @@ int mana_ib_query_port(struct ib_device *ibdev, u32 port, props->active_width = IB_WIDTH_4X; props->active_speed = IB_SPEED_EDR; props->pkey_tbl_len = 1; - if (port == 1) { + if (mana_ib_is_rnic(dev)) { props->gid_tbl_len = 16; props->port_cap_flags = IB_PORT_CM_SUP; props->ip_gids = true; @@ -696,6 +704,41 @@ int mana_ib_gd_query_adapter_caps(struct mana_ib_dev *dev) caps->max_recv_sge_count = resp.max_recv_sge_count; caps->feature_flags = resp.feature_flags; + caps->page_size_cap = PAGE_SZ_BM; + if (mdev_to_gc(dev)->pf_cap_flags1 & GDMA_DRV_CAP_FLAG_1_GDMA_PAGES_4MB_1GB_2GB) + caps->page_size_cap |= (SZ_4M | SZ_1G | SZ_2G); + + return 0; +} + +int mana_eth_query_adapter_caps(struct mana_ib_dev *dev) +{ + struct mana_ib_adapter_caps *caps = &dev->adapter_caps; + struct gdma_query_max_resources_resp resp = {}; + struct gdma_general_req req = {}; + int err; + + mana_gd_init_req_hdr(&req.hdr, GDMA_QUERY_MAX_RESOURCES, + sizeof(req), sizeof(resp)); + + err = mana_gd_send_request(mdev_to_gc(dev), 
sizeof(req), &req, sizeof(resp), &resp); + if (err) { + ibdev_err(&dev->ib_dev, + "Failed to query adapter caps err %d", err); + return err; + } + + caps->max_qp_count = min_t(u32, resp.max_sq, resp.max_rq); + caps->max_cq_count = resp.max_cq; + caps->max_mr_count = resp.max_mst; + caps->max_pd_count = 0x6000; + caps->max_qp_wr = min_t(u32, + 0x100000 / GDMA_MAX_SQE_SIZE, + 0x100000 / GDMA_MAX_RQE_SIZE); + caps->max_send_sge_count = 30; + caps->max_recv_sge_count = 15; + caps->page_size_cap = PAGE_SZ_BM; + return 0; } @@ -740,7 +783,7 @@ int mana_ib_create_eqs(struct mana_ib_dev *mdev) spec.eq.log2_throttle_limit = LOG2_EQ_THROTTLE; spec.eq.msix_index = 0; - err = mana_gd_create_mana_eq(&gc->mana_ib, &spec, &mdev->fatal_err_eq); + err = mana_gd_create_mana_eq(mdev->gdma_dev, &spec, &mdev->fatal_err_eq); if (err) return err; @@ -791,7 +834,7 @@ int mana_ib_gd_create_rnic_adapter(struct mana_ib_dev *mdev) mana_gd_init_req_hdr(&req.hdr, MANA_IB_CREATE_ADAPTER, sizeof(req), sizeof(resp)); req.hdr.req.msg_version = GDMA_MESSAGE_V2; - req.hdr.dev_id = gc->mana_ib.dev_id; + req.hdr.dev_id = mdev->gdma_dev->dev_id; req.notify_eq_id = mdev->fatal_err_eq->id; if (mdev->adapter_caps.feature_flags & MANA_IB_FEATURE_CLIENT_ERROR_CQE_SUPPORT) @@ -816,7 +859,7 @@ int mana_ib_gd_destroy_rnic_adapter(struct mana_ib_dev *mdev) gc = mdev_to_gc(mdev); mana_gd_init_req_hdr(&req.hdr, MANA_IB_DESTROY_ADAPTER, sizeof(req), sizeof(resp)); - req.hdr.dev_id = gc->mana_ib.dev_id; + req.hdr.dev_id = mdev->gdma_dev->dev_id; req.adapter = mdev->adapter_handle; err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp); @@ -843,7 +886,7 @@ int mana_ib_gd_add_gid(const struct ib_gid_attr *attr, void **context) } mana_gd_init_req_hdr(&req.hdr, MANA_IB_CONFIG_IP_ADDR, sizeof(req), sizeof(resp)); - req.hdr.dev_id = gc->mana_ib.dev_id; + req.hdr.dev_id = mdev->gdma_dev->dev_id; req.adapter = mdev->adapter_handle; req.op = ADDR_OP_ADD; req.sgid_type = (ntype == RDMA_NETWORK_IPV6) ? SGID_TYPE_IPV6 : SGID_TYPE_IPV4; @@ -873,7 +916,7 @@ int mana_ib_gd_del_gid(const struct ib_gid_attr *attr, void **context) } mana_gd_init_req_hdr(&req.hdr, MANA_IB_CONFIG_IP_ADDR, sizeof(req), sizeof(resp)); - req.hdr.dev_id = gc->mana_ib.dev_id; + req.hdr.dev_id = mdev->gdma_dev->dev_id; req.adapter = mdev->adapter_handle; req.op = ADDR_OP_REMOVE; req.sgid_type = (ntype == RDMA_NETWORK_IPV6) ? 
SGID_TYPE_IPV6 : SGID_TYPE_IPV4; @@ -896,7 +939,7 @@ int mana_ib_gd_config_mac(struct mana_ib_dev *mdev, enum mana_ib_addr_op op, u8 int err; mana_gd_init_req_hdr(&req.hdr, MANA_IB_CONFIG_MAC_ADDR, sizeof(req), sizeof(resp)); - req.hdr.dev_id = gc->mana_ib.dev_id; + req.hdr.dev_id = mdev->gdma_dev->dev_id; req.adapter = mdev->adapter_handle; req.op = op; copy_in_reverse(req.mac_addr, mac, ETH_ALEN); @@ -917,8 +960,11 @@ int mana_ib_gd_create_cq(struct mana_ib_dev *mdev, struct mana_ib_cq *cq, u32 do struct mana_rnic_create_cq_req req = {}; int err; + if (!mdev->eqs) + return -EINVAL; + mana_gd_init_req_hdr(&req.hdr, MANA_IB_CREATE_CQ, sizeof(req), sizeof(resp)); - req.hdr.dev_id = gc->mana_ib.dev_id; + req.hdr.dev_id = mdev->gdma_dev->dev_id; req.adapter = mdev->adapter_handle; req.gdma_region = cq->queue.gdma_region; req.eq_id = mdev->eqs[cq->comp_vector]->id; @@ -950,7 +996,7 @@ int mana_ib_gd_destroy_cq(struct mana_ib_dev *mdev, struct mana_ib_cq *cq) return 0; mana_gd_init_req_hdr(&req.hdr, MANA_IB_DESTROY_CQ, sizeof(req), sizeof(resp)); - req.hdr.dev_id = gc->mana_ib.dev_id; + req.hdr.dev_id = mdev->gdma_dev->dev_id; req.adapter = mdev->adapter_handle; req.cq_handle = cq->cq_handle; @@ -976,7 +1022,7 @@ int mana_ib_gd_create_rc_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp, int err, i; mana_gd_init_req_hdr(&req.hdr, MANA_IB_CREATE_RC_QP, sizeof(req), sizeof(resp)); - req.hdr.dev_id = gc->mana_ib.dev_id; + req.hdr.dev_id = mdev->gdma_dev->dev_id; req.adapter = mdev->adapter_handle; req.pd_handle = pd->pd_handle; req.send_cq_handle = send_cq->cq_handle; @@ -1012,7 +1058,7 @@ int mana_ib_gd_destroy_rc_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp) int err; mana_gd_init_req_hdr(&req.hdr, MANA_IB_DESTROY_RC_QP, sizeof(req), sizeof(resp)); - req.hdr.dev_id = gc->mana_ib.dev_id; + req.hdr.dev_id = mdev->gdma_dev->dev_id; req.adapter = mdev->adapter_handle; req.rc_qp_handle = qp->qp_handle; err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp); @@ -1035,7 +1081,7 @@ int mana_ib_gd_create_ud_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp, int err, i; mana_gd_init_req_hdr(&req.hdr, MANA_IB_CREATE_UD_QP, sizeof(req), sizeof(resp)); - req.hdr.dev_id = gc->mana_ib.dev_id; + req.hdr.dev_id = mdev->gdma_dev->dev_id; req.adapter = mdev->adapter_handle; req.pd_handle = pd->pd_handle; req.send_cq_handle = send_cq->cq_handle; @@ -1070,7 +1116,7 @@ int mana_ib_gd_destroy_ud_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp) int err; mana_gd_init_req_hdr(&req.hdr, MANA_IB_DESTROY_UD_QP, sizeof(req), sizeof(resp)); - req.hdr.dev_id = gc->mana_ib.dev_id; + req.hdr.dev_id = mdev->gdma_dev->dev_id; req.adapter = mdev->adapter_handle; req.qp_handle = qp->qp_handle; err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp); diff --git a/drivers/infiniband/hw/mana/mana_ib.h b/drivers/infiniband/hw/mana/mana_ib.h index 6903946677e5..42bebd6cd4f7 100644 --- a/drivers/infiniband/hw/mana/mana_ib.h +++ b/drivers/infiniband/hw/mana/mana_ib.h @@ -60,6 +60,7 @@ struct mana_ib_adapter_caps { u32 max_recv_sge_count; u32 max_inline_data_size; u64 feature_flags; + u64 page_size_cap; }; struct mana_ib_queue { @@ -543,6 +544,11 @@ static inline void mana_put_qp_ref(struct mana_ib_qp *qp) complete(&qp->free); } +static inline bool mana_ib_is_rnic(struct mana_ib_dev *mdev) +{ + return mdev->gdma_dev->dev_id.type == GDMA_DEVICE_MANA_IB; +} + static inline struct net_device *mana_ib_get_netdev(struct ib_device *ibdev, u32 port) { struct mana_ib_dev *mdev = container_of(ibdev, struct 
mana_ib_dev, ib_dev); @@ -642,6 +648,7 @@ int mana_ib_query_gid(struct ib_device *ibdev, u32 port, int index, void mana_ib_disassociate_ucontext(struct ib_ucontext *ibcontext); int mana_ib_gd_query_adapter_caps(struct mana_ib_dev *mdev); +int mana_eth_query_adapter_caps(struct mana_ib_dev *mdev); int mana_ib_create_eqs(struct mana_ib_dev *mdev); diff --git a/drivers/infiniband/hw/mana/mr.c b/drivers/infiniband/hw/mana/mr.c index f99557ec7767..6d974d0a8400 100644 --- a/drivers/infiniband/hw/mana/mr.c +++ b/drivers/infiniband/hw/mana/mr.c @@ -5,8 +5,8 @@ #include "mana_ib.h" -#define VALID_MR_FLAGS \ - (IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ) +#define VALID_MR_FLAGS (IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ |\ + IB_ACCESS_REMOTE_ATOMIC | IB_ZERO_BASED) #define VALID_DMA_MR_FLAGS (IB_ACCESS_LOCAL_WRITE) @@ -24,6 +24,9 @@ mana_ib_verbs_to_gdma_access_flags(int access_flags) if (access_flags & IB_ACCESS_REMOTE_READ) flags |= GDMA_ACCESS_FLAG_REMOTE_READ; + if (access_flags & IB_ACCESS_REMOTE_ATOMIC) + flags |= GDMA_ACCESS_FLAG_REMOTE_ATOMIC; + return flags; } @@ -48,7 +51,10 @@ static int mana_ib_gd_create_mr(struct mana_ib_dev *dev, struct mana_ib_mr *mr, req.gva.virtual_address = mr_params->gva.virtual_address; req.gva.access_flags = mr_params->gva.access_flags; break; - + case GDMA_MR_TYPE_ZBVA: + req.zbva.dma_region_handle = mr_params->zbva.dma_region_handle; + req.zbva.access_flags = mr_params->zbva.access_flags; + break; default: ibdev_dbg(&dev->ib_dev, "invalid param (GDMA_MR_TYPE) passed, type %d\n", @@ -144,11 +150,18 @@ struct ib_mr *mana_ib_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 length, dma_region_handle); mr_params.pd_handle = pd->pd_handle; - mr_params.mr_type = GDMA_MR_TYPE_GVA; - mr_params.gva.dma_region_handle = dma_region_handle; - mr_params.gva.virtual_address = iova; - mr_params.gva.access_flags = - mana_ib_verbs_to_gdma_access_flags(access_flags); + if (access_flags & IB_ZERO_BASED) { + mr_params.mr_type = GDMA_MR_TYPE_ZBVA; + mr_params.zbva.dma_region_handle = dma_region_handle; + mr_params.zbva.access_flags = + mana_ib_verbs_to_gdma_access_flags(access_flags); + } else { + mr_params.mr_type = GDMA_MR_TYPE_GVA; + mr_params.gva.dma_region_handle = dma_region_handle; + mr_params.gva.virtual_address = iova; + mr_params.gva.access_flags = + mana_ib_verbs_to_gdma_access_flags(access_flags); + } err = mana_ib_gd_create_mr(dev, mr, &mr_params); if (err) diff --git a/drivers/infiniband/hw/mana/qp.c b/drivers/infiniband/hw/mana/qp.c index c928af58f38b..14fd7d6c54a2 100644 --- a/drivers/infiniband/hw/mana/qp.c +++ b/drivers/infiniband/hw/mana/qp.c @@ -635,7 +635,6 @@ static int mana_ib_create_ud_qp(struct ib_qp *ibqp, struct ib_pd *ibpd, { struct mana_ib_dev *mdev = container_of(ibpd->device, struct mana_ib_dev, ib_dev); struct mana_ib_qp *qp = container_of(ibqp, struct mana_ib_qp, ibqp); - struct gdma_context *gc = mdev_to_gc(mdev); u32 doorbell, queue_size; int i, err; @@ -654,7 +653,7 @@ static int mana_ib_create_ud_qp(struct ib_qp *ibqp, struct ib_pd *ibpd, goto destroy_queues; } } - doorbell = gc->mana_ib.doorbell; + doorbell = mdev->gdma_dev->doorbell; err = create_shadow_queue(&qp->shadow_rq, attr->cap.max_recv_wr, sizeof(struct ud_rq_shadow_wqe)); @@ -736,7 +735,7 @@ static int mana_ib_gd_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int err; mana_gd_init_req_hdr(&req.hdr, MANA_IB_SET_QP_STATE, sizeof(req), sizeof(resp)); - req.hdr.dev_id = gc->mana_ib.dev_id; + req.hdr.dev_id = 
mdev->gdma_dev->dev_id; req.adapter = mdev->adapter_handle; req.qp_handle = qp->qp_handle; req.qp_state = attr->qp_state; diff --git a/drivers/infiniband/hw/mlx4/mcg.c b/drivers/infiniband/hw/mlx4/mcg.c index 33f525b744f2..e279e69b9a51 100644 --- a/drivers/infiniband/hw/mlx4/mcg.c +++ b/drivers/infiniband/hw/mlx4/mcg.c @@ -43,7 +43,7 @@ #define MAX_VFS 80 #define MAX_PEND_REQS_PER_FUNC 4 -#define MAD_TIMEOUT_MS 2000 +#define MAD_TIMEOUT_SEC 2 #define mcg_warn(fmt, arg...) pr_warn("MCG WARNING: " fmt, ##arg) #define mcg_error(fmt, arg...) pr_err(fmt, ##arg) @@ -270,7 +270,7 @@ static int send_join_to_wire(struct mcast_group *group, struct ib_sa_mad *sa_mad if (!ret) { /* calls mlx4_ib_mcg_timeout_handler */ queue_delayed_work(group->demux->mcg_wq, &group->timeout_work, - msecs_to_jiffies(MAD_TIMEOUT_MS)); + secs_to_jiffies(MAD_TIMEOUT_SEC)); } return ret; @@ -309,7 +309,7 @@ static int send_leave_to_wire(struct mcast_group *group, u8 join_state) if (!ret) { /* calls mlx4_ib_mcg_timeout_handler */ queue_delayed_work(group->demux->mcg_wq, &group->timeout_work, - msecs_to_jiffies(MAD_TIMEOUT_MS)); + secs_to_jiffies(MAD_TIMEOUT_SEC)); } return ret; @@ -1091,7 +1091,7 @@ static void _mlx4_ib_mcg_port_cleanup(struct mlx4_ib_demux_ctx *ctx, int destroy for (i = 0; i < MAX_VFS; ++i) clean_vf_mcast(ctx, i); - end = jiffies + msecs_to_jiffies(MAD_TIMEOUT_MS + 3000); + end = jiffies + secs_to_jiffies(MAD_TIMEOUT_SEC + 3); do { count = 0; mutex_lock(&ctx->mcg_table_lock); diff --git a/drivers/infiniband/hw/mlx5/fs.c b/drivers/infiniband/hw/mlx5/fs.c index 0ff9f18a71e8..680627f1de33 100644 --- a/drivers/infiniband/hw/mlx5/fs.c +++ b/drivers/infiniband/hw/mlx5/fs.c @@ -1645,11 +1645,6 @@ static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev, return _create_flow_rule(dev, ft_prio, flow_attr, dst, 0, NULL); } -enum { - LEFTOVERS_MC, - LEFTOVERS_UC, -}; - static struct mlx5_ib_flow_handler *create_leftovers_rule(struct mlx5_ib_dev *dev, struct mlx5_ib_flow_prio *ft_prio, struct ib_flow_attr *flow_attr, @@ -1659,43 +1654,32 @@ static struct mlx5_ib_flow_handler *create_leftovers_rule(struct mlx5_ib_dev *de struct mlx5_ib_flow_handler *handler = NULL; static struct { - struct ib_flow_attr flow_attr; struct ib_flow_spec_eth eth_flow; - } leftovers_specs[] = { - [LEFTOVERS_MC] = { - .flow_attr = { - .num_of_specs = 1, - .size = sizeof(leftovers_specs[0]) - }, - .eth_flow = { - .type = IB_FLOW_SPEC_ETH, - .size = sizeof(struct ib_flow_spec_eth), - .mask = {.dst_mac = {0x1} }, - .val = {.dst_mac = {0x1} } - } - }, - [LEFTOVERS_UC] = { - .flow_attr = { - .num_of_specs = 1, - .size = sizeof(leftovers_specs[0]) - }, - .eth_flow = { - .type = IB_FLOW_SPEC_ETH, - .size = sizeof(struct ib_flow_spec_eth), - .mask = {.dst_mac = {0x1} }, - .val = {.dst_mac = {} } - } - } - }; + struct ib_flow_attr flow_attr; + } leftovers_wc = { .flow_attr = { .num_of_specs = 1, + .size = sizeof(leftovers_wc) }, + .eth_flow = { + .type = IB_FLOW_SPEC_ETH, + .size = sizeof(struct ib_flow_spec_eth), + .mask = { .dst_mac = { 0x1 } }, + .val = { .dst_mac = { 0x1 } } } }; - handler = create_flow_rule(dev, ft_prio, - &leftovers_specs[LEFTOVERS_MC].flow_attr, - dst); + static struct { + struct ib_flow_spec_eth eth_flow; + struct ib_flow_attr flow_attr; + } leftovers_uc = { .flow_attr = { .num_of_specs = 1, + .size = sizeof(leftovers_uc) }, + .eth_flow = { + .type = IB_FLOW_SPEC_ETH, + .size = sizeof(struct ib_flow_spec_eth), + .mask = { .dst_mac = { 0x1 } }, + .val = { .dst_mac = {} } } }; + + handler = 
create_flow_rule(dev, ft_prio, &leftovers_wc.flow_attr, dst); if (!IS_ERR(handler) && flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT) { handler_ucast = create_flow_rule(dev, ft_prio, - &leftovers_specs[LEFTOVERS_UC].flow_attr, - dst); + &leftovers_uc.flow_attr, dst); if (IS_ERR(handler_ucast)) { mlx5_del_flow_rules(handler->rule); ft_prio->refcount--; diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index d07cacaa0abd..ce7610740412 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -485,6 +485,10 @@ static int translate_eth_ext_proto_oper(u32 eth_proto_oper, u16 *active_speed, *active_width = IB_WIDTH_2X; *active_speed = IB_SPEED_NDR; break; + case MLX5E_PROT_MASK(MLX5E_200GAUI_1_200GBASE_CR1_KR1): + *active_width = IB_WIDTH_1X; + *active_speed = IB_SPEED_XDR; + break; case MLX5E_PROT_MASK(MLX5E_400GAUI_8_400GBASE_CR8): *active_width = IB_WIDTH_8X; *active_speed = IB_SPEED_HDR; @@ -493,10 +497,18 @@ static int translate_eth_ext_proto_oper(u32 eth_proto_oper, u16 *active_speed, *active_width = IB_WIDTH_4X; *active_speed = IB_SPEED_NDR; break; + case MLX5E_PROT_MASK(MLX5E_400GAUI_2_400GBASE_CR2_KR2): + *active_width = IB_WIDTH_2X; + *active_speed = IB_SPEED_XDR; + break; case MLX5E_PROT_MASK(MLX5E_800GAUI_8_800GBASE_CR8_KR8): *active_width = IB_WIDTH_8X; *active_speed = IB_SPEED_NDR; break; + case MLX5E_PROT_MASK(MLX5E_800GAUI_4_800GBASE_CR4_KR4): + *active_width = IB_WIDTH_4X; + *active_speed = IB_SPEED_XDR; + break; default: return -EINVAL; } @@ -4422,17 +4434,6 @@ static void mlx5_ib_stage_cong_debugfs_cleanup(struct mlx5_ib_dev *dev) mlx5_core_native_port_num(dev->mdev) - 1); } -static int mlx5_ib_stage_uar_init(struct mlx5_ib_dev *dev) -{ - dev->mdev->priv.uar = mlx5_get_uars_page(dev->mdev); - return PTR_ERR_OR_ZERO(dev->mdev->priv.uar); -} - -static void mlx5_ib_stage_uar_cleanup(struct mlx5_ib_dev *dev) -{ - mlx5_put_uars_page(dev->mdev, dev->mdev->priv.uar); -} - static int mlx5_ib_stage_bfrag_init(struct mlx5_ib_dev *dev) { int err; @@ -4662,9 +4663,6 @@ static const struct mlx5_ib_profile pf_profile = { STAGE_CREATE(MLX5_IB_STAGE_CONG_DEBUGFS, mlx5_ib_stage_cong_debugfs_init, mlx5_ib_stage_cong_debugfs_cleanup), - STAGE_CREATE(MLX5_IB_STAGE_UAR, - mlx5_ib_stage_uar_init, - mlx5_ib_stage_uar_cleanup), STAGE_CREATE(MLX5_IB_STAGE_BFREG, mlx5_ib_stage_bfrag_init, mlx5_ib_stage_bfrag_cleanup), @@ -4722,9 +4720,6 @@ const struct mlx5_ib_profile raw_eth_profile = { STAGE_CREATE(MLX5_IB_STAGE_CONG_DEBUGFS, mlx5_ib_stage_cong_debugfs_init, mlx5_ib_stage_cong_debugfs_cleanup), - STAGE_CREATE(MLX5_IB_STAGE_UAR, - mlx5_ib_stage_uar_init, - mlx5_ib_stage_uar_cleanup), STAGE_CREATE(MLX5_IB_STAGE_BFREG, mlx5_ib_stage_bfrag_init, mlx5_ib_stage_bfrag_cleanup), diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index ace2df3e1d9f..fde859d207ae 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h @@ -351,6 +351,7 @@ struct mlx5_ib_flow_db { #define MLX5_IB_UPD_XLT_PD BIT(4) #define MLX5_IB_UPD_XLT_ACCESS BIT(5) #define MLX5_IB_UPD_XLT_INDIRECT BIT(6) +#define MLX5_IB_UPD_XLT_DOWNGRADE BIT(7) /* Private QP creation flags to be passed in ib_qp_init_attr.create_flags. 
* @@ -1005,7 +1006,6 @@ enum mlx5_ib_stages { MLX5_IB_STAGE_ODP, MLX5_IB_STAGE_COUNTERS, MLX5_IB_STAGE_CONG_DEBUGFS, - MLX5_IB_STAGE_UAR, MLX5_IB_STAGE_BFREG, MLX5_IB_STAGE_PRE_IB_REG_UMR, MLX5_IB_STAGE_WHITELIST_UID, @@ -1473,8 +1473,8 @@ void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *ibdev); int __init mlx5_ib_odp_init(void); void mlx5_ib_odp_cleanup(void); int mlx5_odp_init_mkey_cache(struct mlx5_ib_dev *dev); -void mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries, - struct mlx5_ib_mr *mr, int flags); +int mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries, + struct mlx5_ib_mr *mr, int flags); int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd, enum ib_uverbs_advise_mr_advice advice, @@ -1495,8 +1495,11 @@ static inline int mlx5_odp_init_mkey_cache(struct mlx5_ib_dev *dev) { return 0; } -static inline void mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries, - struct mlx5_ib_mr *mr, int flags) {} +static inline int mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries, + struct mlx5_ib_mr *mr, int flags) +{ + return -EOPNOTSUPP; +} static inline int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd, diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index 5fbebafc8774..6dd813bac5b2 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c @@ -525,7 +525,7 @@ static void queue_adjust_cache_locked(struct mlx5_cache_ent *ent) ent->fill_to_high_water = false; if (ent->pending) queue_delayed_work(ent->dev->cache.wq, &ent->dwork, - msecs_to_jiffies(1000)); + secs_to_jiffies(1)); else mod_delayed_work(ent->dev->cache.wq, &ent->dwork, 0); } @@ -576,7 +576,7 @@ static void __cache_work_func(struct mlx5_cache_ent *ent) "add keys command failed, err %d\n", err); queue_delayed_work(cache->wq, &ent->dwork, - msecs_to_jiffies(1000)); + secs_to_jiffies(1)); } } } else if (ent->mkeys_queue.ci > 2 * ent->limit) { @@ -2051,7 +2051,7 @@ static int mlx5_revoke_mr(struct mlx5_ib_mr *mr) ent->in_use--; if (ent->is_tmp && !ent->tmp_cleanup_scheduled) { mod_delayed_work(ent->dev->cache.wq, &ent->dwork, - msecs_to_jiffies(30 * 1000)); + secs_to_jiffies(30)); ent->tmp_cleanup_scheduled = true; } spin_unlock_irq(&ent->mkeys_queue.lock); diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c index 86d8fa63bf69..eaa2f9f5f3a9 100644 --- a/drivers/infiniband/hw/mlx5/odp.c +++ b/drivers/infiniband/hw/mlx5/odp.c @@ -34,6 +34,9 @@ #include <linux/kernel.h> #include <linux/dma-buf.h> #include <linux/dma-resv.h> +#include <linux/hmm.h> +#include <linux/hmm-dma.h> +#include <linux/pci-p2pdma.h> #include "mlx5_ib.h" #include "cmd.h" @@ -158,41 +161,50 @@ static void populate_klm(struct mlx5_klm *pklm, size_t idx, size_t nentries, } } -static u64 umem_dma_to_mtt(dma_addr_t umem_dma) -{ - u64 mtt_entry = umem_dma & ODP_DMA_ADDR_MASK; - - if (umem_dma & ODP_READ_ALLOWED_BIT) - mtt_entry |= MLX5_IB_MTT_READ; - if (umem_dma & ODP_WRITE_ALLOWED_BIT) - mtt_entry |= MLX5_IB_MTT_WRITE; - - return mtt_entry; -} - -static void populate_mtt(__be64 *pas, size_t idx, size_t nentries, - struct mlx5_ib_mr *mr, int flags) +static int populate_mtt(__be64 *pas, size_t start, size_t nentries, + struct mlx5_ib_mr *mr, int flags) { struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem); - dma_addr_t pa; + bool downgrade = flags & MLX5_IB_UPD_XLT_DOWNGRADE; + struct pci_p2pdma_map_state p2pdma_state = {}; + struct ib_device *dev = odp->umem.ibdev; size_t i; if (flags & MLX5_IB_UPD_XLT_ZAP) - return; + return 0; for (i = 0; i < nentries; i++) 
{ - pa = odp->dma_list[idx + i]; - pas[i] = cpu_to_be64(umem_dma_to_mtt(pa)); + unsigned long pfn = odp->map.pfn_list[start + i]; + dma_addr_t dma_addr; + + pfn = odp->map.pfn_list[start + i]; + if (!(pfn & HMM_PFN_VALID)) + /* ODP initialization */ + continue; + + dma_addr = hmm_dma_map_pfn(dev->dma_device, &odp->map, + start + i, &p2pdma_state); + if (ib_dma_mapping_error(dev, dma_addr)) + return -EFAULT; + + dma_addr |= MLX5_IB_MTT_READ; + if ((pfn & HMM_PFN_WRITE) && !downgrade) + dma_addr |= MLX5_IB_MTT_WRITE; + + pas[i] = cpu_to_be64(dma_addr); + odp->npages++; } + return 0; } -void mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries, - struct mlx5_ib_mr *mr, int flags) +int mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries, + struct mlx5_ib_mr *mr, int flags) { if (flags & MLX5_IB_UPD_XLT_INDIRECT) { populate_klm(xlt, idx, nentries, mr, flags); + return 0; } else { - populate_mtt(xlt, idx, nentries, mr, flags); + return populate_mtt(xlt, idx, nentries, mr, flags); } } @@ -303,8 +315,7 @@ static bool mlx5_ib_invalidate_range(struct mmu_interval_notifier *mni, * estimate the cost of another UMR vs. the cost of bigger * UMR. */ - if (umem_odp->dma_list[idx] & - (ODP_READ_ALLOWED_BIT | ODP_WRITE_ALLOWED_BIT)) { + if (umem_odp->map.pfn_list[idx] & HMM_PFN_VALID) { if (!in_block) { blk_start_idx = idx; in_block = 1; @@ -687,7 +698,7 @@ static int pagefault_real_mr(struct mlx5_ib_mr *mr, struct ib_umem_odp *odp, { int page_shift, ret, np; bool downgrade = flags & MLX5_PF_FLAGS_DOWNGRADE; - u64 access_mask; + u64 access_mask = 0; u64 start_idx; bool fault = !(flags & MLX5_PF_FLAGS_SNAPSHOT); u32 xlt_flags = MLX5_IB_UPD_XLT_ATOMIC; @@ -695,12 +706,14 @@ static int pagefault_real_mr(struct mlx5_ib_mr *mr, struct ib_umem_odp *odp, if (flags & MLX5_PF_FLAGS_ENABLE) xlt_flags |= MLX5_IB_UPD_XLT_ENABLE; + if (flags & MLX5_PF_FLAGS_DOWNGRADE) + xlt_flags |= MLX5_IB_UPD_XLT_DOWNGRADE; + page_shift = odp->page_shift; start_idx = (user_va - ib_umem_start(odp)) >> page_shift; - access_mask = ODP_READ_ALLOWED_BIT; if (odp->umem.writable && !downgrade) - access_mask |= ODP_WRITE_ALLOWED_BIT; + access_mask |= HMM_PFN_WRITE; np = ib_umem_odp_map_dma_and_lock(odp, user_va, bcnt, access_mask, fault); if (np < 0) diff --git a/drivers/infiniband/hw/mlx5/qpc.c b/drivers/infiniband/hw/mlx5/qpc.c index d3dcc272200a..146d03ae40bd 100644 --- a/drivers/infiniband/hw/mlx5/qpc.c +++ b/drivers/infiniband/hw/mlx5/qpc.c @@ -21,8 +21,10 @@ mlx5_get_rsc(struct mlx5_qp_table *table, u32 rsn) spin_lock_irqsave(&table->lock, flags); common = radix_tree_lookup(&table->tree, rsn); - if (common) + if (common && !common->invalid) refcount_inc(&common->refcount); + else + common = NULL; spin_unlock_irqrestore(&table->lock, flags); @@ -178,6 +180,18 @@ static int create_resource_common(struct mlx5_ib_dev *dev, return 0; } +static void modify_resource_common_state(struct mlx5_ib_dev *dev, + struct mlx5_core_qp *qp, + bool invalid) +{ + struct mlx5_qp_table *table = &dev->qp_table; + unsigned long flags; + + spin_lock_irqsave(&table->lock, flags); + qp->common.invalid = invalid; + spin_unlock_irqrestore(&table->lock, flags); +} + static void destroy_resource_common(struct mlx5_ib_dev *dev, struct mlx5_core_qp *qp) { @@ -609,8 +623,20 @@ err_destroy_rq: int mlx5_core_destroy_rq_tracked(struct mlx5_ib_dev *dev, struct mlx5_core_qp *rq) { + int ret; + + /* The rq destruction can be called again in case it fails, hence we + * mark the common resource as invalid and only once FW destruction + * is completed 
successfully we actually destroy the resources. + */ + modify_resource_common_state(dev, rq, true); + ret = destroy_rq_tracked(dev, rq->qpn, rq->uid); + if (ret) { + modify_resource_common_state(dev, rq, false); + return ret; + } destroy_resource_common(dev, rq); - return destroy_rq_tracked(dev, rq->qpn, rq->uid); + return 0; } static void destroy_sq_tracked(struct mlx5_ib_dev *dev, u32 sqn, u16 uid) diff --git a/drivers/infiniband/hw/mlx5/umr.c b/drivers/infiniband/hw/mlx5/umr.c index 793f3c5c4d01..5be4426a2884 100644 --- a/drivers/infiniband/hw/mlx5/umr.c +++ b/drivers/infiniband/hw/mlx5/umr.c @@ -840,7 +840,17 @@ int mlx5r_umr_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages, size_to_map = npages * desc_size; dma_sync_single_for_cpu(ddev, sg.addr, sg.length, DMA_TO_DEVICE); - mlx5_odp_populate_xlt(xlt, idx, npages, mr, flags); + /* + * npages is the maximum number of pages to map, but we + * can't guarantee that all pages are actually mapped. + * + * For example, if page is p2p of type which is not supported + * for mapping, the number of pages mapped will be less than + * requested. + */ + err = mlx5_odp_populate_xlt(xlt, idx, npages, mr, flags); + if (err) + return err; dma_sync_single_for_device(ddev, sg.addr, sg.length, DMA_TO_DEVICE); sg.length = ALIGN(size_to_map, MLX5_UMR_FLEX_ALIGNMENT); diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c index 192f83fd7c8a..dacb8ceeebe0 100644 --- a/drivers/infiniband/hw/mthca/mthca_mr.c +++ b/drivers/infiniband/hw/mthca/mthca_mr.c @@ -144,7 +144,7 @@ static int mthca_buddy_init(struct mthca_buddy *buddy, int max_order) buddy->max_order = max_order; spin_lock_init(&buddy->lock); - buddy->bits = kcalloc(buddy->max_order + 1, sizeof(long *), + buddy->bits = kcalloc(buddy->max_order + 1, sizeof(*buddy->bits), GFP_KERNEL); buddy->num_free = kcalloc((buddy->max_order + 1), sizeof *buddy->num_free, GFP_KERNEL); diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.c b/drivers/infiniband/hw/usnic/usnic_uiom.c index f948b76f984d..3fbf99757b11 100644 --- a/drivers/infiniband/hw/usnic/usnic_uiom.c +++ b/drivers/infiniband/hw/usnic/usnic_uiom.c @@ -56,7 +56,7 @@ static int usnic_uiom_dma_fault(struct iommu_domain *domain, unsigned long iova, int flags, void *token) { - usnic_err("Device %s iommu fault domain 0x%pK va 0x%lx flags 0x%x\n", + usnic_err("Device %s iommu fault domain 0x%p va 0x%lx flags 0x%x\n", dev_name(dev), domain, iova, flags); return -ENOSYS; diff --git a/drivers/infiniband/sw/rxe/Kconfig b/drivers/infiniband/sw/rxe/Kconfig index c180e7ebcfc5..1ed5b63f8afc 100644 --- a/drivers/infiniband/sw/rxe/Kconfig +++ b/drivers/infiniband/sw/rxe/Kconfig @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0-only config RDMA_RXE tristate "Software RDMA over Ethernet (RoCE) driver" - depends on INET && PCI && INFINIBAND + depends on INET && PCI && INFINIBAND && 64BIT depends on INFINIBAND_VIRT_DMA select NET_UDP_TUNNEL select CRC32 diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c index b248c68bf9b1..3a77d6db1720 100644 --- a/drivers/infiniband/sw/rxe/rxe.c +++ b/drivers/infiniband/sw/rxe/rxe.c @@ -101,6 +101,8 @@ static void rxe_init_device_param(struct rxe_dev *rxe, struct net_device *ndev) rxe->attr.odp_caps.per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_READ; rxe->attr.odp_caps.per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_ATOMIC; rxe->attr.odp_caps.per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_SRQ_RECV; + rxe->attr.odp_caps.per_transport_caps.rc_odp_caps |= 
IB_ODP_SUPPORT_FLUSH; + rxe->attr.odp_caps.per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_ATOMIC_WRITE; } } diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h index 0bc3fbb6554f..876702058c84 100644 --- a/drivers/infiniband/sw/rxe/rxe_loc.h +++ b/drivers/infiniband/sw/rxe/rxe_loc.h @@ -70,9 +70,9 @@ int copy_data(struct rxe_pd *pd, int access, struct rxe_dma_info *dma, void *addr, int length, enum rxe_mr_copy_dir dir); int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents, unsigned int *sg_offset); -int rxe_mr_do_atomic_op(struct rxe_mr *mr, u64 iova, int opcode, - u64 compare, u64 swap_add, u64 *orig_val); -int rxe_mr_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value); +enum resp_states rxe_mr_do_atomic_op(struct rxe_mr *mr, u64 iova, int opcode, + u64 compare, u64 swap_add, u64 *orig_val); +enum resp_states rxe_mr_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value); struct rxe_mr *lookup_mr(struct rxe_pd *pd, int access, u32 key, enum rxe_mr_lookup_type type); int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length); @@ -193,13 +193,16 @@ static inline unsigned int wr_opcode_mask(int opcode, struct rxe_qp *qp) /* rxe_odp.c */ extern const struct mmu_interval_notifier_ops rxe_mn_ops; -#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING +#if defined CONFIG_INFINIBAND_ON_DEMAND_PAGING int rxe_odp_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length, u64 iova, int access_flags, struct rxe_mr *mr); int rxe_odp_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length, enum rxe_mr_copy_dir dir); -int rxe_odp_atomic_op(struct rxe_mr *mr, u64 iova, int opcode, - u64 compare, u64 swap_add, u64 *orig_val); +enum resp_states rxe_odp_atomic_op(struct rxe_mr *mr, u64 iova, int opcode, + u64 compare, u64 swap_add, u64 *orig_val); +int rxe_odp_flush_pmem_iova(struct rxe_mr *mr, u64 iova, + unsigned int length); +enum resp_states rxe_odp_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value); #else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */ static inline int rxe_odp_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length, u64 iova, @@ -212,9 +215,19 @@ static inline int rxe_odp_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, { return -EOPNOTSUPP; } -static inline int +static inline enum resp_states rxe_odp_atomic_op(struct rxe_mr *mr, u64 iova, int opcode, - u64 compare, u64 swap_add, u64 *orig_val) + u64 compare, u64 swap_add, u64 *orig_val) +{ + return RESPST_ERR_UNSUPPORTED_OPCODE; +} +static inline int rxe_odp_flush_pmem_iova(struct rxe_mr *mr, u64 iova, + unsigned int length) +{ + return -EOPNOTSUPP; +} +static inline enum resp_states rxe_odp_do_atomic_write(struct rxe_mr *mr, + u64 iova, u64 value) { return RESPST_ERR_UNSUPPORTED_OPCODE; } diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c index 432d864c3ce9..bcb97b3ea58a 100644 --- a/drivers/infiniband/sw/rxe/rxe_mr.c +++ b/drivers/infiniband/sw/rxe/rxe_mr.c @@ -424,7 +424,7 @@ err1: return err; } -int rxe_flush_pmem_iova(struct rxe_mr *mr, u64 iova, unsigned int length) +static int rxe_mr_flush_pmem_iova(struct rxe_mr *mr, u64 iova, unsigned int length) { unsigned int page_offset; unsigned long index; @@ -433,16 +433,6 @@ int rxe_flush_pmem_iova(struct rxe_mr *mr, u64 iova, unsigned int length) int err; u8 *va; - /* mr must be valid even if length is zero */ - if (WARN_ON(!mr)) - return -EINVAL; - - if (length == 0) - return 0; - - if (mr->ibmr.type == IB_MR_TYPE_DMA) - return -EFAULT; - err = mr_check_range(mr, iova, length); if (err) return 
err; @@ -454,7 +444,7 @@ int rxe_flush_pmem_iova(struct rxe_mr *mr, u64 iova, unsigned int length) if (!page) return -EFAULT; bytes = min_t(unsigned int, length, - mr_page_size(mr) - page_offset); + mr_page_size(mr) - page_offset); va = kmap_local_page(page); arch_wb_cache_pmem(va + page_offset, bytes); @@ -468,11 +458,33 @@ int rxe_flush_pmem_iova(struct rxe_mr *mr, u64 iova, unsigned int length) return 0; } +int rxe_flush_pmem_iova(struct rxe_mr *mr, u64 start, unsigned int length) +{ + int err; + + /* mr must be valid even if length is zero */ + if (WARN_ON(!mr)) + return -EINVAL; + + if (length == 0) + return 0; + + if (mr->ibmr.type == IB_MR_TYPE_DMA) + return -EFAULT; + + if (is_odp_mr(mr)) + err = rxe_odp_flush_pmem_iova(mr, start, length); + else + err = rxe_mr_flush_pmem_iova(mr, start, length); + + return err; +} + /* Guarantee atomicity of atomic operations at the machine level. */ DEFINE_SPINLOCK(atomic_ops_lock); -int rxe_mr_do_atomic_op(struct rxe_mr *mr, u64 iova, int opcode, - u64 compare, u64 swap_add, u64 *orig_val) +enum resp_states rxe_mr_do_atomic_op(struct rxe_mr *mr, u64 iova, int opcode, + u64 compare, u64 swap_add, u64 *orig_val) { unsigned int page_offset; struct page *page; @@ -524,27 +536,15 @@ int rxe_mr_do_atomic_op(struct rxe_mr *mr, u64 iova, int opcode, kunmap_local(va); - return 0; + return RESPST_NONE; } -#if defined CONFIG_64BIT -/* only implemented or called for 64 bit architectures */ -int rxe_mr_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value) +enum resp_states rxe_mr_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value) { unsigned int page_offset; struct page *page; u64 *va; - /* ODP is not supported right now. WIP. */ - if (is_odp_mr(mr)) - return RESPST_ERR_UNSUPPORTED_OPCODE; - - /* See IBA oA19-28 */ - if (unlikely(mr->state != RXE_MR_STATE_VALID)) { - rxe_dbg_mr(mr, "mr not in valid state\n"); - return RESPST_ERR_RKEY_VIOLATION; - } - if (mr->ibmr.type == IB_MR_TYPE_DMA) { page_offset = iova & (PAGE_SIZE - 1); page = ib_virt_dma_to_page(iova); @@ -572,20 +572,12 @@ int rxe_mr_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value) } va = kmap_local_page(page); - /* Do atomic write after all prior operations have completed */ smp_store_release(&va[page_offset >> 3], value); - kunmap_local(va); - return 0; -} -#else -int rxe_mr_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value) -{ - return RESPST_ERR_UNSUPPORTED_OPCODE; + return RESPST_NONE; } -#endif int advance_dma_data(struct rxe_dma_info *dma, unsigned int length) { diff --git a/drivers/infiniband/sw/rxe/rxe_odp.c b/drivers/infiniband/sw/rxe/rxe_odp.c index 9f6e2bb2a269..dbc5a5600eb7 100644 --- a/drivers/infiniband/sw/rxe/rxe_odp.c +++ b/drivers/infiniband/sw/rxe/rxe_odp.c @@ -4,6 +4,7 @@ */ #include <linux/hmm.h> +#include <linux/libnvdimm.h> #include <rdma/ib_umem_odp.h> @@ -26,7 +27,7 @@ static bool rxe_ib_invalidate_range(struct mmu_interval_notifier *mni, start = max_t(u64, ib_umem_start(umem_odp), range->start); end = min_t(u64, ib_umem_end(umem_odp), range->end); - /* update umem_odp->dma_list */ + /* update umem_odp->map.pfn_list */ ib_umem_odp_unmap_dma_pages(umem_odp, start, end); mutex_unlock(&umem_odp->umem_mutex); @@ -44,12 +45,11 @@ static int rxe_odp_do_pagefault_and_lock(struct rxe_mr *mr, u64 user_va, int bcn { struct ib_umem_odp *umem_odp = to_ib_umem_odp(mr->umem); bool fault = !(flags & RXE_PAGEFAULT_SNAPSHOT); - u64 access_mask; + u64 access_mask = 0; int np; - access_mask = ODP_READ_ALLOWED_BIT; if (umem_odp->umem.writable && !(flags & RXE_PAGEFAULT_RDONLY)) 
- access_mask |= ODP_WRITE_ALLOWED_BIT; + access_mask |= HMM_PFN_WRITE; /* * ib_umem_odp_map_dma_and_lock() locks umem_mutex on success. @@ -124,8 +124,8 @@ int rxe_odp_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length, return err; } -static inline bool rxe_check_pagefault(struct ib_umem_odp *umem_odp, - u64 iova, int length, u32 perm) +static inline bool rxe_check_pagefault(struct ib_umem_odp *umem_odp, u64 iova, + int length) { bool need_fault = false; u64 addr; @@ -137,7 +137,7 @@ static inline bool rxe_check_pagefault(struct ib_umem_odp *umem_odp, while (addr < iova + length) { idx = (addr - ib_umem_start(umem_odp)) >> umem_odp->page_shift; - if (!(umem_odp->dma_list[idx] & perm)) { + if (!(umem_odp->map.pfn_list[idx] & HMM_PFN_VALID)) { need_fault = true; break; } @@ -147,23 +147,28 @@ static inline bool rxe_check_pagefault(struct ib_umem_odp *umem_odp, return need_fault; } +static unsigned long rxe_odp_iova_to_index(struct ib_umem_odp *umem_odp, u64 iova) +{ + return (iova - ib_umem_start(umem_odp)) >> umem_odp->page_shift; +} + +static unsigned long rxe_odp_iova_to_page_offset(struct ib_umem_odp *umem_odp, u64 iova) +{ + return iova & (BIT(umem_odp->page_shift) - 1); +} + static int rxe_odp_map_range_and_lock(struct rxe_mr *mr, u64 iova, int length, u32 flags) { struct ib_umem_odp *umem_odp = to_ib_umem_odp(mr->umem); bool need_fault; - u64 perm; int err; if (unlikely(length < 1)) return -EINVAL; - perm = ODP_READ_ALLOWED_BIT; - if (!(flags & RXE_PAGEFAULT_RDONLY)) - perm |= ODP_WRITE_ALLOWED_BIT; - mutex_lock(&umem_odp->umem_mutex); - need_fault = rxe_check_pagefault(umem_odp, iova, length, perm); + need_fault = rxe_check_pagefault(umem_odp, iova, length); if (need_fault) { mutex_unlock(&umem_odp->umem_mutex); @@ -173,7 +178,7 @@ static int rxe_odp_map_range_and_lock(struct rxe_mr *mr, u64 iova, int length, u if (err < 0) return err; - need_fault = rxe_check_pagefault(umem_odp, iova, length, perm); + need_fault = rxe_check_pagefault(umem_odp, iova, length); if (need_fault) return -EFAULT; } @@ -190,13 +195,13 @@ static int __rxe_odp_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, size_t offset; u8 *user_va; - idx = (iova - ib_umem_start(umem_odp)) >> umem_odp->page_shift; - offset = iova & (BIT(umem_odp->page_shift) - 1); + idx = rxe_odp_iova_to_index(umem_odp, iova); + offset = rxe_odp_iova_to_page_offset(umem_odp, iova); while (length > 0) { u8 *src, *dest; - page = hmm_pfn_to_page(umem_odp->pfn_list[idx]); + page = hmm_pfn_to_page(umem_odp->map.pfn_list[idx]); user_va = kmap_local_page(page); if (!user_va) return -EFAULT; @@ -255,8 +260,9 @@ int rxe_odp_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length, return err; } -static int rxe_odp_do_atomic_op(struct rxe_mr *mr, u64 iova, int opcode, - u64 compare, u64 swap_add, u64 *orig_val) +static enum resp_states rxe_odp_do_atomic_op(struct rxe_mr *mr, u64 iova, + int opcode, u64 compare, + u64 swap_add, u64 *orig_val) { struct ib_umem_odp *umem_odp = to_ib_umem_odp(mr->umem); unsigned int page_offset; @@ -277,9 +283,9 @@ static int rxe_odp_do_atomic_op(struct rxe_mr *mr, u64 iova, int opcode, return RESPST_ERR_RKEY_VIOLATION; } - idx = (iova - ib_umem_start(umem_odp)) >> umem_odp->page_shift; - page_offset = iova & (BIT(umem_odp->page_shift) - 1); - page = hmm_pfn_to_page(umem_odp->pfn_list[idx]); + idx = rxe_odp_iova_to_index(umem_odp, iova); + page_offset = rxe_odp_iova_to_page_offset(umem_odp, iova); + page = hmm_pfn_to_page(umem_odp->map.pfn_list[idx]); if (!page) return RESPST_ERR_RKEY_VIOLATION; @@ -304,11 
+310,11 @@ static int rxe_odp_do_atomic_op(struct rxe_mr *mr, u64 iova, int opcode, kunmap_local(va); - return 0; + return RESPST_NONE; } -int rxe_odp_atomic_op(struct rxe_mr *mr, u64 iova, int opcode, - u64 compare, u64 swap_add, u64 *orig_val) +enum resp_states rxe_odp_atomic_op(struct rxe_mr *mr, u64 iova, int opcode, + u64 compare, u64 swap_add, u64 *orig_val) { struct ib_umem_odp *umem_odp = to_ib_umem_odp(mr->umem); int err; @@ -324,3 +330,91 @@ int rxe_odp_atomic_op(struct rxe_mr *mr, u64 iova, int opcode, return err; } + +int rxe_odp_flush_pmem_iova(struct rxe_mr *mr, u64 iova, + unsigned int length) +{ + struct ib_umem_odp *umem_odp = to_ib_umem_odp(mr->umem); + unsigned int page_offset; + unsigned long index; + struct page *page; + unsigned int bytes; + int err; + u8 *va; + + err = rxe_odp_map_range_and_lock(mr, iova, length, + RXE_PAGEFAULT_DEFAULT); + if (err) + return err; + + while (length > 0) { + index = rxe_odp_iova_to_index(umem_odp, iova); + page_offset = rxe_odp_iova_to_page_offset(umem_odp, iova); + + page = hmm_pfn_to_page(umem_odp->map.pfn_list[index]); + if (!page) { + mutex_unlock(&umem_odp->umem_mutex); + return -EFAULT; + } + + bytes = min_t(unsigned int, length, + mr_page_size(mr) - page_offset); + + va = kmap_local_page(page); + arch_wb_cache_pmem(va + page_offset, bytes); + kunmap_local(va); + + length -= bytes; + iova += bytes; + page_offset = 0; + } + + mutex_unlock(&umem_odp->umem_mutex); + + return 0; +} + +enum resp_states rxe_odp_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value) +{ + struct ib_umem_odp *umem_odp = to_ib_umem_odp(mr->umem); + unsigned int page_offset; + unsigned long index; + struct page *page; + int err; + u64 *va; + + /* See IBA oA19-28 */ + err = mr_check_range(mr, iova, sizeof(value)); + if (unlikely(err)) { + rxe_dbg_mr(mr, "iova out of range\n"); + return RESPST_ERR_RKEY_VIOLATION; + } + + err = rxe_odp_map_range_and_lock(mr, iova, sizeof(value), + RXE_PAGEFAULT_DEFAULT); + if (err) + return RESPST_ERR_RKEY_VIOLATION; + + page_offset = rxe_odp_iova_to_page_offset(umem_odp, iova); + index = rxe_odp_iova_to_index(umem_odp, iova); + page = hmm_pfn_to_page(umem_odp->map.pfn_list[index]); + if (!page) { + mutex_unlock(&umem_odp->umem_mutex); + return RESPST_ERR_RKEY_VIOLATION; + } + /* See IBA A19.4.2 */ + if (unlikely(page_offset & 0x7)) { + mutex_unlock(&umem_odp->umem_mutex); + rxe_dbg_mr(mr, "misaligned address\n"); + return RESPST_ERR_MISALIGNED_ATOMIC; + } + + va = kmap_local_page(page); + /* Do atomic write after all prior operations have completed */ + smp_store_release(&va[page_offset >> 3], value); + kunmap_local(va); + + mutex_unlock(&umem_odp->umem_mutex); + + return RESPST_NONE; +} diff --git a/drivers/infiniband/sw/rxe/rxe_param.h b/drivers/infiniband/sw/rxe/rxe_param.h index 003f681e5dc0..767870568372 100644 --- a/drivers/infiniband/sw/rxe/rxe_param.h +++ b/drivers/infiniband/sw/rxe/rxe_param.h @@ -53,12 +53,9 @@ enum rxe_device_param { | IB_DEVICE_MEM_WINDOW | IB_DEVICE_FLUSH_GLOBAL | IB_DEVICE_FLUSH_PERSISTENT -#ifdef CONFIG_64BIT | IB_DEVICE_MEM_WINDOW_TYPE_2B | IB_DEVICE_ATOMIC_WRITE, -#else - | IB_DEVICE_MEM_WINDOW_TYPE_2B, -#endif /* CONFIG_64BIT */ + RXE_MAX_SGE = 32, RXE_MAX_WQE_SIZE = sizeof(struct rxe_send_wqe) + sizeof(struct ib_sge) * RXE_MAX_SGE, diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c index 7975fb0e2782..f2af3e0aef35 100644 --- a/drivers/infiniband/sw/rxe/rxe_qp.c +++ b/drivers/infiniband/sw/rxe/rxe_qp.c @@ -811,7 +811,12 @@ static void 
rxe_qp_do_cleanup(struct work_struct *work) spin_unlock_irqrestore(&qp->state_lock, flags); qp->qp_timeout_jiffies = 0; - if (qp_type(qp) == IB_QPT_RC) { + /* In the function timer_setup, .function is initialized. If .function + * is NULL, it indicates the function timer_setup is not called, the + * timer is not initialized. Or else, the timer is initialized. + */ + if (qp_type(qp) == IB_QPT_RC && qp->retrans_timer.function && + qp->rnr_nak_timer.function) { timer_delete_sync(&qp->retrans_timer); timer_delete_sync(&qp->rnr_nak_timer); } diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c index 5d9174e408db..711f73e0bbb1 100644 --- a/drivers/infiniband/sw/rxe/rxe_resp.c +++ b/drivers/infiniband/sw/rxe/rxe_resp.c @@ -649,10 +649,6 @@ static enum resp_states process_flush(struct rxe_qp *qp, struct rxe_mr *mr = qp->resp.mr; struct resp_res *res = qp->resp.res; - /* ODP is not supported right now. WIP. */ - if (is_odp_mr(mr)) - return RESPST_ERR_UNSUPPORTED_OPCODE; - /* oA19-14, oA19-15 */ if (res && res->replay) return RESPST_ACKNOWLEDGE; @@ -753,7 +749,16 @@ static enum resp_states atomic_write_reply(struct rxe_qp *qp, value = *(u64 *)payload_addr(pkt); iova = qp->resp.va + qp->resp.offset; - err = rxe_mr_do_atomic_write(mr, iova, value); + /* See IBA oA19-28 */ + if (unlikely(mr->state != RXE_MR_STATE_VALID)) { + rxe_dbg_mr(mr, "mr not in valid state\n"); + return RESPST_ERR_RKEY_VIOLATION; + } + + if (is_odp_mr(mr)) + err = rxe_odp_do_atomic_write(mr, iova, value); + else + err = rxe_mr_do_atomic_write(mr, iova, value); if (err) return err; diff --git a/drivers/infiniband/sw/rxe/rxe_task.c b/drivers/infiniband/sw/rxe/rxe_task.c index 80332638d9e3..6f8f353e9583 100644 --- a/drivers/infiniband/sw/rxe/rxe_task.c +++ b/drivers/infiniband/sw/rxe/rxe_task.c @@ -85,17 +85,17 @@ static bool is_done(struct rxe_task *task) /* do_task is a wrapper for the three tasks (requester, * completer, responder) and calls them in a loop until - * they return a non-zero value. It is called either - * directly by rxe_run_task or indirectly if rxe_sched_task - * schedules the task. They must call __reserve_if_idle to - * move the task to busy before calling or scheduling. - * The task can also be moved to drained or invalid - * by calls to rxe_cleanup_task or rxe_disable_task. - * In that case tasks which get here are not executed but - * just flushed. The tasks are designed to look to see if - * there is work to do and then do part of it before returning - * here with a return value of zero until all the work - * has been consumed then it returns a non-zero value. + * they return a non-zero value. It is called indirectly + * when rxe_sched_task schedules the task. They must + * call __reserve_if_idle to move the task to busy before + * calling or scheduling. The task can also be moved to + * drained or invalid by calls to rxe_cleanup_task or + * rxe_disable_task. In that case tasks which get here + * are not executed but just flushed. The tasks are + * designed to look to see if there is work to do and + * then do part of it before returning here with a return + * value of zero until all the work has been consumed then + * it returns a non-zero value. * The number of times the task can be run is limited by * max iterations so one task cannot hold the cpu forever. * If the limit is hit and work remains the task is rescheduled. 
@@ -234,24 +234,6 @@ void rxe_cleanup_task(struct rxe_task *task) spin_unlock_irqrestore(&task->lock, flags); } -/* run the task inline if it is currently idle - * cannot call do_task holding the lock - */ -void rxe_run_task(struct rxe_task *task) -{ - unsigned long flags; - bool run; - - WARN_ON(rxe_read(task->qp) <= 0); - - spin_lock_irqsave(&task->lock, flags); - run = __reserve_if_idle(task); - spin_unlock_irqrestore(&task->lock, flags); - - if (run) - do_task(task); -} - /* schedule the task to run later as a work queue entry. * the queue_work call can be called holding * the lock. diff --git a/drivers/infiniband/sw/rxe/rxe_task.h b/drivers/infiniband/sw/rxe/rxe_task.h index a63e258b3d66..a8c9a77b6027 100644 --- a/drivers/infiniband/sw/rxe/rxe_task.h +++ b/drivers/infiniband/sw/rxe/rxe_task.h @@ -47,8 +47,6 @@ int rxe_init_task(struct rxe_task *task, struct rxe_qp *qp, /* cleanup task */ void rxe_cleanup_task(struct rxe_task *task); -void rxe_run_task(struct rxe_task *task); - void rxe_sched_task(struct rxe_task *task); /* keep a task from scheduling */ diff --git a/drivers/infiniband/sw/siw/siw.h b/drivers/infiniband/sw/siw/siw.h index d9e5a2e4c471..f5fd71717b80 100644 --- a/drivers/infiniband/sw/siw/siw.h +++ b/drivers/infiniband/sw/siw/siw.h @@ -718,7 +718,7 @@ static inline void siw_crc_skb(struct siw_rx_stream *srx, unsigned int len) "MEM[0x%08x] %s: " fmt, mem->stag, __func__, ##__VA_ARGS__) #define siw_dbg_cep(cep, fmt, ...) \ - ibdev_dbg(&cep->sdev->base_dev, "CEP[0x%pK] %s: " fmt, \ + ibdev_dbg(&cep->sdev->base_dev, "CEP[0x%p] %s: " fmt, \ cep, __func__, ##__VA_ARGS__) void siw_cq_flush(struct siw_cq *cq); diff --git a/drivers/infiniband/sw/siw/siw_cq.c b/drivers/infiniband/sw/siw/siw_cq.c index f3c2226aff94..25b3c741b66b 100644 --- a/drivers/infiniband/sw/siw/siw_cq.c +++ b/drivers/infiniband/sw/siw/siw_cq.c @@ -72,7 +72,7 @@ int siw_reap_cqe(struct siw_cq *cq, struct ib_wc *wc) wc->opcode = map_wc_opcode[cqe->opcode]; wc->status = map_cqe_status[cqe->status].ib; siw_dbg_cq(cq, - "idx %u, type %d, flags %2x, id 0x%pK\n", + "idx %u, type %d, flags %2x, id 0x%p\n", cq->cq_get % cq->num_cqe, cqe->opcode, cqe->flags, (void *)(uintptr_t)cqe->id); } else { diff --git a/drivers/infiniband/sw/siw/siw_mem.c b/drivers/infiniband/sw/siw/siw_mem.c index dcb963607c8b..d5ddeb17bd22 100644 --- a/drivers/infiniband/sw/siw/siw_mem.c +++ b/drivers/infiniband/sw/siw/siw_mem.c @@ -18,30 +18,6 @@ #define SIW_STAG_MAX_INDEX 0x00ffffff /* - * The code avoids special Stag of zero and tries to randomize - * STag values between 1 and SIW_STAG_MAX_INDEX. - */ -int siw_mem_add(struct siw_device *sdev, struct siw_mem *m) -{ - struct xa_limit limit = XA_LIMIT(1, SIW_STAG_MAX_INDEX); - u32 id, next; - - get_random_bytes(&next, 4); - next &= SIW_STAG_MAX_INDEX; - - if (xa_alloc_cyclic(&sdev->mem_xa, &id, m, limit, &next, - GFP_KERNEL) < 0) - return -ENOMEM; - - /* Set the STag index part */ - m->stag = id << 8; - - siw_dbg_mem(m, "new MEM object\n"); - - return 0; -} - -/* * siw_mem_id2obj() * * resolves memory from stag given by id. 
might be called from: @@ -181,10 +157,10 @@ int siw_check_mem(struct ib_pd *pd, struct siw_mem *mem, u64 addr, */ if (addr < mem->va || addr + len > mem->va + mem->len) { siw_dbg_pd(pd, "MEM interval len %d\n", len); - siw_dbg_pd(pd, "[0x%pK, 0x%pK] out of bounds\n", + siw_dbg_pd(pd, "[0x%p, 0x%p] out of bounds\n", (void *)(uintptr_t)addr, (void *)(uintptr_t)(addr + len)); - siw_dbg_pd(pd, "[0x%pK, 0x%pK] STag=0x%08x\n", + siw_dbg_pd(pd, "[0x%p, 0x%p] STag=0x%08x\n", (void *)(uintptr_t)mem->va, (void *)(uintptr_t)(mem->va + mem->len), mem->stag); diff --git a/drivers/infiniband/sw/siw/siw_mem.h b/drivers/infiniband/sw/siw/siw_mem.h index e74cfcd6dbc1..8e769d30e2ac 100644 --- a/drivers/infiniband/sw/siw/siw_mem.h +++ b/drivers/infiniband/sw/siw/siw_mem.h @@ -12,7 +12,6 @@ void siw_umem_release(struct siw_umem *umem); struct siw_pbl *siw_pbl_alloc(u32 num_buf); dma_addr_t siw_pbl_get_buffer(struct siw_pbl *pbl, u64 off, int *len, int *idx); struct siw_mem *siw_mem_id2obj(struct siw_device *sdev, int stag_index); -int siw_mem_add(struct siw_device *sdev, struct siw_mem *m); int siw_invalidate_stag(struct ib_pd *pd, u32 stag); int siw_check_mem(struct ib_pd *pd, struct siw_mem *mem, u64 addr, enum ib_access_flags perms, int len); diff --git a/drivers/infiniband/sw/siw/siw_qp_rx.c b/drivers/infiniband/sw/siw/siw_qp_rx.c index 32554eba1eac..a10820e33887 100644 --- a/drivers/infiniband/sw/siw/siw_qp_rx.c +++ b/drivers/infiniband/sw/siw/siw_qp_rx.c @@ -38,7 +38,7 @@ static int siw_rx_umem(struct siw_rx_stream *srx, struct siw_umem *umem, p = siw_get_upage(umem, dest_addr); if (unlikely(!p)) { - pr_warn("siw: %s: [QP %u]: bogus addr: %pK, %pK\n", + pr_warn("siw: %s: [QP %u]: bogus addr: %p, %p\n", __func__, qp_id(rx_qp(srx)), (void *)(uintptr_t)dest_addr, (void *)(uintptr_t)umem->fp_addr); @@ -51,7 +51,7 @@ static int siw_rx_umem(struct siw_rx_stream *srx, struct siw_umem *umem, pg_off = dest_addr & ~PAGE_MASK; bytes = min(len, (int)PAGE_SIZE - pg_off); - siw_dbg_qp(rx_qp(srx), "page %pK, bytes=%u\n", p, bytes); + siw_dbg_qp(rx_qp(srx), "page %p, bytes=%u\n", p, bytes); dest = kmap_atomic(p); rv = skb_copy_bits(srx->skb, srx->skb_offset, dest + pg_off, @@ -105,11 +105,11 @@ static int siw_rx_kva(struct siw_rx_stream *srx, void *kva, int len) { int rv; - siw_dbg_qp(rx_qp(srx), "kva: 0x%pK, len: %u\n", kva, len); + siw_dbg_qp(rx_qp(srx), "kva: 0x%p, len: %u\n", kva, len); rv = skb_copy_bits(srx->skb, srx->skb_offset, kva, len); if (unlikely(rv)) { - pr_warn("siw: [QP %u]: %s, len %d, kva 0x%pK, rv %d\n", + pr_warn("siw: [QP %u]: %s, len %d, kva 0x%p, rv %d\n", qp_id(rx_qp(srx)), __func__, len, kva, rv); return rv; diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c index fd7b266a221b..2b2a7b8e93b0 100644 --- a/drivers/infiniband/sw/siw/siw_verbs.c +++ b/drivers/infiniband/sw/siw/siw_verbs.c @@ -936,7 +936,7 @@ int siw_post_send(struct ib_qp *base_qp, const struct ib_send_wr *wr, rv = -EINVAL; break; } - siw_dbg_qp(qp, "opcode %d, flags 0x%x, wr_id 0x%pK\n", + siw_dbg_qp(qp, "opcode %d, flags 0x%x, wr_id 0x%p\n", sqe->opcode, sqe->flags, (void *)(uintptr_t)sqe->id); @@ -1102,7 +1102,7 @@ int siw_post_receive(struct ib_qp *base_qp, const struct ib_recv_wr *wr, siw_dbg_qp(qp, "error %d\n", rv); *bad_wr = wr; } - return rv > 0 ? 
0 : rv; + return rv; } int siw_destroy_cq(struct ib_cq *base_cq, struct ib_udata *udata) @@ -1332,7 +1332,7 @@ struct ib_mr *siw_reg_user_mr(struct ib_pd *pd, u64 start, u64 len, struct siw_device *sdev = to_siw_dev(pd->device); int rv; - siw_dbg_pd(pd, "start: 0x%pK, va: 0x%pK, len: %llu\n", + siw_dbg_pd(pd, "start: 0x%p, va: 0x%p, len: %llu\n", (void *)(uintptr_t)start, (void *)(uintptr_t)rnic_va, (unsigned long long)len); @@ -1525,7 +1525,7 @@ int siw_map_mr_sg(struct ib_mr *base_mr, struct scatterlist *sl, int num_sle, mem->len = base_mr->length; mem->va = base_mr->iova; siw_dbg_mem(mem, - "%llu bytes, start 0x%pK, %u SLE to %u entries\n", + "%llu bytes, start 0x%p, %u SLE to %u entries\n", mem->len, (void *)(uintptr_t)mem->va, num_sle, pbl->num_buf); } diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index cd750f512dee..0a33d995d15d 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig @@ -192,6 +192,7 @@ config MSM_IOMMU If unsure, say N here. source "drivers/iommu/amd/Kconfig" +source "drivers/iommu/arm/Kconfig" source "drivers/iommu/intel/Kconfig" source "drivers/iommu/iommufd/Kconfig" source "drivers/iommu/riscv/Kconfig" @@ -199,7 +200,6 @@ source "drivers/iommu/riscv/Kconfig" config IRQ_REMAP bool "Support for Interrupt Remapping" depends on X86_64 && X86_IO_APIC && PCI_MSI && ACPI - select DMAR_TABLE if INTEL_IOMMU help Supports Interrupt remapping for IO-APIC and MSI devices. To use x2apic mode in the CPU's which support x2APIC enhancements or @@ -314,150 +314,6 @@ config APPLE_DART Say Y here if you are using an Apple SoC. -# ARM IOMMU support -config ARM_SMMU - tristate "ARM Ltd. System MMU (SMMU) Support" - depends on ARM64 || ARM || COMPILE_TEST - depends on !GENERIC_ATOMIC64 # for IOMMU_IO_PGTABLE_LPAE - select IOMMU_API - select IOMMU_IO_PGTABLE_LPAE - select ARM_DMA_USE_IOMMU if ARM - help - Support for implementations of the ARM System MMU architecture - versions 1 and 2. - - Say Y here if your SoC includes an IOMMU device implementing - the ARM SMMU architecture. - -config ARM_SMMU_LEGACY_DT_BINDINGS - bool "Support the legacy \"mmu-masters\" devicetree bindings" - depends on ARM_SMMU=y && OF - help - Support for the badly designed and deprecated "mmu-masters" - devicetree bindings. This allows some DMA masters to attach - to the SMMU but does not provide any support via the DMA API. - If you're lucky, you might be able to get VFIO up and running. - - If you say Y here then you'll make me very sad. Instead, say N - and move your firmware to the utopian future that was 2016. - -config ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT - bool "Default to disabling bypass on ARM SMMU v1 and v2" - depends on ARM_SMMU - default y - help - Say Y here to (by default) disable bypass streams such that - incoming transactions from devices that are not attached to - an iommu domain will report an abort back to the device and - will not be allowed to pass through the SMMU. - - Any old kernels that existed before this KConfig was - introduced would default to _allowing_ bypass (AKA the - equivalent of NO for this config). However the default for - this option is YES because the old behavior is insecure. - - There are few reasons to allow unmatched stream bypass, and - even fewer good ones. If saying YES here breaks your board - you should work on fixing your board. This KConfig option - is expected to be removed in the future and we'll simply - hardcode the bypass disable in the code. 
- - NOTE: the kernel command line parameter - 'arm-smmu.disable_bypass' will continue to override this - config. - -config ARM_SMMU_MMU_500_CPRE_ERRATA - bool "Enable errata workaround for CPRE in SMMU reset path" - depends on ARM_SMMU - default y - help - Say Y here (by default) to apply workaround to disable - MMU-500's next-page prefetcher for sake of 4 known errata. - - Say N here only when it is sure that any errata related to - prefetch enablement are not applicable on the platform. - Refer silicon-errata.rst for info on errata IDs. - -config ARM_SMMU_QCOM - def_tristate y - depends on ARM_SMMU && ARCH_QCOM - select QCOM_SCM - help - When running on a Qualcomm platform that has the custom variant - of the ARM SMMU, this needs to be built into the SMMU driver. - -config ARM_SMMU_QCOM_DEBUG - bool "ARM SMMU QCOM implementation defined debug support" - depends on ARM_SMMU_QCOM=y - help - Support for implementation specific debug features in ARM SMMU - hardware found in QTI platforms. This include support for - the Translation Buffer Units (TBU) that can be used to obtain - additional information when debugging memory management issues - like context faults. - - Say Y here to enable debug for issues such as context faults - or TLB sync timeouts which requires implementation defined - register dumps. - -config ARM_SMMU_V3 - tristate "ARM Ltd. System MMU Version 3 (SMMUv3) Support" - depends on ARM64 - select IOMMU_API - select IOMMU_IO_PGTABLE_LPAE - select GENERIC_MSI_IRQ - select IOMMUFD_DRIVER if IOMMUFD - help - Support for implementations of the ARM System MMU architecture - version 3 providing translation support to a PCIe root complex. - - Say Y here if your system includes an IOMMU device implementing - the ARM SMMUv3 architecture. - -if ARM_SMMU_V3 -config ARM_SMMU_V3_SVA - bool "Shared Virtual Addressing support for the ARM SMMUv3" - select IOMMU_SVA - select IOMMU_IOPF - select MMU_NOTIFIER - help - Support for sharing process address spaces with devices using the - SMMUv3. - - Say Y here if your system supports SVA extensions such as PCIe PASID - and PRI. - -config ARM_SMMU_V3_IOMMUFD - bool "Enable IOMMUFD features for ARM SMMUv3 (EXPERIMENTAL)" - depends on IOMMUFD - help - Support for IOMMUFD features intended to support virtual machines - with accelerated virtual IOMMUs. - - Say Y here if you are doing development and testing on this feature. - -config ARM_SMMU_V3_KUNIT_TEST - tristate "KUnit tests for arm-smmu-v3 driver" if !KUNIT_ALL_TESTS - depends on KUNIT - depends on ARM_SMMU_V3_SVA - default KUNIT_ALL_TESTS - help - Enable this option to unit-test arm-smmu-v3 driver functions. - - If unsure, say N. - -config TEGRA241_CMDQV - bool "NVIDIA Tegra241 CMDQ-V extension support for ARM SMMUv3" - depends on ACPI - help - Support for NVIDIA CMDQ-Virtualization extension for ARM SMMUv3. The - CMDQ-V extension is similar to v3.3 ECMDQ for multi command queues - support, except with virtualization capabilities. - - Say Y here if your system is NVIDIA Tegra241 (Grace) or it has the same - CMDQ-V extension. -endif - config S390_IOMMU def_bool y if S390 && PCI depends on S390 && PCI @@ -494,18 +350,6 @@ config MTK_IOMMU_V1 if unsure, say N here. -config QCOM_IOMMU - # Note: iommu drivers cannot (yet?) 
be built as modules - bool "Qualcomm IOMMU Support" - depends on ARCH_QCOM || COMPILE_TEST - depends on !GENERIC_ATOMIC64 # for IOMMU_IO_PGTABLE_LPAE - select QCOM_SCM - select IOMMU_API - select IOMMU_IO_PGTABLE_LPAE - select ARM_DMA_USE_IOMMU - help - Support for IOMMU on certain Qualcomm SoCs. - config HYPERV_IOMMU bool "Hyper-V IRQ Handling" depends on HYPERV && X86 diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile index 5e5a83c6c2aa..355294fa9033 100644 --- a/drivers/iommu/Makefile +++ b/drivers/iommu/Makefile @@ -1,6 +1,10 @@ # SPDX-License-Identifier: GPL-2.0 -obj-y += amd/ intel/ arm/ iommufd/ riscv/ +obj-y += arm/ iommufd/ +obj-$(CONFIG_AMD_IOMMU) += amd/ +obj-$(CONFIG_INTEL_IOMMU) += intel/ +obj-$(CONFIG_RISCV_IOMMU) += riscv/ obj-$(CONFIG_IOMMU_API) += iommu.o +obj-$(CONFIG_IOMMU_SUPPORT) += iommu-pages.o obj-$(CONFIG_IOMMU_API) += iommu-traces.o obj-$(CONFIG_IOMMU_API) += iommu-sysfs.o obj-$(CONFIG_IOMMU_DEBUGFS) += iommu-debugfs.o diff --git a/drivers/iommu/amd/Makefile b/drivers/iommu/amd/Makefile index 9de33b2d42f5..59c04a67f398 100644 --- a/drivers/iommu/amd/Makefile +++ b/drivers/iommu/amd/Makefile @@ -1,3 +1,3 @@ # SPDX-License-Identifier: GPL-2.0-only -obj-$(CONFIG_AMD_IOMMU) += iommu.o init.o quirks.o io_pgtable.o io_pgtable_v2.o ppr.o pasid.o +obj-y += iommu.o init.o quirks.o io_pgtable.o io_pgtable_v2.o ppr.o pasid.o obj-$(CONFIG_AMD_IOMMU_DEBUGFS) += debugfs.o diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h index 220c598b7e14..29a8864381c3 100644 --- a/drivers/iommu/amd/amd_iommu.h +++ b/drivers/iommu/amd/amd_iommu.h @@ -147,6 +147,8 @@ static inline int get_pci_sbdf_id(struct pci_dev *pdev) return PCI_SEG_DEVID_TO_SBDF(seg, devid); } +bool amd_iommu_ht_range_ignore(void); + /* * This must be called after device probe completes. During probe * use rlookup_amd_iommu() get the iommu. 
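The amd_iommu.h hunk above declares amd_iommu_ht_range_ignore(), and a later hunk in this series uses it to skip reserving the HyperTransport address window when EFR2 advertises FEATURE_HT_RANGE_IGNORE. A minimal sketch of that caller pattern, assuming the HT_RANGE_START/HT_RANGE_END constants and the iommu_alloc_resv_region() helper already used by the driver; the example_* name is hypothetical and the body is illustrative, not the exact upstream hunk:

/*
 * Sketch: consult the new capability helper before reserving the
 * HyperTransport address window.  When the helper returns true the HT
 * range is ignored by the IOMMU, so no IOMMU_RESV_RESERVED region is
 * needed for it.
 */
static void example_get_ht_resv_region(struct device *dev,
				       struct list_head *head)
{
	struct iommu_resv_region *region;

	if (amd_iommu_ht_range_ignore())
		return;

	region = iommu_alloc_resv_region(HT_RANGE_START,
					 HT_RANGE_END - HT_RANGE_START + 1,
					 0, IOMMU_RESV_RESERVED, GFP_KERNEL);
	if (!region)
		return;

	list_add_tail(&region->list, head);
}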
diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h index 5089b58e528a..ccbab3a4811a 100644 --- a/drivers/iommu/amd/amd_iommu_types.h +++ b/drivers/iommu/amd/amd_iommu_types.h @@ -29,8 +29,6 @@ * some size calculation constants */ #define DEV_TABLE_ENTRY_SIZE 32 -#define ALIAS_TABLE_ENTRY_SIZE 2 -#define RLOOKUP_TABLE_ENTRY_SIZE (sizeof(void *)) /* Capability offsets used by the driver */ #define MMIO_CAP_HDR_OFFSET 0x00 @@ -111,6 +109,7 @@ #define FEATURE_SNPAVICSUP GENMASK_ULL(7, 5) #define FEATURE_SNPAVICSUP_GAM(x) \ (FIELD_GET(FEATURE_SNPAVICSUP, x) == 0x1) +#define FEATURE_HT_RANGE_IGNORE BIT_ULL(11) #define FEATURE_NUM_INT_REMAP_SUP GENMASK_ULL(9, 8) #define FEATURE_NUM_INT_REMAP_SUP_2K(x) \ @@ -316,6 +315,7 @@ #define DTE_IRQ_REMAP_INTCTL (2ULL << 60) #define DTE_IRQ_REMAP_ENABLE 1ULL +#define DTE_INTTAB_ALIGNMENT 128 #define DTE_INTTABLEN_MASK (0xfULL << 1) #define DTE_INTTABLEN_VALUE_512 9ULL #define DTE_INTTABLEN_512 (DTE_INTTABLEN_VALUE_512 << 1) @@ -616,12 +616,6 @@ struct amd_iommu_pci_seg { /* Size of the device table */ u32 dev_table_size; - /* Size of the alias table */ - u32 alias_table_size; - - /* Size of the rlookup table */ - u32 rlookup_table_size; - /* * device table virtual address * diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c index 14aa0d77df26..c06b62f87b9b 100644 --- a/drivers/iommu/amd/init.c +++ b/drivers/iommu/amd/init.c @@ -243,17 +243,14 @@ static void init_translation_status(struct amd_iommu *iommu) iommu->flags |= AMD_IOMMU_FLAG_TRANS_PRE_ENABLED; } -static inline unsigned long tbl_size(int entry_size, int last_bdf) +int amd_iommu_get_num_iommus(void) { - unsigned shift = PAGE_SHIFT + - get_order((last_bdf + 1) * entry_size); - - return 1UL << shift; + return amd_iommus_present; } -int amd_iommu_get_num_iommus(void) +bool amd_iommu_ht_range_ignore(void) { - return amd_iommus_present; + return check_feature2(FEATURE_HT_RANGE_IGNORE); } /* @@ -634,8 +631,8 @@ static int __init find_last_devid_acpi(struct acpi_table_header *table, u16 pci_ /* Allocate per PCI segment device table */ static inline int __init alloc_dev_table(struct amd_iommu_pci_seg *pci_seg) { - pci_seg->dev_table = iommu_alloc_pages(GFP_KERNEL | GFP_DMA32, - get_order(pci_seg->dev_table_size)); + pci_seg->dev_table = iommu_alloc_pages_sz(GFP_KERNEL | GFP_DMA32, + pci_seg->dev_table_size); if (!pci_seg->dev_table) return -ENOMEM; @@ -644,16 +641,16 @@ static inline int __init alloc_dev_table(struct amd_iommu_pci_seg *pci_seg) static inline void free_dev_table(struct amd_iommu_pci_seg *pci_seg) { - iommu_free_pages(pci_seg->dev_table, - get_order(pci_seg->dev_table_size)); + iommu_free_pages(pci_seg->dev_table); pci_seg->dev_table = NULL; } /* Allocate per PCI segment IOMMU rlookup table. 
*/ static inline int __init alloc_rlookup_table(struct amd_iommu_pci_seg *pci_seg) { - pci_seg->rlookup_table = iommu_alloc_pages(GFP_KERNEL, - get_order(pci_seg->rlookup_table_size)); + pci_seg->rlookup_table = kvcalloc(pci_seg->last_bdf + 1, + sizeof(*pci_seg->rlookup_table), + GFP_KERNEL); if (pci_seg->rlookup_table == NULL) return -ENOMEM; @@ -662,17 +659,15 @@ static inline int __init alloc_rlookup_table(struct amd_iommu_pci_seg *pci_seg) static inline void free_rlookup_table(struct amd_iommu_pci_seg *pci_seg) { - iommu_free_pages(pci_seg->rlookup_table, - get_order(pci_seg->rlookup_table_size)); + kvfree(pci_seg->rlookup_table); pci_seg->rlookup_table = NULL; } static inline int __init alloc_irq_lookup_table(struct amd_iommu_pci_seg *pci_seg) { - pci_seg->irq_lookup_table = iommu_alloc_pages(GFP_KERNEL, - get_order(pci_seg->rlookup_table_size)); - kmemleak_alloc(pci_seg->irq_lookup_table, - pci_seg->rlookup_table_size, 1, GFP_KERNEL); + pci_seg->irq_lookup_table = kvcalloc(pci_seg->last_bdf + 1, + sizeof(*pci_seg->irq_lookup_table), + GFP_KERNEL); if (pci_seg->irq_lookup_table == NULL) return -ENOMEM; @@ -681,9 +676,7 @@ static inline int __init alloc_irq_lookup_table(struct amd_iommu_pci_seg *pci_se static inline void free_irq_lookup_table(struct amd_iommu_pci_seg *pci_seg) { - kmemleak_free(pci_seg->irq_lookup_table); - iommu_free_pages(pci_seg->irq_lookup_table, - get_order(pci_seg->rlookup_table_size)); + kvfree(pci_seg->irq_lookup_table); pci_seg->irq_lookup_table = NULL; } @@ -691,8 +684,9 @@ static int __init alloc_alias_table(struct amd_iommu_pci_seg *pci_seg) { int i; - pci_seg->alias_table = iommu_alloc_pages(GFP_KERNEL, - get_order(pci_seg->alias_table_size)); + pci_seg->alias_table = kvmalloc_array(pci_seg->last_bdf + 1, + sizeof(*pci_seg->alias_table), + GFP_KERNEL); if (!pci_seg->alias_table) return -ENOMEM; @@ -707,8 +701,7 @@ static int __init alloc_alias_table(struct amd_iommu_pci_seg *pci_seg) static void __init free_alias_table(struct amd_iommu_pci_seg *pci_seg) { - iommu_free_pages(pci_seg->alias_table, - get_order(pci_seg->alias_table_size)); + kvfree(pci_seg->alias_table); pci_seg->alias_table = NULL; } @@ -719,8 +712,7 @@ static void __init free_alias_table(struct amd_iommu_pci_seg *pci_seg) */ static int __init alloc_command_buffer(struct amd_iommu *iommu) { - iommu->cmd_buf = iommu_alloc_pages(GFP_KERNEL, - get_order(CMD_BUFFER_SIZE)); + iommu->cmd_buf = iommu_alloc_pages_sz(GFP_KERNEL, CMD_BUFFER_SIZE); return iommu->cmd_buf ? 
0 : -ENOMEM; } @@ -817,20 +809,22 @@ static void iommu_disable_command_buffer(struct amd_iommu *iommu) static void __init free_command_buffer(struct amd_iommu *iommu) { - iommu_free_pages(iommu->cmd_buf, get_order(CMD_BUFFER_SIZE)); + iommu_free_pages(iommu->cmd_buf); } void *__init iommu_alloc_4k_pages(struct amd_iommu *iommu, gfp_t gfp, size_t size) { - int order = get_order(size); - void *buf = iommu_alloc_pages(gfp, order); + void *buf; - if (buf && - check_feature(FEATURE_SNP) && - set_memory_4k((unsigned long)buf, (1 << order))) { - iommu_free_pages(buf, order); - buf = NULL; + size = PAGE_ALIGN(size); + buf = iommu_alloc_pages_sz(gfp, size); + if (!buf) + return NULL; + if (check_feature(FEATURE_SNP) && + set_memory_4k((unsigned long)buf, size / PAGE_SIZE)) { + iommu_free_pages(buf); + return NULL; } return buf; @@ -873,14 +867,14 @@ static void iommu_disable_event_buffer(struct amd_iommu *iommu) static void __init free_event_buffer(struct amd_iommu *iommu) { - iommu_free_pages(iommu->evt_buf, get_order(EVT_BUFFER_SIZE)); + iommu_free_pages(iommu->evt_buf); } static void free_ga_log(struct amd_iommu *iommu) { #ifdef CONFIG_IRQ_REMAP - iommu_free_pages(iommu->ga_log, get_order(GA_LOG_SIZE)); - iommu_free_pages(iommu->ga_log_tail, get_order(8)); + iommu_free_pages(iommu->ga_log); + iommu_free_pages(iommu->ga_log_tail); #endif } @@ -925,11 +919,11 @@ static int iommu_init_ga_log(struct amd_iommu *iommu) if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir)) return 0; - iommu->ga_log = iommu_alloc_pages(GFP_KERNEL, get_order(GA_LOG_SIZE)); + iommu->ga_log = iommu_alloc_pages_sz(GFP_KERNEL, GA_LOG_SIZE); if (!iommu->ga_log) goto err_out; - iommu->ga_log_tail = iommu_alloc_pages(GFP_KERNEL, get_order(8)); + iommu->ga_log_tail = iommu_alloc_pages_sz(GFP_KERNEL, 8); if (!iommu->ga_log_tail) goto err_out; @@ -950,7 +944,7 @@ static int __init alloc_cwwb_sem(struct amd_iommu *iommu) static void __init free_cwwb_sem(struct amd_iommu *iommu) { if (iommu->cmd_sem) - iommu_free_page((void *)iommu->cmd_sem); + iommu_free_pages((void *)iommu->cmd_sem); } static void iommu_enable_xt(struct amd_iommu *iommu) @@ -1024,8 +1018,8 @@ static bool __copy_device_table(struct amd_iommu *iommu) if (!old_devtb) return false; - pci_seg->old_dev_tbl_cpy = iommu_alloc_pages(GFP_KERNEL | GFP_DMA32, - get_order(pci_seg->dev_table_size)); + pci_seg->old_dev_tbl_cpy = iommu_alloc_pages_sz( + GFP_KERNEL | GFP_DMA32, pci_seg->dev_table_size); if (pci_seg->old_dev_tbl_cpy == NULL) { pr_err("Failed to allocate memory for copying old device table!\n"); memunmap(old_devtb); @@ -1599,9 +1593,9 @@ static struct amd_iommu_pci_seg *__init alloc_pci_segment(u16 id, pci_seg->last_bdf = last_bdf; DUMP_printk("PCI segment : 0x%0x, last bdf : 0x%04x\n", id, last_bdf); - pci_seg->dev_table_size = tbl_size(DEV_TABLE_ENTRY_SIZE, last_bdf); - pci_seg->alias_table_size = tbl_size(ALIAS_TABLE_ENTRY_SIZE, last_bdf); - pci_seg->rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE, last_bdf); + pci_seg->dev_table_size = + max(roundup_pow_of_two((last_bdf + 1) * DEV_TABLE_ENTRY_SIZE), + SZ_4K); pci_seg->id = id; init_llist_head(&pci_seg->dev_data_list); @@ -2789,8 +2783,7 @@ static void early_enable_iommus(void) for_each_pci_segment(pci_seg) { if (pci_seg->old_dev_tbl_cpy != NULL) { - iommu_free_pages(pci_seg->old_dev_tbl_cpy, - get_order(pci_seg->dev_table_size)); + iommu_free_pages(pci_seg->old_dev_tbl_cpy); pci_seg->old_dev_tbl_cpy = NULL; } } @@ -2803,8 +2796,7 @@ static void early_enable_iommus(void) pr_info("Copied DEV table from 
previous kernel.\n"); for_each_pci_segment(pci_seg) { - iommu_free_pages(pci_seg->dev_table, - get_order(pci_seg->dev_table_size)); + iommu_free_pages(pci_seg->dev_table); pci_seg->dev_table = pci_seg->old_dev_tbl_cpy; } diff --git a/drivers/iommu/amd/io_pgtable.c b/drivers/iommu/amd/io_pgtable.c index 26cf562dde11..4d308c071134 100644 --- a/drivers/iommu/amd/io_pgtable.c +++ b/drivers/iommu/amd/io_pgtable.c @@ -47,14 +47,7 @@ static u64 *first_pte_l7(u64 *pte, unsigned long *page_size, return fpte; } -static void free_pt_page(u64 *pt, struct list_head *freelist) -{ - struct page *p = virt_to_page(pt); - - list_add_tail(&p->lru, freelist); -} - -static void free_pt_lvl(u64 *pt, struct list_head *freelist, int lvl) +static void free_pt_lvl(u64 *pt, struct iommu_pages_list *freelist, int lvl) { u64 *p; int i; @@ -77,20 +70,20 @@ static void free_pt_lvl(u64 *pt, struct list_head *freelist, int lvl) if (lvl > 2) free_pt_lvl(p, freelist, lvl - 1); else - free_pt_page(p, freelist); + iommu_pages_list_add(freelist, p); } - free_pt_page(pt, freelist); + iommu_pages_list_add(freelist, pt); } -static void free_sub_pt(u64 *root, int mode, struct list_head *freelist) +static void free_sub_pt(u64 *root, int mode, struct iommu_pages_list *freelist) { switch (mode) { case PAGE_MODE_NONE: case PAGE_MODE_7_LEVEL: break; case PAGE_MODE_1_LEVEL: - free_pt_page(root, freelist); + iommu_pages_list_add(freelist, root); break; case PAGE_MODE_2_LEVEL: case PAGE_MODE_3_LEVEL: @@ -121,7 +114,7 @@ static bool increase_address_space(struct amd_io_pgtable *pgtable, bool ret = true; u64 *pte; - pte = iommu_alloc_page_node(cfg->amd.nid, gfp); + pte = iommu_alloc_pages_node_sz(cfg->amd.nid, gfp, SZ_4K); if (!pte) return false; @@ -146,7 +139,7 @@ static bool increase_address_space(struct amd_io_pgtable *pgtable, out: spin_unlock_irqrestore(&domain->lock, flags); - iommu_free_page(pte); + iommu_free_pages(pte); return ret; } @@ -213,7 +206,8 @@ static u64 *alloc_pte(struct amd_io_pgtable *pgtable, if (!IOMMU_PTE_PRESENT(__pte) || pte_level == PAGE_MODE_NONE) { - page = iommu_alloc_page_node(cfg->amd.nid, gfp); + page = iommu_alloc_pages_node_sz(cfg->amd.nid, gfp, + SZ_4K); if (!page) return NULL; @@ -222,7 +216,7 @@ static u64 *alloc_pte(struct amd_io_pgtable *pgtable, /* pte could have been changed somewhere. 
*/ if (!try_cmpxchg64(pte, &__pte, __npte)) - iommu_free_page(page); + iommu_free_pages(page); else if (IOMMU_PTE_PRESENT(__pte)) *updated = true; @@ -299,7 +293,8 @@ static u64 *fetch_pte(struct amd_io_pgtable *pgtable, return pte; } -static void free_clear_pte(u64 *pte, u64 pteval, struct list_head *freelist) +static void free_clear_pte(u64 *pte, u64 pteval, + struct iommu_pages_list *freelist) { u64 *pt; int mode; @@ -328,7 +323,7 @@ static int iommu_v1_map_pages(struct io_pgtable_ops *ops, unsigned long iova, int prot, gfp_t gfp, size_t *mapped) { struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops); - LIST_HEAD(freelist); + struct iommu_pages_list freelist = IOMMU_PAGES_LIST_INIT(freelist); bool updated = false; u64 __pte, *pte; int ret, i, count; @@ -353,7 +348,7 @@ static int iommu_v1_map_pages(struct io_pgtable_ops *ops, unsigned long iova, for (i = 0; i < count; ++i) free_clear_pte(&pte[i], pte[i], &freelist); - if (!list_empty(&freelist)) + if (!iommu_pages_list_empty(&freelist)) updated = true; if (count > 1) { @@ -524,7 +519,7 @@ static int iommu_v1_read_and_clear_dirty(struct io_pgtable_ops *ops, static void v1_free_pgtable(struct io_pgtable *iop) { struct amd_io_pgtable *pgtable = container_of(iop, struct amd_io_pgtable, pgtbl); - LIST_HEAD(freelist); + struct iommu_pages_list freelist = IOMMU_PAGES_LIST_INIT(freelist); if (pgtable->mode == PAGE_MODE_NONE) return; @@ -541,7 +536,8 @@ static struct io_pgtable *v1_alloc_pgtable(struct io_pgtable_cfg *cfg, void *coo { struct amd_io_pgtable *pgtable = io_pgtable_cfg_to_data(cfg); - pgtable->root = iommu_alloc_page_node(cfg->amd.nid, GFP_KERNEL); + pgtable->root = + iommu_alloc_pages_node_sz(cfg->amd.nid, GFP_KERNEL, SZ_4K); if (!pgtable->root) return NULL; pgtable->mode = PAGE_MODE_3_LEVEL; diff --git a/drivers/iommu/amd/io_pgtable_v2.c b/drivers/iommu/amd/io_pgtable_v2.c index a56a27396305..b47941353ccb 100644 --- a/drivers/iommu/amd/io_pgtable_v2.c +++ b/drivers/iommu/amd/io_pgtable_v2.c @@ -121,10 +121,10 @@ static void free_pgtable(u64 *pt, int level) if (level > 2) free_pgtable(p, level - 1); else - iommu_free_page(p); + iommu_free_pages(p); } - iommu_free_page(pt); + iommu_free_pages(pt); } /* Allocate page table */ @@ -152,14 +152,14 @@ static u64 *v2_alloc_pte(int nid, u64 *pgd, unsigned long iova, } if (!IOMMU_PTE_PRESENT(__pte)) { - page = iommu_alloc_page_node(nid, gfp); + page = iommu_alloc_pages_node_sz(nid, gfp, SZ_4K); if (!page) return NULL; __npte = set_pgtable_attr(page); /* pte could have been changed somewhere. 
*/ if (!try_cmpxchg64(pte, &__pte, __npte)) - iommu_free_page(page); + iommu_free_pages(page); else if (IOMMU_PTE_PRESENT(__pte)) *updated = true; @@ -181,7 +181,7 @@ static u64 *v2_alloc_pte(int nid, u64 *pgd, unsigned long iova, if (pg_size == IOMMU_PAGE_SIZE_1G) free_pgtable(__pte, end_level - 1); else if (pg_size == IOMMU_PAGE_SIZE_2M) - iommu_free_page(__pte); + iommu_free_pages(__pte); } return pte; @@ -346,7 +346,7 @@ static struct io_pgtable *v2_alloc_pgtable(struct io_pgtable_cfg *cfg, void *coo struct amd_io_pgtable *pgtable = io_pgtable_cfg_to_data(cfg); int ias = IOMMU_IN_ADDR_BIT_SIZE; - pgtable->pgd = iommu_alloc_page_node(cfg->amd.nid, GFP_KERNEL); + pgtable->pgd = iommu_alloc_pages_node_sz(cfg->amd.nid, GFP_KERNEL, SZ_4K); if (!pgtable->pgd) return NULL; diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c index f34209b08b4c..3117d99cf83d 100644 --- a/drivers/iommu/amd/iommu.c +++ b/drivers/iommu/amd/iommu.c @@ -241,7 +241,9 @@ static inline int get_acpihid_device_id(struct device *dev, struct acpihid_map_entry **entry) { struct acpi_device *adev = ACPI_COMPANION(dev); - struct acpihid_map_entry *p; + struct acpihid_map_entry *p, *p1 = NULL; + int hid_count = 0; + bool fw_bug; if (!adev) return -ENODEV; @@ -249,12 +251,33 @@ static inline int get_acpihid_device_id(struct device *dev, list_for_each_entry(p, &acpihid_map, list) { if (acpi_dev_hid_uid_match(adev, p->hid, p->uid[0] ? p->uid : NULL)) { - if (entry) - *entry = p; - return p->devid; + p1 = p; + fw_bug = false; + hid_count = 1; + break; + } + + /* + * Count HID matches w/o UID, raise FW_BUG but allow exactly one match + */ + if (acpi_dev_hid_match(adev, p->hid)) { + p1 = p; + hid_count++; + fw_bug = true; } } - return -EINVAL; + + if (!p1) + return -EINVAL; + if (fw_bug) + dev_err_once(dev, FW_BUG "No ACPI device matched UID, but %d device%s matched HID.\n", + hid_count, hid_count > 1 ? "s" : ""); + if (hid_count > 1) + return -EINVAL; + if (entry) + *entry = p1; + + return p1->devid; } static inline int get_device_sbdf_id(struct device *dev) @@ -982,6 +1005,14 @@ int amd_iommu_register_ga_log_notifier(int (*notifier)(u32)) { iommu_ga_log_notifier = notifier; + /* + * Ensure all in-flight IRQ handlers run to completion before returning + * to the caller, e.g. to ensure module code isn't unloaded while it's + * being executed in the IRQ handler. 
+ */ + if (!notifier) + synchronize_rcu(); + return 0; } EXPORT_SYMBOL(amd_iommu_register_ga_log_notifier); @@ -1812,7 +1843,7 @@ static void free_gcr3_tbl_level1(u64 *tbl) ptr = iommu_phys_to_virt(tbl[i] & PAGE_MASK); - iommu_free_page(ptr); + iommu_free_pages(ptr); } } @@ -1845,7 +1876,7 @@ static void free_gcr3_table(struct gcr3_tbl_info *gcr3_info) /* Free per device domain ID */ pdom_id_free(gcr3_info->domid); - iommu_free_page(gcr3_info->gcr3_tbl); + iommu_free_pages(gcr3_info->gcr3_tbl); gcr3_info->gcr3_tbl = NULL; } @@ -1884,7 +1915,7 @@ static int setup_gcr3_table(struct gcr3_tbl_info *gcr3_info, return -ENOSPC; gcr3_info->domid = domid; - gcr3_info->gcr3_tbl = iommu_alloc_page_node(nid, GFP_ATOMIC); + gcr3_info->gcr3_tbl = iommu_alloc_pages_node_sz(nid, GFP_ATOMIC, SZ_4K); if (gcr3_info->gcr3_tbl == NULL) { pdom_id_free(domid); return -ENOMEM; @@ -2908,6 +2939,9 @@ static void amd_iommu_get_resv_regions(struct device *dev, return; list_add_tail(®ion->list, head); + if (amd_iommu_ht_range_ignore()) + return; + region = iommu_alloc_resv_region(HT_RANGE_START, HT_RANGE_END - HT_RANGE_START + 1, 0, IOMMU_RESV_RESERVED, GFP_KERNEL); @@ -2984,38 +3018,6 @@ static const struct iommu_dirty_ops amd_dirty_ops = { .read_and_clear_dirty = amd_iommu_read_and_clear_dirty, }; -static int amd_iommu_dev_enable_feature(struct device *dev, - enum iommu_dev_features feat) -{ - int ret = 0; - - switch (feat) { - case IOMMU_DEV_FEAT_IOPF: - case IOMMU_DEV_FEAT_SVA: - break; - default: - ret = -EINVAL; - break; - } - return ret; -} - -static int amd_iommu_dev_disable_feature(struct device *dev, - enum iommu_dev_features feat) -{ - int ret = 0; - - switch (feat) { - case IOMMU_DEV_FEAT_IOPF: - case IOMMU_DEV_FEAT_SVA: - break; - default: - ret = -EINVAL; - break; - } - return ret; -} - const struct iommu_ops amd_iommu_ops = { .capable = amd_iommu_capable, .blocked_domain = &blocked_domain, @@ -3029,8 +3031,6 @@ const struct iommu_ops amd_iommu_ops = { .get_resv_regions = amd_iommu_get_resv_regions, .is_attach_deferred = amd_iommu_is_attach_deferred, .def_domain_type = amd_iommu_def_domain_type, - .dev_enable_feat = amd_iommu_dev_enable_feature, - .dev_disable_feat = amd_iommu_dev_disable_feature, .page_response = amd_iommu_page_response, .default_domain_ops = &(const struct iommu_domain_ops) { .attach_dev = amd_iommu_attach_device, @@ -3129,7 +3129,7 @@ static struct irq_remap_table *get_irq_table(struct amd_iommu *iommu, u16 devid) return table; } -static struct irq_remap_table *__alloc_irq_table(int nid, int order) +static struct irq_remap_table *__alloc_irq_table(int nid, size_t size) { struct irq_remap_table *table; @@ -3137,7 +3137,8 @@ static struct irq_remap_table *__alloc_irq_table(int nid, int order) if (!table) return NULL; - table->table = iommu_alloc_pages_node(nid, GFP_KERNEL, order); + table->table = iommu_alloc_pages_node_sz( + nid, GFP_KERNEL, max(DTE_INTTAB_ALIGNMENT, size)); if (!table->table) { kfree(table); return NULL; @@ -3191,7 +3192,6 @@ static struct irq_remap_table *alloc_irq_table(struct amd_iommu *iommu, struct irq_remap_table *new_table = NULL; struct amd_iommu_pci_seg *pci_seg; unsigned long flags; - int order = get_order(get_irq_table_size(max_irqs)); int nid = iommu && iommu->dev ? 
dev_to_node(&iommu->dev->dev) : NUMA_NO_NODE; u16 alias; @@ -3211,7 +3211,7 @@ static struct irq_remap_table *alloc_irq_table(struct amd_iommu *iommu, spin_unlock_irqrestore(&iommu_table_lock, flags); /* Nothing there yet, allocate new irq remapping table */ - new_table = __alloc_irq_table(nid, order); + new_table = __alloc_irq_table(nid, get_irq_table_size(max_irqs)); if (!new_table) return NULL; @@ -3246,7 +3246,7 @@ out_unlock: spin_unlock_irqrestore(&iommu_table_lock, flags); if (new_table) { - iommu_free_pages(new_table->table, order); + iommu_free_pages(new_table->table); kfree(new_table); } return table; diff --git a/drivers/iommu/amd/ppr.c b/drivers/iommu/amd/ppr.c index 7c67d69f0b8c..e6767c057d01 100644 --- a/drivers/iommu/amd/ppr.c +++ b/drivers/iommu/amd/ppr.c @@ -48,7 +48,7 @@ void amd_iommu_enable_ppr_log(struct amd_iommu *iommu) void __init amd_iommu_free_ppr_log(struct amd_iommu *iommu) { - iommu_free_pages(iommu->ppr_log, get_order(PPR_LOG_SIZE)); + iommu_free_pages(iommu->ppr_log); } /* diff --git a/drivers/iommu/apple-dart.c b/drivers/iommu/apple-dart.c index e13501541fdd..757d24f67ad4 100644 --- a/drivers/iommu/apple-dart.c +++ b/drivers/iommu/apple-dart.c @@ -776,8 +776,7 @@ static void apple_dart_domain_free(struct iommu_domain *domain) { struct apple_dart_domain *dart_domain = to_dart_domain(domain); - if (dart_domain->pgtbl_ops) - free_io_pgtable_ops(dart_domain->pgtbl_ops); + free_io_pgtable_ops(dart_domain->pgtbl_ops); kfree(dart_domain); } diff --git a/drivers/iommu/arm/Kconfig b/drivers/iommu/arm/Kconfig new file mode 100644 index 000000000000..ef42bbe07dbe --- /dev/null +++ b/drivers/iommu/arm/Kconfig @@ -0,0 +1,144 @@ +# SPDX-License-Identifier: GPL-2.0-only +# ARM IOMMU support +config ARM_SMMU + tristate "ARM Ltd. System MMU (SMMU) Support" + depends on ARM64 || ARM || COMPILE_TEST + depends on !GENERIC_ATOMIC64 # for IOMMU_IO_PGTABLE_LPAE + select IOMMU_API + select IOMMU_IO_PGTABLE_LPAE + select ARM_DMA_USE_IOMMU if ARM + help + Support for implementations of the ARM System MMU architecture + versions 1 and 2. + + Say Y here if your SoC includes an IOMMU device implementing + the ARM SMMU architecture. + +if ARM_SMMU +config ARM_SMMU_LEGACY_DT_BINDINGS + bool "Support the legacy \"mmu-masters\" devicetree bindings" + depends on ARM_SMMU=y && OF + help + Support for the badly designed and deprecated "mmu-masters" + devicetree bindings. This allows some DMA masters to attach + to the SMMU but does not provide any support via the DMA API. + If you're lucky, you might be able to get VFIO up and running. + + If you say Y here then you'll make me very sad. Instead, say N + and move your firmware to the utopian future that was 2016. + +config ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT + bool "Disable unmatched stream bypass by default" if EXPERT + default y + help + If your firmware is broken and fails to describe StreamIDs which + Linux should know about in order to manage the SMMU correctly and + securely, and you don't want to boot with the 'arm-smmu.disable_bypass=0' + command line parameter, then as a last resort you can turn it off + by default here. But don't. This option may be removed at any time. + + Note that 'arm-smmu.disable_bypass=1' will still take precedence. + +config ARM_SMMU_MMU_500_CPRE_ERRATA + bool "Enable errata workaround for CPRE in SMMU reset path" + default y + help + Say Y here (by default) to apply workaround to disable + MMU-500's next-page prefetcher for sake of 4 known errata. 
+ + Say N here only when it is sure that any errata related to + prefetch enablement are not applicable on the platform. + Refer silicon-errata.rst for info on errata IDs. + +config ARM_SMMU_QCOM + def_tristate y + depends on ARCH_QCOM + select QCOM_SCM + help + When running on a Qualcomm platform that has the custom variant + of the ARM SMMU, this needs to be built into the SMMU driver. + +config ARM_SMMU_QCOM_DEBUG + bool "ARM SMMU QCOM implementation defined debug support" + depends on ARM_SMMU_QCOM=y + help + Support for implementation specific debug features in ARM SMMU + hardware found in QTI platforms. This include support for + the Translation Buffer Units (TBU) that can be used to obtain + additional information when debugging memory management issues + like context faults. + + Say Y here to enable debug for issues such as context faults + or TLB sync timeouts which requires implementation defined + register dumps. +endif + +config ARM_SMMU_V3 + tristate "ARM Ltd. System MMU Version 3 (SMMUv3) Support" + depends on ARM64 + select IOMMU_API + select IOMMU_IO_PGTABLE_LPAE + select GENERIC_MSI_IRQ + select IOMMUFD_DRIVER if IOMMUFD + help + Support for implementations of the ARM System MMU architecture + version 3 providing translation support to a PCIe root complex. + + Say Y here if your system includes an IOMMU device implementing + the ARM SMMUv3 architecture. + +if ARM_SMMU_V3 +config ARM_SMMU_V3_SVA + bool "Shared Virtual Addressing support for the ARM SMMUv3" + select IOMMU_SVA + select IOMMU_IOPF + select MMU_NOTIFIER + help + Support for sharing process address spaces with devices using the + SMMUv3. + + Say Y here if your system supports SVA extensions such as PCIe PASID + and PRI. + +config ARM_SMMU_V3_IOMMUFD + bool "Enable IOMMUFD features for ARM SMMUv3 (EXPERIMENTAL)" + depends on IOMMUFD + help + Support for IOMMUFD features intended to support virtual machines + with accelerated virtual IOMMUs. + + Say Y here if you are doing development and testing on this feature. + +config ARM_SMMU_V3_KUNIT_TEST + tristate "KUnit tests for arm-smmu-v3 driver" if !KUNIT_ALL_TESTS + depends on KUNIT + depends on ARM_SMMU_V3_SVA + default KUNIT_ALL_TESTS + help + Enable this option to unit-test arm-smmu-v3 driver functions. + + If unsure, say N. + +config TEGRA241_CMDQV + bool "NVIDIA Tegra241 CMDQ-V extension support for ARM SMMUv3" + depends on ACPI + help + Support for NVIDIA CMDQ-Virtualization extension for ARM SMMUv3. The + CMDQ-V extension is similar to v3.3 ECMDQ for multi command queues + support, except with virtualization capabilities. + + Say Y here if your system is NVIDIA Tegra241 (Grace) or it has the same + CMDQ-V extension. +endif + +config QCOM_IOMMU + # Note: iommu drivers cannot (yet?) be built as modules + bool "Qualcomm IOMMU Support" + depends on ARCH_QCOM || COMPILE_TEST + depends on !GENERIC_ATOMIC64 # for IOMMU_IO_PGTABLE_LPAE + select QCOM_SCM + select IOMMU_API + select IOMMU_IO_PGTABLE_LPAE + select ARM_DMA_USE_IOMMU + help + Support for IOMMU on certain Qualcomm SoCs. 
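The AMD IOMMU hunks above also convert the driver from order-based page allocation (iommu_alloc_pages()/iommu_free_pages() paired with get_order()) to the size-based iommu_alloc_pages_sz()/iommu_alloc_pages_node_sz() interface, where the free side takes only the pointer. A minimal sketch of the resulting call pattern; the example_* wrappers are hypothetical and only illustrate the API shape visible in the hunks:

/*
 * Sketch of the size-based allocation pattern used in the hunks above:
 * the caller passes a byte size at allocation time and frees with just
 * the pointer, instead of recomputing get_order() at both sites.
 */
static void *example_alloc_table(int nid, size_t size)
{
	/* node-aware variant, as used for the IRQ remapping tables */
	return iommu_alloc_pages_node_sz(nid, GFP_KERNEL, size);
}

static void example_free_table(void *table)
{
	/* no order/size argument needed on the free side */
	iommu_free_pages(table);
}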
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c index 980cc6b33c43..0601dece0a0d 100644 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c @@ -13,8 +13,6 @@ #include "arm-smmu-v3.h" #include "../../io-pgtable-arm.h" -static DEFINE_MUTEX(sva_lock); - static void __maybe_unused arm_smmu_update_s1_domain_cd_entry(struct arm_smmu_domain *smmu_domain) { @@ -257,84 +255,6 @@ bool arm_smmu_sva_supported(struct arm_smmu_device *smmu) return true; } -bool arm_smmu_master_iopf_supported(struct arm_smmu_master *master) -{ - /* We're not keeping track of SIDs in fault events */ - if (master->num_streams != 1) - return false; - - return master->stall_enabled; -} - -bool arm_smmu_master_sva_supported(struct arm_smmu_master *master) -{ - if (!(master->smmu->features & ARM_SMMU_FEAT_SVA)) - return false; - - /* SSID support is mandatory for the moment */ - return master->ssid_bits; -} - -bool arm_smmu_master_sva_enabled(struct arm_smmu_master *master) -{ - bool enabled; - - mutex_lock(&sva_lock); - enabled = master->sva_enabled; - mutex_unlock(&sva_lock); - return enabled; -} - -static int arm_smmu_master_sva_enable_iopf(struct arm_smmu_master *master) -{ - struct device *dev = master->dev; - - /* - * Drivers for devices supporting PRI or stall should enable IOPF first. - * Others have device-specific fault handlers and don't need IOPF. - */ - if (!arm_smmu_master_iopf_supported(master)) - return 0; - - if (!master->iopf_enabled) - return -EINVAL; - - return iopf_queue_add_device(master->smmu->evtq.iopf, dev); -} - -static void arm_smmu_master_sva_disable_iopf(struct arm_smmu_master *master) -{ - struct device *dev = master->dev; - - if (!master->iopf_enabled) - return; - - iopf_queue_remove_device(master->smmu->evtq.iopf, dev); -} - -int arm_smmu_master_enable_sva(struct arm_smmu_master *master) -{ - int ret; - - mutex_lock(&sva_lock); - ret = arm_smmu_master_sva_enable_iopf(master); - if (!ret) - master->sva_enabled = true; - mutex_unlock(&sva_lock); - - return ret; -} - -int arm_smmu_master_disable_sva(struct arm_smmu_master *master) -{ - mutex_lock(&sva_lock); - arm_smmu_master_sva_disable_iopf(master); - master->sva_enabled = false; - mutex_unlock(&sva_lock); - - return 0; -} - void arm_smmu_sva_notifier_synchronize(void) { /* @@ -353,6 +273,9 @@ static int arm_smmu_sva_set_dev_pasid(struct iommu_domain *domain, struct arm_smmu_cd target; int ret; + if (!(master->smmu->features & ARM_SMMU_FEAT_SVA)) + return -EOPNOTSUPP; + /* Prevent arm_smmu_mm_release from being called while we are attaching */ if (!mmget_not_zero(domain->mm)) return -EINVAL; @@ -406,6 +329,9 @@ struct iommu_domain *arm_smmu_sva_domain_alloc(struct device *dev, u32 asid; int ret; + if (!(master->smmu->features & ARM_SMMU_FEAT_SVA)) + return ERR_PTR(-EOPNOTSUPP); + smmu_domain = arm_smmu_domain_alloc(); if (IS_ERR(smmu_domain)) return ERR_CAST(smmu_domain); diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c index 48d910399a1b..10cc6dc26b7b 100644 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c @@ -2720,6 +2720,7 @@ static void arm_smmu_disable_pasid(struct arm_smmu_master *master) static struct arm_smmu_master_domain * arm_smmu_find_master_domain(struct arm_smmu_domain *smmu_domain, + struct iommu_domain *domain, struct arm_smmu_master *master, ioasid_t ssid, bool nested_ats_flush) { @@ -2730,6 +2731,7 @@ 
arm_smmu_find_master_domain(struct arm_smmu_domain *smmu_domain, list_for_each_entry(master_domain, &smmu_domain->devices, devices_elm) { if (master_domain->master == master && + master_domain->domain == domain && master_domain->ssid == ssid && master_domain->nested_ats_flush == nested_ats_flush) return master_domain; @@ -2756,6 +2758,58 @@ to_smmu_domain_devices(struct iommu_domain *domain) return NULL; } +static int arm_smmu_enable_iopf(struct arm_smmu_master *master, + struct arm_smmu_master_domain *master_domain) +{ + int ret; + + iommu_group_mutex_assert(master->dev); + + if (!IS_ENABLED(CONFIG_ARM_SMMU_V3_SVA)) + return -EOPNOTSUPP; + + /* + * Drivers for devices supporting PRI or stall require iopf others have + * device-specific fault handlers and don't need IOPF, so this is not a + * failure. + */ + if (!master->stall_enabled) + return 0; + + /* We're not keeping track of SIDs in fault events */ + if (master->num_streams != 1) + return -EOPNOTSUPP; + + if (master->iopf_refcount) { + master->iopf_refcount++; + master_domain->using_iopf = true; + return 0; + } + + ret = iopf_queue_add_device(master->smmu->evtq.iopf, master->dev); + if (ret) + return ret; + master->iopf_refcount = 1; + master_domain->using_iopf = true; + return 0; +} + +static void arm_smmu_disable_iopf(struct arm_smmu_master *master, + struct arm_smmu_master_domain *master_domain) +{ + iommu_group_mutex_assert(master->dev); + + if (!IS_ENABLED(CONFIG_ARM_SMMU_V3_SVA)) + return; + + if (!master_domain || !master_domain->using_iopf) + return; + + master->iopf_refcount--; + if (master->iopf_refcount == 0) + iopf_queue_remove_device(master->smmu->evtq.iopf, master->dev); +} + static void arm_smmu_remove_master_domain(struct arm_smmu_master *master, struct iommu_domain *domain, ioasid_t ssid) @@ -2772,15 +2826,17 @@ static void arm_smmu_remove_master_domain(struct arm_smmu_master *master, nested_ats_flush = to_smmu_nested_domain(domain)->enable_ats; spin_lock_irqsave(&smmu_domain->devices_lock, flags); - master_domain = arm_smmu_find_master_domain(smmu_domain, master, ssid, - nested_ats_flush); + master_domain = arm_smmu_find_master_domain(smmu_domain, domain, master, + ssid, nested_ats_flush); if (master_domain) { list_del(&master_domain->devices_elm); - kfree(master_domain); if (master->ats_enabled) atomic_dec(&smmu_domain->nr_ats_masters); } spin_unlock_irqrestore(&smmu_domain->devices_lock, flags); + + arm_smmu_disable_iopf(master, master_domain); + kfree(master_domain); } /* @@ -2853,12 +2909,19 @@ int arm_smmu_attach_prepare(struct arm_smmu_attach_state *state, kfree(state->vmaster); return -ENOMEM; } + master_domain->domain = new_domain; master_domain->master = master; master_domain->ssid = state->ssid; if (new_domain->type == IOMMU_DOMAIN_NESTED) master_domain->nested_ats_flush = to_smmu_nested_domain(new_domain)->enable_ats; + if (new_domain->iopf_handler) { + ret = arm_smmu_enable_iopf(master, master_domain); + if (ret) + goto err_free_master_domain; + } + /* * During prepare we want the current smmu_domain and new * smmu_domain to be in the devices list before we change any @@ -2878,9 +2941,9 @@ int arm_smmu_attach_prepare(struct arm_smmu_attach_state *state, !arm_smmu_master_canwbs(master)) { spin_unlock_irqrestore(&smmu_domain->devices_lock, flags); - kfree(master_domain); kfree(state->vmaster); - return -EINVAL; + ret = -EINVAL; + goto err_iopf; } if (state->ats_enabled) @@ -2899,6 +2962,12 @@ int arm_smmu_attach_prepare(struct arm_smmu_attach_state *state, wmb(); } return 0; + +err_iopf: + 
arm_smmu_disable_iopf(master, master_domain); +err_free_master_domain: + kfree(master_domain); + return ret; } /* @@ -2953,7 +3022,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) smmu = master->smmu; if (smmu_domain->smmu != smmu) - return ret; + return -EINVAL; if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) { cdptr = arm_smmu_alloc_cd_ptr(master, IOMMU_NO_PASID); @@ -3510,8 +3579,7 @@ static void arm_smmu_release_device(struct device *dev) { struct arm_smmu_master *master = dev_iommu_priv_get(dev); - if (WARN_ON(arm_smmu_master_sva_enabled(master))) - iopf_queue_remove_device(master->smmu->evtq.iopf, dev); + WARN_ON(master->iopf_refcount); /* Put the STE back to what arm_smmu_init_strtab() sets */ if (dev->iommu->require_direct) @@ -3586,58 +3654,6 @@ static void arm_smmu_get_resv_regions(struct device *dev, iommu_dma_get_resv_regions(dev, head); } -static int arm_smmu_dev_enable_feature(struct device *dev, - enum iommu_dev_features feat) -{ - struct arm_smmu_master *master = dev_iommu_priv_get(dev); - - if (!master) - return -ENODEV; - - switch (feat) { - case IOMMU_DEV_FEAT_IOPF: - if (!arm_smmu_master_iopf_supported(master)) - return -EINVAL; - if (master->iopf_enabled) - return -EBUSY; - master->iopf_enabled = true; - return 0; - case IOMMU_DEV_FEAT_SVA: - if (!arm_smmu_master_sva_supported(master)) - return -EINVAL; - if (arm_smmu_master_sva_enabled(master)) - return -EBUSY; - return arm_smmu_master_enable_sva(master); - default: - return -EINVAL; - } -} - -static int arm_smmu_dev_disable_feature(struct device *dev, - enum iommu_dev_features feat) -{ - struct arm_smmu_master *master = dev_iommu_priv_get(dev); - - if (!master) - return -EINVAL; - - switch (feat) { - case IOMMU_DEV_FEAT_IOPF: - if (!master->iopf_enabled) - return -EINVAL; - if (master->sva_enabled) - return -EBUSY; - master->iopf_enabled = false; - return 0; - case IOMMU_DEV_FEAT_SVA: - if (!arm_smmu_master_sva_enabled(master)) - return -EINVAL; - return arm_smmu_master_disable_sva(master); - default: - return -EINVAL; - } -} - /* * HiSilicon PCIe tune and trace device can be used to trace TLP headers on the * PCIe link and save the data to memory by DMA. The hardware is restricted to @@ -3670,8 +3686,6 @@ static struct iommu_ops arm_smmu_ops = { .device_group = arm_smmu_device_group, .of_xlate = arm_smmu_of_xlate, .get_resv_regions = arm_smmu_get_resv_regions, - .dev_enable_feat = arm_smmu_dev_enable_feature, - .dev_disable_feat = arm_smmu_dev_disable_feature, .page_response = arm_smmu_page_response, .def_domain_type = arm_smmu_def_domain_type, .viommu_alloc = arm_vsmmu_alloc, diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h index dd1ad56ce863..ea41d790463e 100644 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h @@ -837,9 +837,8 @@ struct arm_smmu_master { bool ats_enabled : 1; bool ste_ats_enabled : 1; bool stall_enabled; - bool sva_enabled; - bool iopf_enabled; unsigned int ssid_bits; + unsigned int iopf_refcount; }; /* SMMU private data for an IOMMU domain */ @@ -915,8 +914,14 @@ void arm_smmu_make_sva_cd(struct arm_smmu_cd *target, struct arm_smmu_master_domain { struct list_head devices_elm; struct arm_smmu_master *master; + /* + * For nested domains the master_domain is threaded onto the S2 parent, + * this points to the IOMMU_DOMAIN_NESTED to disambiguate the masters. 
+ */ + struct iommu_domain *domain; ioasid_t ssid; bool nested_ats_flush : 1; + bool using_iopf : 1; }; static inline struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom) @@ -995,11 +1000,6 @@ int arm_smmu_cmdq_issue_cmdlist(struct arm_smmu_device *smmu, #ifdef CONFIG_ARM_SMMU_V3_SVA bool arm_smmu_sva_supported(struct arm_smmu_device *smmu); -bool arm_smmu_master_sva_supported(struct arm_smmu_master *master); -bool arm_smmu_master_sva_enabled(struct arm_smmu_master *master); -int arm_smmu_master_enable_sva(struct arm_smmu_master *master); -int arm_smmu_master_disable_sva(struct arm_smmu_master *master); -bool arm_smmu_master_iopf_supported(struct arm_smmu_master *master); void arm_smmu_sva_notifier_synchronize(void); struct iommu_domain *arm_smmu_sva_domain_alloc(struct device *dev, struct mm_struct *mm); @@ -1009,31 +1009,6 @@ static inline bool arm_smmu_sva_supported(struct arm_smmu_device *smmu) return false; } -static inline bool arm_smmu_master_sva_supported(struct arm_smmu_master *master) -{ - return false; -} - -static inline bool arm_smmu_master_sva_enabled(struct arm_smmu_master *master) -{ - return false; -} - -static inline int arm_smmu_master_enable_sva(struct arm_smmu_master *master) -{ - return -ENODEV; -} - -static inline int arm_smmu_master_disable_sva(struct arm_smmu_master *master) -{ - return -ENODEV; -} - -static inline bool arm_smmu_master_iopf_supported(struct arm_smmu_master *master) -{ - return false; -} - static inline void arm_smmu_sva_notifier_synchronize(void) {} #define arm_smmu_sva_domain_alloc NULL diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c index d03b2239baad..65e0ef6539fe 100644 --- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c +++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c @@ -406,6 +406,12 @@ irqreturn_t qcom_smmu_context_fault(int irq, void *dev) arm_smmu_print_context_fault_info(smmu, idx, &cfi); arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_FSR, cfi.fsr); + + if (cfi.fsr & ARM_SMMU_CB_FSR_SS) { + arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_RESUME, + ret == -EAGAIN ? 
0 : ARM_SMMU_RESUME_TERMINATE); + } + return IRQ_HANDLED; } @@ -416,6 +422,9 @@ irqreturn_t qcom_smmu_context_fault(int irq, void *dev) if (!tmp || tmp == -EBUSY) { ret = IRQ_HANDLED; resume = ARM_SMMU_RESUME_TERMINATE; + } else if (tmp == -EAGAIN) { + ret = IRQ_HANDLED; + resume = 0; } else { phys_addr_t phys_atos = qcom_smmu_verify_fault(smmu_domain, cfi.iova, cfi.fsr); diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c index 59d02687280e..62874b18f645 100644 --- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c +++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c @@ -112,25 +112,39 @@ static void qcom_adreno_smmu_set_stall(const void *cookie, bool enabled) { struct arm_smmu_domain *smmu_domain = (void *)cookie; struct arm_smmu_cfg *cfg = &smmu_domain->cfg; - struct qcom_smmu *qsmmu = to_qcom_smmu(smmu_domain->smmu); + struct arm_smmu_device *smmu = smmu_domain->smmu; + struct qcom_smmu *qsmmu = to_qcom_smmu(smmu); + u32 mask = BIT(cfg->cbndx); + bool stall_changed = !!(qsmmu->stall_enabled & mask) != enabled; + unsigned long flags; if (enabled) - qsmmu->stall_enabled |= BIT(cfg->cbndx); + qsmmu->stall_enabled |= mask; else - qsmmu->stall_enabled &= ~BIT(cfg->cbndx); -} + qsmmu->stall_enabled &= ~mask; -static void qcom_adreno_smmu_resume_translation(const void *cookie, bool terminate) -{ - struct arm_smmu_domain *smmu_domain = (void *)cookie; - struct arm_smmu_cfg *cfg = &smmu_domain->cfg; - struct arm_smmu_device *smmu = smmu_domain->smmu; - u32 reg = 0; + /* + * If the device is on and we changed the setting, update the register. + * The spec pseudocode says that CFCFG is resampled after a fault, and + * we believe that no implementations cache it in the TLB, so it should + * be safe to change it without a TLB invalidation. 
+ */ + if (stall_changed && pm_runtime_get_if_active(smmu->dev) > 0) { + u32 reg; + + spin_lock_irqsave(&smmu_domain->cb_lock, flags); + reg = arm_smmu_cb_read(smmu, cfg->cbndx, ARM_SMMU_CB_SCTLR); + + if (enabled) + reg |= ARM_SMMU_SCTLR_CFCFG; + else + reg &= ~ARM_SMMU_SCTLR_CFCFG; - if (terminate) - reg |= ARM_SMMU_RESUME_TERMINATE; + arm_smmu_cb_write(smmu, cfg->cbndx, ARM_SMMU_CB_SCTLR, reg); + spin_unlock_irqrestore(&smmu_domain->cb_lock, flags); - arm_smmu_cb_write(smmu, cfg->cbndx, ARM_SMMU_CB_RESUME, reg); + pm_runtime_put_autosuspend(smmu->dev); + } } static void qcom_adreno_smmu_set_prr_bit(const void *cookie, bool set) @@ -337,7 +351,6 @@ static int qcom_adreno_smmu_init_context(struct arm_smmu_domain *smmu_domain, priv->set_ttbr0_cfg = qcom_adreno_smmu_set_ttbr0_cfg; priv->get_fault_info = qcom_adreno_smmu_get_fault_info; priv->set_stall = qcom_adreno_smmu_set_stall; - priv->resume_translation = qcom_adreno_smmu_resume_translation; priv->set_prr_bit = NULL; priv->set_prr_addr = NULL; @@ -356,6 +369,7 @@ static const struct of_device_id qcom_smmu_client_of_match[] __maybe_unused = { { .compatible = "qcom,mdp4" }, { .compatible = "qcom,mdss" }, { .compatible = "qcom,qcm2290-mdss" }, + { .compatible = "qcom,sar2130p-mdss" }, { .compatible = "qcom,sc7180-mdss" }, { .compatible = "qcom,sc7180-mss-pil" }, { .compatible = "qcom,sc7280-mdss" }, @@ -585,6 +599,7 @@ static const struct arm_smmu_impl qcom_adreno_smmu_v2_impl = { .alloc_context_bank = qcom_adreno_smmu_alloc_context_bank, .write_sctlr = qcom_adreno_smmu_write_sctlr, .tlb_sync = qcom_smmu_tlb_sync, + .context_fault_needs_threaded_irq = true, }; static const struct arm_smmu_impl qcom_adreno_smmu_500_impl = { @@ -594,6 +609,7 @@ static const struct arm_smmu_impl qcom_adreno_smmu_500_impl = { .alloc_context_bank = qcom_adreno_smmu_alloc_context_bank, .write_sctlr = qcom_adreno_smmu_write_sctlr, .tlb_sync = qcom_smmu_tlb_sync, + .context_fault_needs_threaded_irq = true, }; static struct arm_smmu_device *qcom_smmu_create(struct arm_smmu_device *smmu, diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.c b/drivers/iommu/arm/arm-smmu/arm-smmu.c index 8f439c265a23..8d95b14c7d5a 100644 --- a/drivers/iommu/arm/arm-smmu/arm-smmu.c +++ b/drivers/iommu/arm/arm-smmu/arm-smmu.c @@ -474,6 +474,12 @@ static irqreturn_t arm_smmu_context_fault(int irq, void *dev) arm_smmu_print_context_fault_info(smmu, idx, &cfi); arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_FSR, cfi.fsr); + + if (cfi.fsr & ARM_SMMU_CB_FSR_SS) { + arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_RESUME, + ret == -EAGAIN ? 
0 : ARM_SMMU_RESUME_TERMINATE); + } + return IRQ_HANDLED; } diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c index 98f7205ec8fb..6c708fec48d1 100644 --- a/drivers/iommu/dma-iommu.c +++ b/drivers/iommu/dma-iommu.c @@ -106,7 +106,7 @@ early_param("iommu.forcedac", iommu_dma_forcedac_setup); struct iova_fq_entry { unsigned long iova_pfn; unsigned long pages; - struct list_head freelist; + struct iommu_pages_list freelist; u64 counter; /* Flush counter when this entry was added */ }; @@ -155,6 +155,8 @@ static void fq_ring_free_locked(struct iommu_dma_cookie *cookie, struct iova_fq fq->entries[idx].iova_pfn, fq->entries[idx].pages); + fq->entries[idx].freelist = + IOMMU_PAGES_LIST_INIT(fq->entries[idx].freelist); fq->head = (fq->head + 1) & fq->mod_mask; } } @@ -193,7 +195,7 @@ static void fq_flush_timeout(struct timer_list *t) static void queue_iova(struct iommu_dma_cookie *cookie, unsigned long pfn, unsigned long pages, - struct list_head *freelist) + struct iommu_pages_list *freelist) { struct iova_fq *fq; unsigned long flags; @@ -232,7 +234,7 @@ static void queue_iova(struct iommu_dma_cookie *cookie, fq->entries[idx].iova_pfn = pfn; fq->entries[idx].pages = pages; fq->entries[idx].counter = atomic64_read(&cookie->fq_flush_start_cnt); - list_splice(freelist, &fq->entries[idx].freelist); + iommu_pages_list_splice(freelist, &fq->entries[idx].freelist); spin_unlock_irqrestore(&fq->lock, flags); @@ -290,7 +292,8 @@ static void iommu_dma_init_one_fq(struct iova_fq *fq, size_t fq_size) spin_lock_init(&fq->lock); for (i = 0; i < fq_size; i++) - INIT_LIST_HEAD(&fq->entries[i].freelist); + fq->entries[i].freelist = + IOMMU_PAGES_LIST_INIT(fq->entries[i].freelist); } static int iommu_dma_init_fq_single(struct iommu_dma_cookie *cookie) diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c index 317266aca6e2..fcb6a0f7c082 100644 --- a/drivers/iommu/exynos-iommu.c +++ b/drivers/iommu/exynos-iommu.c @@ -902,11 +902,11 @@ static struct iommu_domain *exynos_iommu_domain_alloc_paging(struct device *dev) if (!domain) return NULL; - domain->pgtable = iommu_alloc_pages(GFP_KERNEL, 2); + domain->pgtable = iommu_alloc_pages_sz(GFP_KERNEL, SZ_16K); if (!domain->pgtable) goto err_pgtable; - domain->lv2entcnt = iommu_alloc_pages(GFP_KERNEL, 1); + domain->lv2entcnt = iommu_alloc_pages_sz(GFP_KERNEL, SZ_8K); if (!domain->lv2entcnt) goto err_counter; @@ -932,9 +932,9 @@ static struct iommu_domain *exynos_iommu_domain_alloc_paging(struct device *dev) return &domain->domain; err_lv2ent: - iommu_free_pages(domain->lv2entcnt, 1); + iommu_free_pages(domain->lv2entcnt); err_counter: - iommu_free_pages(domain->pgtable, 2); + iommu_free_pages(domain->pgtable); err_pgtable: kfree(domain); return NULL; @@ -975,8 +975,8 @@ static void exynos_iommu_domain_free(struct iommu_domain *iommu_domain) phys_to_virt(base)); } - iommu_free_pages(domain->pgtable, 2); - iommu_free_pages(domain->lv2entcnt, 1); + iommu_free_pages(domain->pgtable); + iommu_free_pages(domain->lv2entcnt); kfree(domain); } diff --git a/drivers/iommu/fsl_pamu_domain.c b/drivers/iommu/fsl_pamu_domain.c index 30be786bff11..5f08523f97cb 100644 --- a/drivers/iommu/fsl_pamu_domain.c +++ b/drivers/iommu/fsl_pamu_domain.c @@ -64,7 +64,7 @@ static int update_liodn_stash(int liodn, struct fsl_dma_domain *dma_domain, spin_lock_irqsave(&iommu_lock, flags); ret = pamu_update_paace_stash(liodn, val); if (ret) { - pr_debug("Failed to update SPAACE for liodn %d\n ", liodn); + pr_debug("Failed to update SPAACE for liodn %d\n", liodn); 
spin_unlock_irqrestore(&iommu_lock, flags); return ret; } diff --git a/drivers/iommu/intel/Makefile b/drivers/iommu/intel/Makefile index 6c7528130cf9..ada651c4a01b 100644 --- a/drivers/iommu/intel/Makefile +++ b/drivers/iommu/intel/Makefile @@ -1,11 +1,8 @@ # SPDX-License-Identifier: GPL-2.0 -obj-$(CONFIG_DMAR_TABLE) += dmar.o -obj-$(CONFIG_INTEL_IOMMU) += iommu.o pasid.o nested.o cache.o prq.o -obj-$(CONFIG_DMAR_TABLE) += trace.o +obj-y += iommu.o pasid.o nested.o cache.o prq.o +obj-$(CONFIG_DMAR_TABLE) += dmar.o trace.o obj-$(CONFIG_DMAR_PERF) += perf.o obj-$(CONFIG_INTEL_IOMMU_DEBUGFS) += debugfs.o obj-$(CONFIG_INTEL_IOMMU_SVM) += svm.o -ifdef CONFIG_INTEL_IOMMU obj-$(CONFIG_IRQ_REMAP) += irq_remapping.o -endif obj-$(CONFIG_INTEL_IOMMU_PERF_EVENTS) += perfmon.o diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c index e540092d664d..b61d9ea27aa9 100644 --- a/drivers/iommu/intel/dmar.c +++ b/drivers/iommu/intel/dmar.c @@ -1099,6 +1099,9 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd) spin_lock_init(&iommu->device_rbtree_lock); mutex_init(&iommu->iopf_lock); iommu->node = NUMA_NO_NODE; + spin_lock_init(&iommu->lock); + ida_init(&iommu->domain_ida); + mutex_init(&iommu->did_lock); ver = readl(iommu->reg + DMAR_VER_REG); pr_info("%s: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n", @@ -1187,7 +1190,7 @@ static void free_iommu(struct intel_iommu *iommu) } if (iommu->qi) { - iommu_free_page(iommu->qi->desc); + iommu_free_pages(iommu->qi->desc); kfree(iommu->qi->desc_status); kfree(iommu->qi); } @@ -1195,6 +1198,7 @@ static void free_iommu(struct intel_iommu *iommu) if (iommu->reg) unmap_iommu(iommu); + ida_destroy(&iommu->domain_ida); ida_free(&dmar_seq_ids, iommu->seq_id); kfree(iommu); } @@ -1681,7 +1685,6 @@ int dmar_enable_qi(struct intel_iommu *iommu) { struct q_inval *qi; void *desc; - int order; if (!ecap_qis(iommu->ecap)) return -ENOENT; @@ -1702,8 +1705,9 @@ int dmar_enable_qi(struct intel_iommu *iommu) * Need two pages to accommodate 256 descriptors of 256 bits each * if the remapping hardware supports scalable mode translation. */ - order = ecap_smts(iommu->ecap) ? 1 : 0; - desc = iommu_alloc_pages_node(iommu->node, GFP_ATOMIC, order); + desc = iommu_alloc_pages_node_sz(iommu->node, GFP_ATOMIC, + ecap_smts(iommu->ecap) ? 
SZ_8K : + SZ_4K); if (!desc) { kfree(qi); iommu->qi = NULL; @@ -1714,7 +1718,7 @@ int dmar_enable_qi(struct intel_iommu *iommu) qi->desc_status = kcalloc(QI_LENGTH, sizeof(int), GFP_ATOMIC); if (!qi->desc_status) { - iommu_free_page(qi->desc); + iommu_free_pages(qi->desc); kfree(qi); iommu->qi = NULL; return -ENOMEM; diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index cb0b993bebb4..7aa3932251b2 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -397,7 +397,8 @@ struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus, if (!alloc) return NULL; - context = iommu_alloc_page_node(iommu->node, GFP_ATOMIC); + context = iommu_alloc_pages_node_sz(iommu->node, GFP_ATOMIC, + SZ_4K); if (!context) return NULL; @@ -571,17 +572,17 @@ static void free_context_table(struct intel_iommu *iommu) for (i = 0; i < ROOT_ENTRY_NR; i++) { context = iommu_context_addr(iommu, i, 0, 0); if (context) - iommu_free_page(context); + iommu_free_pages(context); if (!sm_supported(iommu)) continue; context = iommu_context_addr(iommu, i, 0x80, 0); if (context) - iommu_free_page(context); + iommu_free_pages(context); } - iommu_free_page(iommu->root_entry); + iommu_free_pages(iommu->root_entry); iommu->root_entry = NULL; } @@ -731,7 +732,8 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain, if (!dma_pte_present(pte)) { uint64_t pteval, tmp; - tmp_page = iommu_alloc_page_node(domain->nid, gfp); + tmp_page = iommu_alloc_pages_node_sz(domain->nid, gfp, + SZ_4K); if (!tmp_page) return NULL; @@ -745,7 +747,7 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain, tmp = 0ULL; if (!try_cmpxchg64(&pte->val, &tmp, pteval)) /* Someone else set it while we were thinking; use theirs. */ - iommu_free_page(tmp_page); + iommu_free_pages(tmp_page); else domain_flush_cache(domain, pte, sizeof(*pte)); } @@ -858,7 +860,7 @@ static void dma_pte_free_level(struct dmar_domain *domain, int level, last_pfn < level_pfn + level_size(level) - 1)) { dma_clear_pte(pte); domain_flush_cache(domain, pte, sizeof(*pte)); - iommu_free_page(level_pte); + iommu_free_pages(level_pte); } next: pfn += level_size(level); @@ -882,7 +884,7 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain, /* free pgd */ if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) { - iommu_free_page(domain->pgd); + iommu_free_pages(domain->pgd); domain->pgd = NULL; } } @@ -894,18 +896,16 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain, The 'pte' argument is the *parent* PTE, pointing to the page that is to be freed. 
*/ static void dma_pte_list_pagetables(struct dmar_domain *domain, - int level, struct dma_pte *pte, - struct list_head *freelist) + int level, struct dma_pte *parent_pte, + struct iommu_pages_list *freelist) { - struct page *pg; + struct dma_pte *pte = phys_to_virt(dma_pte_addr(parent_pte)); - pg = pfn_to_page(dma_pte_addr(pte) >> PAGE_SHIFT); - list_add_tail(&pg->lru, freelist); + iommu_pages_list_add(freelist, pte); if (level == 1) return; - pte = page_address(pg); do { if (dma_pte_present(pte) && !dma_pte_superpage(pte)) dma_pte_list_pagetables(domain, level - 1, pte, freelist); @@ -916,7 +916,7 @@ static void dma_pte_list_pagetables(struct dmar_domain *domain, static void dma_pte_clear_level(struct dmar_domain *domain, int level, struct dma_pte *pte, unsigned long pfn, unsigned long start_pfn, unsigned long last_pfn, - struct list_head *freelist) + struct iommu_pages_list *freelist) { struct dma_pte *first_pte = NULL, *last_pte = NULL; @@ -961,7 +961,8 @@ next: the page tables, and may have cached the intermediate levels. The pages can only be freed after the IOTLB flush has been done. */ static void domain_unmap(struct dmar_domain *domain, unsigned long start_pfn, - unsigned long last_pfn, struct list_head *freelist) + unsigned long last_pfn, + struct iommu_pages_list *freelist) { if (WARN_ON(!domain_pfn_supported(domain, last_pfn)) || WARN_ON(start_pfn > last_pfn)) @@ -973,8 +974,7 @@ static void domain_unmap(struct dmar_domain *domain, unsigned long start_pfn, /* free pgd */ if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) { - struct page *pgd_page = virt_to_page(domain->pgd); - list_add_tail(&pgd_page->lru, freelist); + iommu_pages_list_add(freelist, domain->pgd); domain->pgd = NULL; } } @@ -984,7 +984,7 @@ static int iommu_alloc_root_entry(struct intel_iommu *iommu) { struct root_entry *root; - root = iommu_alloc_page_node(iommu->node, GFP_ATOMIC); + root = iommu_alloc_pages_node_sz(iommu->node, GFP_ATOMIC, SZ_4K); if (!root) { pr_err("Allocating root entry for %s failed\n", iommu->name); @@ -1289,52 +1289,13 @@ static void iommu_disable_translation(struct intel_iommu *iommu) raw_spin_unlock_irqrestore(&iommu->register_lock, flag); } -static int iommu_init_domains(struct intel_iommu *iommu) -{ - u32 ndomains; - - ndomains = cap_ndoms(iommu->cap); - pr_debug("%s: Number of Domains supported <%d>\n", - iommu->name, ndomains); - - spin_lock_init(&iommu->lock); - - iommu->domain_ids = bitmap_zalloc(ndomains, GFP_KERNEL); - if (!iommu->domain_ids) - return -ENOMEM; - - /* - * If Caching mode is set, then invalid translations are tagged - * with domain-id 0, hence we need to pre-allocate it. We also - * use domain-id 0 as a marker for non-allocated domain-id, so - * make sure it is not used for a real domain. - */ - set_bit(0, iommu->domain_ids); - - /* - * Vt-d spec rev3.0 (section 6.2.3.1) requires that each pasid - * entry for first-level or pass-through translation modes should - * be programmed with a domain id different from those used for - * second-level or nested translation. We reserve a domain id for - * this purpose. This domain id is also used for identity domain - * in legacy mode. - */ - set_bit(FLPT_DEFAULT_DID, iommu->domain_ids); - - return 0; -} - static void disable_dmar_iommu(struct intel_iommu *iommu) { - if (!iommu->domain_ids) - return; - /* * All iommu domains must have been detached from the devices, * hence there should be no domain IDs in use. 
*/ - if (WARN_ON(bitmap_weight(iommu->domain_ids, cap_ndoms(iommu->cap)) - > NUM_RESERVED_DID)) + if (WARN_ON(!ida_is_empty(&iommu->domain_ida))) return; if (iommu->gcmd & DMA_GCMD_TE) @@ -1343,11 +1304,6 @@ static void disable_dmar_iommu(struct intel_iommu *iommu) static void free_dmar_iommu(struct intel_iommu *iommu) { - if (iommu->domain_ids) { - bitmap_free(iommu->domain_ids); - iommu->domain_ids = NULL; - } - if (iommu->copied_tables) { bitmap_free(iommu->copied_tables); iommu->copied_tables = NULL; @@ -1380,7 +1336,6 @@ static bool first_level_by_default(struct intel_iommu *iommu) int domain_attach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu) { struct iommu_domain_info *info, *curr; - unsigned long ndomains; int num, ret = -ENOSPC; if (domain->domain.type == IOMMU_DOMAIN_SVA) @@ -1390,40 +1345,36 @@ int domain_attach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu) if (!info) return -ENOMEM; - spin_lock(&iommu->lock); + guard(mutex)(&iommu->did_lock); curr = xa_load(&domain->iommu_array, iommu->seq_id); if (curr) { curr->refcnt++; - spin_unlock(&iommu->lock); kfree(info); return 0; } - ndomains = cap_ndoms(iommu->cap); - num = find_first_zero_bit(iommu->domain_ids, ndomains); - if (num >= ndomains) { + num = ida_alloc_range(&iommu->domain_ida, IDA_START_DID, + cap_ndoms(iommu->cap) - 1, GFP_KERNEL); + if (num < 0) { pr_err("%s: No free domain ids\n", iommu->name); goto err_unlock; } - set_bit(num, iommu->domain_ids); info->refcnt = 1; info->did = num; info->iommu = iommu; curr = xa_cmpxchg(&domain->iommu_array, iommu->seq_id, - NULL, info, GFP_ATOMIC); + NULL, info, GFP_KERNEL); if (curr) { ret = xa_err(curr) ? : -EBUSY; goto err_clear; } - spin_unlock(&iommu->lock); return 0; err_clear: - clear_bit(info->did, iommu->domain_ids); + ida_free(&iommu->domain_ida, info->did); err_unlock: - spin_unlock(&iommu->lock); kfree(info); return ret; } @@ -1435,21 +1386,21 @@ void domain_detach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu) if (domain->domain.type == IOMMU_DOMAIN_SVA) return; - spin_lock(&iommu->lock); + guard(mutex)(&iommu->did_lock); info = xa_load(&domain->iommu_array, iommu->seq_id); if (--info->refcnt == 0) { - clear_bit(info->did, iommu->domain_ids); + ida_free(&iommu->domain_ida, info->did); xa_erase(&domain->iommu_array, iommu->seq_id); domain->nid = NUMA_NO_NODE; kfree(info); } - spin_unlock(&iommu->lock); } static void domain_exit(struct dmar_domain *domain) { if (domain->pgd) { - LIST_HEAD(freelist); + struct iommu_pages_list freelist = + IOMMU_PAGES_LIST_INIT(freelist); domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw), &freelist); iommu_put_pages_list(&freelist); @@ -1681,9 +1632,8 @@ __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn, } attr = prot & (DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP); - attr |= DMA_FL_PTE_PRESENT; if (domain->use_first_level) { - attr |= DMA_FL_PTE_US | DMA_FL_PTE_ACCESS; + attr |= DMA_FL_PTE_PRESENT | DMA_FL_PTE_US | DMA_FL_PTE_ACCESS; if (prot & DMA_PTE_WRITE) attr |= DMA_FL_PTE_DIRTY; } @@ -1859,6 +1809,7 @@ static int dmar_domain_attach_device(struct dmar_domain *domain, return ret; info->domain = domain; + info->domain_attached = true; spin_lock_irqsave(&domain->lock, flags); list_add(&info->link, &domain->devices); spin_unlock_irqrestore(&domain->lock, flags); @@ -2027,7 +1978,8 @@ static int copy_context_table(struct intel_iommu *iommu, if (!old_ce) goto out; - new_ce = iommu_alloc_page_node(iommu->node, GFP_KERNEL); + new_ce = iommu_alloc_pages_node_sz(iommu->node, + 
GFP_KERNEL, SZ_4K); if (!new_ce) goto out_unmap; @@ -2042,7 +1994,7 @@ static int copy_context_table(struct intel_iommu *iommu, did = context_domain_id(&ce); if (did >= 0 && did < cap_ndoms(iommu->cap)) - set_bit(did, iommu->domain_ids); + ida_alloc_range(&iommu->domain_ida, did, did, GFP_KERNEL); set_context_copied(iommu, bus, devfn); new_ce[idx] = ce; @@ -2169,11 +2121,6 @@ static int __init init_dmars(void) } intel_iommu_init_qi(iommu); - - ret = iommu_init_domains(iommu); - if (ret) - goto free_iommu; - init_translation_status(iommu); if (translation_pre_enabled(iommu) && !is_kdump_kernel()) { @@ -2651,9 +2598,7 @@ static int intel_iommu_add(struct dmar_drhd_unit *dmaru) if (iommu->gcmd & DMA_GCMD_TE) iommu_disable_translation(iommu); - ret = iommu_init_domains(iommu); - if (ret == 0) - ret = iommu_alloc_root_entry(iommu); + ret = iommu_alloc_root_entry(iommu); if (ret) goto out; @@ -2744,7 +2689,6 @@ static struct dmar_satc_unit *dmar_find_matched_satc_unit(struct pci_dev *dev) struct device *tmp; int i; - dev = pci_physfn(dev); rcu_read_lock(); list_for_each_entry_rcu(satcu, &dmar_satc_units, list) { @@ -2761,15 +2705,16 @@ out: return satcu; } -static int dmar_ats_supported(struct pci_dev *dev, struct intel_iommu *iommu) +static bool dmar_ats_supported(struct pci_dev *dev, struct intel_iommu *iommu) { - int i, ret = 1; - struct pci_bus *bus; struct pci_dev *bridge = NULL; - struct device *tmp; - struct acpi_dmar_atsr *atsr; struct dmar_atsr_unit *atsru; struct dmar_satc_unit *satcu; + struct acpi_dmar_atsr *atsr; + bool supported = true; + struct pci_bus *bus; + struct device *tmp; + int i; dev = pci_physfn(dev); satcu = dmar_find_matched_satc_unit(dev); @@ -2787,11 +2732,11 @@ static int dmar_ats_supported(struct pci_dev *dev, struct intel_iommu *iommu) bridge = bus->self; /* If it's an integrated device, allow ATS */ if (!bridge) - return 1; + return true; /* Connected via non-PCIe: no ATS */ if (!pci_is_pcie(bridge) || pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE) - return 0; + return false; /* If we found the root port, look it up in the ATSR */ if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT) break; @@ -2810,11 +2755,11 @@ static int dmar_ats_supported(struct pci_dev *dev, struct intel_iommu *iommu) if (atsru->include_all) goto out; } - ret = 0; + supported = false; out: rcu_read_unlock(); - return ret; + return supported; } int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info) @@ -2972,9 +2917,14 @@ static ssize_t domains_used_show(struct device *dev, struct device_attribute *attr, char *buf) { struct intel_iommu *iommu = dev_to_intel_iommu(dev); - return sysfs_emit(buf, "%d\n", - bitmap_weight(iommu->domain_ids, - cap_ndoms(iommu->cap))); + unsigned int count = 0; + int id; + + for (id = 0; id < cap_ndoms(iommu->cap); id++) + if (ida_exists(&iommu->domain_ida, id)) + count++; + + return sysfs_emit(buf, "%d\n", count); } static DEVICE_ATTR_RO(domains_used); @@ -3257,6 +3207,10 @@ void device_block_translation(struct device *dev) struct intel_iommu *iommu = info->iommu; unsigned long flags; + /* Device in DMA blocking state. Noting to do. */ + if (!info->domain_attached) + return; + if (info->domain) cache_tag_unassign_domain(info->domain, dev, IOMMU_NO_PASID); @@ -3268,6 +3222,9 @@ void device_block_translation(struct device *dev) domain_context_clear(info); } + /* Device now in DMA blocking state. 
*/ + info->domain_attached = false; + if (!info->domain) return; @@ -3282,6 +3239,9 @@ void device_block_translation(struct device *dev) static int blocking_domain_attach_dev(struct iommu_domain *domain, struct device *dev) { + struct device_domain_info *info = dev_iommu_priv_get(dev); + + iopf_for_domain_remove(info->domain ? &info->domain->domain : NULL, dev); device_block_translation(dev); return 0; } @@ -3360,7 +3320,7 @@ static struct dmar_domain *paging_domain_alloc(struct device *dev, bool first_st domain->domain.geometry.aperture_end = __DOMAIN_MAX_ADDR(domain->gaw); /* always allocate the top pgd */ - domain->pgd = iommu_alloc_page_node(domain->nid, GFP_KERNEL); + domain->pgd = iommu_alloc_pages_node_sz(domain->nid, GFP_KERNEL, SZ_4K); if (!domain->pgd) { kfree(domain); return ERR_PTR(-ENOMEM); @@ -3492,7 +3452,15 @@ static int intel_iommu_attach_device(struct iommu_domain *domain, if (ret) return ret; - return dmar_domain_attach_device(to_dmar_domain(domain), dev); + ret = iopf_for_domain_set(domain, dev); + if (ret) + return ret; + + ret = dmar_domain_attach_device(to_dmar_domain(domain), dev); + if (ret) + iopf_for_domain_remove(domain, dev); + + return ret; } static int intel_iommu_map(struct iommu_domain *domain, @@ -3603,7 +3571,8 @@ static void intel_iommu_tlb_sync(struct iommu_domain *domain, struct iommu_iotlb_gather *gather) { cache_tag_flush_range(to_dmar_domain(domain), gather->start, - gather->end, list_empty(&gather->freelist)); + gather->end, + iommu_pages_list_empty(&gather->freelist)); iommu_put_pages_list(&gather->freelist); } @@ -3918,6 +3887,8 @@ int intel_iommu_enable_iopf(struct device *dev) if (!info->pri_enabled) return -ENODEV; + /* pri_enabled is protected by the group mutex. */ + iommu_group_mutex_assert(dev); if (info->iopf_refcount) { info->iopf_refcount++; return 0; @@ -3940,43 +3911,13 @@ void intel_iommu_disable_iopf(struct device *dev) if (WARN_ON(!info->pri_enabled || !info->iopf_refcount)) return; + iommu_group_mutex_assert(dev); if (--info->iopf_refcount) return; iopf_queue_remove_device(iommu->iopf_queue, dev); } -static int -intel_iommu_dev_enable_feat(struct device *dev, enum iommu_dev_features feat) -{ - switch (feat) { - case IOMMU_DEV_FEAT_IOPF: - return intel_iommu_enable_iopf(dev); - - case IOMMU_DEV_FEAT_SVA: - return 0; - - default: - return -ENODEV; - } -} - -static int -intel_iommu_dev_disable_feat(struct device *dev, enum iommu_dev_features feat) -{ - switch (feat) { - case IOMMU_DEV_FEAT_IOPF: - intel_iommu_disable_iopf(dev); - return 0; - - case IOMMU_DEV_FEAT_SVA: - return 0; - - default: - return -ENODEV; - } -} - static bool intel_iommu_is_attach_deferred(struct device *dev) { struct device_domain_info *info = dev_iommu_priv_get(dev); @@ -4050,6 +3991,7 @@ static int blocking_domain_set_dev_pasid(struct iommu_domain *domain, { struct device_domain_info *info = dev_iommu_priv_get(dev); + iopf_for_domain_remove(old, dev); intel_pasid_tear_down_entry(info->iommu, dev, pasid, false); domain_remove_dev_pasid(old, dev, pasid); @@ -4123,6 +4065,10 @@ static int intel_iommu_set_dev_pasid(struct iommu_domain *domain, if (IS_ERR(dev_pasid)) return PTR_ERR(dev_pasid); + ret = iopf_for_domain_replace(domain, old, dev); + if (ret) + goto out_remove_dev_pasid; + if (dmar_domain->use_first_level) ret = domain_setup_first_level(iommu, dmar_domain, dev, pasid, old); @@ -4130,7 +4076,7 @@ static int intel_iommu_set_dev_pasid(struct iommu_domain *domain, ret = domain_setup_second_level(iommu, dmar_domain, dev, pasid, old); if (ret) - goto 
out_remove_dev_pasid; + goto out_unwind_iopf; domain_remove_dev_pasid(old, dev, pasid); @@ -4138,6 +4084,8 @@ static int intel_iommu_set_dev_pasid(struct iommu_domain *domain, return 0; +out_unwind_iopf: + iopf_for_domain_replace(old, domain, dev); out_remove_dev_pasid: domain_remove_dev_pasid(domain, dev, pasid); return ret; @@ -4352,11 +4300,19 @@ static int identity_domain_attach_dev(struct iommu_domain *domain, struct device if (dev_is_real_dma_subdevice(dev)) return 0; + /* + * No PRI support with the global identity domain. No need to enable or + * disable PRI in this path as the iommu has been put in the blocking + * state. + */ if (sm_supported(iommu)) ret = intel_pasid_setup_pass_through(iommu, dev, IOMMU_NO_PASID); else ret = device_setup_pass_through(dev); + if (!ret) + info->domain_attached = true; + return ret; } @@ -4371,10 +4327,16 @@ static int identity_domain_set_dev_pasid(struct iommu_domain *domain, if (!pasid_supported(iommu) || dev_is_real_dma_subdevice(dev)) return -EOPNOTSUPP; - ret = domain_setup_passthrough(iommu, dev, pasid, old); + ret = iopf_for_domain_replace(domain, old, dev); if (ret) return ret; + ret = domain_setup_passthrough(iommu, dev, pasid, old); + if (ret) { + iopf_for_domain_replace(old, domain, dev); + return ret; + } + domain_remove_dev_pasid(old, dev, pasid); return 0; } @@ -4401,8 +4363,6 @@ const struct iommu_ops intel_iommu_ops = { .release_device = intel_iommu_release_device, .get_resv_regions = intel_iommu_get_resv_regions, .device_group = intel_iommu_device_group, - .dev_enable_feat = intel_iommu_dev_enable_feat, - .dev_disable_feat = intel_iommu_dev_disable_feat, .is_attach_deferred = intel_iommu_is_attach_deferred, .def_domain_type = device_def_domain_type, .pgsize_bitmap = SZ_4K, diff --git a/drivers/iommu/intel/iommu.h b/drivers/iommu/intel/iommu.h index c4916886da5a..3ddbcc603de2 100644 --- a/drivers/iommu/intel/iommu.h +++ b/drivers/iommu/intel/iommu.h @@ -493,14 +493,13 @@ struct q_inval { /* Page Request Queue depth */ #define PRQ_ORDER 4 -#define PRQ_RING_MASK ((0x1000 << PRQ_ORDER) - 0x20) -#define PRQ_DEPTH ((0x1000 << PRQ_ORDER) >> 5) +#define PRQ_SIZE (SZ_4K << PRQ_ORDER) +#define PRQ_RING_MASK (PRQ_SIZE - 0x20) +#define PRQ_DEPTH (PRQ_SIZE >> 5) struct dmar_pci_notify_info; #ifdef CONFIG_IRQ_REMAP -/* 1MB - maximum possible interrupt remapping table size */ -#define INTR_REMAP_PAGE_ORDER 8 #define INTR_REMAP_TABLE_REG_SIZE 0xf #define INTR_REMAP_TABLE_REG_SIZE_MASK 0xf @@ -722,7 +721,9 @@ struct intel_iommu { unsigned char name[16]; /* Device Name */ #ifdef CONFIG_INTEL_IOMMU - unsigned long *domain_ids; /* bitmap of domains */ + /* mutex to protect domain_ida */ + struct mutex did_lock; + struct ida domain_ida; /* domain id allocator */ unsigned long *copied_tables; /* bitmap of copied tables */ spinlock_t lock; /* protect context, domain ids */ struct root_entry *root_entry; /* virtual address */ @@ -773,6 +774,7 @@ struct device_domain_info { u8 ats_supported:1; u8 ats_enabled:1; u8 dtlb_extra_inval:1; /* Quirk for devices need extra flush */ + u8 domain_attached:1; /* Device has domain attached */ u8 ats_qdep; unsigned int iopf_refcount; struct device *dev; /* it's NULL for PCIe-to-PCI bridge */ @@ -809,11 +811,22 @@ static inline struct dmar_domain *to_dmar_domain(struct iommu_domain *dom) } /* - * Domain ID reserved for pasid entries programmed for first-level - * only and pass-through transfer modes. 
+ * Domain ID 0 and 1 are reserved: + * + * If Caching mode is set, then invalid translations are tagged + * with domain-id 0, hence we need to pre-allocate it. We also + * use domain-id 0 as a marker for non-allocated domain-id, so + * make sure it is not used for a real domain. + * + * Vt-d spec rev3.0 (section 6.2.3.1) requires that each pasid + * entry for first-level or pass-through translation modes should + * be programmed with a domain id different from those used for + * second-level or nested translation. We reserve a domain id for + * this purpose. This domain id is also used for identity domain + * in legacy mode. */ #define FLPT_DEFAULT_DID 1 -#define NUM_RESERVED_DID 2 +#define IDA_START_DID 2 /* Retrieve the domain ID which has allocated to the domain */ static inline u16 @@ -1298,6 +1311,39 @@ void intel_iommu_drain_pasid_prq(struct device *dev, u32 pasid); int intel_iommu_enable_iopf(struct device *dev); void intel_iommu_disable_iopf(struct device *dev); +static inline int iopf_for_domain_set(struct iommu_domain *domain, + struct device *dev) +{ + if (!domain || !domain->iopf_handler) + return 0; + + return intel_iommu_enable_iopf(dev); +} + +static inline void iopf_for_domain_remove(struct iommu_domain *domain, + struct device *dev) +{ + if (!domain || !domain->iopf_handler) + return; + + intel_iommu_disable_iopf(dev); +} + +static inline int iopf_for_domain_replace(struct iommu_domain *new, + struct iommu_domain *old, + struct device *dev) +{ + int ret; + + ret = iopf_for_domain_set(new, dev); + if (ret) + return ret; + + iopf_for_domain_remove(old, dev); + + return 0; +} + #ifdef CONFIG_INTEL_IOMMU_SVM void intel_svm_check(struct intel_iommu *iommu); struct iommu_domain *intel_svm_domain_alloc(struct device *dev, diff --git a/drivers/iommu/intel/irq_remapping.c b/drivers/iommu/intel/irq_remapping.c index 3bc2a03cceca..cf7b6882ec75 100644 --- a/drivers/iommu/intel/irq_remapping.c +++ b/drivers/iommu/intel/irq_remapping.c @@ -530,11 +530,11 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu) if (!ir_table) return -ENOMEM; - ir_table_base = iommu_alloc_pages_node(iommu->node, GFP_KERNEL, - INTR_REMAP_PAGE_ORDER); + /* 1MB - maximum possible interrupt remapping table size */ + ir_table_base = + iommu_alloc_pages_node_sz(iommu->node, GFP_KERNEL, SZ_1M); if (!ir_table_base) { - pr_err("IR%d: failed to allocate pages of order %d\n", - iommu->seq_id, INTR_REMAP_PAGE_ORDER); + pr_err("IR%d: failed to allocate 1M of pages\n", iommu->seq_id); goto out_free_table; } @@ -612,7 +612,7 @@ out_free_fwnode: out_free_bitmap: bitmap_free(bitmap); out_free_pages: - iommu_free_pages(ir_table_base, INTR_REMAP_PAGE_ORDER); + iommu_free_pages(ir_table_base); out_free_table: kfree(ir_table); @@ -633,7 +633,7 @@ static void intel_teardown_irq_remapping(struct intel_iommu *iommu) irq_domain_free_fwnode(fn); iommu->ir_domain = NULL; } - iommu_free_pages(iommu->ir_table->base, INTR_REMAP_PAGE_ORDER); + iommu_free_pages(iommu->ir_table->base); bitmap_free(iommu->ir_table->bitmap); kfree(iommu->ir_table); iommu->ir_table = NULL; diff --git a/drivers/iommu/intel/nested.c b/drivers/iommu/intel/nested.c index 6ac5c534bef4..fc312f649f9e 100644 --- a/drivers/iommu/intel/nested.c +++ b/drivers/iommu/intel/nested.c @@ -27,8 +27,7 @@ static int intel_nested_attach_dev(struct iommu_domain *domain, unsigned long flags; int ret = 0; - if (info->domain) - device_block_translation(dev); + device_block_translation(dev); if (iommu->agaw < dmar_domain->s2_domain->agaw) { dev_err_ratelimited(dev, 
"Adjusted guest address width not compatible\n"); @@ -56,17 +55,24 @@ static int intel_nested_attach_dev(struct iommu_domain *domain, if (ret) goto detach_iommu; + ret = iopf_for_domain_set(domain, dev); + if (ret) + goto unassign_tag; + ret = intel_pasid_setup_nested(iommu, dev, IOMMU_NO_PASID, dmar_domain); if (ret) - goto unassign_tag; + goto disable_iopf; info->domain = dmar_domain; + info->domain_attached = true; spin_lock_irqsave(&dmar_domain->lock, flags); list_add(&info->link, &dmar_domain->devices); spin_unlock_irqrestore(&dmar_domain->lock, flags); return 0; +disable_iopf: + iopf_for_domain_remove(domain, dev); unassign_tag: cache_tag_unassign_domain(dmar_domain, dev, IOMMU_NO_PASID); detach_iommu: @@ -166,14 +172,20 @@ static int intel_nested_set_dev_pasid(struct iommu_domain *domain, if (IS_ERR(dev_pasid)) return PTR_ERR(dev_pasid); - ret = domain_setup_nested(iommu, dmar_domain, dev, pasid, old); + ret = iopf_for_domain_replace(domain, old, dev); if (ret) goto out_remove_dev_pasid; + ret = domain_setup_nested(iommu, dmar_domain, dev, pasid, old); + if (ret) + goto out_unwind_iopf; + domain_remove_dev_pasid(old, dev, pasid); return 0; +out_unwind_iopf: + iopf_for_domain_replace(old, domain, dev); out_remove_dev_pasid: domain_remove_dev_pasid(domain, dev, pasid); return ret; diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c index 7ee18bb48bd4..ac67a056b6c8 100644 --- a/drivers/iommu/intel/pasid.c +++ b/drivers/iommu/intel/pasid.c @@ -60,14 +60,14 @@ int intel_pasid_alloc_table(struct device *dev) size = max_pasid >> (PASID_PDE_SHIFT - 3); order = size ? get_order(size) : 0; - dir = iommu_alloc_pages_node(info->iommu->node, GFP_KERNEL, order); + dir = iommu_alloc_pages_node_sz(info->iommu->node, GFP_KERNEL, + 1 << (order + PAGE_SHIFT)); if (!dir) { kfree(pasid_table); return -ENOMEM; } pasid_table->table = dir; - pasid_table->order = order; pasid_table->max_pasid = 1 << (order + PAGE_SHIFT + 3); info->pasid_table = pasid_table; @@ -97,10 +97,10 @@ void intel_pasid_free_table(struct device *dev) max_pde = pasid_table->max_pasid >> PASID_PDE_SHIFT; for (i = 0; i < max_pde; i++) { table = get_pasid_table_from_pde(&dir[i]); - iommu_free_page(table); + iommu_free_pages(table); } - iommu_free_pages(pasid_table->table, pasid_table->order); + iommu_free_pages(pasid_table->table); kfree(pasid_table); } @@ -148,7 +148,8 @@ retry: if (!entries) { u64 tmp; - entries = iommu_alloc_page_node(info->iommu->node, GFP_ATOMIC); + entries = iommu_alloc_pages_node_sz(info->iommu->node, + GFP_ATOMIC, SZ_4K); if (!entries) return NULL; @@ -161,7 +162,7 @@ retry: tmp = 0ULL; if (!try_cmpxchg64(&dir[dir_index].val, &tmp, (u64)virt_to_phys(entries) | PASID_PTE_PRESENT)) { - iommu_free_page(entries); + iommu_free_pages(entries); goto retry; } if (!ecap_coherent(info->iommu->ecap)) { diff --git a/drivers/iommu/intel/pasid.h b/drivers/iommu/intel/pasid.h index 668d8ece6b14..fd0fd1a0df84 100644 --- a/drivers/iommu/intel/pasid.h +++ b/drivers/iommu/intel/pasid.h @@ -47,7 +47,6 @@ struct pasid_entry { /* The representative of a PASID table */ struct pasid_table { void *table; /* pasid table pointer */ - int order; /* page order of pasid table */ u32 max_pasid; /* max pasid */ }; diff --git a/drivers/iommu/intel/prq.c b/drivers/iommu/intel/prq.c index 5b6a64d96850..52570e42a14c 100644 --- a/drivers/iommu/intel/prq.c +++ b/drivers/iommu/intel/prq.c @@ -290,7 +290,8 @@ int intel_iommu_enable_prq(struct intel_iommu *iommu) struct iopf_queue *iopfq; int irq, ret; - iommu->prq = 
iommu_alloc_pages_node(iommu->node, GFP_KERNEL, PRQ_ORDER); + iommu->prq = + iommu_alloc_pages_node_sz(iommu->node, GFP_KERNEL, PRQ_SIZE); if (!iommu->prq) { pr_warn("IOMMU: %s: Failed to allocate page request queue\n", iommu->name); @@ -340,7 +341,7 @@ free_hwirq: dmar_free_hwirq(irq); iommu->pr_irq = 0; free_prq: - iommu_free_pages(iommu->prq, PRQ_ORDER); + iommu_free_pages(iommu->prq); iommu->prq = NULL; return ret; @@ -363,7 +364,7 @@ int intel_iommu_finish_prq(struct intel_iommu *iommu) iommu->iopf_queue = NULL; } - iommu_free_pages(iommu->prq, PRQ_ORDER); + iommu_free_pages(iommu->prq); iommu->prq = NULL; return 0; diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c index ba93123cb4eb..f3da596410b5 100644 --- a/drivers/iommu/intel/svm.c +++ b/drivers/iommu/intel/svm.c @@ -164,18 +164,23 @@ static int intel_svm_set_dev_pasid(struct iommu_domain *domain, if (IS_ERR(dev_pasid)) return PTR_ERR(dev_pasid); + ret = iopf_for_domain_replace(domain, old, dev); + if (ret) + goto out_remove_dev_pasid; + /* Setup the pasid table: */ sflags = cpu_feature_enabled(X86_FEATURE_LA57) ? PASID_FLAG_FL5LP : 0; ret = __domain_setup_first_level(iommu, dev, pasid, FLPT_DEFAULT_DID, mm->pgd, sflags, old); if (ret) - goto out_remove_dev_pasid; + goto out_unwind_iopf; domain_remove_dev_pasid(old, dev, pasid); return 0; - +out_unwind_iopf: + iopf_for_domain_replace(old, domain, dev); out_remove_dev_pasid: domain_remove_dev_pasid(domain, dev, pasid); return ret; diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c index 7632c80edea6..96425e92f313 100644 --- a/drivers/iommu/io-pgtable-arm.c +++ b/drivers/iommu/io-pgtable-arm.c @@ -13,6 +13,7 @@ #include <linux/bitops.h> #include <linux/io-pgtable.h> #include <linux/kernel.h> +#include <linux/device/faux.h> #include <linux/sizes.h> #include <linux/slab.h> #include <linux/types.h> @@ -251,8 +252,6 @@ static inline bool arm_lpae_concat_mandatory(struct io_pgtable_cfg *cfg, (data->start_level == 1) && (oas == 40); } -static bool selftest_running = false; - static dma_addr_t __arm_lpae_dma_addr(void *pages) { return (dma_addr_t)virt_to_phys(pages); @@ -263,16 +262,20 @@ static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp, void *cookie) { struct device *dev = cfg->iommu_dev; - int order = get_order(size); + size_t alloc_size; dma_addr_t dma; void *pages; - VM_BUG_ON((gfp & __GFP_HIGHMEM)); - + /* + * For very small starting-level translation tables the HW requires a + * minimum alignment of at least 64 to cover all cases. 
+ */ + alloc_size = max(size, 64); if (cfg->alloc) - pages = cfg->alloc(cookie, size, gfp); + pages = cfg->alloc(cookie, alloc_size, gfp); else - pages = iommu_alloc_pages_node(dev_to_node(dev), gfp, order); + pages = iommu_alloc_pages_node_sz(dev_to_node(dev), gfp, + alloc_size); if (!pages) return NULL; @@ -300,7 +303,7 @@ out_free: if (cfg->free) cfg->free(cookie, pages, size); else - iommu_free_pages(pages, order); + iommu_free_pages(pages); return NULL; } @@ -316,7 +319,7 @@ static void __arm_lpae_free_pages(void *pages, size_t size, if (cfg->free) cfg->free(cookie, pages, size); else - iommu_free_pages(pages, get_order(size)); + iommu_free_pages(pages); } static void __arm_lpae_sync_pte(arm_lpae_iopte *ptep, int num_entries, @@ -371,7 +374,7 @@ static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data, for (i = 0; i < num_entries; i++) if (iopte_leaf(ptep[i], lvl, data->iop.fmt)) { /* We require an unmap first */ - WARN_ON(!selftest_running); + WARN_ON(!(data->iop.cfg.quirks & IO_PGTABLE_QUIRK_NO_WARN)); return -EEXIST; } else if (iopte_type(ptep[i]) == ARM_LPAE_PTE_TYPE_TABLE) { /* @@ -473,7 +476,7 @@ static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova, cptep = iopte_deref(pte, data); } else if (pte) { /* We require an unmap first */ - WARN_ON(!selftest_running); + WARN_ON(!(cfg->quirks & IO_PGTABLE_QUIRK_NO_WARN)); return -EEXIST; } @@ -641,8 +644,10 @@ static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data, unmap_idx_start = ARM_LPAE_LVL_IDX(iova, lvl, data); ptep += unmap_idx_start; pte = READ_ONCE(*ptep); - if (WARN_ON(!pte)) - return 0; + if (!pte) { + WARN_ON(!(data->iop.cfg.quirks & IO_PGTABLE_QUIRK_NO_WARN)); + return -ENOENT; + } /* If the size matches this level, we're in the right place */ if (size == ARM_LPAE_BLOCK_SIZE(lvl, data)) { @@ -652,8 +657,10 @@ static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data, /* Find and handle non-leaf entries */ for (i = 0; i < num_entries; i++) { pte = READ_ONCE(ptep[i]); - if (WARN_ON(!pte)) + if (!pte) { + WARN_ON(!(data->iop.cfg.quirks & IO_PGTABLE_QUIRK_NO_WARN)); break; + } if (!iopte_leaf(pte, lvl, iop->fmt)) { __arm_lpae_clear_pte(&ptep[i], &iop->cfg, 1); @@ -968,7 +975,8 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie) if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS | IO_PGTABLE_QUIRK_ARM_TTBR1 | IO_PGTABLE_QUIRK_ARM_OUTER_WBWA | - IO_PGTABLE_QUIRK_ARM_HD)) + IO_PGTABLE_QUIRK_ARM_HD | + IO_PGTABLE_QUIRK_NO_WARN)) return NULL; data = arm_lpae_alloc_pgtable(cfg); @@ -1069,7 +1077,8 @@ arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie) struct arm_lpae_io_pgtable *data; typeof(&cfg->arm_lpae_s2_cfg.vtcr) vtcr = &cfg->arm_lpae_s2_cfg.vtcr; - if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_S2FWB)) + if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_S2FWB | + IO_PGTABLE_QUIRK_NO_WARN)) return NULL; data = arm_lpae_alloc_pgtable(cfg); @@ -1310,7 +1319,6 @@ static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops) #define __FAIL(ops, i) ({ \ WARN(1, "selftest: test failed for fmt idx %d\n", (i)); \ arm_lpae_dump_ops(ops); \ - selftest_running = false; \ -EFAULT; \ }) @@ -1326,8 +1334,6 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg) size_t size, mapped; struct io_pgtable_ops *ops; - selftest_running = true; - for (i = 0; i < ARRAY_SIZE(fmts); ++i) { cfg_cookie = cfg; ops = alloc_io_pgtable_ops(fmts[i], cfg, cfg); @@ -1416,7 +1422,6 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg) free_io_pgtable_ops(ops); } - 
selftest_running = false; return 0; } @@ -1433,15 +1438,18 @@ static int __init arm_lpae_do_selftests(void) }; int i, j, k, pass = 0, fail = 0; - struct device dev; + struct faux_device *dev; struct io_pgtable_cfg cfg = { .tlb = &dummy_tlb_ops, .coherent_walk = true, - .iommu_dev = &dev, + .quirks = IO_PGTABLE_QUIRK_NO_WARN, }; - /* __arm_lpae_alloc_pages() merely needs dev_to_node() to work */ - set_dev_node(&dev, NUMA_NO_NODE); + dev = faux_device_create("io-pgtable-test", NULL, 0); + if (!dev) + return -ENOMEM; + + cfg.iommu_dev = &dev->dev; for (i = 0; i < ARRAY_SIZE(pgsize); ++i) { for (j = 0; j < ARRAY_SIZE(address_size); ++j) { @@ -1461,6 +1469,8 @@ static int __init arm_lpae_do_selftests(void) } pr_info("selftest: completed with %d PASS %d FAIL\n", pass, fail); + faux_device_destroy(dev); + return fail ? -EFAULT : 0; } subsys_initcall(arm_lpae_do_selftests); diff --git a/drivers/iommu/io-pgtable-dart.c b/drivers/iommu/io-pgtable-dart.c index 06aca9ab52f9..679bda104797 100644 --- a/drivers/iommu/io-pgtable-dart.c +++ b/drivers/iommu/io-pgtable-dart.c @@ -107,14 +107,6 @@ static phys_addr_t iopte_to_paddr(dart_iopte pte, return paddr; } -static void *__dart_alloc_pages(size_t size, gfp_t gfp) -{ - int order = get_order(size); - - VM_BUG_ON((gfp & __GFP_HIGHMEM)); - return iommu_alloc_pages(gfp, order); -} - static int dart_init_pte(struct dart_io_pgtable *data, unsigned long iova, phys_addr_t paddr, dart_iopte prot, int num_entries, @@ -256,13 +248,13 @@ static int dart_map_pages(struct io_pgtable_ops *ops, unsigned long iova, /* no L2 table present */ if (!pte) { - cptep = __dart_alloc_pages(tblsz, gfp); + cptep = iommu_alloc_pages_sz(gfp, tblsz); if (!cptep) return -ENOMEM; pte = dart_install_table(cptep, ptep, 0, data); if (pte) - iommu_free_pages(cptep, get_order(tblsz)); + iommu_free_pages(cptep); /* L2 table is present (now) */ pte = READ_ONCE(*ptep); @@ -413,7 +405,8 @@ apple_dart_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie) cfg->apple_dart_cfg.n_ttbrs = 1 << data->tbl_bits; for (i = 0; i < cfg->apple_dart_cfg.n_ttbrs; ++i) { - data->pgd[i] = __dart_alloc_pages(DART_GRANULE(data), GFP_KERNEL); + data->pgd[i] = + iommu_alloc_pages_sz(GFP_KERNEL, DART_GRANULE(data)); if (!data->pgd[i]) goto out_free_data; cfg->apple_dart_cfg.ttbr[i] = virt_to_phys(data->pgd[i]); @@ -423,8 +416,7 @@ apple_dart_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie) out_free_data: while (--i >= 0) { - iommu_free_pages(data->pgd[i], - get_order(DART_GRANULE(data))); + iommu_free_pages(data->pgd[i]); } kfree(data); return NULL; @@ -433,7 +425,6 @@ out_free_data: static void apple_dart_free_pgtable(struct io_pgtable *iop) { struct dart_io_pgtable *data = io_pgtable_to_data(iop); - int order = get_order(DART_GRANULE(data)); dart_iopte *ptep, *end; int i; @@ -445,9 +436,9 @@ static void apple_dart_free_pgtable(struct io_pgtable *iop) dart_iopte pte = *ptep++; if (pte) - iommu_free_pages(iopte_deref(pte, data), order); + iommu_free_pages(iopte_deref(pte, data)); } - iommu_free_pages(data->pgd[i], order); + iommu_free_pages(data->pgd[i]); } kfree(data); diff --git a/drivers/iommu/iommu-pages.c b/drivers/iommu/iommu-pages.c new file mode 100644 index 000000000000..238c09e5166b --- /dev/null +++ b/drivers/iommu/iommu-pages.c @@ -0,0 +1,119 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2024, Google LLC. 
+ * Pasha Tatashin <pasha.tatashin@soleen.com>
+ */
+#include "iommu-pages.h"
+#include <linux/gfp.h>
+#include <linux/mm.h>
+
+#define IOPTDESC_MATCH(pg_elm, elm) \
+	static_assert(offsetof(struct page, pg_elm) == \
+		      offsetof(struct ioptdesc, elm))
+IOPTDESC_MATCH(flags, __page_flags);
+IOPTDESC_MATCH(lru, iopt_freelist_elm); /* Ensure bit 0 is clear */
+IOPTDESC_MATCH(mapping, __page_mapping);
+IOPTDESC_MATCH(private, _private);
+IOPTDESC_MATCH(page_type, __page_type);
+IOPTDESC_MATCH(_refcount, __page_refcount);
+#ifdef CONFIG_MEMCG
+IOPTDESC_MATCH(memcg_data, memcg_data);
+#endif
+#undef IOPTDESC_MATCH
+static_assert(sizeof(struct ioptdesc) <= sizeof(struct page));
+
+/**
+ * iommu_alloc_pages_node_sz - Allocate a zeroed page of a given size from
+ *                             a specific NUMA node
+ * @nid: memory NUMA node id
+ * @gfp: buddy allocator flags
+ * @size: Memory size to allocate, rounded up to a power of 2
+ *
+ * Returns the virtual address of the allocated page. The page must be freed
+ * either by calling iommu_free_pages() or via iommu_put_pages_list(). The
+ * returned allocation is round_up_pow_two(size) bytes and is physically
+ * aligned to its size.
+ */
+void *iommu_alloc_pages_node_sz(int nid, gfp_t gfp, size_t size)
+{
+	unsigned long pgcnt;
+	struct folio *folio;
+	unsigned int order;
+
+	/* This uses page_address() on the memory. */
+	if (WARN_ON(gfp & __GFP_HIGHMEM))
+		return NULL;
+
+	/*
+	 * Currently sub-page allocations result in a full page being returned.
+	 */
+	order = get_order(size);
+
+	/*
+	 * __folio_alloc_node() does not handle NUMA_NO_NODE like
+	 * alloc_pages_node() did.
+	 */
+	if (nid == NUMA_NO_NODE)
+		nid = numa_mem_id();
+
+	folio = __folio_alloc_node(gfp | __GFP_ZERO, order, nid);
+	if (unlikely(!folio))
+		return NULL;
+
+	/*
+	 * All page allocations that should be reported as "iommu-pagetables"
+	 * to userspace must use one of the functions below. This includes
+	 * allocations of page-tables and other per-iommu_domain configuration
+	 * structures.
+	 *
+	 * This is necessary for the proper accounting as IOMMU state can be
+	 * rather large, i.e. multiple gigabytes in size.
+	 */
+	pgcnt = 1UL << order;
+	mod_node_page_state(folio_pgdat(folio), NR_IOMMU_PAGES, pgcnt);
+	lruvec_stat_mod_folio(folio, NR_SECONDARY_PAGETABLE, pgcnt);
+
+	return folio_address(folio);
+}
+EXPORT_SYMBOL_GPL(iommu_alloc_pages_node_sz);
+
+static void __iommu_free_desc(struct ioptdesc *iopt)
+{
+	struct folio *folio = ioptdesc_folio(iopt);
+	const unsigned long pgcnt = 1UL << folio_order(folio);
+
+	mod_node_page_state(folio_pgdat(folio), NR_IOMMU_PAGES, -pgcnt);
+	lruvec_stat_mod_folio(folio, NR_SECONDARY_PAGETABLE, -pgcnt);
+	folio_put(folio);
+}
+
+/**
+ * iommu_free_pages - free pages
+ * @virt: virtual address of the page to be freed.
+ *
+ * The page must have been allocated by iommu_alloc_pages_node_sz()
+ */
+void iommu_free_pages(void *virt)
+{
+	if (!virt)
+		return;
+	__iommu_free_desc(virt_to_ioptdesc(virt));
+}
+EXPORT_SYMBOL_GPL(iommu_free_pages);
+
+/**
+ * iommu_put_pages_list - free a list of pages.
+ * @list: The list of pages to be freed
+ *
+ * Frees a list of pages allocated by iommu_alloc_pages_node_sz(). On return
+ * the passed list is invalid; the caller must use IOMMU_PAGES_LIST_INIT to
+ * reinit the list if it expects to use it again.
+ */ +void iommu_put_pages_list(struct iommu_pages_list *list) +{ + struct ioptdesc *iopt, *tmp; + + list_for_each_entry_safe(iopt, tmp, &list->pages, iopt_freelist_elm) + __iommu_free_desc(iopt); +} +EXPORT_SYMBOL_GPL(iommu_put_pages_list); diff --git a/drivers/iommu/iommu-pages.h b/drivers/iommu/iommu-pages.h index 82ebf0033081..b3af2813ed0c 100644 --- a/drivers/iommu/iommu-pages.h +++ b/drivers/iommu/iommu-pages.h @@ -7,180 +7,95 @@ #ifndef __IOMMU_PAGES_H #define __IOMMU_PAGES_H -#include <linux/vmstat.h> -#include <linux/gfp.h> -#include <linux/mm.h> - -/* - * All page allocations that should be reported to as "iommu-pagetables" to - * userspace must use one of the functions below. This includes allocations of - * page-tables and other per-iommu_domain configuration structures. - * - * This is necessary for the proper accounting as IOMMU state can be rather - * large, i.e. multiple gigabytes in size. - */ - -/** - * __iommu_alloc_account - account for newly allocated page. - * @page: head struct page of the page. - * @order: order of the page - */ -static inline void __iommu_alloc_account(struct page *page, int order) -{ - const long pgcnt = 1l << order; - - mod_node_page_state(page_pgdat(page), NR_IOMMU_PAGES, pgcnt); - mod_lruvec_page_state(page, NR_SECONDARY_PAGETABLE, pgcnt); -} - -/** - * __iommu_free_account - account a page that is about to be freed. - * @page: head struct page of the page. - * @order: order of the page - */ -static inline void __iommu_free_account(struct page *page, int order) -{ - const long pgcnt = 1l << order; - - mod_node_page_state(page_pgdat(page), NR_IOMMU_PAGES, -pgcnt); - mod_lruvec_page_state(page, NR_SECONDARY_PAGETABLE, -pgcnt); -} +#include <linux/iommu.h> /** - * __iommu_alloc_pages - allocate a zeroed page of a given order. - * @gfp: buddy allocator flags - * @order: page order + * struct ioptdesc - Memory descriptor for IOMMU page tables + * @iopt_freelist_elm: List element for a struct iommu_pages_list * - * returns the head struct page of the allocated page. + * This struct overlays struct page for now. Do not modify without a good + * understanding of the issues. */ -static inline struct page *__iommu_alloc_pages(gfp_t gfp, int order) +struct ioptdesc { + unsigned long __page_flags; + + struct list_head iopt_freelist_elm; + unsigned long __page_mapping; + pgoff_t __index; + void *_private; + + unsigned int __page_type; + atomic_t __page_refcount; +#ifdef CONFIG_MEMCG + unsigned long memcg_data; +#endif +}; + +static inline struct ioptdesc *folio_ioptdesc(struct folio *folio) { - struct page *page; - - page = alloc_pages(gfp | __GFP_ZERO, order); - if (unlikely(!page)) - return NULL; - - __iommu_alloc_account(page, order); - - return page; + return (struct ioptdesc *)folio; } -/** - * __iommu_free_pages - free page of a given order - * @page: head struct page of the page - * @order: page order - */ -static inline void __iommu_free_pages(struct page *page, int order) +static inline struct folio *ioptdesc_folio(struct ioptdesc *iopt) { - if (!page) - return; - - __iommu_free_account(page, order); - __free_pages(page, order); + return (struct folio *)iopt; } -/** - * iommu_alloc_pages_node - allocate a zeroed page of a given order from - * specific NUMA node. 
- * @nid: memory NUMA node id - * @gfp: buddy allocator flags - * @order: page order - * - * returns the virtual address of the allocated page - */ -static inline void *iommu_alloc_pages_node(int nid, gfp_t gfp, int order) +static inline struct ioptdesc *virt_to_ioptdesc(void *virt) { - struct page *page = alloc_pages_node(nid, gfp | __GFP_ZERO, order); - - if (unlikely(!page)) - return NULL; - - __iommu_alloc_account(page, order); - - return page_address(page); + return folio_ioptdesc(virt_to_folio(virt)); } -/** - * iommu_alloc_pages - allocate a zeroed page of a given order - * @gfp: buddy allocator flags - * @order: page order - * - * returns the virtual address of the allocated page - */ -static inline void *iommu_alloc_pages(gfp_t gfp, int order) -{ - struct page *page = __iommu_alloc_pages(gfp, order); - - if (unlikely(!page)) - return NULL; - - return page_address(page); -} +void *iommu_alloc_pages_node_sz(int nid, gfp_t gfp, size_t size); +void iommu_free_pages(void *virt); +void iommu_put_pages_list(struct iommu_pages_list *list); /** - * iommu_alloc_page_node - allocate a zeroed page at specific NUMA node. - * @nid: memory NUMA node id - * @gfp: buddy allocator flags - * - * returns the virtual address of the allocated page + * iommu_pages_list_add - add the page to a iommu_pages_list + * @list: List to add the page to + * @virt: Address returned from iommu_alloc_pages_node_sz() */ -static inline void *iommu_alloc_page_node(int nid, gfp_t gfp) +static inline void iommu_pages_list_add(struct iommu_pages_list *list, + void *virt) { - return iommu_alloc_pages_node(nid, gfp, 0); + list_add_tail(&virt_to_ioptdesc(virt)->iopt_freelist_elm, &list->pages); } /** - * iommu_alloc_page - allocate a zeroed page - * @gfp: buddy allocator flags + * iommu_pages_list_splice - Put all the pages in list from into list to + * @from: Source list of pages + * @to: Destination list of pages * - * returns the virtual address of the allocated page + * from must be re-initialized after calling this function if it is to be + * used again. */ -static inline void *iommu_alloc_page(gfp_t gfp) +static inline void iommu_pages_list_splice(struct iommu_pages_list *from, + struct iommu_pages_list *to) { - return iommu_alloc_pages(gfp, 0); + list_splice(&from->pages, &to->pages); } /** - * iommu_free_pages - free page of a given order - * @virt: virtual address of the page to be freed. - * @order: page order + * iommu_pages_list_empty - True if the list is empty + * @list: List to check */ -static inline void iommu_free_pages(void *virt, int order) +static inline bool iommu_pages_list_empty(struct iommu_pages_list *list) { - if (!virt) - return; - - __iommu_free_pages(virt_to_page(virt), order); + return list_empty(&list->pages); } /** - * iommu_free_page - free page - * @virt: virtual address of the page to be freed. - */ -static inline void iommu_free_page(void *virt) -{ - iommu_free_pages(virt, 0); -} - -/** - * iommu_put_pages_list - free a list of pages. - * @page: the head of the lru list to be freed. + * iommu_alloc_pages_sz - Allocate a zeroed page of a given size from + * specific NUMA node + * @nid: memory NUMA node id + * @gfp: buddy allocator flags + * @size: Memory size to allocate, this is rounded up to a power of 2 * - * There are no locking requirement for these pages, as they are going to be - * put on a free list as soon as refcount reaches 0. Pages are put on this LRU - * list once they are removed from the IOMMU page tables. However, they can - * still be access through debugfs. 
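/*
 * Sketch of the gather-and-flush pattern these list helpers support,
 * assuming a hypothetical unmap path: table pages are queued on a local
 * iommu_pages_list while the IOTLB may still reference them, and are only
 * released through iommu_put_pages_list() after the invalidation completes.
 * 'example_domain' and 'example_flush_iotlb()' are stand-ins for this sketch.
 */
#include "iommu-pages.h"

struct example_domain { int id; };		/* hypothetical */

static void example_flush_iotlb(struct example_domain *dom)
{
	/* Issue the driver's IOTLB invalidation here. */
}

static void example_unmap_table(struct example_domain *dom, void *l2_table)
{
	struct iommu_pages_list freelist = IOMMU_PAGES_LIST_INIT(freelist);

	/* The table has already been unhooked from the live page table. */
	iommu_pages_list_add(&freelist, l2_table);

	if (!iommu_pages_list_empty(&freelist)) {
		example_flush_iotlb(dom);
		iommu_put_pages_list(&freelist);
	}
}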
+ * Returns the virtual address of the allocated page. */ -static inline void iommu_put_pages_list(struct list_head *page) +static inline void *iommu_alloc_pages_sz(gfp_t gfp, size_t size) { - while (!list_empty(page)) { - struct page *p = list_entry(page->prev, struct page, lru); - - list_del(&p->lru); - __iommu_free_account(p, 0); - put_page(p); - } + return iommu_alloc_pages_node_sz(NUMA_NO_NODE, gfp, size); } #endif /* __IOMMU_PAGES_H */ diff --git a/drivers/iommu/iommu-sva.c b/drivers/iommu/iommu-sva.c index ab18bc494eef..1a51cfd82808 100644 --- a/drivers/iommu/iommu-sva.c +++ b/drivers/iommu/iommu-sva.c @@ -63,9 +63,6 @@ static struct iommu_mm_data *iommu_alloc_mm_data(struct mm_struct *mm, struct de * reference is taken. Caller must call iommu_sva_unbind_device() * to release each reference. * - * iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA) must be called first, to - * initialize the required SVA features. - * * On error, returns an ERR_PTR value. */ struct iommu_sva *iommu_sva_bind_device(struct device *dev, struct mm_struct *mm) @@ -299,15 +296,12 @@ static struct iommu_domain *iommu_sva_domain_alloc(struct device *dev, const struct iommu_ops *ops = dev_iommu_ops(dev); struct iommu_domain *domain; - if (ops->domain_alloc_sva) { - domain = ops->domain_alloc_sva(dev, mm); - if (IS_ERR(domain)) - return domain; - } else { - domain = ops->domain_alloc(IOMMU_DOMAIN_SVA); - if (!domain) - return ERR_PTR(-ENOMEM); - } + if (!ops->domain_alloc_sva) + return ERR_PTR(-EOPNOTSUPP); + + domain = ops->domain_alloc_sva(dev, mm); + if (IS_ERR(domain)) + return domain; domain->type = IOMMU_DOMAIN_SVA; domain->cookie_type = IOMMU_COOKIE_SVA; diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 6c02f93422ce..a4b606c591da 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -277,6 +277,8 @@ int iommu_device_register(struct iommu_device *iommu, err = bus_iommu_probe(iommu_buses[i]); if (err) iommu_device_unregister(iommu); + else + WRITE_ONCE(iommu->ready, true); return err; } EXPORT_SYMBOL_GPL(iommu_device_register); @@ -422,13 +424,15 @@ static int iommu_init_device(struct device *dev) * is buried in the bus dma_configure path. Properly unpicking that is * still a big job, so for now just invoke the whole thing. The device * already having a driver bound means dma_configure has already run and - * either found no IOMMU to wait for, or we're in its replay call right - * now, so either way there's no point calling it again. + * found no IOMMU to wait for, so there's no point calling it again. 
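/*
 * With the generic fallback removed above, SVA now requires the driver to
 * implement ops->domain_alloc_sva. A minimal sketch of such a hook, using
 * hypothetical 'example_*' names; the core sets domain->type and
 * domain->cookie_type after this returns.
 */
#include <linux/err.h>
#include <linux/iommu.h>
#include <linux/slab.h>

struct example_sva_domain {			/* hypothetical */
	struct iommu_domain domain;
};

static void example_sva_domain_free(struct iommu_domain *domain)
{
	kfree(container_of(domain, struct example_sva_domain, domain));
}

static int example_sva_set_dev_pasid(struct iommu_domain *domain,
				     struct device *dev, ioasid_t pasid,
				     struct iommu_domain *old)
{
	/* Point the device's PASID table entry at domain->mm here. */
	return 0;
}

static const struct iommu_domain_ops example_sva_domain_ops = {
	.set_dev_pasid	= example_sva_set_dev_pasid,
	.free		= example_sva_domain_free,
};

static struct iommu_domain *example_domain_alloc_sva(struct device *dev,
						     struct mm_struct *mm)
{
	struct example_sva_domain *sva = kzalloc(sizeof(*sva), GFP_KERNEL);

	if (!sva)
		return ERR_PTR(-ENOMEM);
	sva->domain.ops = &example_sva_domain_ops;
	return &sva->domain;
}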
*/ - if (!dev->driver && dev->bus->dma_configure) { + if (!dev->iommu->fwspec && !dev->driver && dev->bus->dma_configure) { mutex_unlock(&iommu_probe_device_lock); dev->bus->dma_configure(dev); mutex_lock(&iommu_probe_device_lock); + /* If another instance finished the job for us, skip it */ + if (!dev->iommu || dev->iommu_group) + return -ENODEV; } /* * At this point, relevant devices either now have a fwspec which will @@ -1629,15 +1633,13 @@ static struct iommu_domain *__iommu_alloc_identity_domain(struct device *dev) if (ops->identity_domain) return ops->identity_domain; - /* Older drivers create the identity domain via ops->domain_alloc() */ - if (!ops->domain_alloc) + if (ops->domain_alloc_identity) { + domain = ops->domain_alloc_identity(dev); + if (IS_ERR(domain)) + return domain; + } else { return ERR_PTR(-EOPNOTSUPP); - - domain = ops->domain_alloc(IOMMU_DOMAIN_IDENTITY); - if (IS_ERR(domain)) - return domain; - if (!domain) - return ERR_PTR(-ENOMEM); + } iommu_domain_init(domain, IOMMU_DOMAIN_IDENTITY, ops); return domain; @@ -2025,8 +2027,10 @@ __iommu_paging_domain_alloc_flags(struct device *dev, unsigned int type, domain = ops->domain_alloc_paging(dev); else if (ops->domain_alloc_paging_flags) domain = ops->domain_alloc_paging_flags(dev, flags, NULL); +#if IS_ENABLED(CONFIG_FSL_PAMU) else if (ops->domain_alloc && !flags) domain = ops->domain_alloc(IOMMU_DOMAIN_UNMANAGED); +#endif else return ERR_PTR(-EOPNOTSUPP); @@ -2204,6 +2208,19 @@ static void *iommu_make_pasid_array_entry(struct iommu_domain *domain, return xa_tag_pointer(domain, IOMMU_PASID_ARRAY_DOMAIN); } +static bool domain_iommu_ops_compatible(const struct iommu_ops *ops, + struct iommu_domain *domain) +{ + if (domain->owner == ops) + return true; + + /* For static domains, owner isn't set. */ + if (domain == ops->blocked_domain || domain == ops->identity_domain) + return true; + + return false; +} + static int __iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group) { @@ -2214,7 +2231,8 @@ static int __iommu_attach_group(struct iommu_domain *domain, return -EBUSY; dev = iommu_group_first_dev(group); - if (!dev_has_iommu(dev) || dev_iommu_ops(dev) != domain->owner) + if (!dev_has_iommu(dev) || + !domain_iommu_ops_compatible(dev_iommu_ops(dev), domain)) return -EINVAL; return __iommu_group_set_domain(group, domain); @@ -2395,6 +2413,7 @@ static size_t iommu_pgsize(struct iommu_domain *domain, unsigned long iova, unsigned int pgsize_idx, pgsize_idx_next; unsigned long pgsizes; size_t offset, pgsize, pgsize_next; + size_t offset_end; unsigned long addr_merge = paddr | iova; /* Page sizes supported by the hardware and small enough for @size */ @@ -2435,7 +2454,8 @@ static size_t iommu_pgsize(struct iommu_domain *domain, unsigned long iova, * If size is big enough to accommodate the larger page, reduce * the number of smaller pages. 
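/*
 * The guard added above avoids an arithmetic wrap: 'offset + pgsize_next'
 * is a size_t sum that can overflow, in which case the old comparison could
 * pass spuriously and shrink the mapping size by mistake. A small
 * self-contained illustration of the check_add_overflow() idiom:
 */
#include <linux/overflow.h>
#include <linux/types.h>

static bool example_fits(size_t offset, size_t pgsize_next, size_t size)
{
	size_t offset_end;

	/* True only when the sum neither wraps nor exceeds 'size'. */
	return !check_add_overflow(offset, pgsize_next, &offset_end) &&
	       offset_end <= size;
}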
*/ - if (offset + pgsize_next <= size) + if (!check_add_overflow(offset, pgsize_next, &offset_end) && + offset_end <= size) size = offset; out_set_count: @@ -2842,31 +2862,39 @@ bool iommu_default_passthrough(void) } EXPORT_SYMBOL_GPL(iommu_default_passthrough); -const struct iommu_ops *iommu_ops_from_fwnode(const struct fwnode_handle *fwnode) +static const struct iommu_device *iommu_from_fwnode(const struct fwnode_handle *fwnode) { - const struct iommu_ops *ops = NULL; - struct iommu_device *iommu; + const struct iommu_device *iommu, *ret = NULL; spin_lock(&iommu_device_lock); list_for_each_entry(iommu, &iommu_device_list, list) if (iommu->fwnode == fwnode) { - ops = iommu->ops; + ret = iommu; break; } spin_unlock(&iommu_device_lock); - return ops; + return ret; +} + +const struct iommu_ops *iommu_ops_from_fwnode(const struct fwnode_handle *fwnode) +{ + const struct iommu_device *iommu = iommu_from_fwnode(fwnode); + + return iommu ? iommu->ops : NULL; } int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode) { - const struct iommu_ops *ops = iommu_ops_from_fwnode(iommu_fwnode); + const struct iommu_device *iommu = iommu_from_fwnode(iommu_fwnode); struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); - if (!ops) + if (!iommu) return driver_deferred_probe_check_state(dev); + if (!dev->iommu && !READ_ONCE(iommu->ready)) + return -EPROBE_DEFER; if (fwspec) - return ops == iommu_fwspec_ops(fwspec) ? 0 : -EINVAL; + return iommu->ops == iommu_fwspec_ops(fwspec) ? 0 : -EINVAL; if (!dev_iommu_get(dev)) return -ENOMEM; @@ -2920,38 +2948,6 @@ int iommu_fwspec_add_ids(struct device *dev, const u32 *ids, int num_ids) } EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids); -/* - * Per device IOMMU features. - */ -int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat) -{ - if (dev_has_iommu(dev)) { - const struct iommu_ops *ops = dev_iommu_ops(dev); - - if (ops->dev_enable_feat) - return ops->dev_enable_feat(dev, feat); - } - - return -ENODEV; -} -EXPORT_SYMBOL_GPL(iommu_dev_enable_feature); - -/* - * The device drivers should do the necessary cleanups before calling this. 
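/*
 * Sketch of how firmware bus glue consumes the behaviour above, assuming a
 * hypothetical parser that has resolved the IOMMU's fwnode and a single
 * stream ID for the client device: until the IOMMU instance is registered
 * and marked ready, iommu_fwspec_init() defers the probe and the caller
 * simply propagates the error.
 */
#include <linux/iommu.h>

static int example_configure_iommu(struct device *dev,
				   struct fwnode_handle *iommu_fwnode,
				   u32 stream_id)
{
	int ret;

	ret = iommu_fwspec_init(dev, iommu_fwnode);
	if (ret)
		return ret;	/* may be -EPROBE_DEFER; pass it on as-is */

	return iommu_fwspec_add_ids(dev, &stream_id, 1);
}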
- */ -int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat) -{ - if (dev_has_iommu(dev)) { - const struct iommu_ops *ops = dev_iommu_ops(dev); - - if (ops->dev_disable_feat) - return ops->dev_disable_feat(dev, feat); - } - - return -EBUSY; -} -EXPORT_SYMBOL_GPL(iommu_dev_disable_feature); - /** * iommu_setup_default_domain - Set the default_domain for the group * @group: Group to change @@ -3454,7 +3450,8 @@ int iommu_attach_device_pasid(struct iommu_domain *domain, !ops->blocked_domain->ops->set_dev_pasid) return -EOPNOTSUPP; - if (ops != domain->owner || pasid == IOMMU_NO_PASID) + if (!domain_iommu_ops_compatible(ops, domain) || + pasid == IOMMU_NO_PASID) return -EINVAL; mutex_lock(&group->mutex); @@ -3536,7 +3533,7 @@ int iommu_replace_device_pasid(struct iommu_domain *domain, if (!domain->ops->set_dev_pasid) return -EOPNOTSUPP; - if (dev_iommu_ops(dev) != domain->owner || + if (!domain_iommu_ops_compatible(dev_iommu_ops(dev), domain) || pasid == IOMMU_NO_PASID || !handle) return -EINVAL; diff --git a/drivers/iommu/iommufd/device.c b/drivers/iommu/iommufd/device.c index 2111bad72c72..86244403b532 100644 --- a/drivers/iommu/iommufd/device.c +++ b/drivers/iommu/iommufd/device.c @@ -221,7 +221,6 @@ struct iommufd_device *iommufd_device_bind(struct iommufd_ctx *ictx, refcount_inc(&idev->obj.users); /* igroup refcount moves into iommufd_device */ idev->igroup = igroup; - mutex_init(&idev->iopf_lock); /* * If the caller fails after this success it must call @@ -425,6 +424,25 @@ static int iommufd_hwpt_pasid_compat(struct iommufd_hw_pagetable *hwpt, return 0; } +static bool iommufd_hwpt_compatible_device(struct iommufd_hw_pagetable *hwpt, + struct iommufd_device *idev) +{ + struct pci_dev *pdev; + + if (!hwpt->fault || !dev_is_pci(idev->dev)) + return true; + + /* + * Once we turn on PCI/PRI support for VF, the response failure code + * should not be forwarded to the hardware due to PRI being a shared + * resource between PF and VFs. There is no coordination for this + * shared capability. This waits for a vPRI reset to recover. 
+ */ + pdev = to_pci_dev(idev->dev); + + return (!pdev->is_virtfn || !pci_pri_supported(pdev)); +} + static int iommufd_hwpt_attach_device(struct iommufd_hw_pagetable *hwpt, struct iommufd_device *idev, ioasid_t pasid) @@ -432,6 +450,9 @@ static int iommufd_hwpt_attach_device(struct iommufd_hw_pagetable *hwpt, struct iommufd_attach_handle *handle; int rc; + if (!iommufd_hwpt_compatible_device(hwpt, idev)) + return -EINVAL; + rc = iommufd_hwpt_pasid_compat(hwpt, idev, pasid); if (rc) return rc; @@ -440,12 +461,6 @@ static int iommufd_hwpt_attach_device(struct iommufd_hw_pagetable *hwpt, if (!handle) return -ENOMEM; - if (hwpt->fault) { - rc = iommufd_fault_iopf_enable(idev); - if (rc) - goto out_free_handle; - } - handle->idev = idev; if (pasid == IOMMU_NO_PASID) rc = iommu_attach_group_handle(hwpt->domain, idev->igroup->group, @@ -454,13 +469,10 @@ static int iommufd_hwpt_attach_device(struct iommufd_hw_pagetable *hwpt, rc = iommu_attach_device_pasid(hwpt->domain, idev->dev, pasid, &handle->handle); if (rc) - goto out_disable_iopf; + goto out_free_handle; return 0; -out_disable_iopf: - if (hwpt->fault) - iommufd_fault_iopf_disable(idev); out_free_handle: kfree(handle); return rc; @@ -492,10 +504,7 @@ static void iommufd_hwpt_detach_device(struct iommufd_hw_pagetable *hwpt, else iommu_detach_device_pasid(hwpt->domain, idev->dev, pasid); - if (hwpt->fault) { - iommufd_auto_response_faults(hwpt, handle); - iommufd_fault_iopf_disable(idev); - } + iommufd_auto_response_faults(hwpt, handle); kfree(handle); } @@ -507,6 +516,9 @@ static int iommufd_hwpt_replace_device(struct iommufd_device *idev, struct iommufd_attach_handle *handle, *old_handle; int rc; + if (!iommufd_hwpt_compatible_device(hwpt, idev)) + return -EINVAL; + rc = iommufd_hwpt_pasid_compat(hwpt, idev, pasid); if (rc) return rc; @@ -517,12 +529,6 @@ static int iommufd_hwpt_replace_device(struct iommufd_device *idev, if (!handle) return -ENOMEM; - if (hwpt->fault && !old->fault) { - rc = iommufd_fault_iopf_enable(idev); - if (rc) - goto out_free_handle; - } - handle->idev = idev; if (pasid == IOMMU_NO_PASID) rc = iommu_replace_group_handle(idev->igroup->group, @@ -531,20 +537,13 @@ static int iommufd_hwpt_replace_device(struct iommufd_device *idev, rc = iommu_replace_device_pasid(hwpt->domain, idev->dev, pasid, &handle->handle); if (rc) - goto out_disable_iopf; + goto out_free_handle; - if (old->fault) { - iommufd_auto_response_faults(hwpt, old_handle); - if (!hwpt->fault) - iommufd_fault_iopf_disable(idev); - } + iommufd_auto_response_faults(hwpt, old_handle); kfree(old_handle); return 0; -out_disable_iopf: - if (hwpt->fault && !old->fault) - iommufd_fault_iopf_disable(idev); out_free_handle: kfree(handle); return rc; diff --git a/drivers/iommu/iommufd/eventq.c b/drivers/iommu/iommufd/eventq.c index f39cf0797347..e373b9eec7f5 100644 --- a/drivers/iommu/iommufd/eventq.c +++ b/drivers/iommu/iommufd/eventq.c @@ -9,8 +9,6 @@ #include <linux/iommufd.h> #include <linux/module.h> #include <linux/mutex.h> -#include <linux/pci.h> -#include <linux/pci-ats.h> #include <linux/poll.h> #include <uapi/linux/iommufd.h> @@ -18,50 +16,6 @@ #include "iommufd_private.h" /* IOMMUFD_OBJ_FAULT Functions */ - -int iommufd_fault_iopf_enable(struct iommufd_device *idev) -{ - struct device *dev = idev->dev; - int ret; - - /* - * Once we turn on PCI/PRI support for VF, the response failure code - * should not be forwarded to the hardware due to PRI being a shared - * resource between PF and VFs. There is no coordination for this - * shared capability. 
This waits for a vPRI reset to recover. - */ - if (dev_is_pci(dev)) { - struct pci_dev *pdev = to_pci_dev(dev); - - if (pdev->is_virtfn && pci_pri_supported(pdev)) - return -EINVAL; - } - - mutex_lock(&idev->iopf_lock); - /* Device iopf has already been on. */ - if (++idev->iopf_enabled > 1) { - mutex_unlock(&idev->iopf_lock); - return 0; - } - - ret = iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_IOPF); - if (ret) - --idev->iopf_enabled; - mutex_unlock(&idev->iopf_lock); - - return ret; -} - -void iommufd_fault_iopf_disable(struct iommufd_device *idev) -{ - mutex_lock(&idev->iopf_lock); - if (!WARN_ON(idev->iopf_enabled == 0)) { - if (--idev->iopf_enabled == 0) - iommu_dev_disable_feature(idev->dev, IOMMU_DEV_FEAT_IOPF); - } - mutex_unlock(&idev->iopf_lock); -} - void iommufd_auto_response_faults(struct iommufd_hw_pagetable *hwpt, struct iommufd_attach_handle *handle) { @@ -70,7 +24,7 @@ void iommufd_auto_response_faults(struct iommufd_hw_pagetable *hwpt, struct list_head free_list; unsigned long index; - if (!fault) + if (!fault || !handle) return; INIT_LIST_HEAD(&free_list); diff --git a/drivers/iommu/iommufd/iommufd_private.h b/drivers/iommu/iommufd/iommufd_private.h index 80e8c76d25f2..9ccc83341f32 100644 --- a/drivers/iommu/iommufd/iommufd_private.h +++ b/drivers/iommu/iommufd/iommufd_private.h @@ -425,9 +425,6 @@ struct iommufd_device { /* always the physical device */ struct device *dev; bool enforce_cache_coherency; - /* protect iopf_enabled counter */ - struct mutex iopf_lock; - unsigned int iopf_enabled; }; static inline struct iommufd_device * @@ -506,9 +503,6 @@ iommufd_get_fault(struct iommufd_ucmd *ucmd, u32 id) int iommufd_fault_alloc(struct iommufd_ucmd *ucmd); void iommufd_fault_destroy(struct iommufd_object *obj); int iommufd_fault_iopf_handler(struct iopf_group *group); - -int iommufd_fault_iopf_enable(struct iommufd_device *idev); -void iommufd_fault_iopf_disable(struct iommufd_device *idev); void iommufd_auto_response_faults(struct iommufd_hw_pagetable *hwpt, struct iommufd_attach_handle *handle); diff --git a/drivers/iommu/iommufd/selftest.c b/drivers/iommu/iommufd/selftest.c index 18d9a216eb30..6bd0abf9a641 100644 --- a/drivers/iommu/iommufd/selftest.c +++ b/drivers/iommu/iommufd/selftest.c @@ -58,6 +58,9 @@ enum { MOCK_PFN_HUGE_IOVA = _MOCK_PFN_START << 2, }; +static int mock_dev_enable_iopf(struct device *dev, struct iommu_domain *domain); +static void mock_dev_disable_iopf(struct device *dev, struct iommu_domain *domain); + /* * Syzkaller has trouble randomizing the correct iova to use since it is linked * to the map ioctl's output, and it has no ide about that. So, simplify things. 
@@ -168,6 +171,8 @@ struct mock_dev { int id; u32 cache[MOCK_DEV_CACHE_NUM]; atomic_t pasid_1024_fake_error; + unsigned int iopf_refcount; + struct iommu_domain *domain; }; static inline struct mock_dev *to_mock_dev(struct device *dev) @@ -221,6 +226,13 @@ static int mock_domain_nop_attach(struct iommu_domain *domain, up_write(&mdev->viommu_rwsem); } + rc = mock_dev_enable_iopf(dev, domain); + if (rc) + return rc; + + mock_dev_disable_iopf(dev, mdev->domain); + mdev->domain = domain; + return 0; } @@ -229,6 +241,7 @@ static int mock_domain_set_dev_pasid_nop(struct iommu_domain *domain, struct iommu_domain *old) { struct mock_dev *mdev = to_mock_dev(dev); + int rc; /* * Per the first attach with pasid 1024, set the @@ -256,6 +269,12 @@ static int mock_domain_set_dev_pasid_nop(struct iommu_domain *domain, } } + rc = mock_dev_enable_iopf(dev, domain); + if (rc) + return rc; + + mock_dev_disable_iopf(dev, old); + return 0; } @@ -610,22 +629,42 @@ static void mock_domain_page_response(struct device *dev, struct iopf_fault *evt { } -static int mock_dev_enable_feat(struct device *dev, enum iommu_dev_features feat) +static int mock_dev_enable_iopf(struct device *dev, struct iommu_domain *domain) { - if (feat != IOMMU_DEV_FEAT_IOPF || !mock_iommu_iopf_queue) + struct mock_dev *mdev = to_mock_dev(dev); + int ret; + + if (!domain || !domain->iopf_handler) + return 0; + + if (!mock_iommu_iopf_queue) return -ENODEV; - return iopf_queue_add_device(mock_iommu_iopf_queue, dev); + if (mdev->iopf_refcount) { + mdev->iopf_refcount++; + return 0; + } + + ret = iopf_queue_add_device(mock_iommu_iopf_queue, dev); + if (ret) + return ret; + + mdev->iopf_refcount = 1; + + return 0; } -static int mock_dev_disable_feat(struct device *dev, enum iommu_dev_features feat) +static void mock_dev_disable_iopf(struct device *dev, struct iommu_domain *domain) { - if (feat != IOMMU_DEV_FEAT_IOPF || !mock_iommu_iopf_queue) - return -ENODEV; + struct mock_dev *mdev = to_mock_dev(dev); - iopf_queue_remove_device(mock_iommu_iopf_queue, dev); + if (!domain || !domain->iopf_handler) + return; - return 0; + if (--mdev->iopf_refcount) + return; + + iopf_queue_remove_device(mock_iommu_iopf_queue, dev); } static void mock_viommu_destroy(struct iommufd_viommu *viommu) @@ -770,8 +809,6 @@ static const struct iommu_ops mock_ops = { .device_group = generic_device_group, .probe_device = mock_probe_device, .page_response = mock_domain_page_response, - .dev_enable_feat = mock_dev_enable_feat, - .dev_disable_feat = mock_dev_disable_feat, .user_pasid_table = true, .viommu_alloc = mock_viommu_alloc, .default_domain_ops = diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c index e424b279a8cd..90341b24a811 100644 --- a/drivers/iommu/ipmmu-vmsa.c +++ b/drivers/iommu/ipmmu-vmsa.c @@ -1090,7 +1090,8 @@ static int ipmmu_probe(struct platform_device *pdev) if (mmu->features->has_cache_leaf_nodes && ipmmu_is_root(mmu)) return 0; - ret = iommu_device_sysfs_add(&mmu->iommu, &pdev->dev, NULL, dev_name(&pdev->dev)); + ret = iommu_device_sysfs_add(&mmu->iommu, &pdev->dev, NULL, "%s", + dev_name(&pdev->dev)); if (ret) return ret; diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c index df98d0c65f54..cb95fecf6016 100644 --- a/drivers/iommu/mtk_iommu.c +++ b/drivers/iommu/mtk_iommu.c @@ -1550,6 +1550,31 @@ static const struct mtk_iommu_plat_data mt6795_data = { .larbid_remap = {{0}, {1}, {2}, {3}, {4}}, /* Linear mapping. 
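/*
 * The selftest changes above replace the IOMMU_DEV_FEAT_IOPF enable/disable
 * feature calls with a per-device refcount driven by domain attach. A
 * compact sketch of that idiom for a hypothetical driver with one shared
 * fault queue ('example_iopf_queue'); only domains that actually install an
 * iopf_handler take a reference, and the calls are assumed to be balanced.
 */
#include <linux/iommu.h>

static struct iopf_queue *example_iopf_queue;	/* hypothetical, set at probe */

static int example_enable_iopf(struct device *dev,
			       struct iommu_domain *domain,
			       unsigned int *refcount)
{
	int ret;

	if (!domain || !domain->iopf_handler)
		return 0;
	if ((*refcount)++)
		return 0;

	ret = iopf_queue_add_device(example_iopf_queue, dev);
	if (ret)
		(*refcount)--;
	return ret;
}

static void example_disable_iopf(struct device *dev,
				 struct iommu_domain *domain,
				 unsigned int *refcount)
{
	if (!domain || !domain->iopf_handler)
		return;
	if (--(*refcount))
		return;
	iopf_queue_remove_device(example_iopf_queue, dev);
}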
*/ }; +static const unsigned int mt8192_larb_region_msk[MT8192_MULTI_REGION_NR_MAX][MTK_LARB_NR_MAX] = { + [0] = {~0, ~0}, /* Region0: larb0/1 */ + [1] = {0, 0, 0, 0, ~0, ~0, 0, ~0}, /* Region1: larb4/5/7 */ + [2] = {0, 0, ~0, 0, 0, 0, 0, 0, /* Region2: larb2/9/11/13/14/16/17/18/19/20 */ + 0, ~0, 0, ~0, 0, ~(u32)(BIT(9) | BIT(10)), ~(u32)(BIT(4) | BIT(5)), 0, + ~0, ~0, ~0, ~0, ~0}, + [3] = {0}, + [4] = {[13] = BIT(9) | BIT(10)}, /* larb13 port9/10 */ + [5] = {[14] = BIT(4) | BIT(5)}, /* larb14 port4/5 */ +}; + +static const struct mtk_iommu_plat_data mt6893_data = { + .m4u_plat = M4U_MT8192, + .flags = HAS_BCLK | OUT_ORDER_WR_EN | HAS_SUB_COMM_2BITS | + WR_THROT_EN | IOVA_34_EN | SHARE_PGTABLE | MTK_IOMMU_TYPE_MM, + .inv_sel_reg = REG_MMU_INV_SEL_GEN2, + .banks_num = 1, + .banks_enable = {true}, + .iova_region = mt8192_multi_dom, + .iova_region_nr = ARRAY_SIZE(mt8192_multi_dom), + .iova_region_larb_msk = mt8192_larb_region_msk, + .larbid_remap = {{0}, {1}, {4, 5}, {7}, {2}, {9, 11, 19, 20}, + {0, 14, 16}, {0, 13, 18, 17}}, +}; + static const struct mtk_iommu_plat_data mt8167_data = { .m4u_plat = M4U_MT8167, .flags = RESET_AXI | HAS_LEGACY_IVRP_PADDR | MTK_IOMMU_TYPE_MM, @@ -1673,17 +1698,6 @@ static const struct mtk_iommu_plat_data mt8188_data_vpp = { 27, 28 /* ccu0 */, MTK_INVALID_LARBID}, {4, 6}}, }; -static const unsigned int mt8192_larb_region_msk[MT8192_MULTI_REGION_NR_MAX][MTK_LARB_NR_MAX] = { - [0] = {~0, ~0}, /* Region0: larb0/1 */ - [1] = {0, 0, 0, 0, ~0, ~0, 0, ~0}, /* Region1: larb4/5/7 */ - [2] = {0, 0, ~0, 0, 0, 0, 0, 0, /* Region2: larb2/9/11/13/14/16/17/18/19/20 */ - 0, ~0, 0, ~0, 0, ~(u32)(BIT(9) | BIT(10)), ~(u32)(BIT(4) | BIT(5)), 0, - ~0, ~0, ~0, ~0, ~0}, - [3] = {0}, - [4] = {[13] = BIT(9) | BIT(10)}, /* larb13 port9/10 */ - [5] = {[14] = BIT(4) | BIT(5)}, /* larb14 port4/5 */ -}; - static const struct mtk_iommu_plat_data mt8192_data = { .m4u_plat = M4U_MT8192, .flags = HAS_BCLK | HAS_SUB_COMM_2BITS | OUT_ORDER_WR_EN | @@ -1777,6 +1791,7 @@ static const struct of_device_id mtk_iommu_of_ids[] = { { .compatible = "mediatek,mt2712-m4u", .data = &mt2712_data}, { .compatible = "mediatek,mt6779-m4u", .data = &mt6779_data}, { .compatible = "mediatek,mt6795-m4u", .data = &mt6795_data}, + { .compatible = "mediatek,mt6893-iommu-mm", .data = &mt6893_data}, { .compatible = "mediatek,mt8167-m4u", .data = &mt8167_data}, { .compatible = "mediatek,mt8173-m4u", .data = &mt8173_data}, { .compatible = "mediatek,mt8183-m4u", .data = &mt8183_data}, diff --git a/drivers/iommu/riscv/Makefile b/drivers/iommu/riscv/Makefile index f54c9ed17d41..b5929f9f23e6 100644 --- a/drivers/iommu/riscv/Makefile +++ b/drivers/iommu/riscv/Makefile @@ -1,3 +1,3 @@ # SPDX-License-Identifier: GPL-2.0-only -obj-$(CONFIG_RISCV_IOMMU) += iommu.o iommu-platform.o +obj-y += iommu.o iommu-platform.o obj-$(CONFIG_RISCV_IOMMU_PCI) += iommu-pci.o diff --git a/drivers/iommu/riscv/iommu.c b/drivers/iommu/riscv/iommu.c index 8f049d4a0e2c..bb57092ca901 100644 --- a/drivers/iommu/riscv/iommu.c +++ b/drivers/iommu/riscv/iommu.c @@ -48,14 +48,13 @@ static DEFINE_IDA(riscv_iommu_pscids); /* Device resource-managed allocations */ struct riscv_iommu_devres { void *addr; - int order; }; static void riscv_iommu_devres_pages_release(struct device *dev, void *res) { struct riscv_iommu_devres *devres = res; - iommu_free_pages(devres->addr, devres->order); + iommu_free_pages(devres->addr); } static int riscv_iommu_devres_pages_match(struct device *dev, void *res, void *p) @@ -66,13 +65,14 @@ static int 
riscv_iommu_devres_pages_match(struct device *dev, void *res, void *p return devres->addr == target->addr; } -static void *riscv_iommu_get_pages(struct riscv_iommu_device *iommu, int order) +static void *riscv_iommu_get_pages(struct riscv_iommu_device *iommu, + unsigned int size) { struct riscv_iommu_devres *devres; void *addr; - addr = iommu_alloc_pages_node(dev_to_node(iommu->dev), - GFP_KERNEL_ACCOUNT, order); + addr = iommu_alloc_pages_node_sz(dev_to_node(iommu->dev), + GFP_KERNEL_ACCOUNT, size); if (unlikely(!addr)) return NULL; @@ -80,12 +80,11 @@ static void *riscv_iommu_get_pages(struct riscv_iommu_device *iommu, int order) sizeof(struct riscv_iommu_devres), GFP_KERNEL); if (unlikely(!devres)) { - iommu_free_pages(addr, order); + iommu_free_pages(addr); return NULL; } devres->addr = addr; - devres->order = order; devres_add(iommu->dev, devres); @@ -163,9 +162,9 @@ static int riscv_iommu_queue_alloc(struct riscv_iommu_device *iommu, } else { do { const size_t queue_size = entry_size << (logsz + 1); - const int order = get_order(queue_size); - queue->base = riscv_iommu_get_pages(iommu, order); + queue->base = riscv_iommu_get_pages( + iommu, max(queue_size, SZ_4K)); queue->phys = __pa(queue->base); } while (!queue->base && logsz-- > 0); } @@ -620,7 +619,7 @@ static struct riscv_iommu_dc *riscv_iommu_get_dc(struct riscv_iommu_device *iomm break; } - ptr = riscv_iommu_get_pages(iommu, 0); + ptr = riscv_iommu_get_pages(iommu, SZ_4K); if (!ptr) return NULL; @@ -700,7 +699,7 @@ static int riscv_iommu_iodir_alloc(struct riscv_iommu_device *iommu) } if (!iommu->ddt_root) { - iommu->ddt_root = riscv_iommu_get_pages(iommu, 0); + iommu->ddt_root = riscv_iommu_get_pages(iommu, SZ_4K); iommu->ddt_phys = __pa(iommu->ddt_root); } @@ -1087,7 +1086,8 @@ static void riscv_iommu_iotlb_sync(struct iommu_domain *iommu_domain, #define _io_pte_entry(pn, prot) ((_PAGE_PFN_MASK & ((pn) << _PAGE_PFN_SHIFT)) | (prot)) static void riscv_iommu_pte_free(struct riscv_iommu_domain *domain, - unsigned long pte, struct list_head *freelist) + unsigned long pte, + struct iommu_pages_list *freelist) { unsigned long *ptr; int i; @@ -1105,9 +1105,9 @@ static void riscv_iommu_pte_free(struct riscv_iommu_domain *domain, } if (freelist) - list_add_tail(&virt_to_page(ptr)->lru, freelist); + iommu_pages_list_add(freelist, ptr); else - iommu_free_page(ptr); + iommu_free_pages(ptr); } static unsigned long *riscv_iommu_pte_alloc(struct riscv_iommu_domain *domain, @@ -1144,13 +1144,14 @@ pte_retry: * page table. This might race with other mappings, retry. */ if (_io_pte_none(pte)) { - addr = iommu_alloc_page_node(domain->numa_node, gfp); + addr = iommu_alloc_pages_node_sz(domain->numa_node, gfp, + SZ_4K); if (!addr) return NULL; old = pte; pte = _io_pte_entry(virt_to_pfn(addr), _PAGE_TABLE); if (cmpxchg_relaxed(ptr, old, pte) != old) { - iommu_free_page(addr); + iommu_free_pages(addr); goto pte_retry; } } @@ -1194,7 +1195,7 @@ static int riscv_iommu_map_pages(struct iommu_domain *iommu_domain, unsigned long *ptr; unsigned long pte, old, pte_prot; int rc = 0; - LIST_HEAD(freelist); + struct iommu_pages_list freelist = IOMMU_PAGES_LIST_INIT(freelist); if (!(prot & IOMMU_WRITE)) pte_prot = _PAGE_BASE | _PAGE_READ; @@ -1225,7 +1226,7 @@ static int riscv_iommu_map_pages(struct iommu_domain *iommu_domain, *mapped = size; - if (!list_empty(&freelist)) { + if (!iommu_pages_list_empty(&freelist)) { /* * In 1.0 spec version, the smallest scope we can use to * invalidate all levels of page table (i.e. 
leaf and non-leaf) @@ -1385,8 +1386,8 @@ static struct iommu_domain *riscv_iommu_alloc_paging_domain(struct device *dev) domain->numa_node = dev_to_node(iommu->dev); domain->amo_enabled = !!(iommu->caps & RISCV_IOMMU_CAPABILITIES_AMO_HWAD); domain->pgd_mode = pgd_mode; - domain->pgd_root = iommu_alloc_page_node(domain->numa_node, - GFP_KERNEL_ACCOUNT); + domain->pgd_root = iommu_alloc_pages_node_sz(domain->numa_node, + GFP_KERNEL_ACCOUNT, SZ_4K); if (!domain->pgd_root) { kfree(domain); return ERR_PTR(-ENOMEM); @@ -1395,7 +1396,7 @@ static struct iommu_domain *riscv_iommu_alloc_paging_domain(struct device *dev) domain->pscid = ida_alloc_range(&riscv_iommu_pscids, 1, RISCV_IOMMU_MAX_PSCID, GFP_KERNEL); if (domain->pscid < 0) { - iommu_free_page(domain->pgd_root); + iommu_free_pages(domain->pgd_root); kfree(domain); return ERR_PTR(-ENOMEM); } diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c index af4cc91b2bbf..22f74ba33a0e 100644 --- a/drivers/iommu/rockchip-iommu.c +++ b/drivers/iommu/rockchip-iommu.c @@ -730,14 +730,15 @@ static u32 *rk_dte_get_page_table(struct rk_iommu_domain *rk_domain, if (rk_dte_is_pt_valid(dte)) goto done; - page_table = iommu_alloc_page(GFP_ATOMIC | rk_ops->gfp_flags); + page_table = iommu_alloc_pages_sz(GFP_ATOMIC | rk_ops->gfp_flags, + SPAGE_SIZE); if (!page_table) return ERR_PTR(-ENOMEM); pt_dma = dma_map_single(rk_domain->dma_dev, page_table, SPAGE_SIZE, DMA_TO_DEVICE); if (dma_mapping_error(rk_domain->dma_dev, pt_dma)) { dev_err(rk_domain->dma_dev, "DMA mapping error while allocating page table\n"); - iommu_free_page(page_table); + iommu_free_pages(page_table); return ERR_PTR(-ENOMEM); } @@ -1062,7 +1063,8 @@ static struct iommu_domain *rk_iommu_domain_alloc_paging(struct device *dev) * Each level1 (dt) and level2 (pt) table has 1024 4-byte entries. * Allocate one 4 KiB page for each table. 
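/*
 * Sketch of the lock-free table-install pattern used by the RISC-V walker
 * above: allocate a zeroed 4 KiB table, try to publish it with a cmpxchg on
 * the non-leaf slot, and free it again if another CPU won the race.
 * EXAMPLE_PTE_VALID stands in for the driver's real PTE encoding and is an
 * assumption of this sketch.
 */
#include "iommu-pages.h"
#include <linux/atomic.h>
#include <linux/io.h>
#include <linux/sizes.h>

#define EXAMPLE_PTE_VALID	0x1UL	/* hypothetical "valid" bit */

static unsigned long example_install_table(unsigned long *slot, int nid,
					    gfp_t gfp)
{
	unsigned long old, new;
	void *table;

	table = iommu_alloc_pages_node_sz(nid, gfp, SZ_4K);
	if (!table)
		return 0;

	new = (unsigned long)virt_to_phys(table) | EXAMPLE_PTE_VALID;
	old = cmpxchg_relaxed(slot, 0UL, new);
	if (old != 0UL) {
		/* Lost the race: another CPU already installed a table. */
		iommu_free_pages(table);
		return old;
	}
	return new;
}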
*/ - rk_domain->dt = iommu_alloc_page(GFP_KERNEL | rk_ops->gfp_flags); + rk_domain->dt = iommu_alloc_pages_sz(GFP_KERNEL | rk_ops->gfp_flags, + SPAGE_SIZE); if (!rk_domain->dt) goto err_free_domain; @@ -1086,7 +1088,7 @@ static struct iommu_domain *rk_iommu_domain_alloc_paging(struct device *dev) return &rk_domain->domain; err_free_dt: - iommu_free_page(rk_domain->dt); + iommu_free_pages(rk_domain->dt); err_free_domain: kfree(rk_domain); @@ -1107,13 +1109,13 @@ static void rk_iommu_domain_free(struct iommu_domain *domain) u32 *page_table = phys_to_virt(pt_phys); dma_unmap_single(rk_domain->dma_dev, pt_phys, SPAGE_SIZE, DMA_TO_DEVICE); - iommu_free_page(page_table); + iommu_free_pages(page_table); } } dma_unmap_single(rk_domain->dma_dev, rk_domain->dt_dma, SPAGE_SIZE, DMA_TO_DEVICE); - iommu_free_page(rk_domain->dt); + iommu_free_pages(rk_domain->dt); kfree(rk_domain); } diff --git a/drivers/iommu/s390-iommu.c b/drivers/iommu/s390-iommu.c index e1c76e0f9c2b..433b59f43530 100644 --- a/drivers/iommu/s390-iommu.c +++ b/drivers/iommu/s390-iommu.c @@ -31,10 +31,21 @@ struct s390_domain { unsigned long *dma_table; spinlock_t list_lock; struct rcu_head rcu; + u8 origin_type; }; static struct iommu_domain blocking_domain; +static inline unsigned int calc_rfx(dma_addr_t ptr) +{ + return ((unsigned long)ptr >> ZPCI_RF_SHIFT) & ZPCI_INDEX_MASK; +} + +static inline unsigned int calc_rsx(dma_addr_t ptr) +{ + return ((unsigned long)ptr >> ZPCI_RS_SHIFT) & ZPCI_INDEX_MASK; +} + static inline unsigned int calc_rtx(dma_addr_t ptr) { return ((unsigned long)ptr >> ZPCI_RT_SHIFT) & ZPCI_INDEX_MASK; @@ -56,6 +67,20 @@ static inline void set_pt_pfaa(unsigned long *entry, phys_addr_t pfaa) *entry |= (pfaa & ZPCI_PTE_ADDR_MASK); } +static inline void set_rf_rso(unsigned long *entry, phys_addr_t rso) +{ + *entry &= ZPCI_RTE_FLAG_MASK; + *entry |= (rso & ZPCI_RTE_ADDR_MASK); + *entry |= ZPCI_TABLE_TYPE_RFX; +} + +static inline void set_rs_rto(unsigned long *entry, phys_addr_t rto) +{ + *entry &= ZPCI_RTE_FLAG_MASK; + *entry |= (rto & ZPCI_RTE_ADDR_MASK); + *entry |= ZPCI_TABLE_TYPE_RSX; +} + static inline void set_rt_sto(unsigned long *entry, phys_addr_t sto) { *entry &= ZPCI_RTE_FLAG_MASK; @@ -70,6 +95,22 @@ static inline void set_st_pto(unsigned long *entry, phys_addr_t pto) *entry |= ZPCI_TABLE_TYPE_SX; } +static inline void validate_rf_entry(unsigned long *entry) +{ + *entry &= ~ZPCI_TABLE_VALID_MASK; + *entry &= ~ZPCI_TABLE_OFFSET_MASK; + *entry |= ZPCI_TABLE_VALID; + *entry |= ZPCI_TABLE_LEN_RFX; +} + +static inline void validate_rs_entry(unsigned long *entry) +{ + *entry &= ~ZPCI_TABLE_VALID_MASK; + *entry &= ~ZPCI_TABLE_OFFSET_MASK; + *entry |= ZPCI_TABLE_VALID; + *entry |= ZPCI_TABLE_LEN_RSX; +} + static inline void validate_rt_entry(unsigned long *entry) { *entry &= ~ZPCI_TABLE_VALID_MASK; @@ -120,6 +161,22 @@ static inline int pt_entry_isvalid(unsigned long entry) return (entry & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID; } +static inline unsigned long *get_rf_rso(unsigned long entry) +{ + if ((entry & ZPCI_TABLE_TYPE_MASK) == ZPCI_TABLE_TYPE_RFX) + return phys_to_virt(entry & ZPCI_RTE_ADDR_MASK); + else + return NULL; +} + +static inline unsigned long *get_rs_rto(unsigned long entry) +{ + if ((entry & ZPCI_TABLE_TYPE_MASK) == ZPCI_TABLE_TYPE_RSX) + return phys_to_virt(entry & ZPCI_RTE_ADDR_MASK); + else + return NULL; +} + static inline unsigned long *get_rt_sto(unsigned long entry) { if ((entry & ZPCI_TABLE_TYPE_MASK) == ZPCI_TABLE_TYPE_RTX) @@ -191,18 +248,59 @@ static void dma_free_seg_table(unsigned 
long entry) dma_free_cpu_table(sto); } -static void dma_cleanup_tables(unsigned long *table) +static void dma_free_rt_table(unsigned long entry) { + unsigned long *rto = get_rs_rto(entry); int rtx; - if (!table) + for (rtx = 0; rtx < ZPCI_TABLE_ENTRIES; rtx++) + if (reg_entry_isvalid(rto[rtx])) + dma_free_seg_table(rto[rtx]); + + dma_free_cpu_table(rto); +} + +static void dma_free_rs_table(unsigned long entry) +{ + unsigned long *rso = get_rf_rso(entry); + int rsx; + + for (rsx = 0; rsx < ZPCI_TABLE_ENTRIES; rsx++) + if (reg_entry_isvalid(rso[rsx])) + dma_free_rt_table(rso[rsx]); + + dma_free_cpu_table(rso); +} + +static void dma_cleanup_tables(struct s390_domain *domain) +{ + int rtx, rsx, rfx; + + if (!domain->dma_table) return; - for (rtx = 0; rtx < ZPCI_TABLE_ENTRIES; rtx++) - if (reg_entry_isvalid(table[rtx])) - dma_free_seg_table(table[rtx]); + switch (domain->origin_type) { + case ZPCI_TABLE_TYPE_RFX: + for (rfx = 0; rfx < ZPCI_TABLE_ENTRIES; rfx++) + if (reg_entry_isvalid(domain->dma_table[rfx])) + dma_free_rs_table(domain->dma_table[rfx]); + break; + case ZPCI_TABLE_TYPE_RSX: + for (rsx = 0; rsx < ZPCI_TABLE_ENTRIES; rsx++) + if (reg_entry_isvalid(domain->dma_table[rsx])) + dma_free_rt_table(domain->dma_table[rsx]); + break; + case ZPCI_TABLE_TYPE_RTX: + for (rtx = 0; rtx < ZPCI_TABLE_ENTRIES; rtx++) + if (reg_entry_isvalid(domain->dma_table[rtx])) + dma_free_seg_table(domain->dma_table[rtx]); + break; + default: + WARN_ONCE(1, "Invalid IOMMU table (%x)\n", domain->origin_type); + return; + } - dma_free_cpu_table(table); + dma_free_cpu_table(domain->dma_table); } static unsigned long *dma_alloc_page_table(gfp_t gfp) @@ -218,6 +316,70 @@ static unsigned long *dma_alloc_page_table(gfp_t gfp) return table; } +static unsigned long *dma_walk_rs_table(unsigned long *rso, + dma_addr_t dma_addr, gfp_t gfp) +{ + unsigned int rsx = calc_rsx(dma_addr); + unsigned long old_rse, rse; + unsigned long *rsep, *rto; + + rsep = &rso[rsx]; + rse = READ_ONCE(*rsep); + if (reg_entry_isvalid(rse)) { + rto = get_rs_rto(rse); + } else { + rto = dma_alloc_cpu_table(gfp); + if (!rto) + return NULL; + + set_rs_rto(&rse, virt_to_phys(rto)); + validate_rs_entry(&rse); + entry_clr_protected(&rse); + + old_rse = cmpxchg(rsep, ZPCI_TABLE_INVALID, rse); + if (old_rse != ZPCI_TABLE_INVALID) { + /* Somone else was faster, use theirs */ + dma_free_cpu_table(rto); + rto = get_rs_rto(old_rse); + } + } + return rto; +} + +static unsigned long *dma_walk_rf_table(unsigned long *rfo, + dma_addr_t dma_addr, gfp_t gfp) +{ + unsigned int rfx = calc_rfx(dma_addr); + unsigned long old_rfe, rfe; + unsigned long *rfep, *rso; + + rfep = &rfo[rfx]; + rfe = READ_ONCE(*rfep); + if (reg_entry_isvalid(rfe)) { + rso = get_rf_rso(rfe); + } else { + rso = dma_alloc_cpu_table(gfp); + if (!rso) + return NULL; + + set_rf_rso(&rfe, virt_to_phys(rso)); + validate_rf_entry(&rfe); + entry_clr_protected(&rfe); + + old_rfe = cmpxchg(rfep, ZPCI_TABLE_INVALID, rfe); + if (old_rfe != ZPCI_TABLE_INVALID) { + /* Somone else was faster, use theirs */ + dma_free_cpu_table(rso); + rso = get_rf_rso(old_rfe); + } + } + + if (!rso) + return NULL; + + return dma_walk_rs_table(rso, dma_addr, gfp); +} + static unsigned long *dma_get_seg_table_origin(unsigned long *rtep, gfp_t gfp) { unsigned long old_rte, rte; @@ -271,11 +433,31 @@ static unsigned long *dma_get_page_table_origin(unsigned long *step, gfp_t gfp) return pto; } -static unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr, gfp_t gfp) +static unsigned long 
*dma_walk_region_tables(struct s390_domain *domain, + dma_addr_t dma_addr, gfp_t gfp) +{ + switch (domain->origin_type) { + case ZPCI_TABLE_TYPE_RFX: + return dma_walk_rf_table(domain->dma_table, dma_addr, gfp); + case ZPCI_TABLE_TYPE_RSX: + return dma_walk_rs_table(domain->dma_table, dma_addr, gfp); + case ZPCI_TABLE_TYPE_RTX: + return domain->dma_table; + default: + return NULL; + } +} + +static unsigned long *dma_walk_cpu_trans(struct s390_domain *domain, + dma_addr_t dma_addr, gfp_t gfp) { - unsigned long *sto, *pto; + unsigned long *rto, *sto, *pto; unsigned int rtx, sx, px; + rto = dma_walk_region_tables(domain, dma_addr, gfp); + if (!rto) + return NULL; + rtx = calc_rtx(dma_addr); sto = dma_get_seg_table_origin(&rto[rtx], gfp); if (!sto) @@ -329,9 +511,25 @@ static bool s390_iommu_capable(struct device *dev, enum iommu_cap cap) } } +static inline u64 max_tbl_size(struct s390_domain *domain) +{ + switch (domain->origin_type) { + case ZPCI_TABLE_TYPE_RTX: + return ZPCI_TABLE_SIZE_RT - 1; + case ZPCI_TABLE_TYPE_RSX: + return ZPCI_TABLE_SIZE_RS - 1; + case ZPCI_TABLE_TYPE_RFX: + return U64_MAX; + default: + return 0; + } +} + static struct iommu_domain *s390_domain_alloc_paging(struct device *dev) { + struct zpci_dev *zdev = to_zpci_dev(dev); struct s390_domain *s390_domain; + u64 aperture_size; s390_domain = kzalloc(sizeof(*s390_domain), GFP_KERNEL); if (!s390_domain) @@ -342,9 +540,26 @@ static struct iommu_domain *s390_domain_alloc_paging(struct device *dev) kfree(s390_domain); return NULL; } + + aperture_size = min(s390_iommu_aperture, + zdev->end_dma - zdev->start_dma + 1); + if (aperture_size <= (ZPCI_TABLE_SIZE_RT - zdev->start_dma)) { + s390_domain->origin_type = ZPCI_TABLE_TYPE_RTX; + } else if (aperture_size <= (ZPCI_TABLE_SIZE_RS - zdev->start_dma) && + (zdev->dtsm & ZPCI_IOTA_DT_RS)) { + s390_domain->origin_type = ZPCI_TABLE_TYPE_RSX; + } else if (zdev->dtsm & ZPCI_IOTA_DT_RF) { + s390_domain->origin_type = ZPCI_TABLE_TYPE_RFX; + } else { + /* Assume RTX available */ + s390_domain->origin_type = ZPCI_TABLE_TYPE_RTX; + aperture_size = ZPCI_TABLE_SIZE_RT - zdev->start_dma; + } + zdev->end_dma = zdev->start_dma + aperture_size - 1; + s390_domain->domain.geometry.force_aperture = true; s390_domain->domain.geometry.aperture_start = 0; - s390_domain->domain.geometry.aperture_end = ZPCI_TABLE_SIZE_RT - 1; + s390_domain->domain.geometry.aperture_end = max_tbl_size(s390_domain); spin_lock_init(&s390_domain->list_lock); INIT_LIST_HEAD_RCU(&s390_domain->devices); @@ -356,7 +571,7 @@ static void s390_iommu_rcu_free_domain(struct rcu_head *head) { struct s390_domain *s390_domain = container_of(head, struct s390_domain, rcu); - dma_cleanup_tables(s390_domain->dma_table); + dma_cleanup_tables(s390_domain); kfree(s390_domain); } @@ -381,6 +596,21 @@ static void zdev_s390_domain_update(struct zpci_dev *zdev, spin_unlock_irqrestore(&zdev->dom_lock, flags); } +static u64 get_iota_region_flag(struct s390_domain *domain) +{ + switch (domain->origin_type) { + case ZPCI_TABLE_TYPE_RTX: + return ZPCI_IOTA_RTTO_FLAG; + case ZPCI_TABLE_TYPE_RSX: + return ZPCI_IOTA_RSTO_FLAG; + case ZPCI_TABLE_TYPE_RFX: + return ZPCI_IOTA_RFTO_FLAG; + default: + WARN_ONCE(1, "Invalid IOMMU table (%x)\n", domain->origin_type); + return 0; + } +} + static int s390_iommu_domain_reg_ioat(struct zpci_dev *zdev, struct iommu_domain *domain, u8 *status) { @@ -399,7 +629,7 @@ static int s390_iommu_domain_reg_ioat(struct zpci_dev *zdev, default: s390_domain = to_s390_domain(domain); iota = virt_to_phys(s390_domain->dma_table) 
| - ZPCI_IOTA_RTTO_FLAG; + get_iota_region_flag(s390_domain); rc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma, iota, status); } @@ -482,6 +712,8 @@ static void s390_iommu_get_resv_regions(struct device *dev, { struct zpci_dev *zdev = to_zpci_dev(dev); struct iommu_resv_region *region; + u64 max_size, end_resv; + unsigned long flags; if (zdev->start_dma) { region = iommu_alloc_resv_region(0, zdev->start_dma, 0, @@ -491,10 +723,21 @@ static void s390_iommu_get_resv_regions(struct device *dev, list_add_tail(®ion->list, list); } - if (zdev->end_dma < ZPCI_TABLE_SIZE_RT - 1) { - region = iommu_alloc_resv_region(zdev->end_dma + 1, - ZPCI_TABLE_SIZE_RT - zdev->end_dma - 1, - 0, IOMMU_RESV_RESERVED, GFP_KERNEL); + spin_lock_irqsave(&zdev->dom_lock, flags); + if (zdev->s390_domain->type == IOMMU_DOMAIN_BLOCKED || + zdev->s390_domain->type == IOMMU_DOMAIN_IDENTITY) { + spin_unlock_irqrestore(&zdev->dom_lock, flags); + return; + } + + max_size = max_tbl_size(to_s390_domain(zdev->s390_domain)); + spin_unlock_irqrestore(&zdev->dom_lock, flags); + + if (zdev->end_dma < max_size) { + end_resv = max_size - zdev->end_dma; + region = iommu_alloc_resv_region(zdev->end_dma + 1, end_resv, + 0, IOMMU_RESV_RESERVED, + GFP_KERNEL); if (!region) return; list_add_tail(®ion->list, list); @@ -510,13 +753,9 @@ static struct iommu_device *s390_iommu_probe_device(struct device *dev) zdev = to_zpci_dev(dev); - if (zdev->start_dma > zdev->end_dma || - zdev->start_dma > ZPCI_TABLE_SIZE_RT - 1) + if (zdev->start_dma > zdev->end_dma) return ERR_PTR(-EINVAL); - if (zdev->end_dma > ZPCI_TABLE_SIZE_RT - 1) - zdev->end_dma = ZPCI_TABLE_SIZE_RT - 1; - if (zdev->tlb_refresh) dev->iommu->shadow_on_flush = 1; @@ -606,8 +845,7 @@ static int s390_iommu_validate_trans(struct s390_domain *s390_domain, int rc; for (i = 0; i < nr_pages; i++) { - entry = dma_walk_cpu_trans(s390_domain->dma_table, dma_addr, - gfp); + entry = dma_walk_cpu_trans(s390_domain, dma_addr, gfp); if (unlikely(!entry)) { rc = -ENOMEM; goto undo_cpu_trans; @@ -622,8 +860,7 @@ static int s390_iommu_validate_trans(struct s390_domain *s390_domain, undo_cpu_trans: while (i-- > 0) { dma_addr -= PAGE_SIZE; - entry = dma_walk_cpu_trans(s390_domain->dma_table, - dma_addr, gfp); + entry = dma_walk_cpu_trans(s390_domain, dma_addr, gfp); if (!entry) break; dma_update_cpu_trans(entry, 0, ZPCI_PTE_INVALID); @@ -640,8 +877,7 @@ static int s390_iommu_invalidate_trans(struct s390_domain *s390_domain, int rc = 0; for (i = 0; i < nr_pages; i++) { - entry = dma_walk_cpu_trans(s390_domain->dma_table, dma_addr, - GFP_ATOMIC); + entry = dma_walk_cpu_trans(s390_domain, dma_addr, GFP_ATOMIC); if (unlikely(!entry)) { rc = -EINVAL; break; @@ -685,6 +921,51 @@ static int s390_iommu_map_pages(struct iommu_domain *domain, return rc; } +static unsigned long *get_rso_from_iova(struct s390_domain *domain, + dma_addr_t iova) +{ + unsigned long *rfo; + unsigned long rfe; + unsigned int rfx; + + switch (domain->origin_type) { + case ZPCI_TABLE_TYPE_RFX: + rfo = domain->dma_table; + rfx = calc_rfx(iova); + rfe = READ_ONCE(rfo[rfx]); + if (!reg_entry_isvalid(rfe)) + return NULL; + return get_rf_rso(rfe); + case ZPCI_TABLE_TYPE_RSX: + return domain->dma_table; + default: + return NULL; + } +} + +static unsigned long *get_rto_from_iova(struct s390_domain *domain, + dma_addr_t iova) +{ + unsigned long *rso; + unsigned long rse; + unsigned int rsx; + + switch (domain->origin_type) { + case ZPCI_TABLE_TYPE_RFX: + case ZPCI_TABLE_TYPE_RSX: + rso = get_rso_from_iova(domain, iova); + rsx = 
calc_rsx(iova); + rse = READ_ONCE(rso[rsx]); + if (!reg_entry_isvalid(rse)) + return NULL; + return get_rs_rto(rse); + case ZPCI_TABLE_TYPE_RTX: + return domain->dma_table; + default: + return NULL; + } +} + static phys_addr_t s390_iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova) { @@ -698,10 +979,13 @@ static phys_addr_t s390_iommu_iova_to_phys(struct iommu_domain *domain, iova > domain->geometry.aperture_end) return 0; + rto = get_rto_from_iova(s390_domain, iova); + if (!rto) + return 0; + rtx = calc_rtx(iova); sx = calc_sx(iova); px = calc_px(iova); - rto = s390_domain->dma_table; rte = READ_ONCE(rto[rtx]); if (reg_entry_isvalid(rte)) { @@ -756,7 +1040,6 @@ struct zpci_iommu_ctrs *zpci_get_iommu_ctrs(struct zpci_dev *zdev) int zpci_init_iommu(struct zpci_dev *zdev) { - u64 aperture_size; int rc = 0; rc = iommu_device_sysfs_add(&zdev->iommu_dev, NULL, NULL, @@ -774,12 +1057,6 @@ int zpci_init_iommu(struct zpci_dev *zdev) if (rc) goto out_sysfs; - zdev->start_dma = PAGE_ALIGN(zdev->start_dma); - aperture_size = min3(s390_iommu_aperture, - ZPCI_TABLE_SIZE_RT - zdev->start_dma, - zdev->end_dma - zdev->start_dma + 1); - zdev->end_dma = zdev->start_dma + aperture_size - 1; - return 0; out_sysfs: diff --git a/drivers/iommu/sun50i-iommu.c b/drivers/iommu/sun50i-iommu.c index 8d8f11854676..76c9620af4bb 100644 --- a/drivers/iommu/sun50i-iommu.c +++ b/drivers/iommu/sun50i-iommu.c @@ -690,8 +690,8 @@ sun50i_iommu_domain_alloc_paging(struct device *dev) if (!sun50i_domain) return NULL; - sun50i_domain->dt = iommu_alloc_pages(GFP_KERNEL | GFP_DMA32, - get_order(DT_SIZE)); + sun50i_domain->dt = + iommu_alloc_pages_sz(GFP_KERNEL | GFP_DMA32, DT_SIZE); if (!sun50i_domain->dt) goto err_free_domain; @@ -713,7 +713,7 @@ static void sun50i_iommu_domain_free(struct iommu_domain *domain) { struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain); - iommu_free_pages(sun50i_domain->dt, get_order(DT_SIZE)); + iommu_free_pages(sun50i_domain->dt); sun50i_domain->dt = NULL; kfree(sun50i_domain); diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c index 69d353e1df84..61897d50162d 100644 --- a/drivers/iommu/tegra-smmu.c +++ b/drivers/iommu/tegra-smmu.c @@ -51,14 +51,17 @@ struct tegra_smmu { struct iommu_device iommu; /* IOMMU Core code handle */ }; +struct tegra_pd; +struct tegra_pt; + struct tegra_smmu_as { struct iommu_domain domain; struct tegra_smmu *smmu; unsigned int use_count; spinlock_t lock; u32 *count; - struct page **pts; - struct page *pd; + struct tegra_pt **pts; + struct tegra_pd *pd; dma_addr_t pd_dma; unsigned id; u32 attr; @@ -155,6 +158,14 @@ static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset) #define SMMU_PDE_ATTR (SMMU_PDE_READABLE | SMMU_PDE_WRITABLE | \ SMMU_PDE_NONSECURE) +struct tegra_pd { + u32 val[SMMU_NUM_PDE]; +}; + +struct tegra_pt { + u32 val[SMMU_NUM_PTE]; +}; + static unsigned int iova_pd_index(unsigned long iova) { return (iova >> SMMU_PDE_SHIFT) & (SMMU_NUM_PDE - 1); @@ -284,7 +295,7 @@ static struct iommu_domain *tegra_smmu_domain_alloc_paging(struct device *dev) as->attr = SMMU_PD_READABLE | SMMU_PD_WRITABLE | SMMU_PD_NONSECURE; - as->pd = __iommu_alloc_pages(GFP_KERNEL | __GFP_DMA, 0); + as->pd = iommu_alloc_pages_sz(GFP_KERNEL | __GFP_DMA, SMMU_SIZE_PD); if (!as->pd) { kfree(as); return NULL; @@ -292,7 +303,7 @@ static struct iommu_domain *tegra_smmu_domain_alloc_paging(struct device *dev) as->count = kcalloc(SMMU_NUM_PDE, sizeof(u32), GFP_KERNEL); if (!as->count) { - __iommu_free_pages(as->pd, 0); + 
iommu_free_pages(as->pd); kfree(as); return NULL; } @@ -300,7 +311,7 @@ static struct iommu_domain *tegra_smmu_domain_alloc_paging(struct device *dev) as->pts = kcalloc(SMMU_NUM_PDE, sizeof(*as->pts), GFP_KERNEL); if (!as->pts) { kfree(as->count); - __iommu_free_pages(as->pd, 0); + iommu_free_pages(as->pd); kfree(as); return NULL; } @@ -417,8 +428,8 @@ static int tegra_smmu_as_prepare(struct tegra_smmu *smmu, goto unlock; } - as->pd_dma = dma_map_page(smmu->dev, as->pd, 0, SMMU_SIZE_PD, - DMA_TO_DEVICE); + as->pd_dma = + dma_map_single(smmu->dev, as->pd, SMMU_SIZE_PD, DMA_TO_DEVICE); if (dma_mapping_error(smmu->dev, as->pd_dma)) { err = -ENOMEM; goto unlock; @@ -450,7 +461,7 @@ static int tegra_smmu_as_prepare(struct tegra_smmu *smmu, return 0; err_unmap: - dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE); + dma_unmap_single(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE); unlock: mutex_unlock(&smmu->lock); @@ -469,7 +480,7 @@ static void tegra_smmu_as_unprepare(struct tegra_smmu *smmu, tegra_smmu_free_asid(smmu, as->id); - dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE); + dma_unmap_single(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE); as->smmu = NULL; @@ -548,11 +559,11 @@ static void tegra_smmu_set_pde(struct tegra_smmu_as *as, unsigned long iova, { unsigned int pd_index = iova_pd_index(iova); struct tegra_smmu *smmu = as->smmu; - u32 *pd = page_address(as->pd); + struct tegra_pd *pd = as->pd; unsigned long offset = pd_index * sizeof(*pd); /* Set the page directory entry first */ - pd[pd_index] = value; + pd->val[pd_index] = value; /* The flush the page directory entry from caches */ dma_sync_single_range_for_device(smmu->dev, as->pd_dma, offset, @@ -564,11 +575,9 @@ static void tegra_smmu_set_pde(struct tegra_smmu_as *as, unsigned long iova, smmu_flush(smmu); } -static u32 *tegra_smmu_pte_offset(struct page *pt_page, unsigned long iova) +static u32 *tegra_smmu_pte_offset(struct tegra_pt *pt, unsigned long iova) { - u32 *pt = page_address(pt_page); - - return pt + iova_pt_index(iova); + return &pt->val[iova_pt_index(iova)]; } static u32 *tegra_smmu_pte_lookup(struct tegra_smmu_as *as, unsigned long iova, @@ -576,21 +585,19 @@ static u32 *tegra_smmu_pte_lookup(struct tegra_smmu_as *as, unsigned long iova, { unsigned int pd_index = iova_pd_index(iova); struct tegra_smmu *smmu = as->smmu; - struct page *pt_page; - u32 *pd; + struct tegra_pt *pt; - pt_page = as->pts[pd_index]; - if (!pt_page) + pt = as->pts[pd_index]; + if (!pt) return NULL; - pd = page_address(as->pd); - *dmap = smmu_pde_to_dma(smmu, pd[pd_index]); + *dmap = smmu_pde_to_dma(smmu, as->pd->val[pd_index]); - return tegra_smmu_pte_offset(pt_page, iova); + return tegra_smmu_pte_offset(pt, iova); } static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova, - dma_addr_t *dmap, struct page *page) + dma_addr_t *dmap, struct tegra_pt *pt) { unsigned int pde = iova_pd_index(iova); struct tegra_smmu *smmu = as->smmu; @@ -598,30 +605,28 @@ static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova, if (!as->pts[pde]) { dma_addr_t dma; - dma = dma_map_page(smmu->dev, page, 0, SMMU_SIZE_PT, - DMA_TO_DEVICE); + dma = dma_map_single(smmu->dev, pt, SMMU_SIZE_PT, + DMA_TO_DEVICE); if (dma_mapping_error(smmu->dev, dma)) { - __iommu_free_pages(page, 0); + iommu_free_pages(pt); return NULL; } if (!smmu_dma_addr_valid(smmu, dma)) { - dma_unmap_page(smmu->dev, dma, SMMU_SIZE_PT, - DMA_TO_DEVICE); - __iommu_free_pages(page, 0); + dma_unmap_single(smmu->dev, dma, SMMU_SIZE_PT, + 
DMA_TO_DEVICE); + iommu_free_pages(pt); return NULL; } - as->pts[pde] = page; + as->pts[pde] = pt; tegra_smmu_set_pde(as, iova, SMMU_MK_PDE(dma, SMMU_PDE_ATTR | SMMU_PDE_NEXT)); *dmap = dma; } else { - u32 *pd = page_address(as->pd); - - *dmap = smmu_pde_to_dma(smmu, pd[pde]); + *dmap = smmu_pde_to_dma(smmu, as->pd->val[pde]); } return tegra_smmu_pte_offset(as->pts[pde], iova); @@ -637,7 +642,7 @@ static void tegra_smmu_pte_get_use(struct tegra_smmu_as *as, unsigned long iova) static void tegra_smmu_pte_put_use(struct tegra_smmu_as *as, unsigned long iova) { unsigned int pde = iova_pd_index(iova); - struct page *page = as->pts[pde]; + struct tegra_pt *pt = as->pts[pde]; /* * When no entries in this page table are used anymore, return the @@ -645,13 +650,13 @@ static void tegra_smmu_pte_put_use(struct tegra_smmu_as *as, unsigned long iova) */ if (--as->count[pde] == 0) { struct tegra_smmu *smmu = as->smmu; - u32 *pd = page_address(as->pd); - dma_addr_t pte_dma = smmu_pde_to_dma(smmu, pd[pde]); + dma_addr_t pte_dma = smmu_pde_to_dma(smmu, as->pd->val[pde]); tegra_smmu_set_pde(as, iova, 0); - dma_unmap_page(smmu->dev, pte_dma, SMMU_SIZE_PT, DMA_TO_DEVICE); - __iommu_free_pages(page, 0); + dma_unmap_single(smmu->dev, pte_dma, SMMU_SIZE_PT, + DMA_TO_DEVICE); + iommu_free_pages(pt); as->pts[pde] = NULL; } } @@ -671,16 +676,16 @@ static void tegra_smmu_set_pte(struct tegra_smmu_as *as, unsigned long iova, smmu_flush(smmu); } -static struct page *as_get_pde_page(struct tegra_smmu_as *as, - unsigned long iova, gfp_t gfp, - unsigned long *flags) +static struct tegra_pt *as_get_pde_page(struct tegra_smmu_as *as, + unsigned long iova, gfp_t gfp, + unsigned long *flags) { unsigned int pde = iova_pd_index(iova); - struct page *page = as->pts[pde]; + struct tegra_pt *pt = as->pts[pde]; /* at first check whether allocation needs to be done at all */ - if (page) - return page; + if (pt) + return pt; /* * In order to prevent exhaustion of the atomic memory pool, we @@ -690,7 +695,7 @@ static struct page *as_get_pde_page(struct tegra_smmu_as *as, if (gfpflags_allow_blocking(gfp)) spin_unlock_irqrestore(&as->lock, *flags); - page = __iommu_alloc_pages(gfp | __GFP_DMA, 0); + pt = iommu_alloc_pages_sz(gfp | __GFP_DMA, SMMU_SIZE_PT); if (gfpflags_allow_blocking(gfp)) spin_lock_irqsave(&as->lock, *flags); @@ -701,13 +706,13 @@ static struct page *as_get_pde_page(struct tegra_smmu_as *as, * if allocation succeeded and the allocation failure isn't fatal. 
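/*
 * The helper above drops the address-space spinlock around a sleeping
 * allocation and re-checks the slot once the lock is re-taken. A compact
 * sketch of that idiom for a hypothetical slot protected by 'lock': the
 * loser of the race frees its freshly allocated page and uses the winner's.
 */
#include "iommu-pages.h"
#include <linux/gfp.h>
#include <linux/spinlock.h>

static void *example_get_table_locked(void **slotp, spinlock_t *lock,
				      unsigned long *flags, gfp_t gfp,
				      size_t size)
{
	void *new;

	if (*slotp)
		return *slotp;

	if (gfpflags_allow_blocking(gfp))
		spin_unlock_irqrestore(lock, *flags);

	new = iommu_alloc_pages_sz(gfp, size);

	if (gfpflags_allow_blocking(gfp))
		spin_lock_irqsave(lock, *flags);

	if (*slotp) {
		/* Someone else populated the slot while the lock was dropped. */
		if (new)
			iommu_free_pages(new);
		return *slotp;
	}
	*slotp = new;
	return new;
}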
*/ if (as->pts[pde]) { - if (page) - __iommu_free_pages(page, 0); + if (pt) + iommu_free_pages(pt); - page = as->pts[pde]; + pt = as->pts[pde]; } - return page; + return pt; } static int @@ -717,15 +722,15 @@ __tegra_smmu_map(struct iommu_domain *domain, unsigned long iova, { struct tegra_smmu_as *as = to_smmu_as(domain); dma_addr_t pte_dma; - struct page *page; + struct tegra_pt *pt; u32 pte_attrs; u32 *pte; - page = as_get_pde_page(as, iova, gfp, flags); - if (!page) + pt = as_get_pde_page(as, iova, gfp, flags); + if (!pt) return -ENOMEM; - pte = as_get_pte(as, iova, &pte_dma, page); + pte = as_get_pte(as, iova, &pte_dma, pt); if (!pte) return -ENOMEM; diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c index b85ce6310ddb..ecd41fb03e5a 100644 --- a/drivers/iommu/virtio-iommu.c +++ b/drivers/iommu/virtio-iommu.c @@ -48,6 +48,7 @@ struct viommu_dev { u64 pgsize_bitmap; u32 first_domain; u32 last_domain; + u32 identity_domain_id; /* Supported MAP flags */ u32 map_flags; u32 probe_size; @@ -62,7 +63,6 @@ struct viommu_mapping { struct viommu_domain { struct iommu_domain domain; struct viommu_dev *viommu; - struct mutex mutex; /* protects viommu pointer */ unsigned int id; u32 map_flags; @@ -70,7 +70,6 @@ struct viommu_domain { struct rb_root_cached mappings; unsigned long nr_endpoints; - bool bypass; }; struct viommu_endpoint { @@ -97,6 +96,8 @@ struct viommu_event { }; }; +static struct viommu_domain viommu_identity_domain; + #define to_viommu_domain(domain) \ container_of(domain, struct viommu_domain, domain) @@ -305,6 +306,22 @@ out_unlock: return ret; } +static int viommu_send_attach_req(struct viommu_dev *viommu, struct device *dev, + struct virtio_iommu_req_attach *req) +{ + int ret; + unsigned int i; + struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); + + for (i = 0; i < fwspec->num_ids; i++) { + req->endpoint = cpu_to_le32(fwspec->ids[i]); + ret = viommu_send_req_sync(viommu, req, sizeof(*req)); + if (ret) + return ret; + } + return 0; +} + /* * viommu_add_mapping - add a mapping to the internal tree * @@ -637,71 +654,45 @@ static void viommu_event_handler(struct virtqueue *vq) /* IOMMU API */ -static struct iommu_domain *viommu_domain_alloc(unsigned type) +static struct iommu_domain *viommu_domain_alloc_paging(struct device *dev) { + struct viommu_endpoint *vdev = dev_iommu_priv_get(dev); + struct viommu_dev *viommu = vdev->viommu; + unsigned long viommu_page_size; struct viommu_domain *vdomain; - - if (type != IOMMU_DOMAIN_UNMANAGED && - type != IOMMU_DOMAIN_DMA && - type != IOMMU_DOMAIN_IDENTITY) - return NULL; - - vdomain = kzalloc(sizeof(*vdomain), GFP_KERNEL); - if (!vdomain) - return NULL; - - mutex_init(&vdomain->mutex); - spin_lock_init(&vdomain->mappings_lock); - vdomain->mappings = RB_ROOT_CACHED; - - return &vdomain->domain; -} - -static int viommu_domain_finalise(struct viommu_endpoint *vdev, - struct iommu_domain *domain) -{ int ret; - unsigned long viommu_page_size; - struct viommu_dev *viommu = vdev->viommu; - struct viommu_domain *vdomain = to_viommu_domain(domain); viommu_page_size = 1UL << __ffs(viommu->pgsize_bitmap); if (viommu_page_size > PAGE_SIZE) { dev_err(vdev->dev, "granule 0x%lx larger than system page size 0x%lx\n", viommu_page_size, PAGE_SIZE); - return -ENODEV; + return ERR_PTR(-ENODEV); } - ret = ida_alloc_range(&viommu->domain_ids, viommu->first_domain, - viommu->last_domain, GFP_KERNEL); - if (ret < 0) - return ret; + vdomain = kzalloc(sizeof(*vdomain), GFP_KERNEL); + if (!vdomain) + return ERR_PTR(-ENOMEM); - vdomain->id 
= (unsigned int)ret; + spin_lock_init(&vdomain->mappings_lock); + vdomain->mappings = RB_ROOT_CACHED; - domain->pgsize_bitmap = viommu->pgsize_bitmap; - domain->geometry = viommu->geometry; + ret = ida_alloc_range(&viommu->domain_ids, viommu->first_domain, + viommu->last_domain, GFP_KERNEL); + if (ret < 0) { + kfree(vdomain); + return ERR_PTR(ret); + } - vdomain->map_flags = viommu->map_flags; - vdomain->viommu = viommu; + vdomain->id = (unsigned int)ret; - if (domain->type == IOMMU_DOMAIN_IDENTITY) { - if (virtio_has_feature(viommu->vdev, - VIRTIO_IOMMU_F_BYPASS_CONFIG)) { - vdomain->bypass = true; - return 0; - } + vdomain->domain.pgsize_bitmap = viommu->pgsize_bitmap; + vdomain->domain.geometry = viommu->geometry; - ret = viommu_domain_map_identity(vdev, vdomain); - if (ret) { - ida_free(&viommu->domain_ids, vdomain->id); - vdomain->viommu = NULL; - return ret; - } - } + vdomain->map_flags = viommu->map_flags; + vdomain->viommu = viommu; - return 0; + return &vdomain->domain; } static void viommu_domain_free(struct iommu_domain *domain) @@ -717,29 +708,37 @@ static void viommu_domain_free(struct iommu_domain *domain) kfree(vdomain); } +static struct iommu_domain *viommu_domain_alloc_identity(struct device *dev) +{ + struct viommu_endpoint *vdev = dev_iommu_priv_get(dev); + struct iommu_domain *domain; + int ret; + + if (virtio_has_feature(vdev->viommu->vdev, + VIRTIO_IOMMU_F_BYPASS_CONFIG)) + return &viommu_identity_domain.domain; + + domain = viommu_domain_alloc_paging(dev); + if (IS_ERR(domain)) + return domain; + + ret = viommu_domain_map_identity(vdev, to_viommu_domain(domain)); + if (ret) { + viommu_domain_free(domain); + return ERR_PTR(ret); + } + return domain; +} + static int viommu_attach_dev(struct iommu_domain *domain, struct device *dev) { - int i; int ret = 0; struct virtio_iommu_req_attach req; - struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); struct viommu_endpoint *vdev = dev_iommu_priv_get(dev); struct viommu_domain *vdomain = to_viommu_domain(domain); - mutex_lock(&vdomain->mutex); - if (!vdomain->viommu) { - /* - * Properly initialize the domain now that we know which viommu - * owns it. 
- */ - ret = viommu_domain_finalise(vdev, domain); - } else if (vdomain->viommu != vdev->viommu) { - ret = -EINVAL; - } - mutex_unlock(&vdomain->mutex); - - if (ret) - return ret; + if (vdomain->viommu != vdev->viommu) + return -EINVAL; /* * In the virtio-iommu device, when attaching the endpoint to a new @@ -761,16 +760,9 @@ static int viommu_attach_dev(struct iommu_domain *domain, struct device *dev) .domain = cpu_to_le32(vdomain->id), }; - if (vdomain->bypass) - req.flags |= cpu_to_le32(VIRTIO_IOMMU_ATTACH_F_BYPASS); - - for (i = 0; i < fwspec->num_ids; i++) { - req.endpoint = cpu_to_le32(fwspec->ids[i]); - - ret = viommu_send_req_sync(vdomain->viommu, &req, sizeof(req)); - if (ret) - return ret; - } + ret = viommu_send_attach_req(vdomain->viommu, dev, &req); + if (ret) + return ret; if (!vdomain->nr_endpoints) { /* @@ -788,6 +780,40 @@ static int viommu_attach_dev(struct iommu_domain *domain, struct device *dev) return 0; } +static int viommu_attach_identity_domain(struct iommu_domain *domain, + struct device *dev) +{ + int ret = 0; + struct virtio_iommu_req_attach req; + struct viommu_endpoint *vdev = dev_iommu_priv_get(dev); + struct viommu_domain *vdomain = to_viommu_domain(domain); + + req = (struct virtio_iommu_req_attach) { + .head.type = VIRTIO_IOMMU_T_ATTACH, + .domain = cpu_to_le32(vdev->viommu->identity_domain_id), + .flags = cpu_to_le32(VIRTIO_IOMMU_ATTACH_F_BYPASS), + }; + + ret = viommu_send_attach_req(vdev->viommu, dev, &req); + if (ret) + return ret; + + if (vdev->vdomain) + vdev->vdomain->nr_endpoints--; + vdomain->nr_endpoints++; + vdev->vdomain = vdomain; + return 0; +} + +static struct viommu_domain viommu_identity_domain = { + .domain = { + .type = IOMMU_DOMAIN_IDENTITY, + .ops = &(const struct iommu_domain_ops) { + .attach_dev = viommu_attach_identity_domain, + }, + }, +}; + static void viommu_detach_dev(struct viommu_endpoint *vdev) { int i; @@ -1062,7 +1088,8 @@ static bool viommu_capable(struct device *dev, enum iommu_cap cap) static struct iommu_ops viommu_ops = { .capable = viommu_capable, - .domain_alloc = viommu_domain_alloc, + .domain_alloc_identity = viommu_domain_alloc_identity, + .domain_alloc_paging = viommu_domain_alloc_paging, .probe_device = viommu_probe_device, .release_device = viommu_release_device, .device_group = viommu_device_group, @@ -1184,6 +1211,12 @@ static int viommu_probe(struct virtio_device *vdev) if (virtio_has_feature(vdev, VIRTIO_IOMMU_F_MMIO)) viommu->map_flags |= VIRTIO_IOMMU_MAP_F_MMIO; + /* Reserve an ID to use as the bypass domain */ + if (virtio_has_feature(viommu->vdev, VIRTIO_IOMMU_F_BYPASS_CONFIG)) { + viommu->identity_domain_id = viommu->first_domain; + viommu->first_domain++; + } + viommu_ops.pgsize_bitmap = viommu->pgsize_bitmap; virtio_device_ready(vdev); diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c index ed40d8600656..2cc2eb24dc8a 100644 --- a/drivers/md/bcache/btree.c +++ b/drivers/md/bcache/btree.c @@ -36,6 +36,7 @@ #include <linux/sched/clock.h> #include <linux/rculist.h> #include <linux/delay.h> +#include <linux/sort.h> #include <trace/events/bcache.h> /* @@ -559,8 +560,6 @@ static void mca_data_alloc(struct btree *b, struct bkey *k, gfp_t gfp) } } -#define cmp_int(l, r) ((l > r) - (l < r)) - #ifdef CONFIG_PROVE_LOCKING static int btree_lock_cmp_fn(const struct lockdep_map *_a, const struct lockdep_map *_b) diff --git a/drivers/media/i2c/ds90ub913.c b/drivers/media/i2c/ds90ub913.c index b1e67e514c6a..6d3f8617ef13 100644 --- a/drivers/media/i2c/ds90ub913.c +++ b/drivers/media/i2c/ds90ub913.c @@ 
-707,6 +707,7 @@ static int ub913_i2c_master_init(struct ub913_data *priv) static int ub913_add_i2c_adapter(struct ub913_data *priv) { struct device *dev = &priv->client->dev; + struct i2c_atr_adap_desc desc = { }; struct fwnode_handle *i2c_handle; int ret; @@ -714,8 +715,12 @@ static int ub913_add_i2c_adapter(struct ub913_data *priv) if (!i2c_handle) return 0; - ret = i2c_atr_add_adapter(priv->plat_data->atr, priv->plat_data->port, - dev, i2c_handle); + desc.chan_id = priv->plat_data->port; + desc.parent = dev; + desc.bus_handle = i2c_handle; + desc.num_aliases = 0; + + ret = i2c_atr_add_adapter(priv->plat_data->atr, &desc); fwnode_handle_put(i2c_handle); diff --git a/drivers/media/i2c/ds90ub953.c b/drivers/media/i2c/ds90ub953.c index 89e3132e81c5..59bd92388845 100644 --- a/drivers/media/i2c/ds90ub953.c +++ b/drivers/media/i2c/ds90ub953.c @@ -1102,6 +1102,7 @@ static int ub953_register_clkout(struct ub953_data *priv) static int ub953_add_i2c_adapter(struct ub953_data *priv) { struct device *dev = &priv->client->dev; + struct i2c_atr_adap_desc desc = { }; struct fwnode_handle *i2c_handle; int ret; @@ -1109,8 +1110,12 @@ static int ub953_add_i2c_adapter(struct ub953_data *priv) if (!i2c_handle) return 0; - ret = i2c_atr_add_adapter(priv->plat_data->atr, priv->plat_data->port, - dev, i2c_handle); + desc.chan_id = priv->plat_data->port; + desc.parent = dev; + desc.bus_handle = i2c_handle; + desc.num_aliases = 0; + + ret = i2c_atr_add_adapter(priv->plat_data->atr, &desc); fwnode_handle_put(i2c_handle); diff --git a/drivers/media/i2c/ds90ub960.c b/drivers/media/i2c/ds90ub960.c index ed9ace1a5476..082fc62b0f5b 100644 --- a/drivers/media/i2c/ds90ub960.c +++ b/drivers/media/i2c/ds90ub960.c @@ -27,6 +27,7 @@ */ #include <linux/bitops.h> +#include <linux/cleanup.h> #include <linux/clk.h> #include <linux/delay.h> #include <linux/fwnode.h> @@ -521,7 +522,9 @@ struct ub960_rxport { }; } eq; - const struct i2c_client *aliased_clients[UB960_MAX_PORT_ALIASES]; + /* lock for aliased_addrs and associated registers */ + struct mutex aliased_addrs_lock; + u16 aliased_addrs[UB960_MAX_PORT_ALIASES]; }; struct ub960_asd { @@ -1264,8 +1267,8 @@ static int ub960_reset(struct ub960_data *priv, bool reset_regs) * I2C-ATR (address translator) */ -static int ub960_atr_attach_client(struct i2c_atr *atr, u32 chan_id, - const struct i2c_client *client, u16 alias) +static int ub960_atr_attach_addr(struct i2c_atr *atr, u32 chan_id, + u16 addr, u16 alias) { struct ub960_data *priv = i2c_atr_get_driver_data(atr); struct ub960_rxport *rxport = priv->rxports[chan_id]; @@ -1273,20 +1276,22 @@ static int ub960_atr_attach_client(struct i2c_atr *atr, u32 chan_id, unsigned int reg_idx; int ret = 0; - for (reg_idx = 0; reg_idx < ARRAY_SIZE(rxport->aliased_clients); reg_idx++) { - if (!rxport->aliased_clients[reg_idx]) + guard(mutex)(&rxport->aliased_addrs_lock); + + for (reg_idx = 0; reg_idx < ARRAY_SIZE(rxport->aliased_addrs); reg_idx++) { + if (!rxport->aliased_addrs[reg_idx]) break; } - if (reg_idx == ARRAY_SIZE(rxport->aliased_clients)) { + if (reg_idx == ARRAY_SIZE(rxport->aliased_addrs)) { dev_err(dev, "rx%u: alias pool exhausted\n", rxport->nport); return -EADDRNOTAVAIL; } - rxport->aliased_clients[reg_idx] = client; + rxport->aliased_addrs[reg_idx] = addr; ub960_rxport_write(priv, chan_id, UB960_RR_SLAVE_ID(reg_idx), - client->addr << 1, &ret); + addr << 1, &ret); ub960_rxport_write(priv, chan_id, UB960_RR_SLAVE_ALIAS(reg_idx), alias << 1, &ret); @@ -1294,13 +1299,13 @@ static int ub960_atr_attach_client(struct i2c_atr *atr, 
u32 chan_id, return ret; dev_dbg(dev, "rx%u: client 0x%02x assigned alias 0x%02x at slot %u\n", - rxport->nport, client->addr, alias, reg_idx); + rxport->nport, addr, alias, reg_idx); return 0; } -static void ub960_atr_detach_client(struct i2c_atr *atr, u32 chan_id, - const struct i2c_client *client) +static void ub960_atr_detach_addr(struct i2c_atr *atr, u32 chan_id, + u16 addr) { struct ub960_data *priv = i2c_atr_get_driver_data(atr); struct ub960_rxport *rxport = priv->rxports[chan_id]; @@ -1308,34 +1313,36 @@ static void ub960_atr_detach_client(struct i2c_atr *atr, u32 chan_id, unsigned int reg_idx; int ret; - for (reg_idx = 0; reg_idx < ARRAY_SIZE(rxport->aliased_clients); reg_idx++) { - if (rxport->aliased_clients[reg_idx] == client) + guard(mutex)(&rxport->aliased_addrs_lock); + + for (reg_idx = 0; reg_idx < ARRAY_SIZE(rxport->aliased_addrs); reg_idx++) { + if (rxport->aliased_addrs[reg_idx] == addr) break; } - if (reg_idx == ARRAY_SIZE(rxport->aliased_clients)) { + if (reg_idx == ARRAY_SIZE(rxport->aliased_addrs)) { dev_err(dev, "rx%u: client 0x%02x is not mapped!\n", - rxport->nport, client->addr); + rxport->nport, addr); return; } - rxport->aliased_clients[reg_idx] = NULL; + rxport->aliased_addrs[reg_idx] = 0; ret = ub960_rxport_write(priv, chan_id, UB960_RR_SLAVE_ALIAS(reg_idx), 0, NULL); if (ret) { dev_err(dev, "rx%u: unable to fully unmap client 0x%02x: %d\n", - rxport->nport, client->addr, ret); + rxport->nport, addr, ret); return; } dev_dbg(dev, "rx%u: client 0x%02x released at slot %u\n", rxport->nport, - client->addr, reg_idx); + addr, reg_idx); } static const struct i2c_atr_ops ub960_atr_ops = { - .attach_client = ub960_atr_attach_client, - .detach_client = ub960_atr_detach_client, + .attach_addr = ub960_atr_attach_addr, + .detach_addr = ub960_atr_detach_addr, }; static int ub960_init_atr(struct ub960_data *priv) @@ -1344,7 +1351,7 @@ static int ub960_init_atr(struct ub960_data *priv) struct i2c_adapter *parent_adap = priv->client->adapter; priv->atr = i2c_atr_new(parent_adap, dev, &ub960_atr_ops, - priv->hw_data->num_rxports); + priv->hw_data->num_rxports, 0); if (IS_ERR(priv->atr)) return PTR_ERR(priv->atr); @@ -2173,7 +2180,6 @@ static int ub960_rxport_add_serializer(struct ub960_data *priv, u8 nport) struct device *dev = &priv->client->dev; struct ds90ub9xx_platform_data *ser_pdata = &rxport->ser.pdata; struct i2c_board_info ser_info = { - .of_node = to_of_node(rxport->ser.fwnode), .fwnode = rxport->ser.fwnode, .platform_data = ser_pdata, }; @@ -4374,6 +4380,8 @@ static void ub960_rxport_free_ports(struct ub960_data *priv) fwnode_handle_put(it.rxport->source.ep_fwnode); fwnode_handle_put(it.rxport->ser.fwnode); + mutex_destroy(&it.rxport->aliased_addrs_lock); + kfree(it.rxport); priv->rxports[it.nport] = NULL; } @@ -4602,6 +4610,8 @@ static int ub960_parse_dt_rxport(struct ub960_data *priv, unsigned int nport, if (ret) goto err_put_remote_fwnode; + mutex_init(&rxport->aliased_addrs_lock); + return 0; err_put_remote_fwnode: diff --git a/drivers/memory/Kconfig b/drivers/memory/Kconfig index c82d8d8a16ea..79df0d22e218 100644 --- a/drivers/memory/Kconfig +++ b/drivers/memory/Kconfig @@ -32,7 +32,7 @@ config ARM_PL172_MPMC config ATMEL_EBI bool "Atmel EBI driver" - default y if ARCH_AT91 + default ARCH_AT91 depends on ARCH_AT91 || COMPILE_TEST depends on OF select MFD_SYSCON @@ -147,7 +147,7 @@ config FPGA_DFL_EMIF config MVEBU_DEVBUS bool "Marvell EBU Device Bus Controller" - default y if PLAT_ORION + default PLAT_ORION depends on PLAT_ORION || COMPILE_TEST depends on OF 
help @@ -198,7 +198,7 @@ config DA8XX_DDRCTL config PL353_SMC tristate "ARM PL35X Static Memory Controller(SMC) driver" - default y if ARM + default ARM depends on ARM || COMPILE_TEST depends on ARM_AMBA help @@ -225,6 +225,23 @@ config STM32_FMC2_EBI devices (like SRAM, ethernet adapters, FPGAs, LCD displays, ...) on SOCs containing the FMC2 External Bus Interface. +config STM32_OMM + tristate "STM32 Octo Memory Manager" + depends on SPI_STM32_OSPI || COMPILE_TEST + help + This driver manages the muxing between the 2 OSPI busses and + the 2 output ports. There are 4 possible muxing configurations: + - direct mode (no multiplexing): OSPI1 output is on port 1 and OSPI2 + output is on port 2 + - OSPI1 and OSPI2 are multiplexed over the same output port 1 + - swapped mode (no multiplexing), OSPI1 output is on port 2, + OSPI2 output is on port 1 + - OSPI1 and OSPI2 are multiplexed over the same output port 2 + It also manages : + - the split of the memory area shared between the 2 OSPI instances. + - chip select selection override. + - the time between 2 transactions in multiplexed mode. + source "drivers/memory/samsung/Kconfig" source "drivers/memory/tegra/Kconfig" diff --git a/drivers/memory/Makefile b/drivers/memory/Makefile index d2e6ca9abbe0..c1959661bf63 100644 --- a/drivers/memory/Makefile +++ b/drivers/memory/Makefile @@ -24,6 +24,7 @@ obj-$(CONFIG_DA8XX_DDRCTL) += da8xx-ddrctl.o obj-$(CONFIG_PL353_SMC) += pl353-smc.o obj-$(CONFIG_RENESAS_RPCIF) += renesas-rpc-if.o obj-$(CONFIG_STM32_FMC2_EBI) += stm32-fmc2-ebi.o +obj-$(CONFIG_STM32_OMM) += stm32_omm.o obj-$(CONFIG_SAMSUNG_MC) += samsung/ obj-$(CONFIG_TEGRA_MC) += tegra/ diff --git a/drivers/memory/bt1-l2-ctl.c b/drivers/memory/bt1-l2-ctl.c index 78bd71b203f2..0fd96abc172a 100644 --- a/drivers/memory/bt1-l2-ctl.c +++ b/drivers/memory/bt1-l2-ctl.c @@ -222,7 +222,7 @@ static ssize_t l2_ctl_latency_show(struct device *dev, if (ret) return ret; - return scnprintf(buf, PAGE_SIZE, "%u\n", data); + return sysfs_emit(buf, "%u\n", data); } static ssize_t l2_ctl_latency_store(struct device *dev, diff --git a/drivers/memory/mtk-smi.c b/drivers/memory/mtk-smi.c index a8f5467d6b31..c086c22511f7 100644 --- a/drivers/memory/mtk-smi.c +++ b/drivers/memory/mtk-smi.c @@ -283,6 +283,43 @@ static int mtk_smi_larb_config_port_gen2_general(struct device *dev) return 0; } +static const u8 mtk_smi_larb_mt6893_ostd[][SMI_LARB_PORT_NR_MAX] = { + [0] = {0x2, 0x6, 0x2, 0x2, 0x2, 0x28, 0x18, 0x18, 0x1, 0x1, 0x1, 0x8, + 0x8, 0x1, 0x3f}, + [1] = {0x2, 0x6, 0x2, 0x2, 0x2, 0x28, 0x18, 0x18, 0x1, 0x1, 0x1, 0x8, + 0x8, 0x1, 0x3f}, + [2] = {0x5, 0x5, 0x5, 0x5, 0x1, 0x3f}, + [3] = {0x5, 0x5, 0x5, 0x5, 0x1, 0x3f}, + [4] = {0x28, 0x19, 0xb, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x4, 0x1}, + [5] = {0x1, 0x1, 0x4, 0x1, 0x1, 0x1, 0x1, 0x16}, + [6] = {}, + [7] = {0x1, 0x4, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x4, 0x4, 0x1, + 0x4, 0x1, 0xa, 0x6, 0x1, 0xa, 0x6, 0x1, 0x1, 0x1, 0x1, 0x5, + 0x3, 0x3, 0x4}, + [8] = {0x1, 0x4, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x4, 0x4, 0x1, + 0x4, 0x1, 0xa, 0x6, 0x1, 0xa, 0x6, 0x1, 0x1, 0x1, 0x1, 0x5, + 0x3, 0x3, 0x4}, + [9] = {0x9, 0x7, 0xf, 0x8, 0x1, 0x8, 0x9, 0x3, 0x3, 0x6, 0x7, 0x4, + 0x9, 0x3, 0x4, 0xe, 0x1, 0x7, 0x8, 0x7, 0x7, 0x1, 0x6, 0x2, + 0xf, 0x8, 0x1, 0x1, 0x1}, + [10] = {}, + [11] = {0x9, 0x7, 0xf, 0x8, 0x1, 0x8, 0x9, 0x3, 0x3, 0x6, 0x7, 0x4, + 0x9, 0x3, 0x4, 0xe, 0x1, 0x7, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, + 0x1, 0x1, 0x1, 0x1, 0x1}, + [12] = {}, + [13] = {0x2, 0xc, 0xc, 0xe, 0x6, 0x6, 0x6, 0x6, 0x6, 0x12, 0x6, 0x1}, + [14] = {0x2, 0xc, 0xc, 0x28, 
0x12, 0x6}, + [15] = {0x28, 0x1, 0x2, 0x28, 0x1}, + [16] = {0x28, 0x14, 0x2, 0xc, 0x18, 0x2, 0x14, 0x14, 0x4, 0x4, 0x4, 0x2, + 0x4, 0x2, 0x8, 0x4, 0x4}, + [17] = {0x28, 0x14, 0x2, 0xc, 0x18, 0x2, 0x14, 0x14, 0x4, 0x4, 0x4, 0x2, + 0x4, 0x2, 0x8, 0x4, 0x4}, + [18] = {0x28, 0x14, 0x2, 0xc, 0x18, 0x2, 0x14, 0x14, 0x4, 0x4, 0x4, 0x2, + 0x4, 0x2, 0x8, 0x4, 0x4}, + [19] = {0x2, 0x2, 0x4, 0x2}, + [20] = {0x9, 0x9, 0x5, 0x5, 0x1, 0x1}, +}; + static const u8 mtk_smi_larb_mt8188_ostd[][SMI_LARB_PORT_NR_MAX] = { [0] = {0x02, 0x18, 0x22, 0x22, 0x01, 0x02, 0x0a,}, [1] = {0x12, 0x02, 0x14, 0x14, 0x01, 0x18, 0x0a,}, @@ -429,6 +466,12 @@ static const struct mtk_smi_larb_gen mtk_smi_larb_mt6779 = { /* DUMMY | IPU0 | IPU1 | CCU | MDLA */ }; +static const struct mtk_smi_larb_gen mtk_smi_larb_mt6893 = { + .config_port = mtk_smi_larb_config_port_gen2_general, + .flags_general = MTK_SMI_FLAG_THRT_UPDATE | MTK_SMI_FLAG_SW_FLAG, + .ostd = mtk_smi_larb_mt6893_ostd, +}; + static const struct mtk_smi_larb_gen mtk_smi_larb_mt8167 = { /* mt8167 do not need the port in larb */ .config_port = mtk_smi_larb_config_port_mt8167, @@ -474,6 +517,7 @@ static const struct of_device_id mtk_smi_larb_of_ids[] = { {.compatible = "mediatek,mt2712-smi-larb", .data = &mtk_smi_larb_mt2712}, {.compatible = "mediatek,mt6779-smi-larb", .data = &mtk_smi_larb_mt6779}, {.compatible = "mediatek,mt6795-smi-larb", .data = &mtk_smi_larb_mt8173}, + {.compatible = "mediatek,mt6893-smi-larb", .data = &mtk_smi_larb_mt6893}, {.compatible = "mediatek,mt8167-smi-larb", .data = &mtk_smi_larb_mt8167}, {.compatible = "mediatek,mt8173-smi-larb", .data = &mtk_smi_larb_mt8173}, {.compatible = "mediatek,mt8183-smi-larb", .data = &mtk_smi_larb_mt8183}, @@ -694,6 +738,13 @@ static const struct mtk_smi_common_plat mtk_smi_common_mt6795 = { .init = mtk_smi_common_mt6795_init, }; +static const struct mtk_smi_common_plat mtk_smi_common_mt6893 = { + .type = MTK_SMI_GEN2, + .has_gals = true, + .bus_sel = F_MMU1_LARB(1) | F_MMU1_LARB(2) | F_MMU1_LARB(4) | + F_MMU1_LARB(5) | F_MMU1_LARB(7), +}; + static const struct mtk_smi_common_plat mtk_smi_common_mt8183 = { .type = MTK_SMI_GEN2, .has_gals = true, @@ -756,6 +807,7 @@ static const struct of_device_id mtk_smi_common_of_ids[] = { {.compatible = "mediatek,mt2712-smi-common", .data = &mtk_smi_common_gen2}, {.compatible = "mediatek,mt6779-smi-common", .data = &mtk_smi_common_mt6779}, {.compatible = "mediatek,mt6795-smi-common", .data = &mtk_smi_common_mt6795}, + {.compatible = "mediatek,mt6893-smi-common", .data = &mtk_smi_common_mt6893}, {.compatible = "mediatek,mt8167-smi-common", .data = &mtk_smi_common_gen2}, {.compatible = "mediatek,mt8173-smi-common", .data = &mtk_smi_common_gen2}, {.compatible = "mediatek,mt8183-smi-common", .data = &mtk_smi_common_mt8183}, diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c index d5bf3243fe78..9c96eed00194 100644 --- a/drivers/memory/omap-gpmc.c +++ b/drivers/memory/omap-gpmc.c @@ -2374,7 +2374,7 @@ static void gpmc_probe_dt_children(struct platform_device *pdev) static int gpmc_gpio_get_direction(struct gpio_chip *chip, unsigned int offset) { - return 1; /* we're input only */ + return GPIO_LINE_DIRECTION_IN; /* we're input only */ } static int gpmc_gpio_direction_input(struct gpio_chip *chip, @@ -2383,17 +2383,6 @@ static int gpmc_gpio_direction_input(struct gpio_chip *chip, return 0; /* we're input only */ } -static int gpmc_gpio_direction_output(struct gpio_chip *chip, - unsigned int offset, int value) -{ - return -EINVAL; /* we're input only */ -} - -static 
void gpmc_gpio_set(struct gpio_chip *chip, unsigned int offset, - int value) -{ -} - static int gpmc_gpio_get(struct gpio_chip *chip, unsigned int offset) { u32 reg; @@ -2415,8 +2404,6 @@ static int gpmc_gpio_init(struct gpmc_device *gpmc) gpmc->gpio_chip.ngpio = gpmc_nr_waitpins; gpmc->gpio_chip.get_direction = gpmc_gpio_get_direction; gpmc->gpio_chip.direction_input = gpmc_gpio_direction_input; - gpmc->gpio_chip.direction_output = gpmc_gpio_direction_output; - gpmc->gpio_chip.set = gpmc_gpio_set; gpmc->gpio_chip.get = gpmc_gpio_get; gpmc->gpio_chip.base = -1; diff --git a/drivers/memory/stm32_omm.c b/drivers/memory/stm32_omm.c new file mode 100644 index 000000000000..79ceb1635698 --- /dev/null +++ b/drivers/memory/stm32_omm.c @@ -0,0 +1,479 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) STMicroelectronics 2025 - All Rights Reserved + * Author(s): Patrice Chotard <patrice.chotard@foss.st.com> for STMicroelectronics. + */ + +#include <linux/bitfield.h> +#include <linux/bus/stm32_firewall_device.h> +#include <linux/clk.h> +#include <linux/err.h> +#include <linux/mfd/syscon.h> +#include <linux/mod_devicetable.h> +#include <linux/module.h> +#include <linux/of_address.h> +#include <linux/of_platform.h> +#include <linux/pinctrl/consumer.h> +#include <linux/pm_runtime.h> +#include <linux/regmap.h> +#include <linux/reset.h> + +#define OMM_CR 0 +#define CR_MUXEN BIT(0) +#define CR_MUXENMODE_MASK GENMASK(1, 0) +#define CR_CSSEL_OVR_EN BIT(4) +#define CR_CSSEL_OVR_MASK GENMASK(6, 5) +#define CR_REQ2ACK_MASK GENMASK(23, 16) + +#define OMM_CHILD_NB 2 +#define OMM_CLK_NB 3 + +struct stm32_omm { + struct resource *mm_res; + struct clk_bulk_data clk_bulk[OMM_CLK_NB]; + struct reset_control *child_reset[OMM_CHILD_NB]; + void __iomem *io_base; + u32 cr; + u8 nb_child; + bool restore_omm; +}; + +static int stm32_omm_set_amcr(struct device *dev, bool set) +{ + struct stm32_omm *omm = dev_get_drvdata(dev); + resource_size_t mm_ospi2_size = 0; + static const char * const mm_name[] = { "ospi1", "ospi2" }; + struct regmap *syscfg_regmap; + struct device_node *node; + struct resource res, res1; + u32 amcr_base, amcr_mask; + int ret, idx; + unsigned int i, amcr, read_amcr; + + for (i = 0; i < omm->nb_child; i++) { + idx = of_property_match_string(dev->of_node, + "memory-region-names", + mm_name[i]); + if (idx < 0) + continue; + + /* res1 only used on second loop iteration */ + res1.start = res.start; + res1.end = res.end; + + node = of_parse_phandle(dev->of_node, "memory-region", idx); + if (!node) + continue; + + ret = of_address_to_resource(node, 0, &res); + if (ret) { + of_node_put(node); + dev_err(dev, "unable to resolve memory region\n"); + return ret; + } + + /* check that memory region fits inside OMM memory map area */ + if (!resource_contains(omm->mm_res, &res)) { + dev_err(dev, "%s doesn't fit inside OMM memory map area\n", + mm_name[i]); + dev_err(dev, "%pR doesn't fit inside %pR\n", &res, omm->mm_res); + of_node_put(node); + + return -EFAULT; + } + + if (i == 1) { + mm_ospi2_size = resource_size(&res); + + /* check that OMM memory region 1 doesn't overlap memory region 2 */ + if (resource_overlaps(&res, &res1)) { + dev_err(dev, "OMM memory-region %s overlaps memory region %s\n", + mm_name[0], mm_name[1]); + dev_err(dev, "%pR overlaps %pR\n", &res1, &res); + of_node_put(node); + + return -EFAULT; + } + } + of_node_put(node); + } + + syscfg_regmap = syscon_regmap_lookup_by_phandle(dev->of_node, "st,syscfg-amcr"); + if (IS_ERR(syscfg_regmap)) + return dev_err_probe(dev, 
PTR_ERR(syscfg_regmap), + "Failed to get st,syscfg-amcr property\n"); + + ret = of_property_read_u32_index(dev->of_node, "st,syscfg-amcr", 1, + &amcr_base); + if (ret) + return ret; + + ret = of_property_read_u32_index(dev->of_node, "st,syscfg-amcr", 2, + &amcr_mask); + if (ret) + return ret; + + amcr = mm_ospi2_size / SZ_64M; + + if (set) + regmap_update_bits(syscfg_regmap, amcr_base, amcr_mask, amcr); + + /* read AMCR and check coherency with memory-map areas defined in DT */ + regmap_read(syscfg_regmap, amcr_base, &read_amcr); + read_amcr = read_amcr >> (ffs(amcr_mask) - 1); + + if (amcr != read_amcr) { + dev_err(dev, "AMCR value not coherent with DT memory-map areas\n"); + ret = -EINVAL; + } + + return ret; +} + +static int stm32_omm_toggle_child_clock(struct device *dev, bool enable) +{ + struct stm32_omm *omm = dev_get_drvdata(dev); + int i, ret; + + for (i = 0; i < omm->nb_child; i++) { + if (enable) { + ret = clk_prepare_enable(omm->clk_bulk[i + 1].clk); + if (ret) { + dev_err(dev, "Can not enable clock\n"); + goto clk_error; + } + } else { + clk_disable_unprepare(omm->clk_bulk[i + 1].clk); + } + } + + return 0; + +clk_error: + while (i--) + clk_disable_unprepare(omm->clk_bulk[i + 1].clk); + + return ret; +} + +static int stm32_omm_disable_child(struct device *dev) +{ + struct stm32_omm *omm = dev_get_drvdata(dev); + struct reset_control *reset; + int ret; + u8 i; + + ret = stm32_omm_toggle_child_clock(dev, true); + if (ret) + return ret; + + for (i = 0; i < omm->nb_child; i++) { + /* reset OSPI to ensure CR_EN bit is set to 0 */ + reset = omm->child_reset[i]; + ret = reset_control_acquire(reset); + if (ret) { + stm32_omm_toggle_child_clock(dev, false); + dev_err(dev, "Can not acquire reset %d\n", ret); + return ret; + } + + reset_control_assert(reset); + udelay(2); + reset_control_deassert(reset); + + reset_control_release(reset); + } + + return stm32_omm_toggle_child_clock(dev, false); +} + +static int stm32_omm_configure(struct device *dev) +{ + static const char * const clocks_name[] = {"omm", "ospi1", "ospi2"}; + struct stm32_omm *omm = dev_get_drvdata(dev); + unsigned long clk_rate_max = 0; + u32 mux = 0; + u32 cssel_ovr = 0; + u32 req2ack = 0; + struct reset_control *rstc; + unsigned long clk_rate; + int ret; + u8 i; + + for (i = 0; i < OMM_CLK_NB; i++) + omm->clk_bulk[i].id = clocks_name[i]; + + /* retrieve OMM, OSPI1 and OSPI2 clocks */ + ret = devm_clk_bulk_get(dev, OMM_CLK_NB, omm->clk_bulk); + if (ret) + return dev_err_probe(dev, ret, "Failed to get OMM/OSPI's clocks\n"); + + /* Ensure both OSPI instance are disabled before configuring OMM */ + ret = stm32_omm_disable_child(dev); + if (ret) + return ret; + + ret = pm_runtime_resume_and_get(dev); + if (ret < 0) + return ret; + + /* parse children's clock */ + for (i = 1; i <= omm->nb_child; i++) { + clk_rate = clk_get_rate(omm->clk_bulk[i].clk); + if (!clk_rate) { + dev_err(dev, "Invalid clock rate\n"); + ret = -EINVAL; + goto error; + } + + if (clk_rate > clk_rate_max) + clk_rate_max = clk_rate; + } + + rstc = devm_reset_control_get_exclusive(dev, "omm"); + if (IS_ERR(rstc)) { + ret = dev_err_probe(dev, PTR_ERR(rstc), "reset get failed\n"); + goto error; + } + + reset_control_assert(rstc); + udelay(2); + reset_control_deassert(rstc); + + omm->cr = readl_relaxed(omm->io_base + OMM_CR); + /* optional */ + ret = of_property_read_u32(dev->of_node, "st,omm-mux", &mux); + if (!ret) { + if (mux & CR_MUXEN) { + ret = of_property_read_u32(dev->of_node, "st,omm-req2ack-ns", + &req2ack); + if (!ret && !req2ack) { + req2ack = 
DIV_ROUND_UP(req2ack, NSEC_PER_SEC / clk_rate_max) - 1; + + if (req2ack > 256) + req2ack = 256; + } + + req2ack = FIELD_PREP(CR_REQ2ACK_MASK, req2ack); + + omm->cr &= ~CR_REQ2ACK_MASK; + omm->cr |= FIELD_PREP(CR_REQ2ACK_MASK, req2ack); + + /* + * If the mux is enabled, the 2 OSPI clocks have to be + * always enabled + */ + ret = stm32_omm_toggle_child_clock(dev, true); + if (ret) + goto error; + } + + omm->cr &= ~CR_MUXENMODE_MASK; + omm->cr |= FIELD_PREP(CR_MUXENMODE_MASK, mux); + } + + /* optional */ + ret = of_property_read_u32(dev->of_node, "st,omm-cssel-ovr", &cssel_ovr); + if (!ret) { + omm->cr &= ~CR_CSSEL_OVR_MASK; + omm->cr |= FIELD_PREP(CR_CSSEL_OVR_MASK, cssel_ovr); + omm->cr |= CR_CSSEL_OVR_EN; + } + + omm->restore_omm = true; + writel_relaxed(omm->cr, omm->io_base + OMM_CR); + + ret = stm32_omm_set_amcr(dev, true); + +error: + pm_runtime_put_sync_suspend(dev); + + return ret; +} + +static int stm32_omm_check_access(struct device_node *np) +{ + struct stm32_firewall firewall; + int ret; + + ret = stm32_firewall_get_firewall(np, &firewall, 1); + if (ret) + return ret; + + return stm32_firewall_grant_access(&firewall); +} + +static int stm32_omm_probe(struct platform_device *pdev) +{ + static const char * const resets_name[] = {"ospi1", "ospi2"}; + struct device *dev = &pdev->dev; + u8 child_access_granted = 0; + struct stm32_omm *omm; + int i, ret; + + omm = devm_kzalloc(dev, sizeof(*omm), GFP_KERNEL); + if (!omm) + return -ENOMEM; + + omm->io_base = devm_platform_ioremap_resource_byname(pdev, "regs"); + if (IS_ERR(omm->io_base)) + return PTR_ERR(omm->io_base); + + omm->mm_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "memory_map"); + if (!omm->mm_res) + return -ENODEV; + + /* check child's access */ + for_each_child_of_node_scoped(dev->of_node, child) { + if (omm->nb_child >= OMM_CHILD_NB) { + dev_err(dev, "Bad DT, found too much children\n"); + return -E2BIG; + } + + ret = stm32_omm_check_access(child); + if (ret < 0 && ret != -EACCES) + return ret; + + if (!ret) + child_access_granted++; + + omm->nb_child++; + } + + if (omm->nb_child != OMM_CHILD_NB) + return -EINVAL; + + platform_set_drvdata(pdev, omm); + + devm_pm_runtime_enable(dev); + + /* check if OMM's resource access is granted */ + ret = stm32_omm_check_access(dev->of_node); + if (ret < 0 && ret != -EACCES) + return ret; + + for (i = 0; i < omm->nb_child; i++) { + omm->child_reset[i] = devm_reset_control_get_exclusive_released(dev, + resets_name[i]); + + if (IS_ERR(omm->child_reset[i])) + return dev_err_probe(dev, PTR_ERR(omm->child_reset[i]), + "Can't get %s reset\n", resets_name[i]); + } + + if (!ret && child_access_granted == OMM_CHILD_NB) { + ret = stm32_omm_configure(dev); + if (ret) + return ret; + } else { + dev_dbg(dev, "Octo Memory Manager resource's access not granted\n"); + /* + * AMCR can't be set, so check if current value is coherent + * with memory-map areas defined in DT + */ + ret = stm32_omm_set_amcr(dev, false); + if (ret) + return ret; + } + + ret = devm_of_platform_populate(dev); + if (ret) { + if (omm->cr & CR_MUXEN) + stm32_omm_toggle_child_clock(&pdev->dev, false); + + return dev_err_probe(dev, ret, "Failed to create Octo Memory Manager child\n"); + } + + return 0; +} + +static void stm32_omm_remove(struct platform_device *pdev) +{ + struct stm32_omm *omm = platform_get_drvdata(pdev); + + if (omm->cr & CR_MUXEN) + stm32_omm_toggle_child_clock(&pdev->dev, false); +} + +static const struct of_device_id stm32_omm_of_match[] = { + { .compatible = "st,stm32mp25-omm", }, + {} +}; 
+MODULE_DEVICE_TABLE(of, stm32_omm_of_match); + +static int __maybe_unused stm32_omm_runtime_suspend(struct device *dev) +{ + struct stm32_omm *omm = dev_get_drvdata(dev); + + clk_disable_unprepare(omm->clk_bulk[0].clk); + + return 0; +} + +static int __maybe_unused stm32_omm_runtime_resume(struct device *dev) +{ + struct stm32_omm *omm = dev_get_drvdata(dev); + + return clk_prepare_enable(omm->clk_bulk[0].clk); +} + +static int __maybe_unused stm32_omm_suspend(struct device *dev) +{ + struct stm32_omm *omm = dev_get_drvdata(dev); + + if (omm->restore_omm && omm->cr & CR_MUXEN) + stm32_omm_toggle_child_clock(dev, false); + + return pinctrl_pm_select_sleep_state(dev); +} + +static int __maybe_unused stm32_omm_resume(struct device *dev) +{ + struct stm32_omm *omm = dev_get_drvdata(dev); + int ret; + + pinctrl_pm_select_default_state(dev); + + if (!omm->restore_omm) + return 0; + + /* Ensure both OSPI instance are disabled before configuring OMM */ + ret = stm32_omm_disable_child(dev); + if (ret) + return ret; + + ret = pm_runtime_resume_and_get(dev); + if (ret < 0) + return ret; + + writel_relaxed(omm->cr, omm->io_base + OMM_CR); + ret = stm32_omm_set_amcr(dev, true); + pm_runtime_put_sync_suspend(dev); + if (ret) + return ret; + + if (omm->cr & CR_MUXEN) + ret = stm32_omm_toggle_child_clock(dev, true); + + return ret; +} + +static const struct dev_pm_ops stm32_omm_pm_ops = { + SET_RUNTIME_PM_OPS(stm32_omm_runtime_suspend, + stm32_omm_runtime_resume, NULL) + SET_SYSTEM_SLEEP_PM_OPS(stm32_omm_suspend, stm32_omm_resume) +}; + +static struct platform_driver stm32_omm_driver = { + .probe = stm32_omm_probe, + .remove = stm32_omm_remove, + .driver = { + .name = "stm32-omm", + .of_match_table = stm32_omm_of_match, + .pm = &stm32_omm_pm_ops, + }, +}; +module_platform_driver(stm32_omm_driver); + +MODULE_DESCRIPTION("STMicroelectronics Octo Memory Manager driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/memory/tegra/Kconfig b/drivers/memory/tegra/Kconfig index 3fe83d7c2bf8..fc5a27791826 100644 --- a/drivers/memory/tegra/Kconfig +++ b/drivers/memory/tegra/Kconfig @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0-only config TEGRA_MC bool "NVIDIA Tegra Memory Controller support" - default y + default ARCH_TEGRA depends on ARCH_TEGRA || (COMPILE_TEST && COMMON_CLK) select INTERCONNECT help @@ -12,7 +12,7 @@ if TEGRA_MC config TEGRA20_EMC tristate "NVIDIA Tegra20 External Memory Controller driver" - default y + default ARCH_TEGRA_2x_SOC depends on ARCH_TEGRA_2x_SOC || COMPILE_TEST select DEVFREQ_GOV_SIMPLE_ONDEMAND select PM_DEVFREQ @@ -25,7 +25,7 @@ config TEGRA20_EMC config TEGRA30_EMC tristate "NVIDIA Tegra30 External Memory Controller driver" - default y + default ARCH_TEGRA_3x_SOC depends on ARCH_TEGRA_3x_SOC || COMPILE_TEST select PM_OPP select DDR @@ -37,7 +37,7 @@ config TEGRA30_EMC config TEGRA124_EMC tristate "NVIDIA Tegra124 External Memory Controller driver" - default y + default ARCH_TEGRA_124_SOC depends on ARCH_TEGRA_124_SOC || COMPILE_TEST select TEGRA124_CLK_EMC if ARCH_TEGRA select PM_OPP diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig index 6b37d61150ee..c161546d728f 100644 --- a/drivers/misc/Kconfig +++ b/drivers/misc/Kconfig @@ -114,6 +114,18 @@ config RPMB If unsure, select N. +config TI_FPC202 + tristate "TI FPC202 Dual Port Controller" + depends on I2C + select GPIOLIB + select I2C_ATR + help + If you say yes here you get support for the Texas Instruments FPC202 + Dual Port Controller. + + This driver can also be built as a module. 
If so, the module will be + called fpc202. + config TIFM_CORE tristate "TI Flash Media interface support" depends on PCI diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile index d6c917229c45..054cee9b08a4 100644 --- a/drivers/misc/Makefile +++ b/drivers/misc/Makefile @@ -12,6 +12,7 @@ obj-$(CONFIG_ATMEL_SSC) += atmel-ssc.o obj-$(CONFIG_DUMMY_IRQ) += dummy-irq.o obj-$(CONFIG_ICS932S401) += ics932s401.o obj-$(CONFIG_LKDTM) += lkdtm/ +obj-$(CONFIG_TI_FPC202) += ti_fpc202.o obj-$(CONFIG_TIFM_CORE) += tifm_core.o obj-$(CONFIG_TIFM_7XX1) += tifm_7xx1.o obj-$(CONFIG_PHANTOM) += phantom.o diff --git a/drivers/misc/ti_fpc202.c b/drivers/misc/ti_fpc202.c new file mode 100644 index 000000000000..f7cde245ac95 --- /dev/null +++ b/drivers/misc/ti_fpc202.c @@ -0,0 +1,438 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * ti_fpc202.c - FPC202 Dual Port Controller driver + * + * Copyright (C) 2024 Bootlin + * + */ + +#include <linux/cleanup.h> +#include <linux/err.h> +#include <linux/i2c.h> +#include <linux/i2c-atr.h> +#include <linux/gpio/consumer.h> +#include <linux/gpio/driver.h> +#include <linux/module.h> + +#define FPC202_NUM_PORTS 2 +#define FPC202_ALIASES_PER_PORT 2 + +/* + * GPIO: port mapping + * + * 0: P0_S0_IN_A + * 1: P0_S1_IN_A + * 2: P1_S0_IN_A + * 3: P1_S1_IN_A + * 4: P0_S0_IN_B + * ... + * 8: P0_S0_IN_C + * ... + * 12: P0_S0_OUT_A + * ... + * 16: P0_S0_OUT_B + * ... + * 19: P1_S1_OUT_B + * + */ + +#define FPC202_GPIO_COUNT 20 +#define FPC202_GPIO_P0_S0_IN_B 4 +#define FPC202_GPIO_P0_S0_OUT_A 12 + +#define FPC202_REG_IN_A_INT 0x6 +#define FPC202_REG_IN_C_IN_B 0x7 +#define FPC202_REG_OUT_A_OUT_B 0x8 + +#define FPC202_REG_OUT_A_OUT_B_VAL 0xa + +#define FPC202_REG_MOD_DEV(port, dev) (0xb4 + ((port) * 4) + (dev)) +#define FPC202_REG_AUX_DEV(port, dev) (0xb6 + ((port) * 4) + (dev)) + +/* + * The FPC202 doesn't support turning off address translation on a single port. + * So just set an invalid I2C address as the translation target when no client + * address is attached. + */ +#define FPC202_REG_DEV_INVALID 0 + +/* Even aliases are assigned to device 0 and odd aliases to device 1 */ +#define fpc202_dev_num_from_alias(alias) ((alias) % 2) + +struct fpc202_priv { + struct i2c_client *client; + struct i2c_atr *atr; + struct gpio_desc *en_gpio; + struct gpio_chip gpio; + + /* Lock REG_MOD/AUX_DEV and addr_caches during attach/detach */ + struct mutex reg_dev_lock; + + /* Cached device addresses for both ports and their devices */ + u8 addr_caches[2][2]; + + /* Keep track of which ports were probed */ + DECLARE_BITMAP(probed_ports, FPC202_NUM_PORTS); +}; + +static void fpc202_fill_alias_table(struct i2c_client *client, u16 *aliases, int port_id) +{ + u16 first_alias; + int i; + + /* + * There is a predefined list of aliases for each FPC202 I2C + * self-address. This allows daisy-chained FPC202 units to + * automatically take on different sets of aliases. + * Each port of an FPC202 unit is assigned two aliases from this list. + */ + first_alias = 0x10 + 4 * port_id + 8 * ((u16)client->addr - 2); + + for (i = 0; i < FPC202_ALIASES_PER_PORT; i++) + aliases[i] = first_alias + i; +} + +static int fpc202_gpio_get_dir(int offset) +{ + return offset < FPC202_GPIO_P0_S0_OUT_A ? 
GPIO_LINE_DIRECTION_IN : GPIO_LINE_DIRECTION_OUT; +} + +static int fpc202_read(struct fpc202_priv *priv, u8 reg) +{ + int val; + + val = i2c_smbus_read_byte_data(priv->client, reg); + return val; +} + +static int fpc202_write(struct fpc202_priv *priv, u8 reg, u8 value) +{ + return i2c_smbus_write_byte_data(priv->client, reg, value); +} + +static void fpc202_set_enable(struct fpc202_priv *priv, int enable) +{ + if (!priv->en_gpio) + return; + + gpiod_set_value(priv->en_gpio, enable); +} + +static void fpc202_gpio_set(struct gpio_chip *chip, unsigned int offset, + int value) +{ + struct fpc202_priv *priv = gpiochip_get_data(chip); + int ret; + u8 val; + + if (fpc202_gpio_get_dir(offset) == GPIO_LINE_DIRECTION_IN) + return; + + ret = fpc202_read(priv, FPC202_REG_OUT_A_OUT_B_VAL); + if (ret < 0) { + dev_err(&priv->client->dev, "Failed to set GPIO %d value! err %d\n", offset, ret); + return; + } + + val = (u8)ret; + + if (value) + val |= BIT(offset - FPC202_GPIO_P0_S0_OUT_A); + else + val &= ~BIT(offset - FPC202_GPIO_P0_S0_OUT_A); + + fpc202_write(priv, FPC202_REG_OUT_A_OUT_B_VAL, val); +} + +static int fpc202_gpio_get(struct gpio_chip *chip, unsigned int offset) +{ + struct fpc202_priv *priv = gpiochip_get_data(chip); + u8 reg, bit; + int ret; + + if (offset < FPC202_GPIO_P0_S0_IN_B) { + reg = FPC202_REG_IN_A_INT; + bit = BIT(4 + offset); + } else if (offset < FPC202_GPIO_P0_S0_OUT_A) { + reg = FPC202_REG_IN_C_IN_B; + bit = BIT(offset - FPC202_GPIO_P0_S0_IN_B); + } else { + reg = FPC202_REG_OUT_A_OUT_B_VAL; + bit = BIT(offset - FPC202_GPIO_P0_S0_OUT_A); + } + + ret = fpc202_read(priv, reg); + if (ret < 0) + return ret; + + return !!(((u8)ret) & bit); +} + +static int fpc202_gpio_direction_input(struct gpio_chip *chip, unsigned int offset) +{ + if (fpc202_gpio_get_dir(offset) == GPIO_LINE_DIRECTION_OUT) + return -EINVAL; + + return 0; +} + +static int fpc202_gpio_direction_output(struct gpio_chip *chip, unsigned int offset, + int value) +{ + struct fpc202_priv *priv = gpiochip_get_data(chip); + int ret; + u8 val; + + if (fpc202_gpio_get_dir(offset) == GPIO_LINE_DIRECTION_IN) + return -EINVAL; + + fpc202_gpio_set(chip, offset, value); + + ret = fpc202_read(priv, FPC202_REG_OUT_A_OUT_B); + if (ret < 0) + return ret; + + val = (u8)ret | BIT(offset - FPC202_GPIO_P0_S0_OUT_A); + + return fpc202_write(priv, FPC202_REG_OUT_A_OUT_B, val); +} + +/* + * Set the translation table entry associated with a port and device number. + * + * Each downstream port of the FPC202 has two fixed aliases corresponding to + * device numbers 0 and 1. If one of these aliases is found in an incoming I2C + * transfer, it will be translated to the address given by the corresponding + * translation table entry. + */ +static int fpc202_write_dev_addr(struct fpc202_priv *priv, u32 port_id, int dev_num, u16 addr) +{ + int ret, reg_mod, reg_aux; + u8 val; + + guard(mutex)(&priv->reg_dev_lock); + + reg_mod = FPC202_REG_MOD_DEV(port_id, dev_num); + reg_aux = FPC202_REG_AUX_DEV(port_id, dev_num); + val = addr & 0x7f; + + ret = fpc202_write(priv, reg_mod, val); + if (ret) + return ret; + + /* + * The FPC202 datasheet is unclear about the role of the AUX registers. + * Empirically, writing to them as well seems to be necessary for + * address translation to function properly. 
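
To make the alias numbering in fpc202_fill_alias_table() above concrete, here is a small worked example; fpc202_example_first_alias() is a hypothetical helper that simply restates the driver's formula.

/* Hypothetical restatement of the formula in fpc202_fill_alias_table(). */
static u16 fpc202_example_first_alias(u16 fpc202_addr, int port_id)
{
        /* first_alias = 0x10 + 4 * port_id + 8 * (self_address - 2) */
        return 0x10 + 4 * port_id + 8 * (fpc202_addr - 2);
}

/*
 * An FPC202 at self-address 0x02 therefore owns aliases 0x10/0x11 on port 0
 * and 0x14/0x15 on port 1; a daisy-chained unit at 0x03 owns 0x18/0x19 and
 * 0x1c/0x1d. Even aliases translate to device 0 of a port and odd aliases to
 * device 1, matching fpc202_dev_num_from_alias().
 */
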
+ */ + ret = fpc202_write(priv, reg_aux, val); + + priv->addr_caches[port_id][dev_num] = val; + + return ret; +} + +static int fpc202_attach_addr(struct i2c_atr *atr, u32 chan_id, + u16 addr, u16 alias) +{ + struct fpc202_priv *priv = i2c_atr_get_driver_data(atr); + + dev_dbg(&priv->client->dev, "attaching address 0x%02x to alias 0x%02x\n", addr, alias); + + return fpc202_write_dev_addr(priv, chan_id, fpc202_dev_num_from_alias(alias), addr); +} + +static void fpc202_detach_addr(struct i2c_atr *atr, u32 chan_id, + u16 addr) +{ + struct fpc202_priv *priv = i2c_atr_get_driver_data(atr); + int dev_num, reg_mod, val; + + for (dev_num = 0; dev_num < 2; dev_num++) { + reg_mod = FPC202_REG_MOD_DEV(chan_id, dev_num); + + mutex_lock(&priv->reg_dev_lock); + + val = priv->addr_caches[chan_id][dev_num]; + + mutex_unlock(&priv->reg_dev_lock); + + if (val < 0) { + dev_err(&priv->client->dev, "failed to read register 0x%x while detaching address 0x%02x\n", + reg_mod, addr); + return; + } + + if (val == (addr & 0x7f)) { + fpc202_write_dev_addr(priv, chan_id, dev_num, FPC202_REG_DEV_INVALID); + return; + } + } +} + +static const struct i2c_atr_ops fpc202_atr_ops = { + .attach_addr = fpc202_attach_addr, + .detach_addr = fpc202_detach_addr, +}; + +static int fpc202_probe_port(struct fpc202_priv *priv, struct device_node *i2c_handle, int port_id) +{ + u16 aliases[FPC202_ALIASES_PER_PORT] = { }; + struct device *dev = &priv->client->dev; + struct i2c_atr_adap_desc desc = { }; + int ret = 0; + + desc.chan_id = port_id; + desc.parent = dev; + desc.bus_handle = of_node_to_fwnode(i2c_handle); + desc.num_aliases = FPC202_ALIASES_PER_PORT; + + fpc202_fill_alias_table(priv->client, aliases, port_id); + desc.aliases = aliases; + + ret = i2c_atr_add_adapter(priv->atr, &desc); + if (ret) + return ret; + + set_bit(port_id, priv->probed_ports); + + ret = fpc202_write_dev_addr(priv, port_id, 0, FPC202_REG_DEV_INVALID); + if (ret) + return ret; + + return fpc202_write_dev_addr(priv, port_id, 1, FPC202_REG_DEV_INVALID); +} + +static void fpc202_remove_port(struct fpc202_priv *priv, int port_id) +{ + i2c_atr_del_adapter(priv->atr, port_id); + clear_bit(port_id, priv->probed_ports); +} + +static int fpc202_probe(struct i2c_client *client) +{ + struct device *dev = &client->dev; + struct device_node *i2c_handle; + struct fpc202_priv *priv; + int ret, port_id; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + mutex_init(&priv->reg_dev_lock); + + priv->client = client; + i2c_set_clientdata(client, priv); + + priv->en_gpio = devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_HIGH); + if (IS_ERR(priv->en_gpio)) { + ret = PTR_ERR(priv->en_gpio); + dev_err(dev, "failed to fetch enable GPIO! 
err %d\n", ret); + goto destroy_mutex; + } + + priv->gpio.label = "gpio-fpc202"; + priv->gpio.base = -1; + priv->gpio.direction_input = fpc202_gpio_direction_input; + priv->gpio.direction_output = fpc202_gpio_direction_output; + priv->gpio.set = fpc202_gpio_set; + priv->gpio.get = fpc202_gpio_get; + priv->gpio.ngpio = FPC202_GPIO_COUNT; + priv->gpio.parent = dev; + priv->gpio.owner = THIS_MODULE; + + ret = gpiochip_add_data(&priv->gpio, priv); + if (ret) { + priv->gpio.parent = NULL; + dev_err(dev, "failed to add gpiochip err %d\n", ret); + goto disable_gpio; + } + + priv->atr = i2c_atr_new(client->adapter, dev, &fpc202_atr_ops, 2, 0); + if (IS_ERR(priv->atr)) { + ret = PTR_ERR(priv->atr); + dev_err(dev, "failed to create i2c atr err %d\n", ret); + goto disable_gpio; + } + + i2c_atr_set_driver_data(priv->atr, priv); + + bitmap_zero(priv->probed_ports, FPC202_NUM_PORTS); + + for_each_child_of_node(dev->of_node, i2c_handle) { + ret = of_property_read_u32(i2c_handle, "reg", &port_id); + if (ret) { + if (ret == -EINVAL) + continue; + + dev_err(dev, "failed to read 'reg' property of child node, err %d\n", ret); + goto unregister_chans; + } + + if (port_id > FPC202_NUM_PORTS) { + dev_err(dev, "port ID %d is out of range!\n", port_id); + ret = -EINVAL; + goto unregister_chans; + } + + ret = fpc202_probe_port(priv, i2c_handle, port_id); + if (ret) { + dev_err(dev, "Failed to probe port %d, err %d\n", port_id, ret); + goto unregister_chans; + } + } + + goto out; + +unregister_chans: + for_each_set_bit(port_id, priv->probed_ports, FPC202_NUM_PORTS) + fpc202_remove_port(priv, port_id); + + i2c_atr_delete(priv->atr); +disable_gpio: + fpc202_set_enable(priv, 0); + gpiochip_remove(&priv->gpio); +destroy_mutex: + mutex_destroy(&priv->reg_dev_lock); +out: + return ret; +} + +static void fpc202_remove(struct i2c_client *client) +{ + struct fpc202_priv *priv = i2c_get_clientdata(client); + int port_id; + + for_each_set_bit(port_id, priv->probed_ports, FPC202_NUM_PORTS) + fpc202_remove_port(priv, port_id); + + mutex_destroy(&priv->reg_dev_lock); + + i2c_atr_delete(priv->atr); + + fpc202_set_enable(priv, 0); + gpiochip_remove(&priv->gpio); +} + +static const struct of_device_id fpc202_of_match[] = { + { .compatible = "ti,fpc202" }, + {} +}; +MODULE_DEVICE_TABLE(of, fpc202_of_match); + +static struct i2c_driver fpc202_driver = { + .driver = { + .name = "fpc202", + .of_match_table = fpc202_of_match, + }, + .probe = fpc202_probe, + .remove = fpc202_remove, +}; + +module_i2c_driver(fpc202_driver); + +MODULE_AUTHOR("Romain Gantois <romain.gantois@bootlin.com>"); +MODULE_DESCRIPTION("TI FPC202 Dual Port Controller driver"); +MODULE_LICENSE("GPL"); +MODULE_IMPORT_NS("I2C_ATR"); diff --git a/drivers/misc/uacce/uacce.c b/drivers/misc/uacce/uacce.c index bdc2e6fda782..42e7d2a2a90c 100644 --- a/drivers/misc/uacce/uacce.c +++ b/drivers/misc/uacce/uacce.c @@ -465,40 +465,6 @@ static void uacce_release(struct device *dev) kfree(uacce); } -static unsigned int uacce_enable_sva(struct device *parent, unsigned int flags) -{ - int ret; - - if (!(flags & UACCE_DEV_SVA)) - return flags; - - flags &= ~UACCE_DEV_SVA; - - ret = iommu_dev_enable_feature(parent, IOMMU_DEV_FEAT_IOPF); - if (ret) { - dev_err(parent, "failed to enable IOPF feature! ret = %pe\n", ERR_PTR(ret)); - return flags; - } - - ret = iommu_dev_enable_feature(parent, IOMMU_DEV_FEAT_SVA); - if (ret) { - dev_err(parent, "failed to enable SVA feature! 
ret = %pe\n", ERR_PTR(ret)); - iommu_dev_disable_feature(parent, IOMMU_DEV_FEAT_IOPF); - return flags; - } - - return flags | UACCE_DEV_SVA; -} - -static void uacce_disable_sva(struct uacce_device *uacce) -{ - if (!(uacce->flags & UACCE_DEV_SVA)) - return; - - iommu_dev_disable_feature(uacce->parent, IOMMU_DEV_FEAT_SVA); - iommu_dev_disable_feature(uacce->parent, IOMMU_DEV_FEAT_IOPF); -} - /** * uacce_alloc() - alloc an accelerator * @parent: pointer of uacce parent device @@ -518,8 +484,6 @@ struct uacce_device *uacce_alloc(struct device *parent, if (!uacce) return ERR_PTR(-ENOMEM); - flags = uacce_enable_sva(parent, flags); - uacce->parent = parent; uacce->flags = flags; uacce->ops = interface->ops; @@ -542,7 +506,6 @@ struct uacce_device *uacce_alloc(struct device *parent, return uacce; err_with_uacce: - uacce_disable_sva(uacce); kfree(uacce); return ERR_PTR(ret); } @@ -605,9 +568,6 @@ void uacce_remove(struct uacce_device *uacce) unmap_mapping_range(q->mapping, 0, 0, 1); } - /* disable sva now since no opened queues */ - uacce_disable_sva(uacce); - if (uacce->cdev) cdev_device_del(uacce->cdev, &uacce->dev); xa_erase(&uacce_xa, uacce->dev_id); diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c index 57bd49eea777..66c0d1ba2a33 100644 --- a/drivers/mmc/host/sdhci-msm.c +++ b/drivers/mmc/host/sdhci-msm.c @@ -1882,6 +1882,11 @@ static int sdhci_msm_ice_init(struct sdhci_msm_host *msm_host, if (IS_ERR_OR_NULL(ice)) return PTR_ERR_OR_ZERO(ice); + if (qcom_ice_get_supported_key_type(ice) != BLK_CRYPTO_KEY_TYPE_RAW) { + dev_warn(dev, "Wrapped keys not supported. Disabling inline encryption support.\n"); + return 0; + } + msm_host->ice = ice; /* Initialize the blk_crypto_profile */ @@ -1962,16 +1967,7 @@ static int sdhci_msm_ice_keyslot_program(struct blk_crypto_profile *profile, struct sdhci_msm_host *msm_host = sdhci_msm_host_from_crypto_profile(profile); - /* Only AES-256-XTS has been tested so far. */ - if (key->crypto_cfg.crypto_mode != BLK_ENCRYPTION_MODE_AES_256_XTS) - return -EOPNOTSUPP; - - return qcom_ice_program_key(msm_host->ice, - QCOM_ICE_CRYPTO_ALG_AES_XTS, - QCOM_ICE_CRYPTO_KEY_SIZE_256, - key->bytes, - key->crypto_cfg.data_unit_size / 512, - slot); + return qcom_ice_program_key(msm_host->ice, slot, key); } static int sdhci_msm_ice_keyslot_evict(struct blk_crypto_profile *profile, diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c index 4ffaf7588885..3504507477c6 100644 --- a/drivers/net/ethernet/microsoft/mana/gdma_main.c +++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c @@ -391,6 +391,7 @@ static void mana_gd_process_eqe(struct gdma_queue *eq) case GDMA_EQE_HWC_INIT_EQ_ID_DB: case GDMA_EQE_HWC_INIT_DATA: case GDMA_EQE_HWC_INIT_DONE: + case GDMA_EQE_HWC_SOC_SERVICE: case GDMA_EQE_RNIC_QP_FATAL: if (!eq->eq.callback) break; @@ -964,6 +965,7 @@ int mana_gd_verify_vf_version(struct pci_dev *pdev) err, resp.hdr.status); return err ? 
err : -EPROTO; } + gc->pf_cap_flags1 = resp.pf_cap_flags1; if (resp.pf_cap_flags1 & GDMA_DRV_CAP_FLAG_1_HWC_TIMEOUT_RECONFIG) { err = mana_gd_query_hwc_timeout(pdev, &hwc->hwc_timeout); if (err) { @@ -1004,7 +1006,6 @@ int mana_gd_register_device(struct gdma_dev *gd) return 0; } -EXPORT_SYMBOL_NS(mana_gd_register_device, "NET_MANA"); int mana_gd_deregister_device(struct gdma_dev *gd) { @@ -1035,7 +1036,6 @@ int mana_gd_deregister_device(struct gdma_dev *gd) return err; } -EXPORT_SYMBOL_NS(mana_gd_deregister_device, "NET_MANA"); u32 mana_gd_wq_avail_space(struct gdma_queue *wq) { @@ -1469,10 +1469,14 @@ static int mana_gd_setup(struct pci_dev *pdev) mana_gd_init_registers(pdev); mana_smc_init(&gc->shm_channel, gc->dev, gc->shm_base); + gc->service_wq = alloc_ordered_workqueue("gdma_service_wq", 0); + if (!gc->service_wq) + return -ENOMEM; + err = mana_gd_setup_irqs(pdev); if (err) { dev_err(gc->dev, "Failed to setup IRQs: %d\n", err); - return err; + goto free_workqueue; } err = mana_hwc_create_channel(gc); @@ -1498,6 +1502,8 @@ destroy_hwc: mana_hwc_destroy_channel(gc); remove_irq: mana_gd_remove_irqs(pdev); +free_workqueue: + destroy_workqueue(gc->service_wq); dev_err(&pdev->dev, "%s failed (error %d)\n", __func__, err); return err; } @@ -1509,6 +1515,8 @@ static void mana_gd_cleanup(struct pci_dev *pdev) mana_hwc_destroy_channel(gc); mana_gd_remove_irqs(pdev); + + destroy_workqueue(gc->service_wq); dev_dbg(&pdev->dev, "mana gdma cleanup successful\n"); } @@ -1578,8 +1586,14 @@ static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (err) goto cleanup_gd; + err = mana_rdma_probe(&gc->mana_ib); + if (err) + goto cleanup_mana; + return 0; +cleanup_mana: + mana_remove(&gc->mana, false); cleanup_gd: mana_gd_cleanup(pdev); unmap_bar: @@ -1607,6 +1621,7 @@ static void mana_gd_remove(struct pci_dev *pdev) { struct gdma_context *gc = pci_get_drvdata(pdev); + mana_rdma_remove(&gc->mana_ib); mana_remove(&gc->mana, false); mana_gd_cleanup(pdev); @@ -1630,6 +1645,7 @@ static int mana_gd_suspend(struct pci_dev *pdev, pm_message_t state) { struct gdma_context *gc = pci_get_drvdata(pdev); + mana_rdma_remove(&gc->mana_ib); mana_remove(&gc->mana, true); mana_gd_cleanup(pdev); @@ -1654,6 +1670,10 @@ static int mana_gd_resume(struct pci_dev *pdev) if (err) return err; + err = mana_rdma_probe(&gc->mana_ib); + if (err) + return err; + return 0; } @@ -1664,6 +1684,7 @@ static void mana_gd_shutdown(struct pci_dev *pdev) dev_info(&pdev->dev, "Shutdown was called\n"); + mana_rdma_remove(&gc->mana_ib); mana_remove(&gc->mana, true); mana_gd_cleanup(pdev); diff --git a/drivers/net/ethernet/microsoft/mana/hw_channel.c b/drivers/net/ethernet/microsoft/mana/hw_channel.c index 1ba49602089b..a8c4d8db75a5 100644 --- a/drivers/net/ethernet/microsoft/mana/hw_channel.c +++ b/drivers/net/ethernet/microsoft/mana/hw_channel.c @@ -112,11 +112,13 @@ out: static void mana_hwc_init_event_handler(void *ctx, struct gdma_queue *q_self, struct gdma_event *event) { + union hwc_init_soc_service_type service_data; struct hw_channel_context *hwc = ctx; struct gdma_dev *gd = hwc->gdma_dev; union hwc_init_type_data type_data; union hwc_init_eq_id_db eq_db; u32 type, val; + int ret; switch (event->type) { case GDMA_EQE_HWC_INIT_EQ_ID_DB: @@ -199,7 +201,24 @@ static void mana_hwc_init_event_handler(void *ctx, struct gdma_queue *q_self, } break; + case GDMA_EQE_HWC_SOC_SERVICE: + service_data.as_uint32 = event->details[0]; + type = service_data.type; + switch (type) { + case GDMA_SERVICE_TYPE_RDMA_SUSPEND: + case 
GDMA_SERVICE_TYPE_RDMA_RESUME: + ret = mana_rdma_service_event(gd->gdma_context, type); + if (ret) + dev_err(hwc->dev, "Failed to schedule adev service event: %d\n", + ret); + break; + default: + dev_warn(hwc->dev, "Received unknown SOC service type %u\n", type); + break; + } + + break; default: dev_warn(hwc->dev, "Received unknown gdma event %u\n", event->type); /* Ignore unknown events, which should never happen. */ diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c index 9c58d9e0bbb5..ccd2885c939e 100644 --- a/drivers/net/ethernet/microsoft/mana/mana_en.c +++ b/drivers/net/ethernet/microsoft/mana/mana_en.c @@ -2950,7 +2950,7 @@ static void remove_adev(struct gdma_dev *gd) gd->adev = NULL; } -static int add_adev(struct gdma_dev *gd) +static int add_adev(struct gdma_dev *gd, const char *name) { struct auxiliary_device *adev; struct mana_adev *madev; @@ -2966,7 +2966,7 @@ static int add_adev(struct gdma_dev *gd) goto idx_fail; adev->id = ret; - adev->name = "rdma"; + adev->name = name; adev->dev.parent = gd->gdma_context->dev; adev->dev.release = adev_release; madev->mdev = gd; @@ -2998,6 +2998,70 @@ idx_fail: return ret; } +static void mana_rdma_service_handle(struct work_struct *work) +{ + struct mana_service_work *serv_work = + container_of(work, struct mana_service_work, work); + struct gdma_dev *gd = serv_work->gdma_dev; + struct device *dev = gd->gdma_context->dev; + int ret; + + if (READ_ONCE(gd->rdma_teardown)) + goto out; + + switch (serv_work->event) { + case GDMA_SERVICE_TYPE_RDMA_SUSPEND: + if (!gd->adev || gd->is_suspended) + break; + + remove_adev(gd); + gd->is_suspended = true; + break; + + case GDMA_SERVICE_TYPE_RDMA_RESUME: + if (!gd->is_suspended) + break; + + ret = add_adev(gd, "rdma"); + if (ret) + dev_err(dev, "Failed to add adev on resume: %d\n", ret); + else + gd->is_suspended = false; + break; + + default: + dev_warn(dev, "unknown adev service event %u\n", + serv_work->event); + break; + } + +out: + kfree(serv_work); +} + +int mana_rdma_service_event(struct gdma_context *gc, enum gdma_service_type event) +{ + struct gdma_dev *gd = &gc->mana_ib; + struct mana_service_work *serv_work; + + if (gd->dev_id.type != GDMA_DEVICE_MANA_IB) { + /* RDMA device is not detected on pci */ + return 0; + } + + serv_work = kzalloc(sizeof(*serv_work), GFP_ATOMIC); + if (!serv_work) + return -ENOMEM; + + serv_work->event = event; + serv_work->gdma_dev = gd; + + INIT_WORK(&serv_work->work, mana_rdma_service_handle); + queue_work(gc->service_wq, &serv_work->work); + + return 0; +} + int mana_probe(struct gdma_dev *gd, bool resuming) { struct gdma_context *gc = gd->gdma_context; @@ -3085,7 +3149,7 @@ int mana_probe(struct gdma_dev *gd, bool resuming) } } - err = add_adev(gd); + err = add_adev(gd, "eth"); out: if (err) { mana_remove(gd, false); @@ -3159,6 +3223,44 @@ out: dev_dbg(dev, "%s succeeded\n", __func__); } +int mana_rdma_probe(struct gdma_dev *gd) +{ + int err = 0; + + if (gd->dev_id.type != GDMA_DEVICE_MANA_IB) { + /* RDMA device is not detected on pci */ + return err; + } + + err = mana_gd_register_device(gd); + if (err) + return err; + + err = add_adev(gd, "rdma"); + if (err) + mana_gd_deregister_device(gd); + + return err; +} + +void mana_rdma_remove(struct gdma_dev *gd) +{ + struct gdma_context *gc = gd->gdma_context; + + if (gd->dev_id.type != GDMA_DEVICE_MANA_IB) { + /* RDMA device is not detected on pci */ + return; + } + + WRITE_ONCE(gd->rdma_teardown, true); + flush_workqueue(gc->service_wq); + + if (gd->adev) + 
remove_adev(gd); + + mana_gd_deregister_device(gd); +} + struct net_device *mana_get_primary_netdev(struct mana_context *ac, u32 port_index, netdevice_tracker *tracker) diff --git a/drivers/nvme/target/Kconfig b/drivers/nvme/target/Kconfig index 4c253b433bf7..4904097dfd49 100644 --- a/drivers/nvme/target/Kconfig +++ b/drivers/nvme/target/Kconfig @@ -3,7 +3,7 @@ config NVME_TARGET tristate "NVMe Target support" depends on BLOCK - depends on CONFIGFS_FS + select CONFIGFS_FS select NVME_KEYRING if NVME_TARGET_TCP_TLS select KEYS if NVME_TARGET_TCP_TLS select SGL_ALLOC diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c index aedd0e2dcd89..0edd639898a6 100644 --- a/drivers/of/fdt.c +++ b/drivers/of/fdt.c @@ -25,6 +25,7 @@ #include <linux/serial_core.h> #include <linux/sysfs.h> #include <linux/random.h> +#include <linux/kexec_handover.h> #include <asm/setup.h> /* for COMMAND_LINE_SIZE */ #include <asm/page.h> @@ -875,6 +876,36 @@ void __init early_init_dt_check_for_usable_mem_range(void) memblock_add(rgn[i].base, rgn[i].size); } +/** + * early_init_dt_check_kho - Decode info required for kexec handover from DT + */ +static void __init early_init_dt_check_kho(void) +{ + unsigned long node = chosen_node_offset; + u64 fdt_start, fdt_size, scratch_start, scratch_size; + const __be32 *p; + int l; + + if (!IS_ENABLED(CONFIG_KEXEC_HANDOVER) || (long)node < 0) + return; + + p = of_get_flat_dt_prop(node, "linux,kho-fdt", &l); + if (l != (dt_root_addr_cells + dt_root_size_cells) * sizeof(__be32)) + return; + + fdt_start = dt_mem_next_cell(dt_root_addr_cells, &p); + fdt_size = dt_mem_next_cell(dt_root_addr_cells, &p); + + p = of_get_flat_dt_prop(node, "linux,kho-scratch", &l); + if (l != (dt_root_addr_cells + dt_root_size_cells) * sizeof(__be32)) + return; + + scratch_start = dt_mem_next_cell(dt_root_addr_cells, &p); + scratch_size = dt_mem_next_cell(dt_root_addr_cells, &p); + + kho_populate(fdt_start, fdt_size, scratch_start, scratch_size); +} + #ifdef CONFIG_SERIAL_EARLYCON int __init early_init_dt_scan_chosen_stdout(void) @@ -1169,6 +1200,9 @@ void __init early_init_dt_scan_nodes(void) /* Handle linux,usable-memory-range property */ early_init_dt_check_for_usable_mem_range(); + + /* Handle kexec handover */ + early_init_dt_check_kho(); } bool __init early_init_dt_scan(void *dt_virt, phys_addr_t dt_phys) diff --git a/drivers/of/kexec.c b/drivers/of/kexec.c index 5b924597a4de..1ee2d31816ae 100644 --- a/drivers/of/kexec.c +++ b/drivers/of/kexec.c @@ -264,6 +264,43 @@ static inline int setup_ima_buffer(const struct kimage *image, void *fdt, } #endif /* CONFIG_IMA_KEXEC */ +static int kho_add_chosen(const struct kimage *image, void *fdt, int chosen_node) +{ + int ret = 0; +#ifdef CONFIG_KEXEC_HANDOVER + phys_addr_t fdt_mem = 0; + phys_addr_t fdt_len = 0; + phys_addr_t scratch_mem = 0; + phys_addr_t scratch_len = 0; + + ret = fdt_delprop(fdt, chosen_node, "linux,kho-fdt"); + if (ret && ret != -FDT_ERR_NOTFOUND) + return ret; + ret = fdt_delprop(fdt, chosen_node, "linux,kho-scratch"); + if (ret && ret != -FDT_ERR_NOTFOUND) + return ret; + + if (!image->kho.fdt || !image->kho.scratch) + return 0; + + fdt_mem = image->kho.fdt; + fdt_len = PAGE_SIZE; + scratch_mem = image->kho.scratch->mem; + scratch_len = image->kho.scratch->bufsz; + + pr_debug("Adding kho metadata to DT"); + + ret = fdt_appendprop_addrrange(fdt, 0, chosen_node, "linux,kho-fdt", + fdt_mem, fdt_len); + if (ret) + return ret; + ret = fdt_appendprop_addrrange(fdt, 0, chosen_node, "linux,kho-scratch", + scratch_mem, scratch_len); + +#endif /* 
CONFIG_KEXEC_HANDOVER */ + return ret; +} + /* * of_kexec_alloc_and_setup_fdt - Alloc and setup a new Flattened Device Tree * @@ -414,6 +451,11 @@ void *of_kexec_alloc_and_setup_fdt(const struct kimage *image, #endif } + /* Add kho metadata if this is a KHO image */ + ret = kho_add_chosen(image, fdt, chosen_node); + if (ret) + goto out; + /* add bootargs */ if (cmdline) { ret = fdt_setprop_string(fdt, chosen_node, "bootargs", cmdline); diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig index 464cc9aca157..33db9104df17 100644 --- a/drivers/pinctrl/Kconfig +++ b/drivers/pinctrl/Kconfig @@ -103,12 +103,10 @@ config PINCTRL_AS3722 config PINCTRL_AT91 bool "AT91 pinctrl driver" - depends on OF - depends on ARCH_AT91 + depends on (OF && ARCH_AT91) || COMPILE_TEST select PINMUX select PINCONF select GPIOLIB - select OF_GPIO select GPIOLIB_IRQCHIP help Say Y here to enable the at91 pinctrl driver diff --git a/drivers/pinctrl/actions/pinctrl-owl.c b/drivers/pinctrl/actions/pinctrl-owl.c index d49b77dcfcff..86f3d5c69e36 100644 --- a/drivers/pinctrl/actions/pinctrl-owl.c +++ b/drivers/pinctrl/actions/pinctrl-owl.c @@ -598,7 +598,7 @@ static int owl_gpio_get(struct gpio_chip *chip, unsigned int offset) return !!(val & BIT(offset)); } -static void owl_gpio_set(struct gpio_chip *chip, unsigned int offset, int value) +static int owl_gpio_set(struct gpio_chip *chip, unsigned int offset, int value) { struct owl_pinctrl *pctrl = gpiochip_get_data(chip); const struct owl_gpio_port *port; @@ -607,13 +607,15 @@ static void owl_gpio_set(struct gpio_chip *chip, unsigned int offset, int value) port = owl_gpio_get_port(pctrl, &offset); if (WARN_ON(port == NULL)) - return; + return -ENODEV; gpio_base = pctrl->base + port->offset; raw_spin_lock_irqsave(&pctrl->lock, flags); owl_gpio_update_reg(gpio_base + port->dat, offset, value); raw_spin_unlock_irqrestore(&pctrl->lock, flags); + + return 0; } static int owl_gpio_direction_input(struct gpio_chip *chip, unsigned int offset) @@ -960,7 +962,7 @@ int owl_pinctrl_probe(struct platform_device *pdev, pctrl->chip.direction_input = owl_gpio_direction_input; pctrl->chip.direction_output = owl_gpio_direction_output; pctrl->chip.get = owl_gpio_get; - pctrl->chip.set = owl_gpio_set; + pctrl->chip.set_rv = owl_gpio_set; pctrl->chip.request = owl_gpio_request; pctrl->chip.free = owl_gpio_free; diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c index eaeec096bc9a..826827800474 100644 --- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c +++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c @@ -356,11 +356,14 @@ static int bcm2835_gpio_get_direction(struct gpio_chip *chip, unsigned int offse return GPIO_LINE_DIRECTION_IN; } -static void bcm2835_gpio_set(struct gpio_chip *chip, unsigned offset, int value) +static int bcm2835_gpio_set(struct gpio_chip *chip, unsigned int offset, + int value) { struct bcm2835_pinctrl *pc = gpiochip_get_data(chip); bcm2835_gpio_set_bit(pc, value ? 
GPSET0 : GPCLR0, offset); + + return 0; } static int bcm2835_gpio_direction_output(struct gpio_chip *chip, @@ -394,7 +397,7 @@ static const struct gpio_chip bcm2835_gpio_chip = { .direction_output = bcm2835_gpio_direction_output, .get_direction = bcm2835_gpio_get_direction, .get = bcm2835_gpio_get, - .set = bcm2835_gpio_set, + .set_rv = bcm2835_gpio_set, .set_config = gpiochip_generic_config, .base = -1, .ngpio = BCM2835_NUM_GPIOS, @@ -411,7 +414,7 @@ static const struct gpio_chip bcm2711_gpio_chip = { .direction_output = bcm2835_gpio_direction_output, .get_direction = bcm2835_gpio_get_direction, .get = bcm2835_gpio_get, - .set = bcm2835_gpio_set, + .set_rv = bcm2835_gpio_set, .set_config = gpiochip_generic_config, .base = -1, .ngpio = BCM2711_NUM_GPIOS, diff --git a/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c b/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c index c9a3d3aa8c10..1d08b8d4cdd7 100644 --- a/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c +++ b/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c @@ -390,7 +390,7 @@ static int iproc_gpio_get_direction(struct gpio_chip *gc, unsigned int gpio) return GPIO_LINE_DIRECTION_IN; } -static void iproc_gpio_set(struct gpio_chip *gc, unsigned gpio, int val) +static int iproc_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val) { struct iproc_gpio *chip = gpiochip_get_data(gc); unsigned long flags; @@ -400,6 +400,8 @@ static void iproc_gpio_set(struct gpio_chip *gc, unsigned gpio, int val) raw_spin_unlock_irqrestore(&chip->lock, flags); dev_dbg(chip->dev, "gpio:%u set, value:%d\n", gpio, val); + + return 0; } static int iproc_gpio_get(struct gpio_chip *gc, unsigned gpio) @@ -863,7 +865,7 @@ static int iproc_gpio_probe(struct platform_device *pdev) gc->direction_input = iproc_gpio_direction_input; gc->direction_output = iproc_gpio_direction_output; gc->get_direction = iproc_gpio_get_direction; - gc->set = iproc_gpio_set; + gc->set_rv = iproc_gpio_set; gc->get = iproc_gpio_get; chip->pinmux_is_supported = of_property_read_bool(dev->of_node, diff --git a/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c b/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c index a96be8f244e0..b08f8480ddc6 100644 --- a/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c +++ b/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c @@ -310,7 +310,7 @@ static int nsp_gpio_get_direction(struct gpio_chip *gc, unsigned gpio) return !val; } -static void nsp_gpio_set(struct gpio_chip *gc, unsigned gpio, int val) +static int nsp_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val) { struct nsp_gpio *chip = gpiochip_get_data(gc); unsigned long flags; @@ -320,6 +320,8 @@ static void nsp_gpio_set(struct gpio_chip *gc, unsigned gpio, int val) raw_spin_unlock_irqrestore(&chip->lock, flags); dev_dbg(chip->dev, "gpio:%u set, value:%d\n", gpio, val); + + return 0; } static int nsp_gpio_get(struct gpio_chip *gc, unsigned gpio) @@ -654,7 +656,7 @@ static int nsp_gpio_probe(struct platform_device *pdev) gc->direction_input = nsp_gpio_direction_input; gc->direction_output = nsp_gpio_direction_output; gc->get_direction = nsp_gpio_get_direction; - gc->set = nsp_gpio_set; + gc->set_rv = nsp_gpio_set; gc->get = nsp_gpio_get; /* optional GPIO interrupt support */ diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c index 4bdbf6bb26e2..9046292d1360 100644 --- a/drivers/pinctrl/core.c +++ b/drivers/pinctrl/core.c @@ -1530,6 +1530,35 @@ void pinctrl_unregister_mappings(const struct pinctrl_map *map) } EXPORT_SYMBOL_GPL(pinctrl_unregister_mappings); +static void devm_pinctrl_unregister_mappings(void *maps) +{ + pinctrl_unregister_mappings(maps); +} 
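The managed wrapper that follows pairs pinctrl_register_mappings() with the action callback above via devm_add_action_or_reset(), so the map table is dropped automatically when the consumer device is unbound, even if a later probe step fails. A minimal usage sketch, assuming hypothetical device, controller, group and function names:

	#include <linux/pinctrl/machine.h>
	#include <linux/platform_device.h>

	/* Must not be __initdata: the pinctrl core keeps a reference to it. */
	static const struct pinctrl_map board_pinmux_map[] = {
		/* "foo-uart", "pinctrl-foo", "uart0_grp" and "uart0" are made-up names. */
		PIN_MAP_MUX_GROUP_DEFAULT("foo-uart", "pinctrl-foo",
					  "uart0_grp", "uart0"),
	};

	static int foo_board_probe(struct platform_device *pdev)
	{
		/* The mapping table is unregistered automatically on unbind. */
		return devm_pinctrl_register_mappings(&pdev->dev, board_pinmux_map,
						      ARRAY_SIZE(board_pinmux_map));
	}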
+ +/** + * devm_pinctrl_register_mappings() - Resource managed pinctrl_register_mappings() + * @dev: device for which mappings are registered + * @maps: the pincontrol mappings table to register. Note the pinctrl-core + * keeps a reference to the passed in maps, so they should _not_ be + * marked with __initdata. + * @num_maps: the number of maps in the mapping table + * + * Returns: 0 on success, or negative errno on failure. + */ +int devm_pinctrl_register_mappings(struct device *dev, + const struct pinctrl_map *maps, + unsigned int num_maps) +{ + int ret; + + ret = pinctrl_register_mappings(maps, num_maps); + if (ret) + return ret; + + return devm_add_action_or_reset(dev, devm_pinctrl_unregister_mappings, (void *)maps); +} +EXPORT_SYMBOL_GPL(devm_pinctrl_register_mappings); + /** * pinctrl_force_sleep() - turn a given controller device into sleep state * @pctldev: pin controller device diff --git a/drivers/pinctrl/freescale/Kconfig b/drivers/pinctrl/freescale/Kconfig index 4c420b21b804..8d24decd3f07 100644 --- a/drivers/pinctrl/freescale/Kconfig +++ b/drivers/pinctrl/freescale/Kconfig @@ -20,7 +20,9 @@ config PINCTRL_IMX_SCMI config PINCTRL_IMX_SCU tristate + depends on OF depends on IMX_SCU || COMPILE_TEST + default IMX_SCU select PINCTRL_IMX config PINCTRL_IMX1_CORE @@ -159,6 +161,7 @@ config PINCTRL_IMX8MM tristate "IMX8MM pinctrl driver" depends on OF depends on SOC_IMX8M || COMPILE_TEST + default SOC_IMX8M select PINCTRL_IMX help Say Y here to enable the imx8mm pinctrl driver @@ -167,6 +170,7 @@ config PINCTRL_IMX8MN tristate "IMX8MN pinctrl driver" depends on OF depends on SOC_IMX8M || COMPILE_TEST + default SOC_IMX8M select PINCTRL_IMX help Say Y here to enable the imx8mn pinctrl driver @@ -175,6 +179,7 @@ config PINCTRL_IMX8MP tristate "IMX8MP pinctrl driver" depends on OF depends on SOC_IMX8M || COMPILE_TEST + default SOC_IMX8M select PINCTRL_IMX help Say Y here to enable the imx8mp pinctrl driver @@ -183,6 +188,7 @@ config PINCTRL_IMX8MQ tristate "IMX8MQ pinctrl driver" depends on OF depends on SOC_IMX8M || COMPILE_TEST + default SOC_IMX8M select PINCTRL_IMX help Say Y here to enable the imx8mq pinctrl driver @@ -191,6 +197,7 @@ config PINCTRL_IMX8QM tristate "IMX8QM pinctrl driver" depends on OF depends on (IMX_SCU && ARCH_MXC && ARM64) || COMPILE_TEST + default ARCH_MXC select PINCTRL_IMX_SCU help Say Y here to enable the imx8qm pinctrl driver @@ -199,6 +206,7 @@ config PINCTRL_IMX8QXP tristate "IMX8QXP pinctrl driver" depends on OF depends on (IMX_SCU && ARCH_MXC && ARM64) || COMPILE_TEST + default ARCH_MXC select PINCTRL_IMX_SCU help Say Y here to enable the imx8qxp pinctrl driver @@ -207,6 +215,7 @@ config PINCTRL_IMX8DXL tristate "IMX8DXL pinctrl driver" depends on OF depends on (IMX_SCU && ARCH_MXC && ARM64) || COMPILE_TEST + default ARCH_MXC select PINCTRL_IMX_SCU help Say Y here to enable the imx8dxl pinctrl driver @@ -215,6 +224,7 @@ config PINCTRL_IMX8ULP tristate "IMX8ULP pinctrl driver" depends on OF depends on ARCH_MXC || COMPILE_TEST + default ARCH_MXC select PINCTRL_IMX help Say Y here to enable the imx8ulp pinctrl driver @@ -239,6 +249,7 @@ config PINCTRL_IMX93 tristate "IMX93 pinctrl driver" depends on OF depends on ARCH_MXC || COMPILE_TEST + default SOC_IMX9 select PINCTRL_IMX help Say Y here to enable the imx93 pinctrl driver diff --git a/drivers/pinctrl/freescale/pinctrl-imx-scmi.c b/drivers/pinctrl/freescale/pinctrl-imx-scmi.c index 8f15c4c4dc44..4e8ab919b334 100644 --- a/drivers/pinctrl/freescale/pinctrl-imx-scmi.c +++ 
b/drivers/pinctrl/freescale/pinctrl-imx-scmi.c @@ -51,6 +51,7 @@ struct scmi_pinctrl_imx { #define IMX_SCMI_PIN_SIZE 24 #define IMX95_DAISY_OFF 0x408 +#define IMX94_DAISY_OFF 0x608 static int pinctrl_scmi_imx_dt_node_to_map(struct pinctrl_dev *pctldev, struct device_node *np, @@ -70,6 +71,8 @@ static int pinctrl_scmi_imx_dt_node_to_map(struct pinctrl_dev *pctldev, if (!daisy_off) { if (of_machine_is_compatible("fsl,imx95")) { daisy_off = IMX95_DAISY_OFF; + } else if (of_machine_is_compatible("fsl,imx94")) { + daisy_off = IMX94_DAISY_OFF; } else { dev_err(pctldev->dev, "platform not support scmi pinctrl\n"); return -EINVAL; @@ -289,6 +292,7 @@ scmi_pinctrl_imx_get_pins(struct scmi_pinctrl_imx *pmx, struct pinctrl_desc *des static const char * const scmi_pinctrl_imx_allowlist[] = { "fsl,imx95", + "fsl,imx94", NULL }; diff --git a/drivers/pinctrl/mediatek/Kconfig b/drivers/pinctrl/mediatek/Kconfig index 58f32818a0e6..2d15af6be276 100644 --- a/drivers/pinctrl/mediatek/Kconfig +++ b/drivers/pinctrl/mediatek/Kconfig @@ -181,6 +181,16 @@ config PINCTRL_MT6797 default ARM64 && ARCH_MEDIATEK select PINCTRL_MTK_PARIS +config PINCTRL_MT6893 + bool "MediaTek Dimensity MT6893 pin control" + depends on OF + depends on ARM64 || COMPILE_TEST + default ARM64 && ARCH_MEDIATEK + select PINCTRL_MTK_PARIS + help + Say yes here to support the pin controller and GPIO driver + on the MediaTek Dimensity 1200 MT6893 smartphone SoC. + config PINCTRL_MT7622 bool "MediaTek MT7622 pin control" depends on OF @@ -263,6 +273,18 @@ config PINCTRL_MT8195 default ARM64 && ARCH_MEDIATEK select PINCTRL_MTK_PARIS +config PINCTRL_MT8196 + bool "MediaTek MT8196 pin control" + depends on OF + depends on ARM64 || COMPILE_TEST + default ARM64 && ARCH_MEDIATEK + select PINCTRL_MTK_PARIS + help + Say yes here to support the pin controller and GPIO driver + on the MediaTek MT8196 SoC. + On MTK platforms, virtual GPIOs are supported and used to + map specific EINTs which do not have a real GPIO pin. 
+ config PINCTRL_MT8365 bool "MediaTek MT8365 pin control" depends on OF diff --git a/drivers/pinctrl/mediatek/Makefile b/drivers/pinctrl/mediatek/Makefile index 721ae83476d0..7518980fba59 100644 --- a/drivers/pinctrl/mediatek/Makefile +++ b/drivers/pinctrl/mediatek/Makefile @@ -23,6 +23,7 @@ obj-$(CONFIG_PINCTRL_MT6765) += pinctrl-mt6765.o obj-$(CONFIG_PINCTRL_MT6779) += pinctrl-mt6779.o obj-$(CONFIG_PINCTRL_MT6795) += pinctrl-mt6795.o obj-$(CONFIG_PINCTRL_MT6797) += pinctrl-mt6797.o +obj-$(CONFIG_PINCTRL_MT6893) += pinctrl-mt6893.o obj-$(CONFIG_PINCTRL_MT7622) += pinctrl-mt7622.o obj-$(CONFIG_PINCTRL_MT7623) += pinctrl-mt7623.o obj-$(CONFIG_PINCTRL_MT7629) += pinctrl-mt7629.o @@ -36,6 +37,7 @@ obj-$(CONFIG_PINCTRL_MT8186) += pinctrl-mt8186.o obj-$(CONFIG_PINCTRL_MT8188) += pinctrl-mt8188.o obj-$(CONFIG_PINCTRL_MT8192) += pinctrl-mt8192.o obj-$(CONFIG_PINCTRL_MT8195) += pinctrl-mt8195.o +obj-$(CONFIG_PINCTRL_MT8196) += pinctrl-mt8196.o obj-$(CONFIG_PINCTRL_MT8365) += pinctrl-mt8365.o obj-$(CONFIG_PINCTRL_MT8516) += pinctrl-mt8516.o obj-$(CONFIG_PINCTRL_MT6397) += pinctrl-mt6397.o diff --git a/drivers/pinctrl/mediatek/mtk-eint.c b/drivers/pinctrl/mediatek/mtk-eint.c index e20aaba0a33a..d906a5e4101f 100644 --- a/drivers/pinctrl/mediatek/mtk-eint.c +++ b/drivers/pinctrl/mediatek/mtk-eint.c @@ -22,7 +22,6 @@ #include <linux/platform_device.h> #include "mtk-eint.h" -#include "pinctrl-mtk-common-v2.h" #define MTK_EINT_EDGE_SENSITIVE 0 #define MTK_EINT_LEVEL_SENSITIVE 1 @@ -505,10 +504,9 @@ int mtk_eint_find_irq(struct mtk_eint *eint, unsigned long eint_n) } EXPORT_SYMBOL_GPL(mtk_eint_find_irq); -int mtk_eint_do_init(struct mtk_eint *eint) +int mtk_eint_do_init(struct mtk_eint *eint, struct mtk_eint_pin *eint_pin) { - unsigned int size, i, port, inst = 0; - struct mtk_pinctrl *hw = (struct mtk_pinctrl *)eint->pctl; + unsigned int size, i, port, virq, inst = 0; /* If clients don't assign a specific regs, let's use generic one */ if (!eint->regs) @@ -519,7 +517,15 @@ int mtk_eint_do_init(struct mtk_eint *eint) if (!eint->base_pin_num) return -ENOMEM; - if (eint->nbase == 1) { + if (eint_pin) { + eint->pins = eint_pin; + for (i = 0; i < eint->hw->ap_num; i++) { + inst = eint->pins[i].instance; + if (inst >= eint->nbase) + continue; + eint->base_pin_num[inst]++; + } + } else { size = eint->hw->ap_num * sizeof(struct mtk_eint_pin); eint->pins = devm_kmalloc(eint->dev, size, GFP_KERNEL); if (!eint->pins) @@ -533,16 +539,6 @@ int mtk_eint_do_init(struct mtk_eint *eint) } } - if (hw && hw->soc && hw->soc->eint_pin) { - eint->pins = hw->soc->eint_pin; - for (i = 0; i < eint->hw->ap_num; i++) { - inst = eint->pins[i].instance; - if (inst >= eint->nbase) - continue; - eint->base_pin_num[inst]++; - } - } - eint->pin_list = devm_kmalloc(eint->dev, eint->nbase * sizeof(u16 *), GFP_KERNEL); if (!eint->pin_list) goto err_pin_list; @@ -583,7 +579,7 @@ int mtk_eint_do_init(struct mtk_eint *eint) if (inst >= eint->nbase) continue; eint->pin_list[inst][eint->pins[i].index] = i; - int virq = irq_create_mapping(eint->domain, i); + virq = irq_create_mapping(eint->domain, i); irq_set_chip_and_handler(virq, &mtk_eint_irq_chip, handle_level_irq); irq_set_chip_data(virq, eint); @@ -609,7 +605,7 @@ err_cur_mask: err_wake_mask: devm_kfree(eint->dev, eint->pin_list); err_pin_list: - if (eint->nbase == 1) + if (!eint_pin) devm_kfree(eint->dev, eint->pins); err_pins: devm_kfree(eint->dev, eint->base_pin_num); diff --git a/drivers/pinctrl/mediatek/mtk-eint.h b/drivers/pinctrl/mediatek/mtk-eint.h index f7f58cca0d5e..fc31a4c0c77b 
100644 --- a/drivers/pinctrl/mediatek/mtk-eint.h +++ b/drivers/pinctrl/mediatek/mtk-eint.h @@ -66,7 +66,7 @@ struct mtk_eint_xt { struct mtk_eint { struct device *dev; void __iomem **base; - u8 nbase; + int nbase; u16 *base_pin_num; struct irq_domain *domain; int irq; @@ -88,7 +88,7 @@ struct mtk_eint { }; #if IS_ENABLED(CONFIG_EINT_MTK) -int mtk_eint_do_init(struct mtk_eint *eint); +int mtk_eint_do_init(struct mtk_eint *eint, struct mtk_eint_pin *eint_pin); int mtk_eint_do_suspend(struct mtk_eint *eint); int mtk_eint_do_resume(struct mtk_eint *eint); int mtk_eint_set_debounce(struct mtk_eint *eint, unsigned long eint_n, @@ -96,7 +96,8 @@ int mtk_eint_set_debounce(struct mtk_eint *eint, unsigned long eint_n, int mtk_eint_find_irq(struct mtk_eint *eint, unsigned long eint_n); #else -static inline int mtk_eint_do_init(struct mtk_eint *eint) +static inline int mtk_eint_do_init(struct mtk_eint *eint, + struct mtk_eint_pin *eint_pin) { return -EOPNOTSUPP; } diff --git a/drivers/pinctrl/mediatek/pinctrl-airoha.c b/drivers/pinctrl/mediatek/pinctrl-airoha.c index 5d84a778683d..b97b28ebb37a 100644 --- a/drivers/pinctrl/mediatek/pinctrl-airoha.c +++ b/drivers/pinctrl/mediatek/pinctrl-airoha.c @@ -2247,15 +2247,16 @@ static int airoha_convert_pin_to_reg_offset(struct pinctrl_dev *pctrl_dev, } /* gpio callbacks */ -static void airoha_gpio_set(struct gpio_chip *chip, unsigned int gpio, - int value) +static int airoha_gpio_set(struct gpio_chip *chip, unsigned int gpio, + int value) { struct airoha_pinctrl *pinctrl = gpiochip_get_data(chip); u32 offset = gpio % AIROHA_PIN_BANK_SIZE; u8 index = gpio / AIROHA_PIN_BANK_SIZE; - regmap_update_bits(pinctrl->regmap, pinctrl->gpiochip.data[index], - BIT(offset), value ? BIT(offset) : 0); + return regmap_update_bits(pinctrl->regmap, + pinctrl->gpiochip.data[index], + BIT(offset), value ? 
BIT(offset) : 0); } static int airoha_gpio_get(struct gpio_chip *chip, unsigned int gpio) @@ -2280,9 +2281,7 @@ static int airoha_gpio_direction_output(struct gpio_chip *chip, if (err) return err; - airoha_gpio_set(chip, gpio, value); - - return 0; + return airoha_gpio_set(chip, gpio, value); } /* irq callbacks */ @@ -2419,7 +2418,7 @@ static int airoha_pinctrl_add_gpiochip(struct airoha_pinctrl *pinctrl, gc->free = gpiochip_generic_free; gc->direction_input = pinctrl_gpio_direction_input; gc->direction_output = airoha_gpio_direction_output; - gc->set = airoha_gpio_set; + gc->set_rv = airoha_gpio_set; gc->get = airoha_gpio_get; gc->base = -1; gc->ngpio = AIROHA_NUM_PINS; @@ -2715,9 +2714,7 @@ static int airoha_pinconf_set_pin_value(struct pinctrl_dev *pctrl_dev, if (pin < 0) return pin; - airoha_gpio_set(&pinctrl->gpiochip.chip, pin, value); - - return 0; + return airoha_gpio_set(&pinctrl->gpiochip.chip, pin, value); } static int airoha_pinconf_set(struct pinctrl_dev *pctrl_dev, diff --git a/drivers/pinctrl/mediatek/pinctrl-moore.c b/drivers/pinctrl/mediatek/pinctrl-moore.c index aad4891223d3..827d0f191031 100644 --- a/drivers/pinctrl/mediatek/pinctrl-moore.c +++ b/drivers/pinctrl/mediatek/pinctrl-moore.c @@ -496,24 +496,26 @@ static int mtk_gpio_get(struct gpio_chip *chip, unsigned int gpio) return !!value; } -static void mtk_gpio_set(struct gpio_chip *chip, unsigned int gpio, int value) +static int mtk_gpio_set(struct gpio_chip *chip, unsigned int gpio, int value) { struct mtk_pinctrl *hw = gpiochip_get_data(chip); const struct mtk_pin_desc *desc; desc = (const struct mtk_pin_desc *)&hw->soc->pins[gpio]; - if (!desc->name) { - dev_err(hw->dev, "Failed to set gpio %d\n", gpio); - return; - } + if (!desc->name) + return -ENOTSUPP; - mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_DO, !!value); + return mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_DO, !!value); } static int mtk_gpio_direction_output(struct gpio_chip *chip, unsigned int gpio, int value) { - mtk_gpio_set(chip, gpio, value); + int ret; + + ret = mtk_gpio_set(chip, gpio, value); + if (ret) + return ret; return pinctrl_gpio_direction_output(chip, gpio); } @@ -567,7 +569,7 @@ static int mtk_build_gpiochip(struct mtk_pinctrl *hw) chip->direction_input = pinctrl_gpio_direction_input; chip->direction_output = mtk_gpio_direction_output; chip->get = mtk_gpio_get; - chip->set = mtk_gpio_set; + chip->set_rv = mtk_gpio_set; chip->to_irq = mtk_gpio_to_irq; chip->set_config = mtk_gpio_set_config; chip->base = -1; diff --git a/drivers/pinctrl/mediatek/pinctrl-mt6893.c b/drivers/pinctrl/mediatek/pinctrl-mt6893.c new file mode 100644 index 000000000000..468ce0109b07 --- /dev/null +++ b/drivers/pinctrl/mediatek/pinctrl-mt6893.c @@ -0,0 +1,879 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2019 MediaTek Inc. + * Copyright (C) 2024 Collabora Ltd. 
+ * AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com> + */ + +#include <linux/module.h> +#include "pinctrl-mtk-mt6893.h" +#include "pinctrl-paris.h" + +#define PIN_FIELD_BASE(s_pin, e_pin, i_base, s_addr, x_addrs, s_bit, x_bits) \ + PIN_FIELD_CALC(s_pin, e_pin, i_base, s_addr, x_addrs, s_bit, x_bits, \ + 32, 0) + +#define PINS_FIELD_BASE(s_pin, e_pin, i_base, s_addr, x_addrs, s_bit, x_bits) \ + PIN_FIELD_CALC(s_pin, e_pin, i_base, s_addr, x_addrs, s_bit, x_bits, \ + 32, 1) + +static const struct mtk_pin_field_calc mt6893_pin_mode_range[] = { + PIN_FIELD(0, 219, 0x0300, 0x10, 0, 4), +}; + +static const struct mtk_pin_field_calc mt6893_pin_dir_range[] = { + PIN_FIELD(0, 219, 0x0000, 0x10, 0, 1), +}; + +static const struct mtk_pin_field_calc mt6893_pin_di_range[] = { + PIN_FIELD(0, 219, 0x0200, 0x10, 0, 1), +}; + +static const struct mtk_pin_field_calc mt6893_pin_do_range[] = { + PIN_FIELD(0, 219, 0x0100, 0x10, 0, 1), +}; + +static const struct mtk_pin_field_calc mt6893_pin_smt_range[] = { + PINS_FIELD_BASE(0, 9, 2, 0x00f0, 0x10, 7, 1), + PINS_FIELD_BASE(10, 15, 1, 0x0090, 0x10, 4, 1), + PIN_FIELD_BASE(16, 17, 5, 0x00c0, 0x10, 8, 1), + PINS_FIELD_BASE(18, 25, 7, 0x00f0, 0x10, 1, 1), + PINS_FIELD_BASE(26, 30, 6, 0x00e0, 0x10, 6, 1), + PINS_FIELD_BASE(31, 35, 6, 0x00e0, 0x10, 2, 1), + PIN_FIELD_BASE(36, 36, 6, 0x00e0, 0x10, 16, 1), + PINS_FIELD_BASE(37, 39, 6, 0x00e0, 0x10, 15, 1), + PIN_FIELD_BASE(40, 41, 6, 0x00e0, 0x10, 0, 1), + PIN_FIELD_BASE(42, 42, 6, 0x00e0, 0x10, 5, 1), + PIN_FIELD_BASE(43, 44, 6, 0x00e0, 0x10, 3, 1), + PIN_FIELD_BASE(45, 45, 6, 0x00e0, 0x10, 12, 1), + PIN_FIELD_BASE(46, 46, 6, 0x00e0, 0x10, 14, 1), + PIN_FIELD_BASE(47, 47, 6, 0x00e0, 0x10, 13, 1), + PIN_FIELD_BASE(48, 49, 6, 0x00e0, 0x10, 10, 1), + PIN_FIELD_BASE(50, 50, 6, 0x00e0, 0x10, 9, 1), + PIN_FIELD_BASE(51, 52, 3, 0x0090, 0x10, 6, 1), + PINS_FIELD_BASE(53, 56, 3, 0x0090, 0x10, 8, 1), + PIN_FIELD_BASE(57, 60, 3, 0x0090, 0x10, 2, 1), + PIN_FIELD_BASE(61, 61, 3, 0x0090, 0x10, 1, 1), + PIN_FIELD_BASE(62, 62, 4, 0x0050, 0x10, 1, 1), + PINS_FIELD_BASE(63, 73, 3, 0x0090, 0x10, 0, 1), + PINS_FIELD_BASE(74, 84, 4, 0x0050, 0x10, 0, 1), + PIN_FIELD_BASE(85, 86, 4, 0x0050, 0x10, 4, 1), + PIN_FIELD_BASE(87, 88, 4, 0x0050, 0x10, 2, 1), + PIN_FIELD_BASE(89, 90, 2, 0x00f0, 0x10, 26, 1), + PIN_FIELD_BASE(91, 91, 2, 0x00f0, 0x10, 0, 1), + PINS_FIELD_BASE(92, 95, 2, 0x0100, 0x10, 0, 1), + PIN_FIELD_BASE(96, 96, 2, 0x00f0, 0x10, 30, 1), + PIN_FIELD_BASE(97, 97, 2, 0x00f0, 0x10, 28, 1), + PIN_FIELD_BASE(98, 98, 2, 0x00f0, 0x10, 31, 1), + PINS_FIELD_BASE(99, 102, 2, 0x00f0, 0x10, 29, 1), + PINS_FIELD_BASE(103, 105, 2, 0x00f0, 0x10, 24, 1), + PIN_FIELD_BASE(106, 106, 2, 0x00f0, 0x10, 25, 1), + PIN_FIELD_BASE(107, 108, 2, 0x00f0, 0x10, 5, 1), + PINS_FIELD_BASE(109, 113, 2, 0x00f0, 0x10, 8, 1), + PINS_FIELD_BASE(114, 116, 2, 0x00f0, 0x10, 16, 1), + PIN_FIELD_BASE(117, 117, 2, 0x00f0, 0x10, 17, 1), + PIN_FIELD_BASE(118, 118, 2, 0x00f0, 0x10, 10, 1), + PIN_FIELD_BASE(119, 119, 2, 0x00f0, 0x10, 18, 1), + PIN_FIELD_BASE(120, 120, 2, 0x00f0, 0x10, 15, 1), + PIN_FIELD_BASE(121, 121, 2, 0x00f0, 0x10, 23, 1), + PIN_FIELD_BASE(122, 122, 2, 0x00f0, 0x10, 14, 1), + PIN_FIELD_BASE(123, 123, 2, 0x00f0, 0x10, 22, 1), + PIN_FIELD_BASE(124, 124, 2, 0x00f0, 0x10, 13, 1), + PIN_FIELD_BASE(125, 125, 2, 0x00f0, 0x10, 21, 1), + PINS_FIELD_BASE(126, 129, 2, 0x00f0, 0x10, 9, 1), + PINS_FIELD_BASE(130, 135, 2, 0x00f0, 0x10, 4, 1), + PIN_FIELD_BASE(136, 138, 2, 0x00f0, 0x10, 1, 1), + PIN_FIELD_BASE(139, 139, 2, 0x00f0, 0x10, 12, 1), + 
PIN_FIELD_BASE(140, 140, 2, 0x00f0, 0x10, 20, 1), + PIN_FIELD_BASE(141, 141, 2, 0x00f0, 0x10, 11, 1), + PIN_FIELD_BASE(142, 142, 2, 0x00f0, 0x10, 19, 1), + PINS_FIELD_BASE(143, 148, 1, 0x0090, 0x10, 3, 1), + PIN_FIELD_BASE(149, 151, 1, 0x0090, 0x10, 0, 1), + PINS_FIELD_BASE(152, 155, 5, 0x00c0, 0x10, 10, 1), + PIN_FIELD_BASE(156, 156, 5, 0x00c0, 0x10, 14, 1), + PINS_FIELD_BASE(157, 159, 5, 0x00c0, 0x10, 13, 1), + PIN_FIELD_BASE(160, 161, 5, 0x00c0, 0x10, 11, 1), + PINS_FIELD_BASE(162, 171, 5, 0x00c0, 0x10, 0, 1), + PIN_FIELD_BASE(172, 173, 5, 0x00c0, 0x10, 4, 1), + PIN_FIELD_BASE(174, 174, 5, 0x00c0, 0x10, 3, 1), + PIN_FIELD_BASE(175, 175, 5, 0x00c0, 0x10, 6, 1), + PIN_FIELD_BASE(176, 177, 5, 0x00c0, 0x10, 1, 1), + PINS_FIELD_BASE(178, 182, 5, 0x00c0, 0x10, 7, 1), + PIN_FIELD_BASE(183, 183, 7, 0x00f0, 0x10, 3, 1), + PINS_FIELD_BASE(184, 190, 7, 0x00f0, 0x10, 4, 1), + PIN_FIELD_BASE(191, 191, 7, 0x00f0, 0x10, 5, 1), + PIN_FIELD_BASE(192, 192, 7, 0x00f0, 0x10, 2, 1), + PIN_FIELD_BASE(193, 193, 7, 0x00f0, 0x10, 4, 1), + PIN_FIELD_BASE(194, 194, 7, 0x00f0, 0x10, 6, 1), + PIN_FIELD_BASE(195, 195, 7, 0x00f0, 0x10, 12, 1), + PINS_FIELD_BASE(196, 199, 7, 0x00f0, 0x10, 0, 1), + PIN_FIELD_BASE(200, 200, 7, 0x00f0, 0x10, 11, 1), + PIN_FIELD_BASE(201, 201, 7, 0x00f0, 0x10, 14, 1), + PIN_FIELD_BASE(202, 202, 7, 0x00f0, 0x10, 10, 1), + PIN_FIELD_BASE(203, 203, 7, 0x00f0, 0x10, 13, 1), + PIN_FIELD_BASE(204, 205, 6, 0x00e0, 0x10, 7, 1), + PIN_FIELD_BASE(206, 208, 7, 0x00f0, 0x10, 15, 1), + PINS_FIELD_BASE(209, 211, 7, 0x00f0, 0x10, 7, 1), + PIN_FIELD_BASE(212, 213, 7, 0x00f0, 0x10, 8, 1), + PINS_FIELD_BASE(214, 219, 7, 0x00f0, 0x10, 0, 1), +}; + +static const struct mtk_pin_field_calc mt6893_pin_ies_range[] = { + PIN_FIELD_BASE(0, 9, 2, 0x0060, 0x10, 12, 1), + PIN_FIELD_BASE(10, 15, 1, 0x0020, 0x10, 9, 1), + PIN_FIELD_BASE(16, 17, 5, 0x0030, 0x10, 21, 1), + PIN_FIELD_BASE(18, 25, 7, 0x0050, 0x10, 10, 1), + PIN_FIELD_BASE(26, 30, 6, 0x0040, 0x10, 10, 1), + PIN_FIELD_BASE(31, 31, 6, 0x0040, 0x10, 6, 1), + PIN_FIELD_BASE(32, 32, 6, 0x0040, 0x10, 3, 1), + PIN_FIELD_BASE(33, 33, 6, 0x0040, 0x10, 5, 1), + PIN_FIELD_BASE(34, 34, 6, 0x0040, 0x10, 2, 1), + PIN_FIELD_BASE(35, 35, 6, 0x0040, 0x10, 4, 1), + PIN_FIELD_BASE(36, 39, 6, 0x0040, 0x10, 23, 1), + PIN_FIELD_BASE(40, 41, 6, 0x0040, 0x10, 0, 1), + PIN_FIELD_BASE(42, 42, 6, 0x0040, 0x10, 9, 1), + PIN_FIELD_BASE(43, 44, 6, 0x0040, 0x10, 7, 1), + PIN_FIELD_BASE(45, 45, 6, 0x0040, 0x10, 20, 1), + PIN_FIELD_BASE(46, 46, 6, 0x0040, 0x10, 22, 1), + PIN_FIELD_BASE(47, 47, 6, 0x0040, 0x10, 21, 1), + PIN_FIELD_BASE(48, 49, 6, 0x0040, 0x10, 18, 1), + PIN_FIELD_BASE(50, 50, 6, 0x0040, 0x10, 17, 1), + PIN_FIELD_BASE(51, 52, 3, 0x0020, 0x10, 16, 1), + PIN_FIELD_BASE(53, 53, 3, 0x0020, 0x10, 21, 1), + PIN_FIELD_BASE(54, 54, 3, 0x0020, 0x10, 18, 1), + PIN_FIELD_BASE(55, 55, 3, 0x0020, 0x10, 20, 1), + PIN_FIELD_BASE(56, 56, 3, 0x0020, 0x10, 19, 1), + PIN_FIELD_BASE(57, 60, 3, 0x0020, 0x10, 12, 1), + PIN_FIELD_BASE(61, 61, 3, 0x0020, 0x10, 11, 1), + PIN_FIELD_BASE(62, 62, 4, 0x0010, 0x10, 11, 1), + PIN_FIELD_BASE(63, 64, 3, 0x0020, 0x10, 0, 1), + PIN_FIELD_BASE(65, 72, 3, 0x0020, 0x10, 3, 1), + PIN_FIELD_BASE(73, 73, 3, 0x0020, 0x10, 2, 1), + PIN_FIELD_BASE(74, 84, 4, 0x0010, 0x10, 0, 1), + PIN_FIELD_BASE(85, 86, 4, 0x0010, 0x10, 14, 1), + PIN_FIELD_BASE(87, 88, 4, 0x0010, 0x10, 12, 1), + PIN_FIELD_BASE(89, 90, 2, 0x0070, 0x10, 19, 1), + PIN_FIELD_BASE(91, 91, 2, 0x0060, 0x10, 0, 1), + PIN_FIELD_BASE(92, 92, 2, 0x0070, 0x10, 28, 1), + PIN_FIELD_BASE(93, 93, 2, 0x0070, 0x10, 30, 
1), + PIN_FIELD_BASE(94, 94, 2, 0x0070, 0x10, 29, 1), + PIN_FIELD_BASE(95, 95, 2, 0x0070, 0x10, 31, 1), + PIN_FIELD_BASE(96, 96, 2, 0x0070, 0x10, 26, 1), + PIN_FIELD_BASE(97, 97, 2, 0x0070, 0x10, 21, 1), + PIN_FIELD_BASE(98, 98, 2, 0x0070, 0x10, 27, 1), + PIN_FIELD_BASE(99, 102, 2, 0x0070, 0x10, 22, 1), + PIN_FIELD_BASE(103, 103, 2, 0x0070, 0x10, 17, 1), + PIN_FIELD_BASE(104, 104, 2, 0x0070, 0x10, 16, 1), + PIN_FIELD_BASE(105, 105, 2, 0x0070, 0x10, 18, 1), + PIN_FIELD_BASE(106, 106, 2, 0x0070, 0x10, 15, 1), + PIN_FIELD_BASE(107, 108, 2, 0x0060, 0x10, 10, 1), + PIN_FIELD_BASE(109, 109, 2, 0x0060, 0x10, 25, 1), + PIN_FIELD_BASE(110, 110, 2, 0x0060, 0x10, 22, 1), + PIN_FIELD_BASE(111, 111, 2, 0x0060, 0x10, 24, 1), + PIN_FIELD_BASE(112, 112, 2, 0x0060, 0x10, 26, 1), + PIN_FIELD_BASE(113, 113, 2, 0x0060, 0x10, 23, 1), + PIN_FIELD_BASE(114, 114, 2, 0x0070, 0x10, 7, 1), + PIN_FIELD_BASE(115, 115, 2, 0x0070, 0x10, 6, 1), + PIN_FIELD_BASE(116, 116, 2, 0x0070, 0x10, 8, 1), + PIN_FIELD_BASE(117, 117, 2, 0x0070, 0x10, 5, 1), + PIN_FIELD_BASE(118, 118, 2, 0x0060, 0x10, 31, 1), + PIN_FIELD_BASE(119, 119, 2, 0x0070, 0x10, 9, 1), + PIN_FIELD_BASE(120, 120, 2, 0x0070, 0x10, 4, 1), + PIN_FIELD_BASE(121, 121, 2, 0x0070, 0x10, 14, 1), + PIN_FIELD_BASE(122, 122, 2, 0x0070, 0x10, 3, 1), + PIN_FIELD_BASE(123, 123, 2, 0x0070, 0x10, 13, 1), + PIN_FIELD_BASE(124, 124, 2, 0x0070, 0x10, 2, 1), + PIN_FIELD_BASE(125, 125, 2, 0x0070, 0x10, 12, 1), + PIN_FIELD_BASE(126, 129, 2, 0x0060, 0x10, 27, 1), + PIN_FIELD_BASE(130, 132, 2, 0x0060, 0x10, 7, 1), + PIN_FIELD_BASE(133, 135, 2, 0x0060, 0x10, 4, 1), + PIN_FIELD_BASE(136, 138, 2, 0x0060, 0x10, 1, 1), + PIN_FIELD_BASE(139, 139, 2, 0x0070, 0x10, 1, 1), + PIN_FIELD_BASE(140, 140, 2, 0x0070, 0x10, 11, 1), + PIN_FIELD_BASE(141, 141, 2, 0x0070, 0x10, 0, 1), + PIN_FIELD_BASE(142, 142, 2, 0x0070, 0x10, 10, 1), + PIN_FIELD_BASE(143, 145, 1, 0x0020, 0x10, 6, 1), + PIN_FIELD_BASE(146, 148, 1, 0x0020, 0x10, 3, 1), + PIN_FIELD_BASE(149, 151, 1, 0x0020, 0x10, 0, 1), + PIN_FIELD_BASE(152, 152, 5, 0x0030, 0x10, 26, 1), + PIN_FIELD_BASE(153, 153, 5, 0x0030, 0x10, 25, 1), + PIN_FIELD_BASE(154, 155, 5, 0x0030, 0x10, 23, 1), + PIN_FIELD_BASE(156, 158, 5, 0x0030, 0x10, 29, 1), + PIN_FIELD_BASE(159, 159, 5, 0x0040, 0x10, 0, 1), + PIN_FIELD_BASE(160, 161, 5, 0x0030, 0x10, 27, 1), + PIN_FIELD_BASE(162, 171, 5, 0x0030, 0x10, 0, 1), + PIN_FIELD_BASE(172, 173, 5, 0x0030, 0x10, 13, 1), + PIN_FIELD_BASE(174, 174, 5, 0x0030, 0x10, 12, 1), + PIN_FIELD_BASE(175, 175, 5, 0x0030, 0x10, 15, 1), + PIN_FIELD_BASE(176, 177, 5, 0x0030, 0x10, 10, 1), + PIN_FIELD_BASE(178, 182, 5, 0x0030, 0x10, 16, 1), + PIN_FIELD_BASE(183, 184, 7, 0x0050, 0x10, 19, 1), + PIN_FIELD_BASE(185, 185, 7, 0x0050, 0x10, 22, 1), + PIN_FIELD_BASE(186, 186, 7, 0x0050, 0x10, 24, 1), + PIN_FIELD_BASE(187, 187, 7, 0x0050, 0x10, 26, 1), + PIN_FIELD_BASE(188, 188, 7, 0x0050, 0x10, 21, 1), + PIN_FIELD_BASE(189, 189, 7, 0x0050, 0x10, 25, 1), + PIN_FIELD_BASE(190, 191, 7, 0x0050, 0x10, 27, 1), + PIN_FIELD_BASE(192, 192, 7, 0x0050, 0x10, 18, 1), + PIN_FIELD_BASE(193, 193, 7, 0x0050, 0x10, 23, 1), + PIN_FIELD_BASE(194, 194, 7, 0x0050, 0x10, 29, 1), + PIN_FIELD_BASE(195, 195, 7, 0x0060, 0x10, 5, 1), + PIN_FIELD_BASE(196, 196, 7, 0x0050, 0x10, 6, 1), + PIN_FIELD_BASE(197, 197, 7, 0x0050, 0x10, 8, 1), + PIN_FIELD_BASE(198, 198, 7, 0x0050, 0x10, 7, 1), + PIN_FIELD_BASE(199, 199, 7, 0x0050, 0x10, 3, 1), + PIN_FIELD_BASE(200, 200, 7, 0x0060, 0x10, 4, 1), + PIN_FIELD_BASE(201, 201, 7, 0x0060, 0x10, 7, 1), + PIN_FIELD_BASE(202, 202, 7, 0x0060, 0x10, 3, 1), 
+ PIN_FIELD_BASE(203, 203, 7, 0x0060, 0x10, 6, 1), + PIN_FIELD_BASE(204, 205, 6, 0x0040, 0x10, 15, 1), + PIN_FIELD_BASE(206, 208, 7, 0x0060, 0x10, 8, 1), + PIN_FIELD_BASE(209, 209, 7, 0x0060, 0x10, 0, 1), + PIN_FIELD_BASE(210, 210, 7, 0x0050, 0x10, 31, 1), + PIN_FIELD_BASE(211, 211, 7, 0x0060, 0x10, 1, 1), + PIN_FIELD_BASE(212, 212, 7, 0x0050, 0x10, 30, 1), + PIN_FIELD_BASE(213, 213, 7, 0x0060, 0x10, 2, 1), + PIN_FIELD_BASE(214, 214, 7, 0x0050, 0x10, 0, 1), + PIN_FIELD_BASE(215, 215, 7, 0x0050, 0x10, 9, 1), + PIN_FIELD_BASE(216, 217, 7, 0x0050, 0x10, 4, 1), + PIN_FIELD_BASE(218, 219, 7, 0x0050, 0x10, 1, 1), +}; + +static const struct mtk_pin_field_calc mt6893_pin_pu_range[] = { + PIN_FIELD_BASE(0, 9, 2, 0x00a0, 0x10, 12, 1), + PIN_FIELD_BASE(16, 17, 5, 0x0070, 0x10, 21, 1), + PIN_FIELD_BASE(18, 25, 7, 0x0090, 0x10, 10, 1), + PIN_FIELD_BASE(26, 30, 6, 0x0080, 0x10, 10, 1), + PIN_FIELD_BASE(31, 31, 6, 0x0080, 0x10, 6, 1), + PIN_FIELD_BASE(32, 32, 6, 0x0080, 0x10, 3, 1), + PIN_FIELD_BASE(33, 33, 6, 0x0080, 0x10, 5, 1), + PIN_FIELD_BASE(34, 34, 6, 0x0080, 0x10, 2, 1), + PIN_FIELD_BASE(35, 35, 6, 0x0080, 0x10, 4, 1), + PIN_FIELD_BASE(36, 39, 6, 0x0080, 0x10, 17, 1), + PIN_FIELD_BASE(40, 41, 6, 0x0080, 0x10, 0, 1), + PIN_FIELD_BASE(42, 42, 6, 0x0080, 0x10, 9, 1), + PIN_FIELD_BASE(43, 44, 6, 0x0080, 0x10, 7, 1), + PIN_FIELD_BASE(57, 60, 3, 0x0050, 0x10, 12, 1), + PIN_FIELD_BASE(61, 61, 3, 0x0050, 0x10, 11, 1), + PIN_FIELD_BASE(62, 62, 4, 0x0030, 0x10, 11, 1), + PIN_FIELD_BASE(63, 64, 3, 0x0050, 0x10, 0, 1), + PIN_FIELD_BASE(65, 72, 3, 0x0050, 0x10, 3, 1), + PIN_FIELD_BASE(73, 73, 3, 0x0050, 0x10, 2, 1), + PIN_FIELD_BASE(74, 84, 4, 0x0030, 0x10, 0, 1), + PIN_FIELD_BASE(85, 86, 4, 0x0030, 0x10, 14, 1), + PIN_FIELD_BASE(87, 88, 4, 0x0030, 0x10, 12, 1), + PIN_FIELD_BASE(89, 90, 2, 0x00b0, 0x10, 19, 1), + PIN_FIELD_BASE(91, 91, 2, 0x00a0, 0x10, 0, 1), + PIN_FIELD_BASE(92, 92, 2, 0x00b0, 0x10, 28, 1), + PIN_FIELD_BASE(93, 93, 2, 0x00b0, 0x10, 30, 1), + PIN_FIELD_BASE(94, 94, 2, 0x00b0, 0x10, 29, 1), + PIN_FIELD_BASE(95, 95, 2, 0x00b0, 0x10, 31, 1), + PIN_FIELD_BASE(96, 96, 2, 0x00b0, 0x10, 26, 1), + PIN_FIELD_BASE(97, 97, 2, 0x00b0, 0x10, 21, 1), + PIN_FIELD_BASE(98, 98, 2, 0x00b0, 0x10, 27, 1), + PIN_FIELD_BASE(99, 102, 2, 0x00b0, 0x10, 22, 1), + PIN_FIELD_BASE(103, 103, 2, 0x00b0, 0x10, 17, 1), + PIN_FIELD_BASE(104, 104, 2, 0x00b0, 0x10, 16, 1), + PIN_FIELD_BASE(105, 105, 2, 0x00b0, 0x10, 18, 1), + PIN_FIELD_BASE(106, 106, 2, 0x00b0, 0x10, 15, 1), + PIN_FIELD_BASE(107, 108, 2, 0x00a0, 0x10, 10, 1), + PIN_FIELD_BASE(109, 109, 2, 0x00a0, 0x10, 25, 1), + PIN_FIELD_BASE(110, 110, 2, 0x00a0, 0x10, 22, 1), + PIN_FIELD_BASE(111, 111, 2, 0x00a0, 0x10, 24, 1), + PIN_FIELD_BASE(112, 112, 2, 0x00a0, 0x10, 26, 1), + PIN_FIELD_BASE(113, 113, 2, 0x00a0, 0x10, 23, 1), + PIN_FIELD_BASE(114, 114, 2, 0x00b0, 0x10, 7, 1), + PIN_FIELD_BASE(115, 115, 2, 0x00b0, 0x10, 6, 1), + PIN_FIELD_BASE(116, 116, 2, 0x00b0, 0x10, 8, 1), + PIN_FIELD_BASE(117, 117, 2, 0x00b0, 0x10, 5, 1), + PIN_FIELD_BASE(118, 118, 2, 0x00a0, 0x10, 31, 1), + PIN_FIELD_BASE(119, 119, 2, 0x00b0, 0x10, 9, 1), + PIN_FIELD_BASE(120, 120, 2, 0x00b0, 0x10, 4, 1), + PIN_FIELD_BASE(121, 121, 2, 0x00b0, 0x10, 14, 1), + PIN_FIELD_BASE(122, 122, 2, 0x00b0, 0x10, 3, 1), + PIN_FIELD_BASE(123, 123, 2, 0x00b0, 0x10, 13, 1), + PIN_FIELD_BASE(124, 124, 2, 0x00b0, 0x10, 2, 1), + PIN_FIELD_BASE(125, 125, 2, 0x00b0, 0x10, 12, 1), + PIN_FIELD_BASE(126, 129, 2, 0x00a0, 0x10, 27, 1), + PIN_FIELD_BASE(130, 132, 2, 0x00a0, 0x10, 7, 1), + PIN_FIELD_BASE(133, 135, 2, 0x00a0, 
0x10, 4, 1), + PIN_FIELD_BASE(136, 138, 2, 0x00a0, 0x10, 1, 1), + PIN_FIELD_BASE(139, 139, 2, 0x00b0, 0x10, 1, 1), + PIN_FIELD_BASE(140, 140, 2, 0x00b0, 0x10, 11, 1), + PIN_FIELD_BASE(141, 141, 2, 0x00b0, 0x10, 0, 1), + PIN_FIELD_BASE(142, 142, 2, 0x00b0, 0x10, 10, 1), + PIN_FIELD_BASE(143, 145, 1, 0x0050, 0x10, 6, 1), + PIN_FIELD_BASE(146, 148, 1, 0x0050, 0x10, 3, 1), + PIN_FIELD_BASE(149, 151, 1, 0x0050, 0x10, 0, 1), + PIN_FIELD_BASE(156, 159, 5, 0x0070, 0x10, 25, 1), + PIN_FIELD_BASE(160, 161, 5, 0x0070, 0x10, 23, 1), + PIN_FIELD_BASE(162, 171, 5, 0x0070, 0x10, 0, 1), + PIN_FIELD_BASE(172, 173, 5, 0x0070, 0x10, 13, 1), + PIN_FIELD_BASE(174, 174, 5, 0x0070, 0x10, 12, 1), + PIN_FIELD_BASE(175, 175, 5, 0x0070, 0x10, 15, 1), + PIN_FIELD_BASE(176, 177, 5, 0x0070, 0x10, 10, 1), + PIN_FIELD_BASE(178, 182, 5, 0x0070, 0x10, 16, 1), + PIN_FIELD_BASE(195, 195, 7, 0x0090, 0x10, 25, 1), + PIN_FIELD_BASE(196, 196, 7, 0x0090, 0x10, 6, 1), + PIN_FIELD_BASE(197, 197, 7, 0x0090, 0x10, 8, 1), + PIN_FIELD_BASE(198, 198, 7, 0x0090, 0x10, 7, 1), + PIN_FIELD_BASE(199, 199, 7, 0x0090, 0x10, 3, 1), + PIN_FIELD_BASE(200, 200, 7, 0x0090, 0x10, 24, 1), + PIN_FIELD_BASE(201, 201, 7, 0x0090, 0x10, 27, 1), + PIN_FIELD_BASE(202, 202, 7, 0x0090, 0x10, 23, 1), + PIN_FIELD_BASE(203, 203, 7, 0x0090, 0x10, 26, 1), + PIN_FIELD_BASE(204, 205, 6, 0x0080, 0x10, 15, 1), + PIN_FIELD_BASE(206, 208, 7, 0x0090, 0x10, 28, 1), + PIN_FIELD_BASE(209, 209, 7, 0x0090, 0x10, 20, 1), + PIN_FIELD_BASE(210, 210, 7, 0x0090, 0x10, 19, 1), + PIN_FIELD_BASE(211, 211, 7, 0x0090, 0x10, 21, 1), + PIN_FIELD_BASE(212, 212, 7, 0x0090, 0x10, 18, 1), + PIN_FIELD_BASE(213, 213, 7, 0x0090, 0x10, 22, 1), + PIN_FIELD_BASE(214, 214, 7, 0x0090, 0x10, 0, 1), + PIN_FIELD_BASE(215, 215, 7, 0x0090, 0x10, 9, 1), + PIN_FIELD_BASE(216, 217, 7, 0x0090, 0x10, 4, 1), + PIN_FIELD_BASE(218, 219, 7, 0x0090, 0x10, 1, 1), +}; + +static const struct mtk_pin_field_calc mt6893_pin_pd_range[] = { + PIN_FIELD_BASE(0, 9, 2, 0x0080, 0x10, 12, 1), + PIN_FIELD_BASE(16, 17, 5, 0x0050, 0x10, 21, 1), + PIN_FIELD_BASE(18, 25, 7, 0x0070, 0x10, 10, 1), + PIN_FIELD_BASE(26, 30, 6, 0x0060, 0x10, 10, 1), + PIN_FIELD_BASE(31, 31, 6, 0x0060, 0x10, 6, 1), + PIN_FIELD_BASE(32, 32, 6, 0x0060, 0x10, 3, 1), + PIN_FIELD_BASE(33, 33, 6, 0x0060, 0x10, 5, 1), + PIN_FIELD_BASE(34, 34, 6, 0x0060, 0x10, 2, 1), + PIN_FIELD_BASE(35, 35, 6, 0x0060, 0x10, 4, 1), + PIN_FIELD_BASE(36, 39, 6, 0x0060, 0x10, 17, 1), + PIN_FIELD_BASE(40, 41, 6, 0x0060, 0x10, 0, 1), + PIN_FIELD_BASE(42, 42, 6, 0x0060, 0x10, 9, 1), + PIN_FIELD_BASE(43, 44, 6, 0x0060, 0x10, 7, 1), + PIN_FIELD_BASE(57, 60, 3, 0x0030, 0x10, 12, 1), + PIN_FIELD_BASE(61, 61, 3, 0x0030, 0x10, 11, 1), + PIN_FIELD_BASE(62, 62, 4, 0x0020, 0x10, 11, 1), + PIN_FIELD_BASE(63, 64, 3, 0x0030, 0x10, 0, 1), + PIN_FIELD_BASE(65, 72, 3, 0x0030, 0x10, 3, 1), + PIN_FIELD_BASE(73, 73, 3, 0x0030, 0x10, 2, 1), + PIN_FIELD_BASE(74, 84, 4, 0x0020, 0x10, 0, 1), + PIN_FIELD_BASE(85, 86, 4, 0x0020, 0x10, 14, 1), + PIN_FIELD_BASE(87, 88, 4, 0x0020, 0x10, 12, 1), + PIN_FIELD_BASE(89, 90, 2, 0x0090, 0x10, 19, 1), + PIN_FIELD_BASE(91, 91, 2, 0x0080, 0x10, 0, 1), + PIN_FIELD_BASE(92, 92, 2, 0x0090, 0x10, 28, 1), + PIN_FIELD_BASE(93, 93, 2, 0x0090, 0x10, 30, 1), + PIN_FIELD_BASE(94, 94, 2, 0x0090, 0x10, 29, 1), + PIN_FIELD_BASE(95, 95, 2, 0x0090, 0x10, 31, 1), + PIN_FIELD_BASE(96, 96, 2, 0x0090, 0x10, 26, 1), + PIN_FIELD_BASE(97, 97, 2, 0x0090, 0x10, 21, 1), + PIN_FIELD_BASE(98, 98, 2, 0x0090, 0x10, 27, 1), + PIN_FIELD_BASE(99, 102, 2, 0x0090, 0x10, 22, 1), + PIN_FIELD_BASE(103, 103, 
2, 0x0090, 0x10, 17, 1), + PIN_FIELD_BASE(104, 104, 2, 0x0090, 0x10, 16, 1), + PIN_FIELD_BASE(105, 105, 2, 0x0090, 0x10, 18, 1), + PIN_FIELD_BASE(106, 106, 2, 0x0090, 0x10, 15, 1), + PIN_FIELD_BASE(107, 108, 2, 0x0080, 0x10, 10, 1), + PIN_FIELD_BASE(109, 109, 2, 0x0080, 0x10, 25, 1), + PIN_FIELD_BASE(110, 110, 2, 0x0080, 0x10, 22, 1), + PIN_FIELD_BASE(111, 111, 2, 0x0080, 0x10, 24, 1), + PIN_FIELD_BASE(112, 112, 2, 0x0080, 0x10, 26, 1), + PIN_FIELD_BASE(113, 113, 2, 0x0080, 0x10, 23, 1), + PIN_FIELD_BASE(114, 114, 2, 0x0090, 0x10, 7, 1), + PIN_FIELD_BASE(115, 115, 2, 0x0090, 0x10, 6, 1), + PIN_FIELD_BASE(116, 116, 2, 0x0090, 0x10, 8, 1), + PIN_FIELD_BASE(117, 117, 2, 0x0090, 0x10, 5, 1), + PIN_FIELD_BASE(118, 118, 2, 0x0080, 0x10, 31, 1), + PIN_FIELD_BASE(119, 119, 2, 0x0090, 0x10, 9, 1), + PIN_FIELD_BASE(120, 120, 2, 0x0090, 0x10, 4, 1), + PIN_FIELD_BASE(121, 121, 2, 0x0090, 0x10, 14, 1), + PIN_FIELD_BASE(122, 122, 2, 0x0090, 0x10, 3, 1), + PIN_FIELD_BASE(123, 123, 2, 0x0090, 0x10, 13, 1), + PIN_FIELD_BASE(124, 124, 2, 0x0090, 0x10, 2, 1), + PIN_FIELD_BASE(125, 125, 2, 0x0090, 0x10, 12, 1), + PIN_FIELD_BASE(126, 129, 2, 0x0080, 0x10, 27, 1), + PIN_FIELD_BASE(130, 132, 2, 0x0080, 0x10, 7, 1), + PIN_FIELD_BASE(133, 135, 2, 0x0080, 0x10, 4, 1), + PIN_FIELD_BASE(136, 138, 2, 0x0080, 0x10, 1, 1), + PIN_FIELD_BASE(139, 139, 2, 0x0090, 0x10, 1, 1), + PIN_FIELD_BASE(140, 140, 2, 0x0090, 0x10, 11, 1), + PIN_FIELD_BASE(141, 141, 2, 0x0090, 0x10, 0, 1), + PIN_FIELD_BASE(142, 142, 2, 0x0090, 0x10, 10, 1), + PIN_FIELD_BASE(143, 145, 1, 0x0030, 0x10, 6, 1), + PIN_FIELD_BASE(146, 148, 1, 0x0030, 0x10, 3, 1), + PIN_FIELD_BASE(149, 151, 1, 0x0030, 0x10, 0, 1), + PIN_FIELD_BASE(156, 159, 5, 0x0050, 0x10, 25, 1), + PIN_FIELD_BASE(160, 161, 5, 0x0050, 0x10, 23, 1), + PIN_FIELD_BASE(162, 171, 5, 0x0050, 0x10, 0, 1), + PIN_FIELD_BASE(172, 173, 5, 0x0050, 0x10, 13, 1), + PIN_FIELD_BASE(174, 174, 5, 0x0050, 0x10, 12, 1), + PIN_FIELD_BASE(175, 175, 5, 0x0050, 0x10, 15, 1), + PIN_FIELD_BASE(176, 177, 5, 0x0050, 0x10, 10, 1), + PIN_FIELD_BASE(178, 182, 5, 0x0050, 0x10, 16, 1), + PIN_FIELD_BASE(195, 195, 7, 0x0070, 0x10, 25, 1), + PIN_FIELD_BASE(196, 196, 7, 0x0070, 0x10, 6, 1), + PIN_FIELD_BASE(197, 197, 7, 0x0070, 0x10, 8, 1), + PIN_FIELD_BASE(198, 198, 7, 0x0070, 0x10, 7, 1), + PIN_FIELD_BASE(199, 199, 7, 0x0070, 0x10, 3, 1), + PIN_FIELD_BASE(200, 200, 7, 0x0070, 0x10, 24, 1), + PIN_FIELD_BASE(201, 201, 7, 0x0070, 0x10, 27, 1), + PIN_FIELD_BASE(202, 202, 7, 0x0070, 0x10, 23, 1), + PIN_FIELD_BASE(203, 203, 7, 0x0070, 0x10, 26, 1), + PIN_FIELD_BASE(204, 205, 6, 0x0060, 0x10, 15, 1), + PIN_FIELD_BASE(206, 208, 7, 0x0070, 0x10, 28, 1), + PIN_FIELD_BASE(209, 209, 7, 0x0070, 0x10, 20, 1), + PIN_FIELD_BASE(210, 210, 7, 0x0070, 0x10, 19, 1), + PIN_FIELD_BASE(211, 211, 7, 0x0070, 0x10, 21, 1), + PIN_FIELD_BASE(212, 212, 7, 0x0070, 0x10, 18, 1), + PIN_FIELD_BASE(213, 213, 7, 0x0070, 0x10, 22, 1), + PIN_FIELD_BASE(214, 214, 7, 0x0070, 0x10, 0, 1), + PIN_FIELD_BASE(215, 215, 7, 0x0070, 0x10, 9, 1), + PIN_FIELD_BASE(216, 217, 7, 0x0070, 0x10, 4, 1), + PIN_FIELD_BASE(218, 219, 7, 0x0070, 0x10, 1, 1), +}; + +static const struct mtk_pin_field_calc mt6893_pin_drv_range[] = { + PINS_FIELD_BASE(0, 9, 2, 0x0000, 0x10, 21, 3), + PINS_FIELD_BASE(10, 15, 1, 0x0000, 0x10, 12, 3), + PIN_FIELD_BASE(16, 17, 5, 0x0000, 0x10, 18, 3), + PINS_FIELD_BASE(18, 25, 7, 0x0000, 0x10, 3, 3), + PINS_FIELD_BASE(26, 30, 6, 0x0000, 0x10, 15, 3), + PINS_FIELD_BASE(31, 35, 6, 0x0000, 0x10, 6, 3), + PIN_FIELD_BASE(36, 36, 6, 0x0010, 0x10, 7, 3), + 
PINS_FIELD_BASE(37, 39, 6, 0x0010, 0x10, 4, 3), + PIN_FIELD_BASE(40, 41, 6, 0x0000, 0x10, 0, 3), + PIN_FIELD_BASE(42, 42, 6, 0x0000, 0x10, 12, 3), + PINS_FIELD_BASE(43, 44, 6, 0x0000, 0x10, 9, 3), + PIN_FIELD_BASE(45, 45, 6, 0x0000, 0x10, 30, 2), + PIN_FIELD_BASE(46, 46, 6, 0x0010, 0x10, 2, 2), + PIN_FIELD_BASE(47, 47, 6, 0x0010, 0x10, 0, 2), + PIN_FIELD_BASE(48, 49, 6, 0x0000, 0x10, 26, 2), + PIN_FIELD_BASE(50, 50, 6, 0x0000, 0x10, 24, 2), + PIN_FIELD_BASE(51, 52, 3, 0x0000, 0x10, 18, 3), + PINS_FIELD_BASE(53, 56, 3, 0x0000, 0x10, 24, 3), + PIN_FIELD_BASE(57, 60, 3, 0x0000, 0x10, 6, 3), + PIN_FIELD_BASE(61, 61, 3, 0x0000, 0x10, 3, 3), + PIN_FIELD_BASE(62, 62, 4, 0x0000, 0x10, 3, 3), + PINS_FIELD_BASE(63, 73, 3, 0x0000, 0x10, 0, 3), + PINS_FIELD_BASE(74, 84, 4, 0x0000, 0x10, 0, 3), + PIN_FIELD_BASE(85, 86, 4, 0x0000, 0x10, 12, 3), + PIN_FIELD_BASE(87, 88, 4, 0x0000, 0x10, 6, 3), + PIN_FIELD_BASE(89, 90, 2, 0x0020, 0x10, 15, 3), + PIN_FIELD_BASE(91, 91, 2, 0x0000, 0x10, 0, 3), + PINS_FIELD_BASE(92, 95, 2, 0x0030, 0x10, 3, 3), + PIN_FIELD_BASE(96, 96, 2, 0x0020, 0x10, 27, 3), + PIN_FIELD_BASE(97, 97, 2, 0x0020, 0x10, 21, 3), + PIN_FIELD_BASE(98, 98, 2, 0x0030, 0x10, 0, 3), + PINS_FIELD_BASE(99, 102, 2, 0x0020, 0x10, 24, 3), + PINS_FIELD_BASE(103, 105, 2, 0x0020, 0x10, 9, 3), + PIN_FIELD_BASE(106, 106, 2, 0x0020, 0x10, 12, 3), + PIN_FIELD_BASE(107, 108, 2, 0x0000, 0x10, 15, 3), + PINS_FIELD_BASE(109, 113, 2, 0x0000, 0x10, 24, 3), + PINS_FIELD_BASE(114, 117, 2, 0x0010, 0x10, 18, 3), + PIN_FIELD_BASE(118, 118, 2, 0x0010, 0x10, 0, 3), + PIN_FIELD_BASE(119, 119, 2, 0x0010, 0x10, 21, 3), + PIN_FIELD_BASE(120, 120, 2, 0x0010, 0x10, 15, 3), + PIN_FIELD_BASE(121, 121, 2, 0x0020, 0x10, 6, 3), + PIN_FIELD_BASE(122, 122, 2, 0x0010, 0x10, 12, 3), + PIN_FIELD_BASE(123, 123, 2, 0x0020, 0x10, 3, 3), + PIN_FIELD_BASE(124, 124, 2, 0x0010, 0x10, 9, 3), + PIN_FIELD_BASE(125, 125, 2, 0x0020, 0x10, 0, 3), + PINS_FIELD_BASE(126, 129, 2, 0x0000, 0x10, 27, 3), + PINS_FIELD_BASE(130, 135, 2, 0x0000, 0x10, 12, 3), + PIN_FIELD_BASE(136, 138, 2, 0x0000, 0x10, 3, 3), + PIN_FIELD_BASE(139, 139, 2, 0x0010, 0x10, 6, 3), + PIN_FIELD_BASE(140, 140, 2, 0x0010, 0x10, 27, 3), + PIN_FIELD_BASE(141, 141, 2, 0x0010, 0x10, 3, 3), + PIN_FIELD_BASE(142, 142, 2, 0x0010, 0x10, 24, 3), + PINS_FIELD_BASE(143, 148, 1, 0x0000, 0x10, 9, 3), + PIN_FIELD_BASE(149, 151, 1, 0x0000, 0x10, 0, 3), + PINS_FIELD_BASE(152, 155, 5, 0x0000, 0x10, 24, 3), + PIN_FIELD_BASE(156, 156, 5, 0x0010, 0x10, 6, 3), + PINS_FIELD_BASE(157, 159, 5, 0x0010, 0x10, 3, 3), + PIN_FIELD_BASE(160, 160, 5, 0x0000, 0x10, 27, 3), + PIN_FIELD_BASE(161, 161, 5, 0x0010, 0x10, 0, 3), + PINS_FIELD_BASE(162, 171, 5, 0x0000, 0x10, 0, 3), + PIN_FIELD_BASE(172, 172, 5, 0x0000, 0x10, 15, 3), + PIN_FIELD_BASE(173, 173, 5, 0x0000, 0x10, 3, 3), + PIN_FIELD_BASE(174, 174, 5, 0x0000, 0x10, 12, 3), + PIN_FIELD_BASE(175, 177, 5, 0x0000, 0x10, 3, 3), + PINS_FIELD_BASE(178, 182, 5, 0x0000, 0x10, 3, 3), + PIN_FIELD_BASE(183, 183, 7, 0x0000, 0x10, 9, 3), + PINS_FIELD_BASE(184, 190, 7, 0x0000, 0x10, 12, 3), + PIN_FIELD_BASE(191, 191, 7, 0x0000, 0x10, 15, 3), + PIN_FIELD_BASE(192, 192, 7, 0x0000, 0x10, 6, 3), + PIN_FIELD_BASE(193, 193, 7, 0x0000, 0x10, 12, 3), + PIN_FIELD_BASE(194, 194, 7, 0x0000, 0x10, 18, 3), + PIN_FIELD_BASE(195, 195, 7, 0x0010, 0x10, 3, 3), + PINS_FIELD_BASE(196, 199, 7, 0x0000, 0x10, 0, 3), + PIN_FIELD_BASE(200, 200, 7, 0x0010, 0x10, 0, 3), + PIN_FIELD_BASE(201, 201, 7, 0x0010, 0x10, 9, 3), + PIN_FIELD_BASE(202, 202, 7, 0x0000, 0x10, 27, 3), + PIN_FIELD_BASE(203, 203, 7, 
0x0010, 0x10, 6, 3), + PIN_FIELD_BASE(204, 205, 6, 0x0000, 0x10, 18, 3), + PIN_FIELD_BASE(206, 208, 7, 0x0010, 0x10, 12, 3), + PINS_FIELD_BASE(209, 212, 7, 0x0000, 0x10, 21, 3), + PIN_FIELD_BASE(213, 213, 7, 0x0000, 0x10, 24, 3), + PINS_FIELD_BASE(214, 219, 7, 0x0000, 0x10, 0, 3), +}; + +static const struct mtk_pin_field_calc mt6893_pin_pupd_range[] = { + PIN_FIELD_BASE(10, 15, 1, 0x0040, 0x10, 0, 1), + PIN_FIELD_BASE(45, 45, 6, 0x0070, 0x10, 3, 1), + PIN_FIELD_BASE(46, 46, 6, 0x0070, 0x10, 5, 1), + PIN_FIELD_BASE(47, 47, 6, 0x0070, 0x10, 4, 1), + PIN_FIELD_BASE(48, 49, 6, 0x0070, 0x10, 1, 1), + PIN_FIELD_BASE(50, 50, 6, 0x0070, 0x10, 0, 1), + PIN_FIELD_BASE(51, 52, 3, 0x0040, 0x10, 0, 1), + PIN_FIELD_BASE(53, 53, 3, 0x0040, 0x10, 5, 1), + PIN_FIELD_BASE(54, 54, 3, 0x0040, 0x10, 2, 1), + PIN_FIELD_BASE(55, 55, 3, 0x0040, 0x10, 4, 1), + PIN_FIELD_BASE(56, 56, 3, 0x0040, 0x10, 3, 1), + PIN_FIELD_BASE(152, 152, 5, 0x0060, 0x10, 3, 1), + PIN_FIELD_BASE(153, 153, 5, 0x0060, 0x10, 2, 1), + PIN_FIELD_BASE(154, 155, 5, 0x0060, 0x10, 0, 1), + PIN_FIELD_BASE(183, 184, 7, 0x0080, 0x10, 1, 1), + PIN_FIELD_BASE(185, 185, 7, 0x0080, 0x10, 4, 1), + PIN_FIELD_BASE(186, 186, 7, 0x0080, 0x10, 6, 1), + PIN_FIELD_BASE(187, 187, 7, 0x0080, 0x10, 8, 1), + PIN_FIELD_BASE(188, 188, 7, 0x0080, 0x10, 3, 1), + PIN_FIELD_BASE(189, 189, 7, 0x0080, 0x10, 7, 1), + PIN_FIELD_BASE(190, 191, 7, 0x0080, 0x10, 9, 1), + PIN_FIELD_BASE(192, 192, 7, 0x0080, 0x10, 0, 1), + PIN_FIELD_BASE(193, 193, 7, 0x0080, 0x10, 5, 1), + PIN_FIELD_BASE(194, 194, 7, 0x0080, 0x10, 11, 1), +}; + +static const struct mtk_pin_field_calc mt6893_pin_r0_range[] = { + PIN_FIELD_BASE(10, 15, 1, 0x0060, 0x10, 0, 1), + PIN_FIELD_BASE(24, 24, 7, 0x00e0, 0x10, 0, 1), + PIN_FIELD_BASE(25, 25, 7, 0x00e0, 0x10, 2, 1), + PIN_FIELD_BASE(45, 45, 6, 0x0090, 0x10, 3, 1), + PIN_FIELD_BASE(46, 46, 6, 0x0090, 0x10, 5, 1), + PIN_FIELD_BASE(47, 47, 6, 0x0090, 0x10, 4, 1), + PIN_FIELD_BASE(48, 49, 6, 0x0090, 0x10, 1, 1), + PIN_FIELD_BASE(50, 50, 6, 0x0090, 0x10, 0, 1), + PIN_FIELD_BASE(51, 52, 3, 0x0060, 0x10, 0, 1), + PIN_FIELD_BASE(53, 53, 3, 0x0060, 0x10, 5, 1), + PIN_FIELD_BASE(54, 54, 3, 0x0060, 0x10, 2, 1), + PIN_FIELD_BASE(55, 55, 3, 0x0060, 0x10, 4, 1), + PIN_FIELD_BASE(56, 56, 3, 0x0060, 0x10, 3, 1), + PIN_FIELD_BASE(118, 118, 2, 0x00e0, 0x10, 0, 1), + PIN_FIELD_BASE(119, 119, 2, 0x00e0, 0x10, 12, 1), + PIN_FIELD_BASE(120, 120, 2, 0x00e0, 0x10, 10, 1), + PIN_FIELD_BASE(121, 121, 2, 0x00e0, 0x10, 22, 1), + PIN_FIELD_BASE(122, 122, 2, 0x00e0, 0x10, 8, 1), + PIN_FIELD_BASE(123, 123, 2, 0x00e0, 0x10, 20, 1), + PIN_FIELD_BASE(124, 124, 2, 0x00e0, 0x10, 6, 1), + PIN_FIELD_BASE(125, 125, 2, 0x00e0, 0x10, 18, 1), + PIN_FIELD_BASE(139, 139, 2, 0x00e0, 0x10, 4, 1), + PIN_FIELD_BASE(140, 140, 2, 0x00e0, 0x10, 16, 1), + PIN_FIELD_BASE(141, 141, 2, 0x00e0, 0x10, 2, 1), + PIN_FIELD_BASE(142, 142, 2, 0x00e0, 0x10, 14, 1), + PIN_FIELD_BASE(152, 152, 5, 0x0080, 0x10, 3, 1), + PIN_FIELD_BASE(153, 153, 5, 0x0080, 0x10, 2, 1), + PIN_FIELD_BASE(154, 155, 5, 0x0080, 0x10, 0, 1), + PIN_FIELD_BASE(160, 160, 5, 0x00b0, 0x10, 0, 1), + PIN_FIELD_BASE(161, 161, 5, 0x00b0, 0x10, 2, 1), + PIN_FIELD_BASE(183, 184, 7, 0x00a0, 0x10, 1, 1), + PIN_FIELD_BASE(185, 185, 7, 0x00a0, 0x10, 4, 1), + PIN_FIELD_BASE(186, 186, 7, 0x00a0, 0x10, 6, 1), + PIN_FIELD_BASE(187, 187, 7, 0x00a0, 0x10, 8, 1), + PIN_FIELD_BASE(188, 188, 7, 0x00a0, 0x10, 3, 1), + PIN_FIELD_BASE(189, 189, 7, 0x00a0, 0x10, 7, 1), + PIN_FIELD_BASE(190, 191, 7, 0x00a0, 0x10, 9, 1), + PIN_FIELD_BASE(192, 192, 7, 0x00a0, 0x10, 0, 1), + 
PIN_FIELD_BASE(193, 193, 7, 0x00a0, 0x10, 5, 1), + PIN_FIELD_BASE(194, 194, 7, 0x00a0, 0x10, 11, 1), + PIN_FIELD_BASE(200, 200, 7, 0x00e0, 0x10, 6, 1), + PIN_FIELD_BASE(201, 201, 7, 0x00e0, 0x10, 10, 1), + PIN_FIELD_BASE(202, 202, 7, 0x00e0, 0x10, 4, 1), + PIN_FIELD_BASE(203, 203, 7, 0x00e0, 0x10, 8, 1), + PIN_FIELD_BASE(204, 204, 6, 0x00d0, 0x10, 0, 1), + PIN_FIELD_BASE(205, 205, 6, 0x00d0, 0x10, 2, 1), +}; + +static const struct mtk_pin_field_calc mt6893_pin_r1_range[] = { + PIN_FIELD_BASE(10, 15, 1, 0x0070, 0x10, 0, 1), + PIN_FIELD_BASE(24, 24, 7, 0x00e0, 0x10, 1, 1), + PIN_FIELD_BASE(25, 25, 7, 0x00e0, 0x10, 3, 1), + PIN_FIELD_BASE(45, 45, 6, 0x00a0, 0x10, 3, 1), + PIN_FIELD_BASE(46, 46, 6, 0x00a0, 0x10, 5, 1), + PIN_FIELD_BASE(47, 47, 6, 0x00a0, 0x10, 4, 1), + PIN_FIELD_BASE(48, 49, 6, 0x00a0, 0x10, 1, 1), + PIN_FIELD_BASE(50, 50, 6, 0x00a0, 0x10, 0, 1), + PIN_FIELD_BASE(51, 52, 3, 0x0070, 0x10, 0, 1), + PIN_FIELD_BASE(53, 53, 3, 0x0070, 0x10, 5, 1), + PIN_FIELD_BASE(54, 54, 3, 0x0070, 0x10, 2, 1), + PIN_FIELD_BASE(55, 55, 3, 0x0070, 0x10, 4, 1), + PIN_FIELD_BASE(56, 56, 3, 0x0070, 0x10, 3, 1), + PIN_FIELD_BASE(118, 118, 2, 0x00e0, 0x10, 1, 1), + PIN_FIELD_BASE(119, 119, 2, 0x00e0, 0x10, 13, 1), + PIN_FIELD_BASE(120, 120, 2, 0x00e0, 0x10, 11, 1), + PIN_FIELD_BASE(121, 121, 2, 0x00e0, 0x10, 23, 1), + PIN_FIELD_BASE(122, 122, 2, 0x00e0, 0x10, 9, 1), + PIN_FIELD_BASE(123, 123, 2, 0x00e0, 0x10, 21, 1), + PIN_FIELD_BASE(124, 124, 2, 0x00e0, 0x10, 7, 1), + PIN_FIELD_BASE(125, 125, 2, 0x00e0, 0x10, 19, 1), + PIN_FIELD_BASE(139, 139, 2, 0x00e0, 0x10, 5, 1), + PIN_FIELD_BASE(140, 140, 2, 0x00e0, 0x10, 17, 1), + PIN_FIELD_BASE(141, 141, 2, 0x00e0, 0x10, 3, 1), + PIN_FIELD_BASE(142, 142, 2, 0x00e0, 0x10, 15, 1), + PIN_FIELD_BASE(152, 152, 5, 0x0090, 0x10, 3, 1), + PIN_FIELD_BASE(153, 153, 5, 0x0090, 0x10, 2, 1), + PIN_FIELD_BASE(154, 155, 5, 0x0090, 0x10, 0, 1), + PIN_FIELD_BASE(160, 160, 5, 0x00b0, 0x10, 1, 1), + PIN_FIELD_BASE(161, 161, 5, 0x00b0, 0x10, 3, 1), + PIN_FIELD_BASE(183, 184, 7, 0x00b0, 0x10, 1, 1), + PIN_FIELD_BASE(185, 185, 7, 0x00b0, 0x10, 4, 1), + PIN_FIELD_BASE(186, 186, 7, 0x00b0, 0x10, 6, 1), + PIN_FIELD_BASE(187, 187, 7, 0x00b0, 0x10, 8, 1), + PIN_FIELD_BASE(188, 188, 7, 0x00b0, 0x10, 3, 1), + PIN_FIELD_BASE(189, 189, 7, 0x00b0, 0x10, 7, 1), + PIN_FIELD_BASE(190, 191, 7, 0x00b0, 0x10, 9, 1), + PIN_FIELD_BASE(192, 192, 7, 0x00b0, 0x10, 0, 1), + PIN_FIELD_BASE(193, 193, 7, 0x00b0, 0x10, 5, 1), + PIN_FIELD_BASE(194, 194, 7, 0x00b0, 0x10, 11, 1), + PIN_FIELD_BASE(200, 200, 7, 0x00e0, 0x10, 7, 1), + PIN_FIELD_BASE(201, 201, 7, 0x00e0, 0x10, 11, 1), + PIN_FIELD_BASE(202, 202, 7, 0x00e0, 0x10, 5, 1), + PIN_FIELD_BASE(203, 203, 7, 0x00e0, 0x10, 9, 1), + PIN_FIELD_BASE(204, 204, 6, 0x00d0, 0x10, 1, 1), + PIN_FIELD_BASE(205, 205, 6, 0x00d0, 0x10, 3, 1), +}; + +static const struct mtk_pin_field_calc mt6893_pin_drv_adv_range[] = { + PIN_FIELD_BASE(24, 24, 7, 0x0030, 0x10, 0, 3), + PIN_FIELD_BASE(25, 25, 7, 0x0030, 0x10, 3, 3), + PIN_FIELD_BASE(89, 89, 2, 0x0050, 0x10, 6, 5), + PIN_FIELD_BASE(90, 90, 2, 0x0050, 0x10, 11, 5), + PIN_FIELD_BASE(118, 118, 2, 0x0040, 0x10, 0, 3), + PIN_FIELD_BASE(119, 119, 2, 0x0040, 0x10, 18, 3), + PIN_FIELD_BASE(120, 120, 2, 0x0040, 0x10, 15, 3), + PIN_FIELD_BASE(121, 121, 2, 0x0050, 0x10, 3, 3), + PIN_FIELD_BASE(122, 122, 2, 0x0040, 0x10, 12, 3), + PIN_FIELD_BASE(123, 123, 2, 0x0050, 0x10, 0, 3), + PIN_FIELD_BASE(124, 124, 2, 0x0040, 0x10, 9, 3), + PIN_FIELD_BASE(125, 125, 2, 0x0040, 0x10, 27, 3), + PIN_FIELD_BASE(139, 139, 2, 0x0040, 0x10, 6, 3), + 
PIN_FIELD_BASE(140, 140, 2, 0x0040, 0x10, 24, 3), + PIN_FIELD_BASE(141, 141, 2, 0x0040, 0x10, 3, 3), + PIN_FIELD_BASE(142, 142, 2, 0x0040, 0x10, 21, 3), + PIN_FIELD_BASE(160, 160, 5, 0x0020, 0x10, 0, 3), + PIN_FIELD_BASE(161, 161, 5, 0x0020, 0x10, 3, 3), + PIN_FIELD_BASE(200, 200, 7, 0x0030, 0x10, 9, 3), + PIN_FIELD_BASE(201, 201, 7, 0x0030, 0x10, 15, 3), + PIN_FIELD_BASE(202, 202, 7, 0x0030, 0x10, 6, 3), + PIN_FIELD_BASE(203, 203, 7, 0x0030, 0x10, 12, 3), + PIN_FIELD_BASE(204, 204, 6, 0x0020, 0x10, 0, 3), + PIN_FIELD_BASE(205, 205, 6, 0x0020, 0x10, 3, 3), +}; + +static const struct mtk_pin_field_calc mt6893_pin_rsel_range[] = { + PIN_FIELD_BASE(24, 24, 7, 0x00e0, 0x10, 0, 2), + PIN_FIELD_BASE(25, 25, 7, 0x00e0, 0x10, 2, 2), + PIN_FIELD_BASE(118, 118, 2, 0x00e0, 0x10, 0, 2), + PIN_FIELD_BASE(119, 119, 2, 0x00e0, 0x10, 12, 2), + PIN_FIELD_BASE(120, 120, 2, 0x00e0, 0x10, 10, 2), + PIN_FIELD_BASE(121, 121, 2, 0x00e0, 0x10, 22, 2), + PIN_FIELD_BASE(122, 122, 2, 0x00e0, 0x10, 8, 2), + PIN_FIELD_BASE(123, 123, 2, 0x00e0, 0x10, 20, 2), + PIN_FIELD_BASE(124, 124, 2, 0x00e0, 0x10, 6, 2), + PIN_FIELD_BASE(125, 125, 2, 0x00e0, 0x10, 18, 2), + PIN_FIELD_BASE(139, 139, 2, 0x00e0, 0x10, 4, 2), + PIN_FIELD_BASE(140, 140, 2, 0x00e0, 0x10, 16, 2), + PIN_FIELD_BASE(141, 141, 2, 0x00e0, 0x10, 2, 2), + PIN_FIELD_BASE(142, 142, 2, 0x00e0, 0x10, 14, 2), + PIN_FIELD_BASE(160, 160, 5, 0x00b0, 0x10, 0, 2), + PIN_FIELD_BASE(161, 161, 5, 0x00b0, 0x10, 2, 2), + PIN_FIELD_BASE(200, 200, 7, 0x00e0, 0x10, 6, 2), + PIN_FIELD_BASE(201, 201, 7, 0x00e0, 0x10, 10, 2), + PIN_FIELD_BASE(202, 202, 7, 0x00e0, 0x10, 4, 2), + PIN_FIELD_BASE(203, 203, 7, 0x00e0, 0x10, 8, 2), + PIN_FIELD_BASE(204, 204, 6, 0x00d0, 0x10, 0, 2), + PIN_FIELD_BASE(205, 205, 6, 0x00d0, 0x10, 2, 2), +}; + +static const unsigned int mt6893_pull_type[] = { + MTK_PULL_PU_PD_TYPE, /* 0 */ MTK_PULL_PU_PD_TYPE, /* 1 */ + MTK_PULL_PU_PD_TYPE, /* 2 */ MTK_PULL_PU_PD_TYPE, /* 3 */ + MTK_PULL_PU_PD_TYPE, /* 4 */ MTK_PULL_PU_PD_TYPE, /* 5 */ + MTK_PULL_PU_PD_TYPE, /* 6 */ MTK_PULL_PU_PD_TYPE, /* 7 */ + MTK_PULL_PU_PD_TYPE, /* 8 */ MTK_PULL_PU_PD_TYPE, /* 9 */ + MTK_PULL_PUPD_R1R0_TYPE, /* 10 */ MTK_PULL_PUPD_R1R0_TYPE, /* 11 */ + MTK_PULL_PUPD_R1R0_TYPE, /* 12 */ MTK_PULL_PUPD_R1R0_TYPE, /* 13 */ + MTK_PULL_PUPD_R1R0_TYPE, /* 14 */ MTK_PULL_PUPD_R1R0_TYPE, /* 15 */ + MTK_PULL_PU_PD_TYPE, /* 16 */ MTK_PULL_PU_PD_TYPE, /* 17 */ + MTK_PULL_PU_PD_TYPE, /* 18 */ MTK_PULL_PU_PD_TYPE, /* 19 */ + MTK_PULL_PU_PD_TYPE, /* 20 */ MTK_PULL_PU_PD_TYPE, /* 21 */ + MTK_PULL_PU_PD_TYPE, /* 22 */ MTK_PULL_PU_PD_TYPE, /* 23 */ + MTK_PULL_PU_PD_RSEL_TYPE, /* 24 */ MTK_PULL_PU_PD_RSEL_TYPE, /* 25 */ + MTK_PULL_PU_PD_TYPE, /* 26 */ MTK_PULL_PU_PD_TYPE, /* 27 */ + MTK_PULL_PU_PD_TYPE, /* 28 */ MTK_PULL_PU_PD_TYPE, /* 29 */ + MTK_PULL_PU_PD_TYPE, /* 30 */ MTK_PULL_PU_PD_TYPE, /* 31 */ + MTK_PULL_PU_PD_TYPE, /* 32 */ MTK_PULL_PU_PD_TYPE, /* 33 */ + MTK_PULL_PU_PD_TYPE, /* 34 */ MTK_PULL_PU_PD_TYPE, /* 35 */ + MTK_PULL_PU_PD_TYPE, /* 36 */ MTK_PULL_PU_PD_TYPE, /* 37 */ + MTK_PULL_PU_PD_TYPE, /* 38 */ MTK_PULL_PU_PD_TYPE, /* 39 */ + MTK_PULL_PU_PD_TYPE, /* 40 */ MTK_PULL_PU_PD_TYPE, /* 41 */ + MTK_PULL_PU_PD_TYPE, /* 42 */ MTK_PULL_PU_PD_TYPE, /* 43 */ + MTK_PULL_PU_PD_TYPE, /* 44 */ MTK_PULL_PUPD_R1R0_TYPE, /* 45 */ + MTK_PULL_PUPD_R1R0_TYPE, /* 46 */ MTK_PULL_PUPD_R1R0_TYPE, /* 47 */ + MTK_PULL_PUPD_R1R0_TYPE, /* 48 */ MTK_PULL_PUPD_R1R0_TYPE, /* 49 */ + MTK_PULL_PUPD_R1R0_TYPE, /* 50 */ MTK_PULL_PUPD_R1R0_TYPE, /* 51 */ + MTK_PULL_PUPD_R1R0_TYPE, /* 52 */ MTK_PULL_PUPD_R1R0_TYPE, /* 53 */ + 
MTK_PULL_PUPD_R1R0_TYPE, /* 54 */ MTK_PULL_PUPD_R1R0_TYPE, /* 55 */ + MTK_PULL_PUPD_R1R0_TYPE, /* 56 */ MTK_PULL_PU_PD_TYPE, /* 57 */ + MTK_PULL_PU_PD_TYPE, /* 58 */ MTK_PULL_PU_PD_TYPE, /* 59 */ + MTK_PULL_PU_PD_TYPE, /* 60 */ MTK_PULL_PU_PD_TYPE, /* 61 */ + MTK_PULL_PU_PD_TYPE, /* 62 */ MTK_PULL_PU_PD_TYPE, /* 63 */ + MTK_PULL_PU_PD_TYPE, /* 64 */ MTK_PULL_PU_PD_TYPE, /* 65 */ + MTK_PULL_PU_PD_TYPE, /* 66 */ MTK_PULL_PU_PD_TYPE, /* 67 */ + MTK_PULL_PU_PD_TYPE, /* 68 */ MTK_PULL_PU_PD_TYPE, /* 69 */ + MTK_PULL_PU_PD_TYPE, /* 70 */ MTK_PULL_PU_PD_TYPE, /* 71 */ + MTK_PULL_PU_PD_TYPE, /* 72 */ MTK_PULL_PU_PD_TYPE, /* 73 */ + MTK_PULL_PU_PD_TYPE, /* 74 */ MTK_PULL_PU_PD_TYPE, /* 75 */ + MTK_PULL_PU_PD_TYPE, /* 76 */ MTK_PULL_PU_PD_TYPE, /* 77 */ + MTK_PULL_PU_PD_TYPE, /* 78 */ MTK_PULL_PU_PD_TYPE, /* 79 */ + MTK_PULL_PU_PD_TYPE, /* 80 */ MTK_PULL_PU_PD_TYPE, /* 81 */ + MTK_PULL_PU_PD_TYPE, /* 82 */ MTK_PULL_PU_PD_TYPE, /* 83 */ + MTK_PULL_PU_PD_TYPE, /* 84 */ MTK_PULL_PU_PD_TYPE, /* 85 */ + MTK_PULL_PU_PD_TYPE, /* 86 */ MTK_PULL_PU_PD_TYPE, /* 87 */ + MTK_PULL_PU_PD_TYPE, /* 88 */ MTK_PULL_PU_PD_TYPE, /* 89 */ + MTK_PULL_PU_PD_TYPE, /* 90 */ MTK_PULL_PU_PD_TYPE, /* 91 */ + MTK_PULL_PU_PD_TYPE, /* 92 */ MTK_PULL_PU_PD_TYPE, /* 93 */ + MTK_PULL_PU_PD_TYPE, /* 94 */ MTK_PULL_PU_PD_TYPE, /* 95 */ + MTK_PULL_PU_PD_TYPE, /* 96 */ MTK_PULL_PU_PD_TYPE, /* 97 */ + MTK_PULL_PU_PD_TYPE, /* 98 */ MTK_PULL_PU_PD_TYPE, /* 99 */ + MTK_PULL_PU_PD_TYPE, /* 100 */ MTK_PULL_PU_PD_TYPE, /* 101 */ + MTK_PULL_PU_PD_TYPE, /* 102 */ MTK_PULL_PU_PD_TYPE, /* 103 */ + MTK_PULL_PU_PD_TYPE, /* 104 */ MTK_PULL_PU_PD_TYPE, /* 105 */ + MTK_PULL_PU_PD_TYPE, /* 106 */ MTK_PULL_PU_PD_TYPE, /* 107 */ + MTK_PULL_PU_PD_TYPE, /* 108 */ MTK_PULL_PU_PD_TYPE, /* 109 */ + MTK_PULL_PU_PD_TYPE, /* 110 */ MTK_PULL_PU_PD_TYPE, /* 111 */ + MTK_PULL_PU_PD_TYPE, /* 112 */ MTK_PULL_PU_PD_TYPE, /* 113 */ + MTK_PULL_PU_PD_TYPE, /* 114 */ MTK_PULL_PU_PD_TYPE, /* 115 */ + MTK_PULL_PU_PD_TYPE, /* 116 */ MTK_PULL_PU_PD_TYPE, /* 117 */ + MTK_PULL_PU_PD_RSEL_TYPE, /* 118 */ MTK_PULL_PU_PD_RSEL_TYPE, /* 119 */ + MTK_PULL_PU_PD_RSEL_TYPE, /* 120 */ MTK_PULL_PU_PD_RSEL_TYPE, /* 121 */ + MTK_PULL_PU_PD_RSEL_TYPE, /* 122 */ MTK_PULL_PU_PD_RSEL_TYPE, /* 123 */ + MTK_PULL_PU_PD_RSEL_TYPE, /* 124 */ MTK_PULL_PU_PD_RSEL_TYPE, /* 125 */ + MTK_PULL_PU_PD_TYPE, /* 126 */ MTK_PULL_PU_PD_TYPE, /* 127 */ + MTK_PULL_PU_PD_TYPE, /* 128 */ MTK_PULL_PU_PD_TYPE, /* 129 */ + MTK_PULL_PU_PD_TYPE, /* 130 */ MTK_PULL_PU_PD_TYPE, /* 131 */ + MTK_PULL_PU_PD_TYPE, /* 132 */ MTK_PULL_PU_PD_TYPE, /* 133 */ + MTK_PULL_PU_PD_TYPE, /* 134 */ MTK_PULL_PU_PD_TYPE, /* 135 */ + MTK_PULL_PU_PD_TYPE, /* 136 */ MTK_PULL_PU_PD_TYPE, /* 137 */ + MTK_PULL_PU_PD_TYPE, /* 138 */ MTK_PULL_PU_PD_TYPE, /* 139 */ + MTK_PULL_PU_PD_RSEL_TYPE, /* 140 */ MTK_PULL_PU_PD_RSEL_TYPE, /* 141 */ + MTK_PULL_PU_PD_RSEL_TYPE, /* 142 */ MTK_PULL_PU_PD_TYPE, /* 143 */ + MTK_PULL_PU_PD_TYPE, /* 144 */ MTK_PULL_PU_PD_TYPE, /* 145 */ + MTK_PULL_PU_PD_TYPE, /* 146 */ MTK_PULL_PU_PD_TYPE, /* 147 */ + MTK_PULL_PU_PD_TYPE, /* 148 */ MTK_PULL_PU_PD_TYPE, /* 149 */ + MTK_PULL_PU_PD_TYPE, /* 150 */ MTK_PULL_PU_PD_TYPE, /* 151 */ + MTK_PULL_PUPD_R1R0_TYPE, /* 152 */ MTK_PULL_PUPD_R1R0_TYPE, /* 153 */ + MTK_PULL_PUPD_R1R0_TYPE, /* 154 */ MTK_PULL_PUPD_R1R0_TYPE, /* 155 */ + MTK_PULL_PU_PD_TYPE, /* 156 */ MTK_PULL_PU_PD_TYPE, /* 157 */ + MTK_PULL_PU_PD_TYPE, /* 158 */ MTK_PULL_PU_PD_TYPE, /* 159 */ + MTK_PULL_PU_PD_RSEL_TYPE, /* 160 */ MTK_PULL_PU_PD_RSEL_TYPE, /* 161 */ + MTK_PULL_PU_PD_TYPE, /* 162 */ MTK_PULL_PU_PD_TYPE, 
/* 163 */ + MTK_PULL_PU_PD_TYPE, /* 164 */ MTK_PULL_PU_PD_TYPE, /* 165 */ + MTK_PULL_PU_PD_TYPE, /* 166 */ MTK_PULL_PU_PD_TYPE, /* 167 */ + MTK_PULL_PU_PD_TYPE, /* 168 */ MTK_PULL_PU_PD_TYPE, /* 169 */ + MTK_PULL_PU_PD_TYPE, /* 170 */ MTK_PULL_PU_PD_TYPE, /* 171 */ + MTK_PULL_PU_PD_TYPE, /* 172 */ MTK_PULL_PU_PD_TYPE, /* 173 */ + MTK_PULL_PU_PD_TYPE, /* 174 */ MTK_PULL_PU_PD_TYPE, /* 175 */ + MTK_PULL_PU_PD_TYPE, /* 176 */ MTK_PULL_PU_PD_TYPE, /* 177 */ + MTK_PULL_PU_PD_TYPE, /* 178 */ MTK_PULL_PU_PD_TYPE, /* 179 */ + MTK_PULL_PU_PD_TYPE, /* 180 */ MTK_PULL_PU_PD_TYPE, /* 181 */ + MTK_PULL_PU_PD_TYPE, /* 182 */ MTK_PULL_PUPD_R1R0_TYPE, /* 183 */ + MTK_PULL_PUPD_R1R0_TYPE, /* 184 */ MTK_PULL_PUPD_R1R0_TYPE, /* 185 */ + MTK_PULL_PUPD_R1R0_TYPE, /* 186 */ MTK_PULL_PUPD_R1R0_TYPE, /* 187 */ + MTK_PULL_PUPD_R1R0_TYPE, /* 188 */ MTK_PULL_PUPD_R1R0_TYPE, /* 189 */ + MTK_PULL_PUPD_R1R0_TYPE, /* 190 */ MTK_PULL_PUPD_R1R0_TYPE, /* 191 */ + MTK_PULL_PUPD_R1R0_TYPE, /* 192 */ MTK_PULL_PUPD_R1R0_TYPE, /* 193 */ + MTK_PULL_PUPD_R1R0_TYPE, /* 194 */ MTK_PULL_PU_PD_TYPE, /* 195 */ + MTK_PULL_PU_PD_TYPE, /* 196 */ MTK_PULL_PU_PD_TYPE, /* 197 */ + MTK_PULL_PU_PD_TYPE, /* 198 */ MTK_PULL_PU_PD_TYPE, /* 199 */ + MTK_PULL_PU_PD_RSEL_TYPE, /* 200 */ MTK_PULL_PU_PD_RSEL_TYPE, /* 201 */ + MTK_PULL_PU_PD_RSEL_TYPE, /* 202 */ MTK_PULL_PU_PD_RSEL_TYPE, /* 203 */ + MTK_PULL_PU_PD_RSEL_TYPE, /* 204 */ MTK_PULL_PU_PD_RSEL_TYPE, /* 205 */ + MTK_PULL_PU_PD_TYPE, /* 206 */ MTK_PULL_PU_PD_TYPE, /* 207 */ + MTK_PULL_PU_PD_TYPE, /* 208 */ MTK_PULL_PU_PD_TYPE, /* 209 */ + MTK_PULL_PU_PD_TYPE, /* 210 */ MTK_PULL_PU_PD_TYPE, /* 211 */ + MTK_PULL_PU_PD_TYPE, /* 212 */ MTK_PULL_PU_PD_TYPE, /* 213 */ + MTK_PULL_PU_PD_TYPE, /* 214 */ MTK_PULL_PU_PD_TYPE, /* 215 */ + MTK_PULL_PU_PD_TYPE, /* 216 */ MTK_PULL_PU_PD_TYPE, /* 217 */ + MTK_PULL_PU_PD_TYPE, /* 218 */ MTK_PULL_PU_PD_TYPE, /* 219 */
+};
+
+static const char * const mt6893_pinctrl_register_base_name[] = {
+	"base", "rm", "bm", "bl", "br", "lm", "lb", "rt", "lt", "tl",
+};
+
+static const struct mtk_pin_reg_calc mt6893_reg_cals[PINCTRL_PIN_REG_MAX] = {
+	[PINCTRL_PIN_REG_MODE] = MTK_RANGE(mt6893_pin_mode_range),
+	[PINCTRL_PIN_REG_DIR] = MTK_RANGE(mt6893_pin_dir_range),
+	[PINCTRL_PIN_REG_DI] = MTK_RANGE(mt6893_pin_di_range),
+	[PINCTRL_PIN_REG_DO] = MTK_RANGE(mt6893_pin_do_range),
+	[PINCTRL_PIN_REG_SR] = MTK_RANGE(mt6893_pin_dir_range),
+	[PINCTRL_PIN_REG_SMT] = MTK_RANGE(mt6893_pin_smt_range),
+	[PINCTRL_PIN_REG_IES] = MTK_RANGE(mt6893_pin_ies_range),
+	[PINCTRL_PIN_REG_PU] = MTK_RANGE(mt6893_pin_pu_range),
+	[PINCTRL_PIN_REG_PD] = MTK_RANGE(mt6893_pin_pd_range),
+	[PINCTRL_PIN_REG_DRV] = MTK_RANGE(mt6893_pin_drv_range),
+	[PINCTRL_PIN_REG_PUPD] = MTK_RANGE(mt6893_pin_pupd_range),
+	[PINCTRL_PIN_REG_R0] = MTK_RANGE(mt6893_pin_r0_range),
+	[PINCTRL_PIN_REG_R1] = MTK_RANGE(mt6893_pin_r1_range),
+	[PINCTRL_PIN_REG_DRV_ADV] = MTK_RANGE(mt6893_pin_drv_adv_range),
+	[PINCTRL_PIN_REG_RSEL] = MTK_RANGE(mt6893_pin_rsel_range),
+};
+
+static const struct mtk_eint_hw mt6893_eint_hw = {
+	.port_mask = 7,
+	.ports = 7,
+	.ap_num = 224,
+	.db_cnt = 32,
+	.db_time = debounce_time_mt6765,
+};
+
+static const struct mtk_pin_soc mt6893_data = {
+	.reg_cal = mt6893_reg_cals,
+	.pins = mtk_pins_mt6893,
+	.npins = ARRAY_SIZE(mtk_pins_mt6893),
+	.ngrps = ARRAY_SIZE(mtk_pins_mt6893),
+	.eint_hw = &mt6893_eint_hw,
+	.nfuncs = 8,
+	.gpio_m = 0,
+	.base_names = mt6893_pinctrl_register_base_name,
+	.nbase_names = ARRAY_SIZE(mt6893_pinctrl_register_base_name),
+	.pull_type = mt6893_pull_type,
+	.bias_set_combo = mtk_pinconf_bias_set_combo,
+	.bias_get_combo = mtk_pinconf_bias_get_combo,
+	.drive_set = mtk_pinconf_drive_set_rev1,
+	.drive_get = mtk_pinconf_drive_get_rev1,
+	.adv_drive_set = mtk_pinconf_adv_drive_set_raw,
+	.adv_drive_get = mtk_pinconf_adv_drive_get_raw,
+};
+
+static const struct of_device_id mt6893_pinctrl_of_match[] = {
+	{ .compatible = "mediatek,mt6893-pinctrl", .data = &mt6893_data },
+	{ /* sentinel */ }
+};
+
+static struct platform_driver mt6893_pinctrl_driver = {
+	.driver = {
+		.name = "mt6893-pinctrl",
+		.of_match_table = mt6893_pinctrl_of_match,
+		.pm = pm_sleep_ptr(&mtk_paris_pinctrl_pm_ops)
+	},
+	.probe = mtk_paris_pinctrl_probe,
+};
+
+static int __init mt6893_pinctrl_init(void)
+{
+	return platform_driver_register(&mt6893_pinctrl_driver);
+}
+
+arch_initcall(mt6893_pinctrl_init);
+
+MODULE_DESCRIPTION("MediaTek MT6893 Pinctrl Driver");
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt8196.c b/drivers/pinctrl/mediatek/pinctrl-mt8196.c
new file mode 100644
index 000000000000..82a73929c7a0
--- /dev/null
+++ b/drivers/pinctrl/mediatek/pinctrl-mt8196.c
@@ -0,0 +1,1860 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2025 MediaTek Inc.
+ * Author: Guodong Liu <Guodong.Liu@mediatek.com>
+ * Lei Xue <lei.xue@mediatek.com>
+ * Cathy Xu <ot_cathy.xu@mediatek.com>
+ */
+
+#include <linux/module.h>
+#include "pinctrl-mtk-mt8196.h"
+#include "pinctrl-paris.h"
+
+#define PIN_FIELD_BASE(s_pin, e_pin, i_base, s_addr, x_addrs, s_bit, x_bits) \
+	PIN_FIELD_CALC(s_pin, e_pin, i_base, s_addr, x_addrs, s_bit, x_bits, \
+		       32, 0)
+
+#define PINS_FIELD_BASE(s_pin, e_pin, i_base, s_addr, x_addrs, s_bit, x_bits) \
+	PIN_FIELD_CALC(s_pin, e_pin, i_base, s_addr, x_addrs, s_bit, x_bits, \
+		       32, 1)
+
+static const struct mtk_pin_field_calc mt8196_pin_mode_range[] = {
+	PIN_FIELD(0, 270, 0x0300, 0x10, 0, 4),
+};
+
+static const struct mtk_pin_field_calc mt8196_pin_dir_range[] = {
+	PIN_FIELD(0, 270, 0x0000, 0x10, 0, 1),
+};
+
+static const struct mtk_pin_field_calc mt8196_pin_di_range[] = {
+	PIN_FIELD(0, 270, 0x0200, 0x10, 0, 1),
+};
+
+static const struct mtk_pin_field_calc mt8196_pin_do_range[] = {
+	PIN_FIELD(0, 270, 0x0100, 0x10, 0, 1),
+};
+
+static const struct mtk_pin_field_calc mt8196_pin_smt_range[] = {
+ PIN_FIELD_BASE(0, 0, 8, 0x00d0, 0x10, 0, 1), + PIN_FIELD_BASE(1, 1, 8, 0x00d0, 0x10, 1, 1), + PIN_FIELD_BASE(2, 2, 11, 0x00a0, 0x10, 1, 1), + PIN_FIELD_BASE(3, 3, 11, 0x00a0, 0x10, 1, 1), + PIN_FIELD_BASE(4, 4, 11, 0x00a0, 0x10, 2, 1), + PIN_FIELD_BASE(5, 5, 11, 0x00a0, 0x10, 2, 1), + PIN_FIELD_BASE(6, 6, 11, 0x00a0, 0x10, 2, 1), + PIN_FIELD_BASE(7, 7, 11, 0x00a0, 0x10, 2, 1), + PIN_FIELD_BASE(8, 8, 11, 0x00a0, 0x10, 3, 1), + PIN_FIELD_BASE(9, 9, 9, 0x0120, 0x10, 13, 1), + PIN_FIELD_BASE(10, 10, 9, 0x0120, 0x10, 12, 1), + PIN_FIELD_BASE(11, 11, 8, 0x00d0, 0x10, 2, 1), + PIN_FIELD_BASE(12, 12, 9, 0x0120, 0x10, 15, 1), + PIN_FIELD_BASE(13, 13, 6, 0x0120, 0x10, 3, 1), + PIN_FIELD_BASE(14, 14, 3, 0x00c0, 0x10, 0, 1), + PIN_FIELD_BASE(15, 15, 6, 0x0120, 0x10, 0, 1), + PIN_FIELD_BASE(16, 16, 6, 0x0120, 0x10, 3, 1), + PIN_FIELD_BASE(17, 17, 6, 0x0120, 0x10, 3, 1), + PIN_FIELD_BASE(18, 18, 6, 0x0120, 0x10, 1, 1), + PIN_FIELD_BASE(19, 19, 6, 0x0120, 0x10, 2, 1), + PIN_FIELD_BASE(20, 20, 3, 0x00c0, 0x10, 4, 1), + PIN_FIELD_BASE(21, 21, 2, 0x00b0, 0x10, 0, 1), + PIN_FIELD_BASE(22, 22, 2, 0x00b0, 0x10, 1, 1), + PIN_FIELD_BASE(23, 23, 2, 0x00b0, 0x10, 2, 1), + PIN_FIELD_BASE(24, 24, 2, 0x00b0, 0x10, 3, 1), + PIN_FIELD_BASE(25, 25, 2, 0x00b0, 0x10, 4, 1), + 
PIN_FIELD_BASE(26, 26, 2, 0x00b0, 0x10, 5, 1), + PIN_FIELD_BASE(27, 27, 2, 0x00b0, 0x10, 7, 1), + PIN_FIELD_BASE(28, 28, 2, 0x00b0, 0x10, 7, 1), + PIN_FIELD_BASE(29, 29, 2, 0x00b0, 0x10, 7, 1), + PIN_FIELD_BASE(30, 30, 2, 0x00b0, 0x10, 8, 1), + PIN_FIELD_BASE(31, 31, 2, 0x00b0, 0x10, 8, 1), + PIN_FIELD_BASE(32, 32, 1, 0x00c0, 0x10, 8, 1), + PIN_FIELD_BASE(33, 33, 1, 0x00c0, 0x10, 8, 1), + PIN_FIELD_BASE(34, 34, 1, 0x00c0, 0x10, 8, 1), + PIN_FIELD_BASE(35, 35, 1, 0x00c0, 0x10, 8, 1), + PIN_FIELD_BASE(36, 36, 1, 0x00c0, 0x10, 9, 1), + PIN_FIELD_BASE(37, 37, 1, 0x00c0, 0x10, 9, 1), + PIN_FIELD_BASE(38, 38, 1, 0x00c0, 0x10, 6, 1), + PIN_FIELD_BASE(39, 39, 8, 0x00d0, 0x10, 4, 1), + PIN_FIELD_BASE(40, 40, 8, 0x00d0, 0x10, 3, 1), + PIN_FIELD_BASE(41, 41, 8, 0x00d0, 0x10, 3, 1), + PIN_FIELD_BASE(42, 42, 8, 0x00d0, 0x10, 3, 1), + PIN_FIELD_BASE(43, 43, 8, 0x00d0, 0x10, 3, 1), + PIN_FIELD_BASE(44, 44, 8, 0x00d0, 0x10, 5, 1), + PIN_FIELD_BASE(45, 45, 8, 0x00d0, 0x10, 5, 1), + PIN_FIELD_BASE(46, 46, 8, 0x00d0, 0x10, 6, 1), + PIN_FIELD_BASE(47, 47, 8, 0x00d0, 0x10, 9, 1), + PIN_FIELD_BASE(48, 48, 8, 0x00d0, 0x10, 7, 1), + PIN_FIELD_BASE(49, 49, 8, 0x00d0, 0x10, 10, 1), + PIN_FIELD_BASE(50, 50, 8, 0x00d0, 0x10, 8, 1), + PIN_FIELD_BASE(51, 51, 8, 0x00d0, 0x10, 11, 1), + PIN_FIELD_BASE(52, 52, 9, 0x0120, 0x10, 7, 1), + PIN_FIELD_BASE(53, 53, 9, 0x0120, 0x10, 8, 1), + PIN_FIELD_BASE(54, 54, 9, 0x0120, 0x10, 2, 1), + PIN_FIELD_BASE(55, 55, 9, 0x0120, 0x10, 1, 1), + PIN_FIELD_BASE(56, 56, 9, 0x0120, 0x10, 5, 1), + PIN_FIELD_BASE(57, 57, 9, 0x0120, 0x10, 6, 1), + PIN_FIELD_BASE(58, 58, 9, 0x0120, 0x10, 3, 1), + PIN_FIELD_BASE(59, 59, 9, 0x0120, 0x10, 4, 1), + PIN_FIELD_BASE(60, 60, 9, 0x0120, 0x10, 19, 1), + PIN_FIELD_BASE(61, 61, 9, 0x0120, 0x10, 10, 1), + PIN_FIELD_BASE(62, 62, 9, 0x0120, 0x10, 9, 1), + PIN_FIELD_BASE(63, 63, 9, 0x0120, 0x10, 14, 1), + PIN_FIELD_BASE(64, 64, 9, 0x0120, 0x10, 0, 1), + PIN_FIELD_BASE(65, 65, 9, 0x0120, 0x10, 11, 1), + PIN_FIELD_BASE(66, 66, 9, 0x0120, 0x10, 16, 1), + PIN_FIELD_BASE(67, 67, 9, 0x0120, 0x10, 18, 1), + PIN_FIELD_BASE(68, 68, 9, 0x0120, 0x10, 18, 1), + PIN_FIELD_BASE(69, 69, 9, 0x0120, 0x10, 18, 1), + PIN_FIELD_BASE(70, 70, 9, 0x0120, 0x10, 17, 1), + PIN_FIELD_BASE(71, 71, 9, 0x0120, 0x10, 17, 1), + PIN_FIELD_BASE(72, 72, 9, 0x0120, 0x10, 18, 1), + PIN_FIELD_BASE(73, 73, 9, 0x0120, 0x10, 17, 1), + PIN_FIELD_BASE(74, 74, 9, 0x0120, 0x10, 17, 1), + PIN_FIELD_BASE(75, 75, 10, 0x00b0, 0x10, 2, 1), + PIN_FIELD_BASE(76, 76, 10, 0x00b0, 0x10, 3, 1), + PIN_FIELD_BASE(77, 77, 10, 0x00b0, 0x10, 4, 1), + PIN_FIELD_BASE(78, 78, 10, 0x00b0, 0x10, 5, 1), + PIN_FIELD_BASE(79, 79, 10, 0x00b0, 0x10, 0, 1), + PIN_FIELD_BASE(80, 80, 10, 0x00b0, 0x10, 1, 1), + PIN_FIELD_BASE(81, 81, 11, 0x00a0, 0x10, 0, 1), + PIN_FIELD_BASE(82, 82, 11, 0x00a0, 0x10, 0, 1), + PIN_FIELD_BASE(83, 83, 11, 0x00a0, 0x10, 0, 1), + PIN_FIELD_BASE(84, 84, 11, 0x00a0, 0x10, 0, 1), + PIN_FIELD_BASE(85, 85, 11, 0x00a0, 0x10, 0, 1), + PIN_FIELD_BASE(86, 86, 11, 0x00a0, 0x10, 0, 1), + PIN_FIELD_BASE(87, 87, 11, 0x00a0, 0x10, 0, 1), + PIN_FIELD_BASE(88, 88, 11, 0x00a0, 0x10, 0, 1), + PIN_FIELD_BASE(89, 89, 11, 0x00a0, 0x10, 1, 1), + PIN_FIELD_BASE(90, 90, 11, 0x00a0, 0x10, 1, 1), + PIN_FIELD_BASE(91, 91, 12, 0x00c0, 0x10, 5, 1), + PIN_FIELD_BASE(92, 92, 12, 0x00c0, 0x10, 5, 1), + PIN_FIELD_BASE(93, 93, 12, 0x00c0, 0x10, 5, 1), + PIN_FIELD_BASE(94, 94, 12, 0x00c0, 0x10, 4, 1), + PIN_FIELD_BASE(95, 95, 12, 0x00c0, 0x10, 0, 1), + PIN_FIELD_BASE(96, 96, 12, 0x00c0, 0x10, 2, 1), + PIN_FIELD_BASE(97, 97, 12, 0x00c0, 
0x10, 1, 1), + PIN_FIELD_BASE(98, 98, 12, 0x00c0, 0x10, 3, 1), + PIN_FIELD_BASE(99, 99, 12, 0x00c0, 0x10, 6, 1), + PIN_FIELD_BASE(100, 100, 12, 0x00c0, 0x10, 9, 1), + PIN_FIELD_BASE(101, 101, 12, 0x00c0, 0x10, 7, 1), + PIN_FIELD_BASE(102, 102, 12, 0x00c0, 0x10, 10, 1), + PIN_FIELD_BASE(103, 103, 12, 0x00c0, 0x10, 4, 1), + PIN_FIELD_BASE(104, 104, 12, 0x00c0, 0x10, 8, 1), + PIN_FIELD_BASE(105, 105, 12, 0x00c0, 0x10, 11, 1), + PIN_FIELD_BASE(106, 106, 5, 0x00b0, 0x10, 0, 1), + PIN_FIELD_BASE(107, 107, 5, 0x00b0, 0x10, 0, 1), + PIN_FIELD_BASE(108, 108, 5, 0x00b0, 0x10, 0, 1), + PIN_FIELD_BASE(109, 109, 5, 0x00b0, 0x10, 0, 1), + PIN_FIELD_BASE(110, 110, 5, 0x00b0, 0x10, 1, 1), + PIN_FIELD_BASE(111, 111, 5, 0x00b0, 0x10, 1, 1), + PIN_FIELD_BASE(112, 112, 5, 0x00b0, 0x10, 1, 1), + PIN_FIELD_BASE(113, 113, 5, 0x00b0, 0x10, 1, 1), + PIN_FIELD_BASE(114, 114, 5, 0x00b0, 0x10, 0, 1), + PIN_FIELD_BASE(115, 115, 5, 0x00b0, 0x10, 2, 1), + PIN_FIELD_BASE(116, 116, 5, 0x00b0, 0x10, 2, 1), + PIN_FIELD_BASE(117, 117, 5, 0x00b0, 0x10, 2, 1), + PIN_FIELD_BASE(118, 118, 6, 0x0120, 0x10, 6, 1), + PIN_FIELD_BASE(119, 119, 6, 0x0120, 0x10, 7, 1), + PIN_FIELD_BASE(120, 120, 6, 0x0120, 0x10, 9, 1), + PIN_FIELD_BASE(121, 121, 6, 0x0120, 0x10, 8, 1), + PIN_FIELD_BASE(122, 122, 6, 0x0120, 0x10, 3, 1), + PIN_FIELD_BASE(123, 123, 6, 0x0120, 0x10, 4, 1), + PIN_FIELD_BASE(124, 124, 6, 0x0120, 0x10, 5, 1), + PIN_FIELD_BASE(125, 125, 7, 0x00f0, 0x10, 0, 1), + PIN_FIELD_BASE(126, 126, 7, 0x00f0, 0x10, 1, 1), + PIN_FIELD_BASE(127, 127, 7, 0x00f0, 0x10, 2, 1), + PIN_FIELD_BASE(128, 128, 7, 0x00f0, 0x10, 3, 1), + PIN_FIELD_BASE(129, 129, 7, 0x00f0, 0x10, 4, 1), + PIN_FIELD_BASE(130, 130, 7, 0x00f0, 0x10, 5, 1), + PIN_FIELD_BASE(131, 131, 7, 0x00f0, 0x10, 9, 1), + PIN_FIELD_BASE(132, 132, 7, 0x00f0, 0x10, 11, 1), + PIN_FIELD_BASE(133, 133, 7, 0x00f0, 0x10, 10, 1), + PIN_FIELD_BASE(134, 134, 7, 0x00f0, 0x10, 6, 1), + PIN_FIELD_BASE(135, 135, 7, 0x00f0, 0x10, 8, 1), + PIN_FIELD_BASE(136, 136, 7, 0x00f0, 0x10, 7, 1), + PIN_FIELD_BASE(137, 137, 4, 0x00d0, 0x10, 2, 1), + PIN_FIELD_BASE(138, 138, 4, 0x00d0, 0x10, 3, 1), + PIN_FIELD_BASE(139, 139, 4, 0x00d0, 0x10, 4, 1), + PIN_FIELD_BASE(140, 140, 4, 0x00d0, 0x10, 5, 1), + PIN_FIELD_BASE(141, 141, 4, 0x00d0, 0x10, 6, 1), + PIN_FIELD_BASE(142, 142, 4, 0x00d0, 0x10, 7, 1), + PIN_FIELD_BASE(143, 143, 4, 0x00d0, 0x10, 8, 1), + PIN_FIELD_BASE(144, 144, 4, 0x00d0, 0x10, 9, 1), + PIN_FIELD_BASE(145, 145, 4, 0x00d0, 0x10, 10, 1), + PIN_FIELD_BASE(146, 146, 4, 0x00d0, 0x10, 10, 1), + PIN_FIELD_BASE(147, 147, 4, 0x00d0, 0x10, 0, 1), + PIN_FIELD_BASE(148, 148, 4, 0x00d0, 0x10, 1, 1), + PIN_FIELD_BASE(149, 149, 4, 0x00d0, 0x10, 11, 1), + PIN_FIELD_BASE(150, 150, 4, 0x00d0, 0x10, 11, 1), + PIN_FIELD_BASE(151, 151, 4, 0x00d0, 0x10, 11, 1), + PIN_FIELD_BASE(152, 152, 4, 0x00d0, 0x10, 11, 1), + PIN_FIELD_BASE(153, 153, 4, 0x00d0, 0x10, 13, 1), + PIN_FIELD_BASE(154, 154, 4, 0x00d0, 0x10, 13, 1), + PIN_FIELD_BASE(155, 155, 4, 0x00d0, 0x10, 12, 1), + PIN_FIELD_BASE(156, 156, 4, 0x00d0, 0x10, 12, 1), + PIN_FIELD_BASE(157, 157, 2, 0x00b0, 0x10, 6, 1), + PIN_FIELD_BASE(158, 158, 2, 0x00b0, 0x10, 6, 1), + PIN_FIELD_BASE(159, 159, 2, 0x00b0, 0x10, 6, 1), + PIN_FIELD_BASE(160, 160, 3, 0x00c0, 0x10, 1, 1), + PIN_FIELD_BASE(161, 161, 3, 0x00c0, 0x10, 2, 1), + PIN_FIELD_BASE(162, 162, 3, 0x00c0, 0x10, 2, 1), + PIN_FIELD_BASE(163, 163, 3, 0x00c0, 0x10, 2, 1), + PIN_FIELD_BASE(164, 164, 3, 0x00c0, 0x10, 7, 1), + PIN_FIELD_BASE(165, 165, 3, 0x00c0, 0x10, 9, 1), + PIN_FIELD_BASE(166, 166, 3, 0x00c0, 0x10, 8, 1), + 
PIN_FIELD_BASE(167, 167, 3, 0x00c0, 0x10, 10, 1), + PIN_FIELD_BASE(168, 168, 3, 0x00c0, 0x10, 11, 1), + PIN_FIELD_BASE(169, 169, 3, 0x00c0, 0x10, 3, 1), + PIN_FIELD_BASE(170, 170, 3, 0x00c0, 0x10, 12, 1), + PIN_FIELD_BASE(171, 171, 3, 0x00c0, 0x10, 3, 1), + PIN_FIELD_BASE(172, 172, 3, 0x00c0, 0x10, 3, 1), + PIN_FIELD_BASE(173, 173, 3, 0x00c0, 0x10, 3, 1), + PIN_FIELD_BASE(174, 174, 1, 0x00c0, 0x10, 9, 1), + PIN_FIELD_BASE(175, 175, 1, 0x00c0, 0x10, 9, 1), + PIN_FIELD_BASE(176, 176, 1, 0x00c0, 0x10, 10, 1), + PIN_FIELD_BASE(177, 177, 1, 0x00c0, 0x10, 11, 1), + PIN_FIELD_BASE(178, 178, 1, 0x00c0, 0x10, 7, 1), + PIN_FIELD_BASE(179, 179, 1, 0x00c0, 0x10, 7, 1), + PIN_FIELD_BASE(180, 180, 1, 0x00c0, 0x10, 0, 1), + PIN_FIELD_BASE(181, 181, 1, 0x00c0, 0x10, 1, 1), + PIN_FIELD_BASE(182, 182, 1, 0x00c0, 0x10, 2, 1), + PIN_FIELD_BASE(183, 183, 1, 0x00c0, 0x10, 3, 1), + PIN_FIELD_BASE(184, 184, 1, 0x00c0, 0x10, 4, 1), + PIN_FIELD_BASE(185, 185, 1, 0x00c0, 0x10, 5, 1), + PIN_FIELD_BASE(186, 186, 13, 0x0110, 0x10, 14, 1), + PIN_FIELD_BASE(187, 187, 13, 0x0110, 0x10, 14, 1), + PIN_FIELD_BASE(188, 188, 13, 0x0110, 0x10, 4, 1), + PIN_FIELD_BASE(189, 189, 13, 0x0110, 0x10, 9, 1), + PIN_FIELD_BASE(190, 190, 13, 0x0110, 0x10, 5, 1), + PIN_FIELD_BASE(191, 191, 13, 0x0110, 0x10, 10, 1), + PIN_FIELD_BASE(192, 192, 13, 0x0110, 0x10, 0, 1), + PIN_FIELD_BASE(193, 193, 13, 0x0110, 0x10, 15, 1), + PIN_FIELD_BASE(194, 194, 13, 0x0110, 0x10, 6, 1), + PIN_FIELD_BASE(195, 195, 13, 0x0110, 0x10, 11, 1), + PIN_FIELD_BASE(196, 196, 13, 0x0110, 0x10, 1, 1), + PIN_FIELD_BASE(197, 197, 13, 0x0110, 0x10, 16, 1), + PIN_FIELD_BASE(198, 198, 13, 0x0110, 0x10, 7, 1), + PIN_FIELD_BASE(199, 199, 13, 0x0110, 0x10, 12, 1), + PIN_FIELD_BASE(200, 200, 13, 0x0110, 0x10, 19, 1), + PIN_FIELD_BASE(201, 201, 13, 0x0110, 0x10, 22, 1), + PIN_FIELD_BASE(202, 202, 13, 0x0110, 0x10, 8, 1), + PIN_FIELD_BASE(203, 203, 13, 0x0110, 0x10, 13, 1), + PIN_FIELD_BASE(204, 204, 13, 0x0110, 0x10, 2, 1), + PIN_FIELD_BASE(205, 205, 13, 0x0110, 0x10, 3, 1), + PIN_FIELD_BASE(206, 206, 13, 0x0110, 0x10, 18, 1), + PIN_FIELD_BASE(207, 207, 13, 0x0110, 0x10, 17, 1), + PIN_FIELD_BASE(208, 208, 13, 0x0110, 0x10, 17, 1), + PIN_FIELD_BASE(209, 209, 13, 0x0110, 0x10, 17, 1), + PIN_FIELD_BASE(210, 210, 14, 0x0130, 0x10, 0, 1), + PIN_FIELD_BASE(211, 211, 14, 0x0130, 0x10, 1, 1), + PIN_FIELD_BASE(212, 212, 14, 0x0130, 0x10, 2, 1), + PIN_FIELD_BASE(213, 213, 14, 0x0130, 0x10, 3, 1), + PIN_FIELD_BASE(214, 214, 13, 0x0110, 0x10, 20, 1), + PIN_FIELD_BASE(215, 215, 13, 0x0110, 0x10, 21, 1), + PIN_FIELD_BASE(216, 216, 14, 0x0130, 0x10, 11, 1), + PIN_FIELD_BASE(217, 217, 14, 0x0130, 0x10, 11, 1), + PIN_FIELD_BASE(218, 218, 14, 0x0130, 0x10, 11, 1), + PIN_FIELD_BASE(219, 219, 14, 0x0130, 0x10, 4, 1), + PIN_FIELD_BASE(220, 220, 14, 0x0130, 0x10, 11, 1), + PIN_FIELD_BASE(221, 221, 14, 0x0130, 0x10, 12, 1), + PIN_FIELD_BASE(222, 222, 14, 0x0130, 0x10, 22, 1), + PIN_FIELD_BASE(223, 223, 14, 0x0130, 0x10, 21, 1), + PIN_FIELD_BASE(224, 224, 14, 0x0130, 0x10, 5, 1), + PIN_FIELD_BASE(225, 225, 14, 0x0130, 0x10, 6, 1), + PIN_FIELD_BASE(226, 226, 14, 0x0130, 0x10, 7, 1), + PIN_FIELD_BASE(227, 227, 14, 0x0130, 0x10, 8, 1), + PIN_FIELD_BASE(228, 228, 14, 0x0130, 0x10, 9, 1), + PIN_FIELD_BASE(229, 229, 14, 0x0130, 0x10, 10, 1), + PIN_FIELD_BASE(230, 230, 15, 0x00e0, 0x10, 0, 1), + PIN_FIELD_BASE(231, 231, 15, 0x00e0, 0x10, 0, 1), + PIN_FIELD_BASE(232, 232, 15, 0x00e0, 0x10, 0, 1), + PIN_FIELD_BASE(233, 233, 15, 0x00e0, 0x10, 1, 1), + PIN_FIELD_BASE(234, 234, 15, 0x00e0, 0x10, 1, 1), + 
PIN_FIELD_BASE(235, 235, 15, 0x00e0, 0x10, 1, 1), + PIN_FIELD_BASE(236, 236, 15, 0x00e0, 0x10, 1, 1), + PIN_FIELD_BASE(237, 237, 15, 0x00e0, 0x10, 2, 1), + PIN_FIELD_BASE(238, 238, 15, 0x00e0, 0x10, 2, 1), + PIN_FIELD_BASE(239, 239, 15, 0x00e0, 0x10, 2, 1), + PIN_FIELD_BASE(240, 240, 15, 0x00e0, 0x10, 2, 1), + PIN_FIELD_BASE(241, 241, 15, 0x00e0, 0x10, 3, 1), + PIN_FIELD_BASE(242, 242, 15, 0x00e0, 0x10, 3, 1), + PIN_FIELD_BASE(243, 243, 15, 0x00e0, 0x10, 3, 1), + PIN_FIELD_BASE(244, 244, 15, 0x00e0, 0x10, 3, 1), + PIN_FIELD_BASE(245, 245, 15, 0x00e0, 0x10, 4, 1), + PIN_FIELD_BASE(246, 246, 15, 0x00e0, 0x10, 5, 1), + PIN_FIELD_BASE(247, 247, 15, 0x00e0, 0x10, 5, 1), + PIN_FIELD_BASE(248, 248, 15, 0x00e0, 0x10, 4, 1), + PIN_FIELD_BASE(249, 249, 15, 0x00e0, 0x10, 4, 1), + PIN_FIELD_BASE(250, 250, 15, 0x00e0, 0x10, 4, 1), + PIN_FIELD_BASE(251, 251, 3, 0x00c0, 0x10, 4, 1), + PIN_FIELD_BASE(252, 252, 3, 0x00c0, 0x10, 4, 1), + PIN_FIELD_BASE(253, 253, 3, 0x00c0, 0x10, 4, 1), + PIN_FIELD_BASE(254, 254, 3, 0x00c0, 0x10, 5, 1), + PIN_FIELD_BASE(255, 255, 3, 0x00c0, 0x10, 5, 1), + PIN_FIELD_BASE(256, 256, 3, 0x00c0, 0x10, 5, 1), + PIN_FIELD_BASE(257, 257, 3, 0x00c0, 0x10, 5, 1), + PIN_FIELD_BASE(258, 258, 3, 0x00c0, 0x10, 6, 1), + PIN_FIELD_BASE(259, 259, 14, 0x0130, 0x10, 13, 1), + PIN_FIELD_BASE(260, 260, 14, 0x0130, 0x10, 14, 1), + PIN_FIELD_BASE(261, 261, 14, 0x0130, 0x10, 15, 1), + PIN_FIELD_BASE(262, 262, 14, 0x0130, 0x10, 16, 1), + PIN_FIELD_BASE(263, 263, 14, 0x0130, 0x10, 17, 1), + PIN_FIELD_BASE(264, 264, 14, 0x0130, 0x10, 18, 1), + PIN_FIELD_BASE(265, 265, 14, 0x0130, 0x10, 19, 1), + PIN_FIELD_BASE(266, 266, 14, 0x0130, 0x10, 20, 1), + PIN_FIELD_BASE(267, 267, 15, 0x00e0, 0x10, 8, 1), + PIN_FIELD_BASE(268, 268, 15, 0x00e0, 0x10, 9, 1), + PIN_FIELD_BASE(269, 269, 15, 0x00e0, 0x10, 6, 1), + PIN_FIELD_BASE(270, 270, 15, 0x00e0, 0x10, 7, 1), +}; + +static const struct mtk_pin_field_calc mt8196_pin_ies_range[] = { + PIN_FIELD_BASE(0, 0, 8, 0x0060, 0x10, 0, 1), + PIN_FIELD_BASE(1, 1, 8, 0x0060, 0x10, 1, 1), + PIN_FIELD_BASE(2, 2, 11, 0x0040, 0x10, 1, 1), + PIN_FIELD_BASE(3, 3, 11, 0x0040, 0x10, 2, 1), + PIN_FIELD_BASE(4, 4, 11, 0x0040, 0x10, 3, 1), + PIN_FIELD_BASE(5, 5, 11, 0x0040, 0x10, 4, 1), + PIN_FIELD_BASE(6, 6, 11, 0x0040, 0x10, 5, 1), + PIN_FIELD_BASE(7, 7, 11, 0x0040, 0x10, 6, 1), + PIN_FIELD_BASE(8, 8, 11, 0x0040, 0x10, 7, 1), + PIN_FIELD_BASE(9, 9, 9, 0x0070, 0x10, 14, 1), + PIN_FIELD_BASE(10, 10, 9, 0x0070, 0x10, 12, 1), + PIN_FIELD_BASE(11, 11, 8, 0x0060, 0x10, 2, 1), + PIN_FIELD_BASE(12, 12, 9, 0x0070, 0x10, 13, 1), + PIN_FIELD_BASE(13, 13, 6, 0x0060, 0x10, 1, 1), + PIN_FIELD_BASE(14, 14, 3, 0x0050, 0x10, 0, 1), + PIN_FIELD_BASE(15, 15, 6, 0x0060, 0x10, 2, 1), + PIN_FIELD_BASE(16, 16, 6, 0x0060, 0x10, 3, 1), + PIN_FIELD_BASE(17, 17, 6, 0x0060, 0x10, 4, 1), + PIN_FIELD_BASE(18, 18, 6, 0x0060, 0x10, 5, 1), + PIN_FIELD_BASE(19, 19, 6, 0x0060, 0x10, 6, 1), + PIN_FIELD_BASE(20, 20, 3, 0x0050, 0x10, 1, 1), + PIN_FIELD_BASE(21, 21, 2, 0x0050, 0x10, 3, 1), + PIN_FIELD_BASE(22, 22, 2, 0x0050, 0x10, 4, 1), + PIN_FIELD_BASE(23, 23, 2, 0x0050, 0x10, 5, 1), + PIN_FIELD_BASE(24, 24, 2, 0x0050, 0x10, 6, 1), + PIN_FIELD_BASE(25, 25, 2, 0x0050, 0x10, 7, 1), + PIN_FIELD_BASE(26, 26, 2, 0x0050, 0x10, 8, 1), + PIN_FIELD_BASE(27, 27, 2, 0x0050, 0x10, 9, 1), + PIN_FIELD_BASE(28, 28, 2, 0x0050, 0x10, 10, 1), + PIN_FIELD_BASE(29, 29, 2, 0x0050, 0x10, 11, 1), + PIN_FIELD_BASE(30, 30, 2, 0x0050, 0x10, 12, 1), + PIN_FIELD_BASE(31, 31, 2, 0x0050, 0x10, 13, 1), + PIN_FIELD_BASE(32, 32, 1, 0x0050, 0x10, 8, 1), 
+ PIN_FIELD_BASE(33, 33, 1, 0x0050, 0x10, 9, 1), + PIN_FIELD_BASE(34, 34, 1, 0x0050, 0x10, 10, 1), + PIN_FIELD_BASE(35, 35, 1, 0x0050, 0x10, 11, 1), + PIN_FIELD_BASE(36, 36, 1, 0x0050, 0x10, 12, 1), + PIN_FIELD_BASE(37, 37, 1, 0x0050, 0x10, 13, 1), + PIN_FIELD_BASE(38, 38, 1, 0x0050, 0x10, 14, 1), + PIN_FIELD_BASE(39, 39, 8, 0x0060, 0x10, 6, 1), + PIN_FIELD_BASE(40, 40, 8, 0x0060, 0x10, 3, 1), + PIN_FIELD_BASE(41, 41, 8, 0x0060, 0x10, 5, 1), + PIN_FIELD_BASE(42, 42, 8, 0x0060, 0x10, 4, 1), + PIN_FIELD_BASE(43, 43, 8, 0x0060, 0x10, 7, 1), + PIN_FIELD_BASE(44, 44, 8, 0x0060, 0x10, 8, 1), + PIN_FIELD_BASE(45, 45, 8, 0x0060, 0x10, 9, 1), + PIN_FIELD_BASE(46, 46, 8, 0x0060, 0x10, 10, 1), + PIN_FIELD_BASE(47, 47, 8, 0x0060, 0x10, 13, 1), + PIN_FIELD_BASE(48, 48, 8, 0x0060, 0x10, 11, 1), + PIN_FIELD_BASE(49, 49, 8, 0x0060, 0x10, 14, 1), + PIN_FIELD_BASE(50, 50, 8, 0x0060, 0x10, 12, 1), + PIN_FIELD_BASE(51, 51, 8, 0x0060, 0x10, 15, 1), + PIN_FIELD_BASE(52, 52, 9, 0x0070, 0x10, 7, 1), + PIN_FIELD_BASE(53, 53, 9, 0x0070, 0x10, 8, 1), + PIN_FIELD_BASE(54, 54, 9, 0x0070, 0x10, 2, 1), + PIN_FIELD_BASE(55, 55, 9, 0x0070, 0x10, 1, 1), + PIN_FIELD_BASE(56, 56, 9, 0x0070, 0x10, 5, 1), + PIN_FIELD_BASE(57, 57, 9, 0x0070, 0x10, 6, 1), + PIN_FIELD_BASE(58, 58, 9, 0x0070, 0x10, 3, 1), + PIN_FIELD_BASE(59, 59, 9, 0x0070, 0x10, 4, 1), + PIN_FIELD_BASE(60, 60, 9, 0x0070, 0x10, 19, 1), + PIN_FIELD_BASE(61, 61, 9, 0x0070, 0x10, 10, 1), + PIN_FIELD_BASE(62, 62, 9, 0x0070, 0x10, 9, 1), + PIN_FIELD_BASE(63, 63, 9, 0x0070, 0x10, 18, 1), + PIN_FIELD_BASE(64, 64, 9, 0x0070, 0x10, 0, 1), + PIN_FIELD_BASE(65, 65, 9, 0x0070, 0x10, 11, 1), + PIN_FIELD_BASE(66, 66, 9, 0x0070, 0x10, 24, 1), + PIN_FIELD_BASE(67, 67, 9, 0x0070, 0x10, 22, 1), + PIN_FIELD_BASE(68, 68, 9, 0x0070, 0x10, 21, 1), + PIN_FIELD_BASE(69, 69, 9, 0x0070, 0x10, 25, 1), + PIN_FIELD_BASE(70, 70, 9, 0x0070, 0x10, 16, 1), + PIN_FIELD_BASE(71, 71, 9, 0x0070, 0x10, 15, 1), + PIN_FIELD_BASE(72, 72, 9, 0x0070, 0x10, 23, 1), + PIN_FIELD_BASE(73, 73, 9, 0x0070, 0x10, 20, 1), + PIN_FIELD_BASE(74, 74, 9, 0x0070, 0x10, 17, 1), + PIN_FIELD_BASE(75, 75, 10, 0x0050, 0x10, 2, 1), + PIN_FIELD_BASE(76, 76, 10, 0x0050, 0x10, 3, 1), + PIN_FIELD_BASE(77, 77, 10, 0x0050, 0x10, 4, 1), + PIN_FIELD_BASE(78, 78, 10, 0x0050, 0x10, 5, 1), + PIN_FIELD_BASE(79, 79, 10, 0x0050, 0x10, 0, 1), + PIN_FIELD_BASE(80, 80, 10, 0x0050, 0x10, 1, 1), + PIN_FIELD_BASE(81, 81, 11, 0x0040, 0x10, 9, 1), + PIN_FIELD_BASE(82, 82, 11, 0x0040, 0x10, 10, 1), + PIN_FIELD_BASE(83, 83, 11, 0x0040, 0x10, 12, 1), + PIN_FIELD_BASE(84, 84, 11, 0x0040, 0x10, 11, 1), + PIN_FIELD_BASE(85, 85, 11, 0x0040, 0x10, 13, 1), + PIN_FIELD_BASE(86, 86, 11, 0x0040, 0x10, 14, 1), + PIN_FIELD_BASE(87, 87, 11, 0x0040, 0x10, 16, 1), + PIN_FIELD_BASE(88, 88, 11, 0x0040, 0x10, 15, 1), + PIN_FIELD_BASE(89, 89, 11, 0x0040, 0x10, 0, 1), + PIN_FIELD_BASE(90, 90, 11, 0x0040, 0x10, 8, 1), + PIN_FIELD_BASE(91, 91, 12, 0x0050, 0x10, 6, 1), + PIN_FIELD_BASE(92, 92, 12, 0x0050, 0x10, 7, 1), + PIN_FIELD_BASE(93, 93, 12, 0x0050, 0x10, 8, 1), + PIN_FIELD_BASE(94, 94, 12, 0x0050, 0x10, 4, 1), + PIN_FIELD_BASE(95, 95, 12, 0x0050, 0x10, 1, 1), + PIN_FIELD_BASE(96, 96, 12, 0x0050, 0x10, 3, 1), + PIN_FIELD_BASE(97, 97, 12, 0x0050, 0x10, 2, 1), + PIN_FIELD_BASE(98, 98, 12, 0x0050, 0x10, 5, 1), + PIN_FIELD_BASE(99, 99, 12, 0x0050, 0x10, 9, 1), + PIN_FIELD_BASE(100, 100, 12, 0x0050, 0x10, 12, 1), + PIN_FIELD_BASE(101, 101, 12, 0x0050, 0x10, 10, 1), + PIN_FIELD_BASE(102, 102, 12, 0x0050, 0x10, 13, 1), + PIN_FIELD_BASE(103, 103, 12, 0x0050, 0x10, 0, 1), + 
PIN_FIELD_BASE(104, 104, 12, 0x0050, 0x10, 11, 1), + PIN_FIELD_BASE(105, 105, 12, 0x0050, 0x10, 14, 1), + PIN_FIELD_BASE(106, 106, 5, 0x0050, 0x10, 0, 1), + PIN_FIELD_BASE(107, 107, 5, 0x0050, 0x10, 1, 1), + PIN_FIELD_BASE(108, 108, 5, 0x0050, 0x10, 3, 1), + PIN_FIELD_BASE(109, 109, 5, 0x0050, 0x10, 2, 1), + PIN_FIELD_BASE(110, 110, 5, 0x0050, 0x10, 4, 1), + PIN_FIELD_BASE(111, 111, 5, 0x0050, 0x10, 5, 1), + PIN_FIELD_BASE(112, 112, 5, 0x0050, 0x10, 7, 1), + PIN_FIELD_BASE(113, 113, 5, 0x0050, 0x10, 6, 1), + PIN_FIELD_BASE(114, 114, 5, 0x0050, 0x10, 8, 1), + PIN_FIELD_BASE(115, 115, 5, 0x0050, 0x10, 9, 1), + PIN_FIELD_BASE(116, 116, 5, 0x0050, 0x10, 11, 1), + PIN_FIELD_BASE(117, 117, 5, 0x0050, 0x10, 10, 1), + PIN_FIELD_BASE(118, 118, 6, 0x0060, 0x10, 9, 1), + PIN_FIELD_BASE(119, 119, 6, 0x0060, 0x10, 10, 1), + PIN_FIELD_BASE(120, 120, 6, 0x0060, 0x10, 12, 1), + PIN_FIELD_BASE(121, 121, 6, 0x0060, 0x10, 11, 1), + PIN_FIELD_BASE(122, 122, 6, 0x0060, 0x10, 0, 1), + PIN_FIELD_BASE(123, 123, 6, 0x0060, 0x10, 7, 1), + PIN_FIELD_BASE(124, 124, 6, 0x0060, 0x10, 8, 1), + PIN_FIELD_BASE(125, 125, 7, 0x0050, 0x10, 0, 1), + PIN_FIELD_BASE(126, 126, 7, 0x0050, 0x10, 1, 1), + PIN_FIELD_BASE(127, 127, 7, 0x0050, 0x10, 2, 1), + PIN_FIELD_BASE(128, 128, 7, 0x0050, 0x10, 3, 1), + PIN_FIELD_BASE(129, 129, 7, 0x0050, 0x10, 4, 1), + PIN_FIELD_BASE(130, 130, 7, 0x0050, 0x10, 5, 1), + PIN_FIELD_BASE(131, 131, 7, 0x0050, 0x10, 9, 1), + PIN_FIELD_BASE(132, 132, 7, 0x0050, 0x10, 11, 1), + PIN_FIELD_BASE(133, 133, 7, 0x0050, 0x10, 10, 1), + PIN_FIELD_BASE(134, 134, 7, 0x0050, 0x10, 6, 1), + PIN_FIELD_BASE(135, 135, 7, 0x0050, 0x10, 8, 1), + PIN_FIELD_BASE(136, 136, 7, 0x0050, 0x10, 7, 1), + PIN_FIELD_BASE(137, 137, 4, 0x0040, 0x10, 10, 1), + PIN_FIELD_BASE(138, 138, 4, 0x0040, 0x10, 11, 1), + PIN_FIELD_BASE(139, 139, 4, 0x0040, 0x10, 12, 1), + PIN_FIELD_BASE(140, 140, 4, 0x0040, 0x10, 13, 1), + PIN_FIELD_BASE(141, 141, 4, 0x0040, 0x10, 14, 1), + PIN_FIELD_BASE(142, 142, 4, 0x0040, 0x10, 15, 1), + PIN_FIELD_BASE(143, 143, 4, 0x0040, 0x10, 16, 1), + PIN_FIELD_BASE(144, 144, 4, 0x0040, 0x10, 17, 1), + PIN_FIELD_BASE(145, 145, 4, 0x0040, 0x10, 0, 1), + PIN_FIELD_BASE(146, 146, 4, 0x0040, 0x10, 1, 1), + PIN_FIELD_BASE(147, 147, 4, 0x0040, 0x10, 2, 1), + PIN_FIELD_BASE(148, 148, 4, 0x0040, 0x10, 3, 1), + PIN_FIELD_BASE(149, 149, 4, 0x0040, 0x10, 4, 1), + PIN_FIELD_BASE(150, 150, 4, 0x0040, 0x10, 5, 1), + PIN_FIELD_BASE(151, 151, 4, 0x0040, 0x10, 6, 1), + PIN_FIELD_BASE(152, 152, 4, 0x0040, 0x10, 7, 1), + PIN_FIELD_BASE(153, 153, 4, 0x0040, 0x10, 9, 1), + PIN_FIELD_BASE(154, 154, 4, 0x0040, 0x10, 8, 1), + PIN_FIELD_BASE(155, 155, 4, 0x0040, 0x10, 18, 1), + PIN_FIELD_BASE(156, 156, 4, 0x0040, 0x10, 19, 1), + PIN_FIELD_BASE(157, 157, 2, 0x0050, 0x10, 1, 1), + PIN_FIELD_BASE(158, 158, 2, 0x0050, 0x10, 2, 1), + PIN_FIELD_BASE(159, 159, 2, 0x0050, 0x10, 0, 1), + PIN_FIELD_BASE(160, 160, 3, 0x0050, 0x10, 22, 1), + PIN_FIELD_BASE(161, 161, 3, 0x0050, 0x10, 20, 1), + PIN_FIELD_BASE(162, 162, 3, 0x0050, 0x10, 23, 1), + PIN_FIELD_BASE(163, 163, 3, 0x0050, 0x10, 21, 1), + PIN_FIELD_BASE(164, 164, 3, 0x0050, 0x10, 12, 1), + PIN_FIELD_BASE(165, 165, 3, 0x0050, 0x10, 14, 1), + PIN_FIELD_BASE(166, 166, 3, 0x0050, 0x10, 13, 1), + PIN_FIELD_BASE(167, 167, 3, 0x0050, 0x10, 15, 1), + PIN_FIELD_BASE(168, 168, 3, 0x0050, 0x10, 16, 1), + PIN_FIELD_BASE(169, 169, 3, 0x0050, 0x10, 17, 1), + PIN_FIELD_BASE(170, 170, 3, 0x0050, 0x10, 19, 1), + PIN_FIELD_BASE(171, 171, 3, 0x0050, 0x10, 18, 1), + PIN_FIELD_BASE(172, 172, 3, 0x0050, 0x10, 10, 1), + 
PIN_FIELD_BASE(173, 173, 3, 0x0050, 0x10, 11, 1), + PIN_FIELD_BASE(174, 174, 1, 0x0050, 0x10, 15, 1), + PIN_FIELD_BASE(175, 175, 1, 0x0050, 0x10, 16, 1), + PIN_FIELD_BASE(176, 176, 1, 0x0050, 0x10, 17, 1), + PIN_FIELD_BASE(177, 177, 1, 0x0050, 0x10, 18, 1), + PIN_FIELD_BASE(178, 178, 1, 0x0050, 0x10, 6, 1), + PIN_FIELD_BASE(179, 179, 1, 0x0050, 0x10, 7, 1), + PIN_FIELD_BASE(180, 180, 1, 0x0050, 0x10, 0, 1), + PIN_FIELD_BASE(181, 181, 1, 0x0050, 0x10, 1, 1), + PIN_FIELD_BASE(182, 182, 1, 0x0050, 0x10, 2, 1), + PIN_FIELD_BASE(183, 183, 1, 0x0050, 0x10, 3, 1), + PIN_FIELD_BASE(184, 184, 1, 0x0050, 0x10, 4, 1), + PIN_FIELD_BASE(185, 185, 1, 0x0050, 0x10, 5, 1), + PIN_FIELD_BASE(186, 186, 13, 0x0090, 0x10, 4, 1), + PIN_FIELD_BASE(187, 187, 13, 0x0090, 0x10, 5, 1), + PIN_FIELD_BASE(188, 188, 13, 0x0090, 0x10, 12, 1), + PIN_FIELD_BASE(189, 189, 13, 0x0090, 0x10, 17, 1), + PIN_FIELD_BASE(190, 190, 13, 0x0090, 0x10, 13, 1), + PIN_FIELD_BASE(191, 191, 13, 0x0090, 0x10, 18, 1), + PIN_FIELD_BASE(192, 192, 13, 0x0090, 0x10, 0, 1), + PIN_FIELD_BASE(193, 193, 13, 0x0090, 0x10, 6, 1), + PIN_FIELD_BASE(194, 194, 13, 0x0090, 0x10, 14, 1), + PIN_FIELD_BASE(195, 195, 13, 0x0090, 0x10, 19, 1), + PIN_FIELD_BASE(196, 196, 13, 0x0090, 0x10, 1, 1), + PIN_FIELD_BASE(197, 197, 13, 0x0090, 0x10, 7, 1), + PIN_FIELD_BASE(198, 198, 13, 0x0090, 0x10, 15, 1), + PIN_FIELD_BASE(199, 199, 13, 0x0090, 0x10, 20, 1), + PIN_FIELD_BASE(200, 200, 13, 0x0090, 0x10, 22, 1), + PIN_FIELD_BASE(201, 201, 13, 0x0090, 0x10, 25, 1), + PIN_FIELD_BASE(202, 202, 13, 0x0090, 0x10, 16, 1), + PIN_FIELD_BASE(203, 203, 13, 0x0090, 0x10, 21, 1), + PIN_FIELD_BASE(204, 204, 13, 0x0090, 0x10, 2, 1), + PIN_FIELD_BASE(205, 205, 13, 0x0090, 0x10, 3, 1), + PIN_FIELD_BASE(206, 206, 13, 0x0090, 0x10, 8, 1), + PIN_FIELD_BASE(207, 207, 13, 0x0090, 0x10, 9, 1), + PIN_FIELD_BASE(208, 208, 13, 0x0090, 0x10, 10, 1), + PIN_FIELD_BASE(209, 209, 13, 0x0090, 0x10, 11, 1), + PIN_FIELD_BASE(210, 210, 14, 0x0060, 0x10, 0, 1), + PIN_FIELD_BASE(211, 211, 14, 0x0060, 0x10, 1, 1), + PIN_FIELD_BASE(212, 212, 14, 0x0060, 0x10, 2, 1), + PIN_FIELD_BASE(213, 213, 14, 0x0060, 0x10, 3, 1), + PIN_FIELD_BASE(214, 214, 13, 0x0090, 0x10, 23, 1), + PIN_FIELD_BASE(215, 215, 13, 0x0090, 0x10, 24, 1), + PIN_FIELD_BASE(216, 216, 14, 0x0060, 0x10, 13, 1), + PIN_FIELD_BASE(217, 217, 14, 0x0060, 0x10, 5, 1), + PIN_FIELD_BASE(218, 218, 14, 0x0060, 0x10, 6, 1), + PIN_FIELD_BASE(219, 219, 14, 0x0060, 0x10, 4, 1), + PIN_FIELD_BASE(220, 220, 14, 0x0060, 0x10, 22, 1), + PIN_FIELD_BASE(221, 221, 14, 0x0060, 0x10, 23, 1), + PIN_FIELD_BASE(222, 222, 14, 0x0060, 0x10, 25, 1), + PIN_FIELD_BASE(223, 223, 14, 0x0060, 0x10, 24, 1), + PIN_FIELD_BASE(224, 224, 14, 0x0060, 0x10, 7, 1), + PIN_FIELD_BASE(225, 225, 14, 0x0060, 0x10, 8, 1), + PIN_FIELD_BASE(226, 226, 14, 0x0060, 0x10, 9, 1), + PIN_FIELD_BASE(227, 227, 14, 0x0060, 0x10, 10, 1), + PIN_FIELD_BASE(228, 228, 14, 0x0060, 0x10, 11, 1), + PIN_FIELD_BASE(229, 229, 14, 0x0060, 0x10, 12, 1), + PIN_FIELD_BASE(230, 230, 15, 0x0040, 0x10, 13, 1), + PIN_FIELD_BASE(231, 231, 15, 0x0040, 0x10, 14, 1), + PIN_FIELD_BASE(232, 232, 15, 0x0040, 0x10, 10, 1), + PIN_FIELD_BASE(233, 233, 15, 0x0040, 0x10, 0, 1), + PIN_FIELD_BASE(234, 234, 15, 0x0040, 0x10, 3, 1), + PIN_FIELD_BASE(235, 235, 15, 0x0040, 0x10, 1, 1), + PIN_FIELD_BASE(236, 236, 15, 0x0040, 0x10, 2, 1), + PIN_FIELD_BASE(237, 237, 15, 0x0040, 0x10, 6, 1), + PIN_FIELD_BASE(238, 238, 15, 0x0040, 0x10, 5, 1), + PIN_FIELD_BASE(239, 239, 15, 0x0040, 0x10, 23, 1), + PIN_FIELD_BASE(240, 240, 15, 0x0040, 0x10, 22, 1), 
+ PIN_FIELD_BASE(241, 241, 15, 0x0040, 0x10, 16, 1), + PIN_FIELD_BASE(242, 242, 15, 0x0040, 0x10, 17, 1), + PIN_FIELD_BASE(243, 243, 15, 0x0040, 0x10, 15, 1), + PIN_FIELD_BASE(244, 244, 15, 0x0040, 0x10, 12, 1), + PIN_FIELD_BASE(245, 245, 15, 0x0040, 0x10, 9, 1), + PIN_FIELD_BASE(246, 246, 15, 0x0040, 0x10, 8, 1), + PIN_FIELD_BASE(247, 247, 15, 0x0040, 0x10, 7, 1), + PIN_FIELD_BASE(248, 248, 15, 0x0040, 0x10, 4, 1), + PIN_FIELD_BASE(249, 249, 15, 0x0040, 0x10, 24, 1), + PIN_FIELD_BASE(250, 250, 15, 0x0040, 0x10, 11, 1), + PIN_FIELD_BASE(251, 251, 3, 0x0050, 0x10, 2, 1), + PIN_FIELD_BASE(252, 252, 3, 0x0050, 0x10, 3, 1), + PIN_FIELD_BASE(253, 253, 3, 0x0050, 0x10, 4, 1), + PIN_FIELD_BASE(254, 254, 3, 0x0050, 0x10, 5, 1), + PIN_FIELD_BASE(255, 255, 3, 0x0050, 0x10, 6, 1), + PIN_FIELD_BASE(256, 256, 3, 0x0050, 0x10, 7, 1), + PIN_FIELD_BASE(257, 257, 3, 0x0050, 0x10, 8, 1), + PIN_FIELD_BASE(258, 258, 3, 0x0050, 0x10, 9, 1), + PIN_FIELD_BASE(259, 259, 14, 0x0060, 0x10, 14, 1), + PIN_FIELD_BASE(260, 260, 14, 0x0060, 0x10, 15, 1), + PIN_FIELD_BASE(261, 261, 14, 0x0060, 0x10, 16, 1), + PIN_FIELD_BASE(262, 262, 14, 0x0060, 0x10, 17, 1), + PIN_FIELD_BASE(263, 263, 14, 0x0060, 0x10, 18, 1), + PIN_FIELD_BASE(264, 264, 14, 0x0060, 0x10, 19, 1), + PIN_FIELD_BASE(265, 265, 14, 0x0060, 0x10, 20, 1), + PIN_FIELD_BASE(266, 266, 14, 0x0060, 0x10, 21, 1), + PIN_FIELD_BASE(267, 267, 15, 0x0040, 0x10, 20, 1), + PIN_FIELD_BASE(268, 268, 15, 0x0040, 0x10, 21, 1), + PIN_FIELD_BASE(269, 269, 15, 0x0040, 0x10, 18, 1), + PIN_FIELD_BASE(270, 270, 15, 0x0040, 0x10, 19, 1), +}; + +static const struct mtk_pin_field_calc mt8196_pin_pupd_range[] = { + PIN_FIELD_BASE(60, 60, 9, 0x00b0, 0x10, 0, 1), + PIN_FIELD_BASE(125, 125, 7, 0x0080, 0x10, 0, 1), + PIN_FIELD_BASE(126, 126, 7, 0x0080, 0x10, 1, 1), + PIN_FIELD_BASE(127, 127, 7, 0x0080, 0x10, 2, 1), + PIN_FIELD_BASE(128, 128, 7, 0x0080, 0x10, 3, 1), + PIN_FIELD_BASE(129, 129, 7, 0x0080, 0x10, 4, 1), + PIN_FIELD_BASE(130, 130, 7, 0x0080, 0x10, 5, 1), + PIN_FIELD_BASE(131, 131, 7, 0x0080, 0x10, 9, 1), + PIN_FIELD_BASE(132, 132, 7, 0x0080, 0x10, 11, 1), + PIN_FIELD_BASE(133, 133, 7, 0x0080, 0x10, 10, 1), + PIN_FIELD_BASE(134, 134, 7, 0x0080, 0x10, 6, 1), + PIN_FIELD_BASE(135, 135, 7, 0x0080, 0x10, 8, 1), + PIN_FIELD_BASE(136, 136, 7, 0x0080, 0x10, 7, 1), + PIN_FIELD_BASE(137, 137, 4, 0x0070, 0x10, 10, 1), + PIN_FIELD_BASE(138, 138, 4, 0x0070, 0x10, 11, 1), + PIN_FIELD_BASE(139, 139, 4, 0x0070, 0x10, 12, 1), + PIN_FIELD_BASE(140, 140, 4, 0x0070, 0x10, 13, 1), + PIN_FIELD_BASE(141, 141, 4, 0x0070, 0x10, 14, 1), + PIN_FIELD_BASE(142, 142, 4, 0x0070, 0x10, 15, 1), + PIN_FIELD_BASE(143, 143, 4, 0x0070, 0x10, 16, 1), + PIN_FIELD_BASE(144, 144, 4, 0x0070, 0x10, 17, 1), + PIN_FIELD_BASE(145, 145, 4, 0x0070, 0x10, 0, 1), + PIN_FIELD_BASE(146, 146, 4, 0x0070, 0x10, 1, 1), + PIN_FIELD_BASE(147, 147, 4, 0x0070, 0x10, 2, 1), + PIN_FIELD_BASE(148, 148, 4, 0x0070, 0x10, 3, 1), + PIN_FIELD_BASE(149, 149, 4, 0x0070, 0x10, 4, 1), + PIN_FIELD_BASE(150, 150, 4, 0x0070, 0x10, 5, 1), + PIN_FIELD_BASE(151, 151, 4, 0x0070, 0x10, 6, 1), + PIN_FIELD_BASE(152, 152, 4, 0x0070, 0x10, 7, 1), + PIN_FIELD_BASE(153, 153, 4, 0x0070, 0x10, 9, 1), + PIN_FIELD_BASE(154, 154, 4, 0x0070, 0x10, 8, 1), + PIN_FIELD_BASE(155, 155, 4, 0x0070, 0x10, 18, 1), + PIN_FIELD_BASE(156, 156, 4, 0x0070, 0x10, 19, 1), + PIN_FIELD_BASE(217, 217, 14, 0x00a0, 0x10, 1, 1), + PIN_FIELD_BASE(218, 218, 14, 0x00a0, 0x10, 2, 1), + PIN_FIELD_BASE(219, 219, 14, 0x00a0, 0x10, 0, 1), + PIN_FIELD_BASE(224, 224, 14, 0x00a0, 0x10, 3, 1), + 
PIN_FIELD_BASE(225, 225, 14, 0x00a0, 0x10, 4, 1), + PIN_FIELD_BASE(226, 226, 14, 0x00a0, 0x10, 5, 1), + PIN_FIELD_BASE(227, 227, 14, 0x00a0, 0x10, 6, 1), + PIN_FIELD_BASE(228, 228, 14, 0x00a0, 0x10, 7, 1), + PIN_FIELD_BASE(229, 229, 14, 0x00a0, 0x10, 8, 1), + PIN_FIELD_BASE(259, 259, 14, 0x00a0, 0x10, 9, 1), + PIN_FIELD_BASE(260, 260, 14, 0x00a0, 0x10, 10, 1), + PIN_FIELD_BASE(261, 261, 14, 0x00a0, 0x10, 11, 1), + PIN_FIELD_BASE(262, 262, 14, 0x00a0, 0x10, 12, 1), + PIN_FIELD_BASE(263, 263, 14, 0x00a0, 0x10, 13, 1), + PIN_FIELD_BASE(264, 264, 14, 0x00a0, 0x10, 14, 1), + PIN_FIELD_BASE(265, 265, 14, 0x00a0, 0x10, 15, 1), + PIN_FIELD_BASE(266, 266, 14, 0x00a0, 0x10, 16, 1), + PIN_FIELD_BASE(267, 267, 15, 0x0080, 0x10, 2, 1), + PIN_FIELD_BASE(268, 268, 15, 0x0080, 0x10, 3, 1), + PIN_FIELD_BASE(269, 269, 15, 0x0080, 0x10, 0, 1), + PIN_FIELD_BASE(270, 270, 15, 0x0080, 0x10, 1, 1), +}; + +static const struct mtk_pin_field_calc mt8196_pin_r0_range[] = { + PIN_FIELD_BASE(60, 60, 9, 0x00d0, 0x10, 0, 1), + PIN_FIELD_BASE(125, 125, 7, 0x0090, 0x10, 0, 1), + PIN_FIELD_BASE(126, 126, 7, 0x0090, 0x10, 1, 1), + PIN_FIELD_BASE(127, 127, 7, 0x0090, 0x10, 2, 1), + PIN_FIELD_BASE(128, 128, 7, 0x0090, 0x10, 3, 1), + PIN_FIELD_BASE(129, 129, 7, 0x0090, 0x10, 4, 1), + PIN_FIELD_BASE(130, 130, 7, 0x0090, 0x10, 5, 1), + PIN_FIELD_BASE(131, 131, 7, 0x0090, 0x10, 9, 1), + PIN_FIELD_BASE(132, 132, 7, 0x0090, 0x10, 11, 1), + PIN_FIELD_BASE(133, 133, 7, 0x0090, 0x10, 10, 1), + PIN_FIELD_BASE(134, 134, 7, 0x0090, 0x10, 6, 1), + PIN_FIELD_BASE(135, 135, 7, 0x0090, 0x10, 8, 1), + PIN_FIELD_BASE(136, 136, 7, 0x0090, 0x10, 7, 1), + PIN_FIELD_BASE(137, 137, 4, 0x0080, 0x10, 10, 1), + PIN_FIELD_BASE(138, 138, 4, 0x0080, 0x10, 11, 1), + PIN_FIELD_BASE(139, 139, 4, 0x0080, 0x10, 12, 1), + PIN_FIELD_BASE(140, 140, 4, 0x0080, 0x10, 13, 1), + PIN_FIELD_BASE(141, 141, 4, 0x0080, 0x10, 14, 1), + PIN_FIELD_BASE(142, 142, 4, 0x0080, 0x10, 15, 1), + PIN_FIELD_BASE(143, 143, 4, 0x0080, 0x10, 16, 1), + PIN_FIELD_BASE(144, 144, 4, 0x0080, 0x10, 17, 1), + PIN_FIELD_BASE(145, 145, 4, 0x0080, 0x10, 0, 1), + PIN_FIELD_BASE(146, 146, 4, 0x0080, 0x10, 1, 1), + PIN_FIELD_BASE(147, 147, 4, 0x0080, 0x10, 2, 1), + PIN_FIELD_BASE(148, 148, 4, 0x0080, 0x10, 3, 1), + PIN_FIELD_BASE(149, 149, 4, 0x0080, 0x10, 4, 1), + PIN_FIELD_BASE(150, 150, 4, 0x0080, 0x10, 5, 1), + PIN_FIELD_BASE(151, 151, 4, 0x0080, 0x10, 6, 1), + PIN_FIELD_BASE(152, 152, 4, 0x0080, 0x10, 7, 1), + PIN_FIELD_BASE(153, 153, 4, 0x0080, 0x10, 9, 1), + PIN_FIELD_BASE(154, 154, 4, 0x0080, 0x10, 8, 1), + PIN_FIELD_BASE(155, 155, 4, 0x0080, 0x10, 18, 1), + PIN_FIELD_BASE(156, 156, 4, 0x0080, 0x10, 19, 1), + PIN_FIELD_BASE(217, 217, 14, 0x00c0, 0x10, 1, 1), + PIN_FIELD_BASE(218, 218, 14, 0x00c0, 0x10, 2, 1), + PIN_FIELD_BASE(219, 219, 14, 0x00c0, 0x10, 0, 1), + PIN_FIELD_BASE(224, 224, 14, 0x00c0, 0x10, 3, 1), + PIN_FIELD_BASE(225, 225, 14, 0x00c0, 0x10, 4, 1), + PIN_FIELD_BASE(226, 226, 14, 0x00c0, 0x10, 5, 1), + PIN_FIELD_BASE(227, 227, 14, 0x00c0, 0x10, 6, 1), + PIN_FIELD_BASE(228, 228, 14, 0x00c0, 0x10, 7, 1), + PIN_FIELD_BASE(229, 229, 14, 0x00c0, 0x10, 8, 1), + PIN_FIELD_BASE(259, 259, 14, 0x00c0, 0x10, 9, 1), + PIN_FIELD_BASE(260, 260, 14, 0x00c0, 0x10, 10, 1), + PIN_FIELD_BASE(261, 261, 14, 0x00c0, 0x10, 11, 1), + PIN_FIELD_BASE(262, 262, 14, 0x00c0, 0x10, 12, 1), + PIN_FIELD_BASE(263, 263, 14, 0x00c0, 0x10, 13, 1), + PIN_FIELD_BASE(264, 264, 14, 0x00c0, 0x10, 14, 1), + PIN_FIELD_BASE(265, 265, 14, 0x00c0, 0x10, 15, 1), + PIN_FIELD_BASE(266, 266, 14, 0x00c0, 0x10, 16, 1), + 
PIN_FIELD_BASE(267, 267, 15, 0x00a0, 0x10, 2, 1), + PIN_FIELD_BASE(268, 268, 15, 0x00a0, 0x10, 3, 1), + PIN_FIELD_BASE(269, 269, 15, 0x00a0, 0x10, 0, 1), + PIN_FIELD_BASE(270, 270, 15, 0x00a0, 0x10, 1, 1), +}; + +static const struct mtk_pin_field_calc mt8196_pin_r1_range[] = { + PIN_FIELD_BASE(60, 60, 9, 0x00e0, 0x10, 0, 1), + PIN_FIELD_BASE(125, 125, 7, 0x00a0, 0x10, 0, 1), + PIN_FIELD_BASE(126, 126, 7, 0x00a0, 0x10, 1, 1), + PIN_FIELD_BASE(127, 127, 7, 0x00a0, 0x10, 2, 1), + PIN_FIELD_BASE(128, 128, 7, 0x00a0, 0x10, 3, 1), + PIN_FIELD_BASE(129, 129, 7, 0x00a0, 0x10, 4, 1), + PIN_FIELD_BASE(130, 130, 7, 0x00a0, 0x10, 5, 1), + PIN_FIELD_BASE(131, 131, 7, 0x00a0, 0x10, 9, 1), + PIN_FIELD_BASE(132, 132, 7, 0x00a0, 0x10, 11, 1), + PIN_FIELD_BASE(133, 133, 7, 0x00a0, 0x10, 10, 1), + PIN_FIELD_BASE(134, 134, 7, 0x00a0, 0x10, 6, 1), + PIN_FIELD_BASE(135, 135, 7, 0x00a0, 0x10, 8, 1), + PIN_FIELD_BASE(136, 136, 7, 0x00a0, 0x10, 7, 1), + PIN_FIELD_BASE(137, 137, 4, 0x0090, 0x10, 10, 1), + PIN_FIELD_BASE(138, 138, 4, 0x0090, 0x10, 11, 1), + PIN_FIELD_BASE(139, 139, 4, 0x0090, 0x10, 12, 1), + PIN_FIELD_BASE(140, 140, 4, 0x0090, 0x10, 13, 1), + PIN_FIELD_BASE(141, 141, 4, 0x0090, 0x10, 14, 1), + PIN_FIELD_BASE(142, 142, 4, 0x0090, 0x10, 15, 1), + PIN_FIELD_BASE(143, 143, 4, 0x0090, 0x10, 16, 1), + PIN_FIELD_BASE(144, 144, 4, 0x0090, 0x10, 17, 1), + PIN_FIELD_BASE(145, 145, 4, 0x0090, 0x10, 0, 1), + PIN_FIELD_BASE(146, 146, 4, 0x0090, 0x10, 1, 1), + PIN_FIELD_BASE(147, 147, 4, 0x0090, 0x10, 2, 1), + PIN_FIELD_BASE(148, 148, 4, 0x0090, 0x10, 3, 1), + PIN_FIELD_BASE(149, 149, 4, 0x0090, 0x10, 4, 1), + PIN_FIELD_BASE(150, 150, 4, 0x0090, 0x10, 5, 1), + PIN_FIELD_BASE(151, 151, 4, 0x0090, 0x10, 6, 1), + PIN_FIELD_BASE(152, 152, 4, 0x0090, 0x10, 7, 1), + PIN_FIELD_BASE(153, 153, 4, 0x0090, 0x10, 9, 1), + PIN_FIELD_BASE(154, 154, 4, 0x0090, 0x10, 8, 1), + PIN_FIELD_BASE(155, 155, 4, 0x0090, 0x10, 18, 1), + PIN_FIELD_BASE(156, 156, 4, 0x0090, 0x10, 19, 1), + PIN_FIELD_BASE(217, 217, 14, 0x00d0, 0x10, 1, 1), + PIN_FIELD_BASE(218, 218, 14, 0x00d0, 0x10, 2, 1), + PIN_FIELD_BASE(219, 219, 14, 0x00d0, 0x10, 0, 1), + PIN_FIELD_BASE(224, 224, 14, 0x00d0, 0x10, 3, 1), + PIN_FIELD_BASE(225, 225, 14, 0x00d0, 0x10, 4, 1), + PIN_FIELD_BASE(226, 226, 14, 0x00d0, 0x10, 5, 1), + PIN_FIELD_BASE(227, 227, 14, 0x00d0, 0x10, 6, 1), + PIN_FIELD_BASE(228, 228, 14, 0x00d0, 0x10, 7, 1), + PIN_FIELD_BASE(229, 229, 14, 0x00d0, 0x10, 8, 1), + PIN_FIELD_BASE(259, 259, 14, 0x00d0, 0x10, 9, 1), + PIN_FIELD_BASE(260, 260, 14, 0x00d0, 0x10, 10, 1), + PIN_FIELD_BASE(261, 261, 14, 0x00d0, 0x10, 11, 1), + PIN_FIELD_BASE(262, 262, 14, 0x00d0, 0x10, 12, 1), + PIN_FIELD_BASE(263, 263, 14, 0x00d0, 0x10, 13, 1), + PIN_FIELD_BASE(264, 264, 14, 0x00d0, 0x10, 14, 1), + PIN_FIELD_BASE(265, 265, 14, 0x00d0, 0x10, 15, 1), + PIN_FIELD_BASE(266, 266, 14, 0x00d0, 0x10, 16, 1), + PIN_FIELD_BASE(267, 267, 15, 0x00b0, 0x10, 2, 1), + PIN_FIELD_BASE(268, 268, 15, 0x00b0, 0x10, 3, 1), + PIN_FIELD_BASE(269, 269, 15, 0x00b0, 0x10, 0, 1), + PIN_FIELD_BASE(270, 270, 15, 0x00b0, 0x10, 1, 1), +}; + +static const struct mtk_pin_field_calc mt8196_pin_pu_range[] = { + PIN_FIELD_BASE(0, 0, 8, 0x00a0, 0x10, 0, 1), + PIN_FIELD_BASE(1, 1, 8, 0x00a0, 0x10, 1, 1), + PIN_FIELD_BASE(2, 2, 11, 0x0080, 0x10, 1, 1), + PIN_FIELD_BASE(3, 3, 11, 0x0080, 0x10, 2, 1), + PIN_FIELD_BASE(4, 4, 11, 0x0080, 0x10, 3, 1), + PIN_FIELD_BASE(5, 5, 11, 0x0080, 0x10, 4, 1), + PIN_FIELD_BASE(6, 6, 11, 0x0080, 0x10, 5, 1), + PIN_FIELD_BASE(7, 7, 11, 0x0080, 0x10, 6, 1), + PIN_FIELD_BASE(8, 8, 11, 
0x0080, 0x10, 7, 1), + PIN_FIELD_BASE(9, 9, 9, 0x00c0, 0x10, 14, 1), + PIN_FIELD_BASE(10, 10, 9, 0x00c0, 0x10, 12, 1), + PIN_FIELD_BASE(11, 11, 8, 0x00a0, 0x10, 2, 1), + PIN_FIELD_BASE(12, 12, 9, 0x00c0, 0x10, 13, 1), + PIN_FIELD_BASE(13, 13, 6, 0x00b0, 0x10, 1, 1), + PIN_FIELD_BASE(14, 14, 3, 0x0090, 0x10, 0, 1), + PIN_FIELD_BASE(15, 15, 6, 0x00b0, 0x10, 2, 1), + PIN_FIELD_BASE(16, 16, 6, 0x00b0, 0x10, 3, 1), + PIN_FIELD_BASE(17, 17, 6, 0x00b0, 0x10, 4, 1), + PIN_FIELD_BASE(18, 18, 6, 0x00b0, 0x10, 5, 1), + PIN_FIELD_BASE(19, 19, 6, 0x00b0, 0x10, 6, 1), + PIN_FIELD_BASE(20, 20, 3, 0x0090, 0x10, 1, 1), + PIN_FIELD_BASE(21, 21, 2, 0x0090, 0x10, 3, 1), + PIN_FIELD_BASE(22, 22, 2, 0x0090, 0x10, 4, 1), + PIN_FIELD_BASE(23, 23, 2, 0x0090, 0x10, 5, 1), + PIN_FIELD_BASE(24, 24, 2, 0x0090, 0x10, 6, 1), + PIN_FIELD_BASE(25, 25, 2, 0x0090, 0x10, 7, 1), + PIN_FIELD_BASE(26, 26, 2, 0x0090, 0x10, 8, 1), + PIN_FIELD_BASE(27, 27, 2, 0x0090, 0x10, 9, 1), + PIN_FIELD_BASE(28, 28, 2, 0x0090, 0x10, 10, 1), + PIN_FIELD_BASE(29, 29, 2, 0x0090, 0x10, 11, 1), + PIN_FIELD_BASE(30, 30, 2, 0x0090, 0x10, 12, 1), + PIN_FIELD_BASE(31, 31, 2, 0x0090, 0x10, 13, 1), + PIN_FIELD_BASE(32, 32, 1, 0x0090, 0x10, 8, 1), + PIN_FIELD_BASE(33, 33, 1, 0x0090, 0x10, 9, 1), + PIN_FIELD_BASE(34, 34, 1, 0x0090, 0x10, 10, 1), + PIN_FIELD_BASE(35, 35, 1, 0x0090, 0x10, 11, 1), + PIN_FIELD_BASE(36, 36, 1, 0x0090, 0x10, 12, 1), + PIN_FIELD_BASE(37, 37, 1, 0x0090, 0x10, 13, 1), + PIN_FIELD_BASE(38, 38, 1, 0x0090, 0x10, 14, 1), + PIN_FIELD_BASE(39, 39, 8, 0x00a0, 0x10, 6, 1), + PIN_FIELD_BASE(40, 40, 8, 0x00a0, 0x10, 3, 1), + PIN_FIELD_BASE(41, 41, 8, 0x00a0, 0x10, 5, 1), + PIN_FIELD_BASE(42, 42, 8, 0x00a0, 0x10, 4, 1), + PIN_FIELD_BASE(43, 43, 8, 0x00a0, 0x10, 7, 1), + PIN_FIELD_BASE(44, 44, 8, 0x00a0, 0x10, 8, 1), + PIN_FIELD_BASE(45, 45, 8, 0x00a0, 0x10, 9, 1), + PIN_FIELD_BASE(46, 46, 8, 0x00a0, 0x10, 10, 1), + PIN_FIELD_BASE(47, 47, 8, 0x00a0, 0x10, 13, 1), + PIN_FIELD_BASE(48, 48, 8, 0x00a0, 0x10, 11, 1), + PIN_FIELD_BASE(49, 49, 8, 0x00a0, 0x10, 14, 1), + PIN_FIELD_BASE(50, 50, 8, 0x00a0, 0x10, 12, 1), + PIN_FIELD_BASE(51, 51, 8, 0x00a0, 0x10, 15, 1), + PIN_FIELD_BASE(52, 52, 9, 0x00c0, 0x10, 7, 1), + PIN_FIELD_BASE(53, 53, 9, 0x00c0, 0x10, 8, 1), + PIN_FIELD_BASE(54, 54, 9, 0x00c0, 0x10, 2, 1), + PIN_FIELD_BASE(55, 55, 9, 0x00c0, 0x10, 1, 1), + PIN_FIELD_BASE(56, 56, 9, 0x00c0, 0x10, 5, 1), + PIN_FIELD_BASE(57, 57, 9, 0x00c0, 0x10, 6, 1), + PIN_FIELD_BASE(58, 58, 9, 0x00c0, 0x10, 3, 1), + PIN_FIELD_BASE(59, 59, 9, 0x00c0, 0x10, 4, 1), + PIN_FIELD_BASE(61, 61, 9, 0x00c0, 0x10, 10, 1), + PIN_FIELD_BASE(62, 62, 9, 0x00c0, 0x10, 9, 1), + PIN_FIELD_BASE(63, 63, 9, 0x00c0, 0x10, 18, 1), + PIN_FIELD_BASE(64, 64, 9, 0x00c0, 0x10, 0, 1), + PIN_FIELD_BASE(65, 65, 9, 0x00c0, 0x10, 11, 1), + PIN_FIELD_BASE(66, 66, 9, 0x00c0, 0x10, 24, 1), + PIN_FIELD_BASE(67, 67, 9, 0x00c0, 0x10, 21, 1), + PIN_FIELD_BASE(68, 68, 9, 0x00c0, 0x10, 20, 1), + PIN_FIELD_BASE(69, 69, 9, 0x00c0, 0x10, 25, 1), + PIN_FIELD_BASE(70, 70, 9, 0x00c0, 0x10, 16, 1), + PIN_FIELD_BASE(71, 71, 9, 0x00c0, 0x10, 15, 1), + PIN_FIELD_BASE(72, 72, 9, 0x00c0, 0x10, 23, 1), + PIN_FIELD_BASE(73, 73, 9, 0x00c0, 0x10, 19, 1), + PIN_FIELD_BASE(74, 74, 9, 0x00c0, 0x10, 17, 1), + PIN_FIELD_BASE(75, 75, 10, 0x0090, 0x10, 2, 1), + PIN_FIELD_BASE(76, 76, 10, 0x0090, 0x10, 3, 1), + PIN_FIELD_BASE(77, 77, 10, 0x0090, 0x10, 4, 1), + PIN_FIELD_BASE(78, 78, 10, 0x0090, 0x10, 5, 1), + PIN_FIELD_BASE(79, 79, 10, 0x0090, 0x10, 0, 1), + PIN_FIELD_BASE(80, 80, 10, 0x0090, 0x10, 1, 1), + PIN_FIELD_BASE(81, 
81, 11, 0x0080, 0x10, 9, 1), + PIN_FIELD_BASE(82, 82, 11, 0x0080, 0x10, 10, 1), + PIN_FIELD_BASE(83, 83, 11, 0x0080, 0x10, 12, 1), + PIN_FIELD_BASE(84, 84, 11, 0x0080, 0x10, 11, 1), + PIN_FIELD_BASE(85, 85, 11, 0x0080, 0x10, 13, 1), + PIN_FIELD_BASE(86, 86, 11, 0x0080, 0x10, 14, 1), + PIN_FIELD_BASE(87, 87, 11, 0x0080, 0x10, 16, 1), + PIN_FIELD_BASE(88, 88, 11, 0x0080, 0x10, 15, 1), + PIN_FIELD_BASE(89, 89, 11, 0x0080, 0x10, 0, 1), + PIN_FIELD_BASE(90, 90, 11, 0x0080, 0x10, 8, 1), + PIN_FIELD_BASE(91, 91, 12, 0x0090, 0x10, 6, 1), + PIN_FIELD_BASE(92, 92, 12, 0x0090, 0x10, 7, 1), + PIN_FIELD_BASE(93, 93, 12, 0x0090, 0x10, 8, 1), + PIN_FIELD_BASE(94, 94, 12, 0x0090, 0x10, 4, 1), + PIN_FIELD_BASE(95, 95, 12, 0x0090, 0x10, 1, 1), + PIN_FIELD_BASE(96, 96, 12, 0x0090, 0x10, 3, 1), + PIN_FIELD_BASE(97, 97, 12, 0x0090, 0x10, 2, 1), + PIN_FIELD_BASE(98, 98, 12, 0x0090, 0x10, 5, 1), + PIN_FIELD_BASE(99, 99, 12, 0x0090, 0x10, 9, 1), + PIN_FIELD_BASE(100, 100, 12, 0x0090, 0x10, 12, 1), + PIN_FIELD_BASE(101, 101, 12, 0x0090, 0x10, 10, 1), + PIN_FIELD_BASE(102, 102, 12, 0x0090, 0x10, 13, 1), + PIN_FIELD_BASE(103, 103, 12, 0x0090, 0x10, 0, 1), + PIN_FIELD_BASE(104, 104, 12, 0x0090, 0x10, 11, 1), + PIN_FIELD_BASE(105, 105, 12, 0x0090, 0x10, 14, 1), + PIN_FIELD_BASE(106, 106, 5, 0x0090, 0x10, 0, 1), + PIN_FIELD_BASE(107, 107, 5, 0x0090, 0x10, 1, 1), + PIN_FIELD_BASE(108, 108, 5, 0x0090, 0x10, 3, 1), + PIN_FIELD_BASE(109, 109, 5, 0x0090, 0x10, 2, 1), + PIN_FIELD_BASE(110, 110, 5, 0x0090, 0x10, 4, 1), + PIN_FIELD_BASE(111, 111, 5, 0x0090, 0x10, 5, 1), + PIN_FIELD_BASE(112, 112, 5, 0x0090, 0x10, 7, 1), + PIN_FIELD_BASE(113, 113, 5, 0x0090, 0x10, 6, 1), + PIN_FIELD_BASE(114, 114, 5, 0x0090, 0x10, 8, 1), + PIN_FIELD_BASE(115, 115, 5, 0x0090, 0x10, 9, 1), + PIN_FIELD_BASE(116, 116, 5, 0x0090, 0x10, 11, 1), + PIN_FIELD_BASE(117, 117, 5, 0x0090, 0x10, 10, 1), + PIN_FIELD_BASE(118, 118, 6, 0x00b0, 0x10, 9, 1), + PIN_FIELD_BASE(119, 119, 6, 0x00b0, 0x10, 10, 1), + PIN_FIELD_BASE(120, 120, 6, 0x00b0, 0x10, 12, 1), + PIN_FIELD_BASE(121, 121, 6, 0x00b0, 0x10, 11, 1), + PIN_FIELD_BASE(122, 122, 6, 0x00b0, 0x10, 0, 1), + PIN_FIELD_BASE(123, 123, 6, 0x00b0, 0x10, 7, 1), + PIN_FIELD_BASE(124, 124, 6, 0x00b0, 0x10, 8, 1), + PIN_FIELD_BASE(157, 157, 2, 0x0090, 0x10, 1, 1), + PIN_FIELD_BASE(158, 158, 2, 0x0090, 0x10, 2, 1), + PIN_FIELD_BASE(159, 159, 2, 0x0090, 0x10, 0, 1), + PIN_FIELD_BASE(160, 160, 3, 0x0090, 0x10, 22, 1), + PIN_FIELD_BASE(161, 161, 3, 0x0090, 0x10, 20, 1), + PIN_FIELD_BASE(162, 162, 3, 0x0090, 0x10, 23, 1), + PIN_FIELD_BASE(163, 163, 3, 0x0090, 0x10, 21, 1), + PIN_FIELD_BASE(164, 164, 3, 0x0090, 0x10, 12, 1), + PIN_FIELD_BASE(165, 165, 3, 0x0090, 0x10, 14, 1), + PIN_FIELD_BASE(166, 166, 3, 0x0090, 0x10, 13, 1), + PIN_FIELD_BASE(167, 167, 3, 0x0090, 0x10, 15, 1), + PIN_FIELD_BASE(168, 168, 3, 0x0090, 0x10, 16, 1), + PIN_FIELD_BASE(169, 169, 3, 0x0090, 0x10, 17, 1), + PIN_FIELD_BASE(170, 170, 3, 0x0090, 0x10, 19, 1), + PIN_FIELD_BASE(171, 171, 3, 0x0090, 0x10, 18, 1), + PIN_FIELD_BASE(172, 172, 3, 0x0090, 0x10, 10, 1), + PIN_FIELD_BASE(173, 173, 3, 0x0090, 0x10, 11, 1), + PIN_FIELD_BASE(174, 174, 1, 0x0090, 0x10, 15, 1), + PIN_FIELD_BASE(175, 175, 1, 0x0090, 0x10, 16, 1), + PIN_FIELD_BASE(176, 176, 1, 0x0090, 0x10, 17, 1), + PIN_FIELD_BASE(177, 177, 1, 0x0090, 0x10, 18, 1), + PIN_FIELD_BASE(178, 178, 1, 0x0090, 0x10, 6, 1), + PIN_FIELD_BASE(179, 179, 1, 0x0090, 0x10, 7, 1), + PIN_FIELD_BASE(180, 180, 1, 0x0090, 0x10, 0, 1), + PIN_FIELD_BASE(181, 181, 1, 0x0090, 0x10, 1, 1), + PIN_FIELD_BASE(182, 182, 1, 
0x0090, 0x10, 2, 1), + PIN_FIELD_BASE(183, 183, 1, 0x0090, 0x10, 3, 1), + PIN_FIELD_BASE(184, 184, 1, 0x0090, 0x10, 4, 1), + PIN_FIELD_BASE(185, 185, 1, 0x0090, 0x10, 5, 1), + PIN_FIELD_BASE(186, 186, 13, 0x00d0, 0x10, 4, 1), + PIN_FIELD_BASE(187, 187, 13, 0x00d0, 0x10, 5, 1), + PIN_FIELD_BASE(188, 188, 13, 0x00d0, 0x10, 12, 1), + PIN_FIELD_BASE(189, 189, 13, 0x00d0, 0x10, 17, 1), + PIN_FIELD_BASE(190, 190, 13, 0x00d0, 0x10, 13, 1), + PIN_FIELD_BASE(191, 191, 13, 0x00d0, 0x10, 18, 1), + PIN_FIELD_BASE(192, 192, 13, 0x00d0, 0x10, 0, 1), + PIN_FIELD_BASE(193, 193, 13, 0x00d0, 0x10, 6, 1), + PIN_FIELD_BASE(194, 194, 13, 0x00d0, 0x10, 14, 1), + PIN_FIELD_BASE(195, 195, 13, 0x00d0, 0x10, 19, 1), + PIN_FIELD_BASE(196, 196, 13, 0x00d0, 0x10, 1, 1), + PIN_FIELD_BASE(197, 197, 13, 0x00d0, 0x10, 7, 1), + PIN_FIELD_BASE(198, 198, 13, 0x00d0, 0x10, 15, 1), + PIN_FIELD_BASE(199, 199, 13, 0x00d0, 0x10, 20, 1), + PIN_FIELD_BASE(200, 200, 13, 0x00d0, 0x10, 22, 1), + PIN_FIELD_BASE(201, 201, 13, 0x00d0, 0x10, 25, 1), + PIN_FIELD_BASE(202, 202, 13, 0x00d0, 0x10, 16, 1), + PIN_FIELD_BASE(203, 203, 13, 0x00d0, 0x10, 21, 1), + PIN_FIELD_BASE(204, 204, 13, 0x00d0, 0x10, 2, 1), + PIN_FIELD_BASE(205, 205, 13, 0x00d0, 0x10, 3, 1), + PIN_FIELD_BASE(206, 206, 13, 0x00d0, 0x10, 8, 1), + PIN_FIELD_BASE(207, 207, 13, 0x00d0, 0x10, 9, 1), + PIN_FIELD_BASE(208, 208, 13, 0x00d0, 0x10, 10, 1), + PIN_FIELD_BASE(209, 209, 13, 0x00d0, 0x10, 11, 1), + PIN_FIELD_BASE(210, 210, 14, 0x00b0, 0x10, 0, 1), + PIN_FIELD_BASE(211, 211, 14, 0x00b0, 0x10, 1, 1), + PIN_FIELD_BASE(212, 212, 14, 0x00b0, 0x10, 2, 1), + PIN_FIELD_BASE(213, 213, 14, 0x00b0, 0x10, 3, 1), + PIN_FIELD_BASE(214, 214, 13, 0x00d0, 0x10, 23, 1), + PIN_FIELD_BASE(215, 215, 13, 0x00d0, 0x10, 24, 1), + PIN_FIELD_BASE(216, 216, 14, 0x00b0, 0x10, 4, 1), + PIN_FIELD_BASE(220, 220, 14, 0x00b0, 0x10, 5, 1), + PIN_FIELD_BASE(221, 221, 14, 0x00b0, 0x10, 6, 1), + PIN_FIELD_BASE(222, 222, 14, 0x00b0, 0x10, 8, 1), + PIN_FIELD_BASE(223, 223, 14, 0x00b0, 0x10, 7, 1), + PIN_FIELD_BASE(230, 230, 15, 0x0090, 0x10, 13, 1), + PIN_FIELD_BASE(231, 231, 15, 0x0090, 0x10, 14, 1), + PIN_FIELD_BASE(232, 232, 15, 0x0090, 0x10, 10, 1), + PIN_FIELD_BASE(233, 233, 15, 0x0090, 0x10, 0, 1), + PIN_FIELD_BASE(234, 234, 15, 0x0090, 0x10, 3, 1), + PIN_FIELD_BASE(235, 235, 15, 0x0090, 0x10, 1, 1), + PIN_FIELD_BASE(236, 236, 15, 0x0090, 0x10, 2, 1), + PIN_FIELD_BASE(237, 237, 15, 0x0090, 0x10, 6, 1), + PIN_FIELD_BASE(238, 238, 15, 0x0090, 0x10, 5, 1), + PIN_FIELD_BASE(239, 239, 15, 0x0090, 0x10, 19, 1), + PIN_FIELD_BASE(240, 240, 15, 0x0090, 0x10, 18, 1), + PIN_FIELD_BASE(241, 241, 15, 0x0090, 0x10, 16, 1), + PIN_FIELD_BASE(242, 242, 15, 0x0090, 0x10, 17, 1), + PIN_FIELD_BASE(243, 243, 15, 0x0090, 0x10, 15, 1), + PIN_FIELD_BASE(244, 244, 15, 0x0090, 0x10, 12, 1), + PIN_FIELD_BASE(245, 245, 15, 0x0090, 0x10, 9, 1), + PIN_FIELD_BASE(246, 246, 15, 0x0090, 0x10, 8, 1), + PIN_FIELD_BASE(247, 247, 15, 0x0090, 0x10, 7, 1), + PIN_FIELD_BASE(248, 248, 15, 0x0090, 0x10, 4, 1), + PIN_FIELD_BASE(249, 249, 15, 0x0090, 0x10, 20, 1), + PIN_FIELD_BASE(250, 250, 15, 0x0090, 0x10, 11, 1), + PIN_FIELD_BASE(251, 251, 3, 0x0090, 0x10, 2, 1), + PIN_FIELD_BASE(252, 252, 3, 0x0090, 0x10, 3, 1), + PIN_FIELD_BASE(253, 253, 3, 0x0090, 0x10, 4, 1), + PIN_FIELD_BASE(254, 254, 3, 0x0090, 0x10, 5, 1), + PIN_FIELD_BASE(255, 255, 3, 0x0090, 0x10, 6, 1), + PIN_FIELD_BASE(256, 256, 3, 0x0090, 0x10, 7, 1), + PIN_FIELD_BASE(257, 257, 3, 0x0090, 0x10, 8, 1), + PIN_FIELD_BASE(258, 258, 3, 0x0090, 0x10, 9, 1), +}; + +static const struct 
mtk_pin_field_calc mt8196_pin_pd_range[] = { + PIN_FIELD_BASE(0, 0, 8, 0x0090, 0x10, 0, 1), + PIN_FIELD_BASE(1, 1, 8, 0x0090, 0x10, 1, 1), + PIN_FIELD_BASE(2, 2, 11, 0x0070, 0x10, 1, 1), + PIN_FIELD_BASE(3, 3, 11, 0x0070, 0x10, 2, 1), + PIN_FIELD_BASE(4, 4, 11, 0x0070, 0x10, 3, 1), + PIN_FIELD_BASE(5, 5, 11, 0x0070, 0x10, 4, 1), + PIN_FIELD_BASE(6, 6, 11, 0x0070, 0x10, 5, 1), + PIN_FIELD_BASE(7, 7, 11, 0x0070, 0x10, 6, 1), + PIN_FIELD_BASE(8, 8, 11, 0x0070, 0x10, 7, 1), + PIN_FIELD_BASE(9, 9, 9, 0x00a0, 0x10, 14, 1), + PIN_FIELD_BASE(10, 10, 9, 0x00a0, 0x10, 12, 1), + PIN_FIELD_BASE(11, 11, 8, 0x0090, 0x10, 2, 1), + PIN_FIELD_BASE(12, 12, 9, 0x00a0, 0x10, 13, 1), + PIN_FIELD_BASE(13, 13, 6, 0x0090, 0x10, 1, 1), + PIN_FIELD_BASE(14, 14, 3, 0x0080, 0x10, 0, 1), + PIN_FIELD_BASE(15, 15, 6, 0x0090, 0x10, 2, 1), + PIN_FIELD_BASE(16, 16, 6, 0x0090, 0x10, 3, 1), + PIN_FIELD_BASE(17, 17, 6, 0x0090, 0x10, 4, 1), + PIN_FIELD_BASE(18, 18, 6, 0x0090, 0x10, 5, 1), + PIN_FIELD_BASE(19, 19, 6, 0x0090, 0x10, 6, 1), + PIN_FIELD_BASE(20, 20, 3, 0x0080, 0x10, 1, 1), + PIN_FIELD_BASE(21, 21, 2, 0x0080, 0x10, 3, 1), + PIN_FIELD_BASE(22, 22, 2, 0x0080, 0x10, 4, 1), + PIN_FIELD_BASE(23, 23, 2, 0x0080, 0x10, 5, 1), + PIN_FIELD_BASE(24, 24, 2, 0x0080, 0x10, 6, 1), + PIN_FIELD_BASE(25, 25, 2, 0x0080, 0x10, 7, 1), + PIN_FIELD_BASE(26, 26, 2, 0x0080, 0x10, 8, 1), + PIN_FIELD_BASE(27, 27, 2, 0x0080, 0x10, 9, 1), + PIN_FIELD_BASE(28, 28, 2, 0x0080, 0x10, 10, 1), + PIN_FIELD_BASE(29, 29, 2, 0x0080, 0x10, 11, 1), + PIN_FIELD_BASE(30, 30, 2, 0x0080, 0x10, 12, 1), + PIN_FIELD_BASE(31, 31, 2, 0x0080, 0x10, 13, 1), + PIN_FIELD_BASE(32, 32, 1, 0x0080, 0x10, 8, 1), + PIN_FIELD_BASE(33, 33, 1, 0x0080, 0x10, 9, 1), + PIN_FIELD_BASE(34, 34, 1, 0x0080, 0x10, 10, 1), + PIN_FIELD_BASE(35, 35, 1, 0x0080, 0x10, 11, 1), + PIN_FIELD_BASE(36, 36, 1, 0x0080, 0x10, 12, 1), + PIN_FIELD_BASE(37, 37, 1, 0x0080, 0x10, 13, 1), + PIN_FIELD_BASE(38, 38, 1, 0x0080, 0x10, 14, 1), + PIN_FIELD_BASE(39, 39, 8, 0x0090, 0x10, 6, 1), + PIN_FIELD_BASE(40, 40, 8, 0x0090, 0x10, 3, 1), + PIN_FIELD_BASE(41, 41, 8, 0x0090, 0x10, 5, 1), + PIN_FIELD_BASE(42, 42, 8, 0x0090, 0x10, 4, 1), + PIN_FIELD_BASE(43, 43, 8, 0x0090, 0x10, 7, 1), + PIN_FIELD_BASE(44, 44, 8, 0x0090, 0x10, 8, 1), + PIN_FIELD_BASE(45, 45, 8, 0x0090, 0x10, 9, 1), + PIN_FIELD_BASE(46, 46, 8, 0x0090, 0x10, 10, 1), + PIN_FIELD_BASE(47, 47, 8, 0x0090, 0x10, 13, 1), + PIN_FIELD_BASE(48, 48, 8, 0x0090, 0x10, 11, 1), + PIN_FIELD_BASE(49, 49, 8, 0x0090, 0x10, 14, 1), + PIN_FIELD_BASE(50, 50, 8, 0x0090, 0x10, 12, 1), + PIN_FIELD_BASE(51, 51, 8, 0x0090, 0x10, 15, 1), + PIN_FIELD_BASE(52, 52, 9, 0x00a0, 0x10, 7, 1), + PIN_FIELD_BASE(53, 53, 9, 0x00a0, 0x10, 8, 1), + PIN_FIELD_BASE(54, 54, 9, 0x00a0, 0x10, 2, 1), + PIN_FIELD_BASE(55, 55, 9, 0x00a0, 0x10, 1, 1), + PIN_FIELD_BASE(56, 56, 9, 0x00a0, 0x10, 5, 1), + PIN_FIELD_BASE(57, 57, 9, 0x00a0, 0x10, 6, 1), + PIN_FIELD_BASE(58, 58, 9, 0x00a0, 0x10, 3, 1), + PIN_FIELD_BASE(59, 59, 9, 0x00a0, 0x10, 4, 1), + PIN_FIELD_BASE(61, 61, 9, 0x00a0, 0x10, 10, 1), + PIN_FIELD_BASE(62, 62, 9, 0x00a0, 0x10, 9, 1), + PIN_FIELD_BASE(63, 63, 9, 0x00a0, 0x10, 18, 1), + PIN_FIELD_BASE(64, 64, 9, 0x00a0, 0x10, 0, 1), + PIN_FIELD_BASE(65, 65, 9, 0x00a0, 0x10, 11, 1), + PIN_FIELD_BASE(66, 66, 9, 0x00a0, 0x10, 24, 1), + PIN_FIELD_BASE(67, 67, 9, 0x00a0, 0x10, 21, 1), + PIN_FIELD_BASE(68, 68, 9, 0x00a0, 0x10, 20, 1), + PIN_FIELD_BASE(69, 69, 9, 0x00a0, 0x10, 25, 1), + PIN_FIELD_BASE(70, 70, 9, 0x00a0, 0x10, 16, 1), + PIN_FIELD_BASE(71, 71, 9, 0x00a0, 0x10, 15, 1), + 
PIN_FIELD_BASE(72, 72, 9, 0x00a0, 0x10, 23, 1), + PIN_FIELD_BASE(73, 73, 9, 0x00a0, 0x10, 19, 1), + PIN_FIELD_BASE(74, 74, 9, 0x00a0, 0x10, 17, 1), + PIN_FIELD_BASE(75, 75, 10, 0x0080, 0x10, 2, 1), + PIN_FIELD_BASE(76, 76, 10, 0x0080, 0x10, 3, 1), + PIN_FIELD_BASE(77, 77, 10, 0x0080, 0x10, 4, 1), + PIN_FIELD_BASE(78, 78, 10, 0x0080, 0x10, 5, 1), + PIN_FIELD_BASE(79, 79, 10, 0x0080, 0x10, 0, 1), + PIN_FIELD_BASE(80, 80, 10, 0x0080, 0x10, 1, 1), + PIN_FIELD_BASE(81, 81, 11, 0x0070, 0x10, 9, 1), + PIN_FIELD_BASE(82, 82, 11, 0x0070, 0x10, 10, 1), + PIN_FIELD_BASE(83, 83, 11, 0x0070, 0x10, 12, 1), + PIN_FIELD_BASE(84, 84, 11, 0x0070, 0x10, 11, 1), + PIN_FIELD_BASE(85, 85, 11, 0x0070, 0x10, 13, 1), + PIN_FIELD_BASE(86, 86, 11, 0x0070, 0x10, 14, 1), + PIN_FIELD_BASE(87, 87, 11, 0x0070, 0x10, 16, 1), + PIN_FIELD_BASE(88, 88, 11, 0x0070, 0x10, 15, 1), + PIN_FIELD_BASE(89, 89, 11, 0x0070, 0x10, 0, 1), + PIN_FIELD_BASE(90, 90, 11, 0x0070, 0x10, 8, 1), + PIN_FIELD_BASE(91, 91, 12, 0x0080, 0x10, 6, 1), + PIN_FIELD_BASE(92, 92, 12, 0x0080, 0x10, 7, 1), + PIN_FIELD_BASE(93, 93, 12, 0x0080, 0x10, 8, 1), + PIN_FIELD_BASE(94, 94, 12, 0x0080, 0x10, 4, 1), + PIN_FIELD_BASE(95, 95, 12, 0x0080, 0x10, 1, 1), + PIN_FIELD_BASE(96, 96, 12, 0x0080, 0x10, 3, 1), + PIN_FIELD_BASE(97, 97, 12, 0x0080, 0x10, 2, 1), + PIN_FIELD_BASE(98, 98, 12, 0x0080, 0x10, 5, 1), + PIN_FIELD_BASE(99, 99, 12, 0x0080, 0x10, 9, 1), + PIN_FIELD_BASE(100, 100, 12, 0x0080, 0x10, 12, 1), + PIN_FIELD_BASE(101, 101, 12, 0x0080, 0x10, 10, 1), + PIN_FIELD_BASE(102, 102, 12, 0x0080, 0x10, 13, 1), + PIN_FIELD_BASE(103, 103, 12, 0x0080, 0x10, 0, 1), + PIN_FIELD_BASE(104, 104, 12, 0x0080, 0x10, 11, 1), + PIN_FIELD_BASE(105, 105, 12, 0x0080, 0x10, 14, 1), + PIN_FIELD_BASE(106, 106, 5, 0x0080, 0x10, 0, 1), + PIN_FIELD_BASE(107, 107, 5, 0x0080, 0x10, 1, 1), + PIN_FIELD_BASE(108, 108, 5, 0x0080, 0x10, 3, 1), + PIN_FIELD_BASE(109, 109, 5, 0x0080, 0x10, 2, 1), + PIN_FIELD_BASE(110, 110, 5, 0x0080, 0x10, 4, 1), + PIN_FIELD_BASE(111, 111, 5, 0x0080, 0x10, 5, 1), + PIN_FIELD_BASE(112, 112, 5, 0x0080, 0x10, 7, 1), + PIN_FIELD_BASE(113, 113, 5, 0x0080, 0x10, 6, 1), + PIN_FIELD_BASE(114, 114, 5, 0x0080, 0x10, 8, 1), + PIN_FIELD_BASE(115, 115, 5, 0x0080, 0x10, 9, 1), + PIN_FIELD_BASE(116, 116, 5, 0x0080, 0x10, 11, 1), + PIN_FIELD_BASE(117, 117, 5, 0x0080, 0x10, 10, 1), + PIN_FIELD_BASE(118, 118, 6, 0x0090, 0x10, 9, 1), + PIN_FIELD_BASE(119, 119, 6, 0x0090, 0x10, 10, 1), + PIN_FIELD_BASE(120, 120, 6, 0x0090, 0x10, 12, 1), + PIN_FIELD_BASE(121, 121, 6, 0x0090, 0x10, 11, 1), + PIN_FIELD_BASE(122, 122, 6, 0x0090, 0x10, 0, 1), + PIN_FIELD_BASE(123, 123, 6, 0x0090, 0x10, 7, 1), + PIN_FIELD_BASE(124, 124, 6, 0x0090, 0x10, 8, 1), + PIN_FIELD_BASE(157, 157, 2, 0x0080, 0x10, 1, 1), + PIN_FIELD_BASE(158, 158, 2, 0x0080, 0x10, 2, 1), + PIN_FIELD_BASE(159, 159, 2, 0x0080, 0x10, 0, 1), + PIN_FIELD_BASE(160, 160, 3, 0x0080, 0x10, 22, 1), + PIN_FIELD_BASE(161, 161, 3, 0x0080, 0x10, 20, 1), + PIN_FIELD_BASE(162, 162, 3, 0x0080, 0x10, 23, 1), + PIN_FIELD_BASE(163, 163, 3, 0x0080, 0x10, 21, 1), + PIN_FIELD_BASE(164, 164, 3, 0x0080, 0x10, 12, 1), + PIN_FIELD_BASE(165, 165, 3, 0x0080, 0x10, 14, 1), + PIN_FIELD_BASE(166, 166, 3, 0x0080, 0x10, 13, 1), + PIN_FIELD_BASE(167, 167, 3, 0x0080, 0x10, 15, 1), + PIN_FIELD_BASE(168, 168, 3, 0x0080, 0x10, 16, 1), + PIN_FIELD_BASE(169, 169, 3, 0x0080, 0x10, 17, 1), + PIN_FIELD_BASE(170, 170, 3, 0x0080, 0x10, 19, 1), + PIN_FIELD_BASE(171, 171, 3, 0x0080, 0x10, 18, 1), + PIN_FIELD_BASE(172, 172, 3, 0x0080, 0x10, 10, 1), + PIN_FIELD_BASE(173, 173, 3, 
0x0080, 0x10, 11, 1), + PIN_FIELD_BASE(174, 174, 1, 0x0080, 0x10, 15, 1), + PIN_FIELD_BASE(175, 175, 1, 0x0080, 0x10, 16, 1), + PIN_FIELD_BASE(176, 176, 1, 0x0080, 0x10, 17, 1), + PIN_FIELD_BASE(177, 177, 1, 0x0080, 0x10, 18, 1), + PIN_FIELD_BASE(178, 178, 1, 0x0080, 0x10, 6, 1), + PIN_FIELD_BASE(179, 179, 1, 0x0080, 0x10, 7, 1), + PIN_FIELD_BASE(180, 180, 1, 0x0080, 0x10, 0, 1), + PIN_FIELD_BASE(181, 181, 1, 0x0080, 0x10, 1, 1), + PIN_FIELD_BASE(182, 182, 1, 0x0080, 0x10, 2, 1), + PIN_FIELD_BASE(183, 183, 1, 0x0080, 0x10, 3, 1), + PIN_FIELD_BASE(184, 184, 1, 0x0080, 0x10, 4, 1), + PIN_FIELD_BASE(185, 185, 1, 0x0080, 0x10, 5, 1), + PIN_FIELD_BASE(186, 186, 13, 0x00c0, 0x10, 4, 1), + PIN_FIELD_BASE(187, 187, 13, 0x00c0, 0x10, 5, 1), + PIN_FIELD_BASE(188, 188, 13, 0x00c0, 0x10, 12, 1), + PIN_FIELD_BASE(189, 189, 13, 0x00c0, 0x10, 17, 1), + PIN_FIELD_BASE(190, 190, 13, 0x00c0, 0x10, 13, 1), + PIN_FIELD_BASE(191, 191, 13, 0x00c0, 0x10, 18, 1), + PIN_FIELD_BASE(192, 192, 13, 0x00c0, 0x10, 0, 1), + PIN_FIELD_BASE(193, 193, 13, 0x00c0, 0x10, 6, 1), + PIN_FIELD_BASE(194, 194, 13, 0x00c0, 0x10, 14, 1), + PIN_FIELD_BASE(195, 195, 13, 0x00c0, 0x10, 19, 1), + PIN_FIELD_BASE(196, 196, 13, 0x00c0, 0x10, 1, 1), + PIN_FIELD_BASE(197, 197, 13, 0x00c0, 0x10, 7, 1), + PIN_FIELD_BASE(198, 198, 13, 0x00c0, 0x10, 15, 1), + PIN_FIELD_BASE(199, 199, 13, 0x00c0, 0x10, 20, 1), + PIN_FIELD_BASE(200, 200, 13, 0x00c0, 0x10, 22, 1), + PIN_FIELD_BASE(201, 201, 13, 0x00c0, 0x10, 25, 1), + PIN_FIELD_BASE(202, 202, 13, 0x00c0, 0x10, 16, 1), + PIN_FIELD_BASE(203, 203, 13, 0x00c0, 0x10, 21, 1), + PIN_FIELD_BASE(204, 204, 13, 0x00c0, 0x10, 2, 1), + PIN_FIELD_BASE(205, 205, 13, 0x00c0, 0x10, 3, 1), + PIN_FIELD_BASE(206, 206, 13, 0x00c0, 0x10, 8, 1), + PIN_FIELD_BASE(207, 207, 13, 0x00c0, 0x10, 9, 1), + PIN_FIELD_BASE(208, 208, 13, 0x00c0, 0x10, 10, 1), + PIN_FIELD_BASE(209, 209, 13, 0x00c0, 0x10, 11, 1), + PIN_FIELD_BASE(210, 210, 14, 0x0090, 0x10, 0, 1), + PIN_FIELD_BASE(211, 211, 14, 0x0090, 0x10, 1, 1), + PIN_FIELD_BASE(212, 212, 14, 0x0090, 0x10, 2, 1), + PIN_FIELD_BASE(213, 213, 14, 0x0090, 0x10, 3, 1), + PIN_FIELD_BASE(214, 214, 13, 0x00c0, 0x10, 23, 1), + PIN_FIELD_BASE(215, 215, 13, 0x00c0, 0x10, 24, 1), + PIN_FIELD_BASE(216, 216, 14, 0x0090, 0x10, 4, 1), + PIN_FIELD_BASE(220, 220, 14, 0x0090, 0x10, 5, 1), + PIN_FIELD_BASE(221, 221, 14, 0x0090, 0x10, 6, 1), + PIN_FIELD_BASE(222, 222, 14, 0x0090, 0x10, 8, 1), + PIN_FIELD_BASE(223, 223, 14, 0x0090, 0x10, 7, 1), + PIN_FIELD_BASE(230, 230, 15, 0x0070, 0x10, 13, 1), + PIN_FIELD_BASE(231, 231, 15, 0x0070, 0x10, 14, 1), + PIN_FIELD_BASE(232, 232, 15, 0x0070, 0x10, 10, 1), + PIN_FIELD_BASE(233, 233, 15, 0x0070, 0x10, 0, 1), + PIN_FIELD_BASE(234, 234, 15, 0x0070, 0x10, 3, 1), + PIN_FIELD_BASE(235, 235, 15, 0x0070, 0x10, 1, 1), + PIN_FIELD_BASE(236, 236, 15, 0x0070, 0x10, 2, 1), + PIN_FIELD_BASE(237, 237, 15, 0x0070, 0x10, 6, 1), + PIN_FIELD_BASE(238, 238, 15, 0x0070, 0x10, 5, 1), + PIN_FIELD_BASE(239, 239, 15, 0x0070, 0x10, 19, 1), + PIN_FIELD_BASE(240, 240, 15, 0x0070, 0x10, 18, 1), + PIN_FIELD_BASE(241, 241, 15, 0x0070, 0x10, 16, 1), + PIN_FIELD_BASE(242, 242, 15, 0x0070, 0x10, 17, 1), + PIN_FIELD_BASE(243, 243, 15, 0x0070, 0x10, 15, 1), + PIN_FIELD_BASE(244, 244, 15, 0x0070, 0x10, 12, 1), + PIN_FIELD_BASE(245, 245, 15, 0x0070, 0x10, 9, 1), + PIN_FIELD_BASE(246, 246, 15, 0x0070, 0x10, 8, 1), + PIN_FIELD_BASE(247, 247, 15, 0x0070, 0x10, 7, 1), + PIN_FIELD_BASE(248, 248, 15, 0x0070, 0x10, 4, 1), + PIN_FIELD_BASE(249, 249, 15, 0x0070, 0x10, 20, 1), + PIN_FIELD_BASE(250, 250, 15, 
0x0070, 0x10, 11, 1), + PIN_FIELD_BASE(251, 251, 3, 0x0080, 0x10, 2, 1), + PIN_FIELD_BASE(252, 252, 3, 0x0080, 0x10, 3, 1), + PIN_FIELD_BASE(253, 253, 3, 0x0080, 0x10, 4, 1), + PIN_FIELD_BASE(254, 254, 3, 0x0080, 0x10, 5, 1), + PIN_FIELD_BASE(255, 255, 3, 0x0080, 0x10, 6, 1), + PIN_FIELD_BASE(256, 256, 3, 0x0080, 0x10, 7, 1), + PIN_FIELD_BASE(257, 257, 3, 0x0080, 0x10, 8, 1), + PIN_FIELD_BASE(258, 258, 3, 0x0080, 0x10, 9, 1), +}; + +static const struct mtk_pin_field_calc mt8196_pin_drv_range[] = { + PIN_FIELD_BASE(0, 0, 8, 0x0000, 0x10, 0, 3), + PIN_FIELD_BASE(1, 1, 8, 0x0000, 0x10, 3, 3), + PIN_FIELD_BASE(2, 2, 11, 0x0000, 0x10, 12, 3), + PIN_FIELD_BASE(3, 3, 11, 0x0000, 0x10, 12, 3), + PIN_FIELD_BASE(4, 4, 11, 0x0000, 0x10, 15, 3), + PIN_FIELD_BASE(5, 5, 11, 0x0000, 0x10, 0, 3), + PIN_FIELD_BASE(6, 6, 11, 0x0000, 0x10, 3, 3), + PIN_FIELD_BASE(7, 7, 11, 0x0000, 0x10, 6, 3), + PIN_FIELD_BASE(8, 8, 11, 0x0000, 0x10, 9, 3), + PIN_FIELD_BASE(9, 9, 9, 0x0010, 0x10, 9, 3), + PIN_FIELD_BASE(10, 10, 9, 0x0010, 0x10, 6, 3), + PIN_FIELD_BASE(11, 11, 8, 0x0000, 0x10, 24, 3), + PIN_FIELD_BASE(12, 12, 9, 0x0010, 0x10, 15, 3), + PIN_FIELD_BASE(13, 13, 6, 0x0000, 0x10, 9, 3), + PIN_FIELD_BASE(14, 14, 3, 0x0000, 0x10, 0, 3), + PIN_FIELD_BASE(15, 15, 6, 0x0000, 0x10, 0, 3), + PIN_FIELD_BASE(16, 16, 6, 0x0000, 0x10, 9, 3), + PIN_FIELD_BASE(17, 17, 6, 0x0000, 0x10, 9, 3), + PIN_FIELD_BASE(18, 18, 6, 0x0000, 0x10, 3, 3), + PIN_FIELD_BASE(19, 19, 6, 0x0000, 0x10, 6, 3), + PIN_FIELD_BASE(20, 20, 3, 0x0000, 0x10, 9, 3), + PIN_FIELD_BASE(21, 21, 2, 0x0000, 0x10, 0, 3), + PIN_FIELD_BASE(22, 22, 2, 0x0000, 0x10, 3, 3), + PIN_FIELD_BASE(23, 23, 2, 0x0000, 0x10, 6, 3), + PIN_FIELD_BASE(24, 24, 2, 0x0000, 0x10, 9, 3), + PIN_FIELD_BASE(25, 25, 2, 0x0000, 0x10, 12, 3), + PIN_FIELD_BASE(26, 26, 2, 0x0000, 0x10, 15, 3), + PIN_FIELD_BASE(27, 27, 2, 0x0010, 0x10, 3, 3), + PIN_FIELD_BASE(28, 28, 2, 0x0000, 0x10, 18, 3), + PIN_FIELD_BASE(29, 29, 2, 0x0000, 0x10, 21, 3), + PIN_FIELD_BASE(30, 30, 2, 0x0000, 0x10, 24, 3), + PIN_FIELD_BASE(31, 31, 2, 0x0000, 0x10, 27, 3), + PIN_FIELD_BASE(32, 32, 1, 0x0000, 0x10, 21, 3), + PIN_FIELD_BASE(33, 33, 1, 0x0000, 0x10, 21, 3), + PIN_FIELD_BASE(34, 34, 1, 0x0000, 0x10, 21, 3), + PIN_FIELD_BASE(35, 35, 1, 0x0000, 0x10, 21, 3), + PIN_FIELD_BASE(36, 36, 1, 0x0000, 0x10, 21, 3), + PIN_FIELD_BASE(37, 37, 1, 0x0000, 0x10, 27, 3), + PIN_FIELD_BASE(38, 38, 1, 0x0000, 0x10, 18, 3), + PIN_FIELD_BASE(39, 39, 8, 0x0000, 0x10, 27, 3), + PIN_FIELD_BASE(40, 40, 8, 0x0000, 0x10, 6, 3), + PIN_FIELD_BASE(41, 41, 8, 0x0000, 0x10, 12, 3), + PIN_FIELD_BASE(42, 42, 8, 0x0000, 0x10, 9, 3), + PIN_FIELD_BASE(43, 43, 8, 0x0000, 0x10, 15, 3), + PIN_FIELD_BASE(44, 44, 8, 0x0000, 0x10, 18, 3), + PIN_FIELD_BASE(45, 45, 8, 0x0000, 0x10, 21, 3), + PIN_FIELD_BASE(46, 46, 8, 0x0010, 0x10, 0, 3), + PIN_FIELD_BASE(47, 47, 8, 0x0010, 0x10, 9, 3), + PIN_FIELD_BASE(48, 48, 8, 0x0010, 0x10, 3, 3), + PIN_FIELD_BASE(49, 49, 8, 0x0010, 0x10, 12, 3), + PIN_FIELD_BASE(50, 50, 8, 0x0010, 0x10, 6, 3), + PIN_FIELD_BASE(51, 51, 8, 0x0010, 0x10, 15, 3), + PIN_FIELD_BASE(52, 52, 9, 0x0000, 0x10, 21, 3), + PIN_FIELD_BASE(53, 53, 9, 0x0000, 0x10, 24, 3), + PIN_FIELD_BASE(54, 54, 9, 0x0000, 0x10, 6, 3), + PIN_FIELD_BASE(55, 55, 9, 0x0000, 0x10, 3, 3), + PIN_FIELD_BASE(56, 56, 9, 0x0000, 0x10, 15, 3), + PIN_FIELD_BASE(57, 57, 9, 0x0000, 0x10, 18, 3), + PIN_FIELD_BASE(58, 58, 9, 0x0000, 0x10, 9, 3), + PIN_FIELD_BASE(59, 59, 9, 0x0000, 0x10, 12, 3), + PIN_FIELD_BASE(60, 60, 9, 0x0010, 0x10, 24, 3), + PIN_FIELD_BASE(61, 61, 9, 0x0010, 0x10, 0, 
3), + PIN_FIELD_BASE(62, 62, 9, 0x0000, 0x10, 27, 3), + PIN_FIELD_BASE(63, 63, 9, 0x0010, 0x10, 12, 3), + PIN_FIELD_BASE(64, 64, 9, 0x0000, 0x10, 0, 3), + PIN_FIELD_BASE(65, 65, 9, 0x0010, 0x10, 3, 3), + PIN_FIELD_BASE(66, 66, 9, 0x0010, 0x10, 27, 3), + PIN_FIELD_BASE(67, 67, 9, 0x0010, 0x10, 21, 3), + PIN_FIELD_BASE(68, 68, 9, 0x0010, 0x10, 21, 3), + PIN_FIELD_BASE(69, 69, 9, 0x0010, 0x10, 21, 3), + PIN_FIELD_BASE(70, 70, 9, 0x0010, 0x10, 18, 3), + PIN_FIELD_BASE(71, 71, 9, 0x0010, 0x10, 18, 3), + PIN_FIELD_BASE(72, 72, 9, 0x0010, 0x10, 21, 3), + PIN_FIELD_BASE(73, 73, 9, 0x0010, 0x10, 18, 3), + PIN_FIELD_BASE(74, 74, 9, 0x0010, 0x10, 18, 3), + PIN_FIELD_BASE(75, 75, 10, 0x0000, 0x10, 6, 3), + PIN_FIELD_BASE(76, 76, 10, 0x0000, 0x10, 9, 3), + PIN_FIELD_BASE(77, 77, 10, 0x0000, 0x10, 12, 3), + PIN_FIELD_BASE(78, 78, 10, 0x0000, 0x10, 15, 3), + PIN_FIELD_BASE(79, 79, 10, 0x0000, 0x10, 0, 3), + PIN_FIELD_BASE(80, 80, 10, 0x0000, 0x10, 3, 3), + PIN_FIELD_BASE(81, 81, 11, 0x0000, 0x10, 18, 3), + PIN_FIELD_BASE(82, 82, 11, 0x0000, 0x10, 21, 3), + PIN_FIELD_BASE(83, 83, 11, 0x0000, 0x10, 27, 3), + PIN_FIELD_BASE(84, 84, 11, 0x0000, 0x10, 24, 3), + PIN_FIELD_BASE(85, 85, 11, 0x0010, 0x10, 0, 3), + PIN_FIELD_BASE(86, 86, 11, 0x0010, 0x10, 3, 3), + PIN_FIELD_BASE(87, 87, 11, 0x0010, 0x10, 9, 3), + PIN_FIELD_BASE(88, 88, 11, 0x0010, 0x10, 6, 3), + PIN_FIELD_BASE(89, 89, 11, 0x0000, 0x10, 12, 3), + PIN_FIELD_BASE(90, 90, 11, 0x0000, 0x10, 12, 3), + PIN_FIELD_BASE(91, 91, 12, 0x0000, 0x10, 15, 3), + PIN_FIELD_BASE(92, 92, 12, 0x0000, 0x10, 15, 3), + PIN_FIELD_BASE(93, 93, 12, 0x0000, 0x10, 15, 3), + PIN_FIELD_BASE(94, 94, 12, 0x0000, 0x10, 12, 3), + PIN_FIELD_BASE(95, 95, 12, 0x0000, 0x10, 0, 3), + PIN_FIELD_BASE(96, 96, 12, 0x0000, 0x10, 6, 3), + PIN_FIELD_BASE(97, 97, 12, 0x0000, 0x10, 3, 3), + PIN_FIELD_BASE(98, 98, 12, 0x0000, 0x10, 9, 3), + PIN_FIELD_BASE(99, 99, 12, 0x0000, 0x10, 18, 3), + PIN_FIELD_BASE(100, 100, 12, 0x0000, 0x10, 27, 3), + PIN_FIELD_BASE(101, 101, 12, 0x0000, 0x10, 21, 3), + PIN_FIELD_BASE(102, 102, 12, 0x0010, 0x10, 0, 3), + PIN_FIELD_BASE(103, 103, 12, 0x0000, 0x10, 12, 3), + PIN_FIELD_BASE(104, 104, 12, 0x0000, 0x10, 24, 3), + PIN_FIELD_BASE(105, 105, 12, 0x0010, 0x10, 3, 3), + PIN_FIELD_BASE(106, 106, 5, 0x0000, 0x10, 0, 3), + PIN_FIELD_BASE(107, 107, 5, 0x0000, 0x10, 3, 3), + PIN_FIELD_BASE(108, 108, 5, 0x0000, 0x10, 9, 3), + PIN_FIELD_BASE(109, 109, 5, 0x0000, 0x10, 6, 3), + PIN_FIELD_BASE(110, 110, 5, 0x0000, 0x10, 12, 3), + PIN_FIELD_BASE(111, 111, 5, 0x0000, 0x10, 15, 3), + PIN_FIELD_BASE(112, 112, 5, 0x0000, 0x10, 21, 3), + PIN_FIELD_BASE(113, 113, 5, 0x0000, 0x10, 18, 3), + PIN_FIELD_BASE(114, 114, 5, 0x0000, 0x10, 24, 3), + PIN_FIELD_BASE(115, 115, 5, 0x0000, 0x10, 27, 3), + PIN_FIELD_BASE(116, 116, 5, 0x0010, 0x10, 3, 3), + PIN_FIELD_BASE(117, 117, 5, 0x0010, 0x10, 0, 3), + PIN_FIELD_BASE(118, 118, 6, 0x0000, 0x10, 18, 3), + PIN_FIELD_BASE(119, 119, 6, 0x0000, 0x10, 21, 3), + PIN_FIELD_BASE(120, 120, 6, 0x0000, 0x10, 27, 3), + PIN_FIELD_BASE(121, 121, 6, 0x0000, 0x10, 24, 3), + PIN_FIELD_BASE(122, 122, 6, 0x0000, 0x10, 9, 3), + PIN_FIELD_BASE(123, 123, 6, 0x0000, 0x10, 12, 3), + PIN_FIELD_BASE(124, 124, 6, 0x0000, 0x10, 15, 3), + PIN_FIELD_BASE(125, 125, 7, 0x0000, 0x10, 0, 3), + PIN_FIELD_BASE(126, 126, 7, 0x0000, 0x10, 3, 3), + PIN_FIELD_BASE(127, 127, 7, 0x0000, 0x10, 6, 3), + PIN_FIELD_BASE(128, 128, 7, 0x0000, 0x10, 9, 3), + PIN_FIELD_BASE(129, 129, 7, 0x0000, 0x10, 12, 3), + PIN_FIELD_BASE(130, 130, 7, 0x0000, 0x10, 15, 3), + PIN_FIELD_BASE(131, 131, 7, 
0x0000, 0x10, 27, 3), + PIN_FIELD_BASE(132, 132, 7, 0x0010, 0x10, 3, 3), + PIN_FIELD_BASE(133, 133, 7, 0x0010, 0x10, 0, 3), + PIN_FIELD_BASE(134, 134, 7, 0x0000, 0x10, 18, 3), + PIN_FIELD_BASE(135, 135, 7, 0x0000, 0x10, 24, 3), + PIN_FIELD_BASE(136, 136, 7, 0x0000, 0x10, 21, 3), + PIN_FIELD_BASE(137, 137, 4, 0x0000, 0x10, 6, 3), + PIN_FIELD_BASE(138, 138, 4, 0x0000, 0x10, 9, 3), + PIN_FIELD_BASE(139, 139, 4, 0x0000, 0x10, 12, 3), + PIN_FIELD_BASE(140, 140, 4, 0x0000, 0x10, 15, 3), + PIN_FIELD_BASE(141, 141, 4, 0x0000, 0x10, 18, 3), + PIN_FIELD_BASE(142, 142, 4, 0x0000, 0x10, 21, 3), + PIN_FIELD_BASE(143, 143, 4, 0x0000, 0x10, 24, 3), + PIN_FIELD_BASE(144, 144, 4, 0x0000, 0x10, 27, 3), + PIN_FIELD_BASE(145, 145, 4, 0x0010, 0x10, 0, 3), + PIN_FIELD_BASE(146, 146, 4, 0x0010, 0x10, 0, 3), + PIN_FIELD_BASE(147, 147, 4, 0x0000, 0x10, 0, 3), + PIN_FIELD_BASE(148, 148, 4, 0x0000, 0x10, 3, 3), + PIN_FIELD_BASE(149, 149, 4, 0x0010, 0x10, 3, 3), + PIN_FIELD_BASE(150, 150, 4, 0x0010, 0x10, 3, 3), + PIN_FIELD_BASE(151, 151, 4, 0x0010, 0x10, 3, 3), + PIN_FIELD_BASE(152, 152, 4, 0x0010, 0x10, 3, 3), + PIN_FIELD_BASE(153, 153, 4, 0x0010, 0x10, 12, 3), + PIN_FIELD_BASE(154, 154, 4, 0x0010, 0x10, 9, 3), + PIN_FIELD_BASE(155, 155, 4, 0x0010, 0x10, 6, 3), + PIN_FIELD_BASE(156, 156, 4, 0x0010, 0x10, 6, 3), + PIN_FIELD_BASE(157, 157, 2, 0x0010, 0x10, 0, 3), + PIN_FIELD_BASE(158, 158, 2, 0x0010, 0x10, 0, 3), + PIN_FIELD_BASE(159, 159, 2, 0x0010, 0x10, 0, 3), + PIN_FIELD_BASE(160, 160, 3, 0x0000, 0x10, 3, 3), + PIN_FIELD_BASE(161, 161, 3, 0x0000, 0x10, 6, 3), + PIN_FIELD_BASE(162, 162, 3, 0x0000, 0x10, 6, 3), + PIN_FIELD_BASE(163, 163, 3, 0x0000, 0x10, 6, 3), + PIN_FIELD_BASE(164, 164, 3, 0x0000, 0x10, 18, 3), + PIN_FIELD_BASE(165, 165, 3, 0x0000, 0x10, 24, 3), + PIN_FIELD_BASE(166, 166, 3, 0x0000, 0x10, 21, 3), + PIN_FIELD_BASE(167, 167, 3, 0x0000, 0x10, 27, 3), + PIN_FIELD_BASE(168, 168, 3, 0x0010, 0x10, 0, 3), + PIN_FIELD_BASE(169, 169, 3, 0x0010, 0x10, 3, 3), + PIN_FIELD_BASE(170, 170, 3, 0x0010, 0x10, 9, 3), + PIN_FIELD_BASE(171, 171, 3, 0x0010, 0x10, 6, 3), + PIN_FIELD_BASE(172, 172, 3, 0x0000, 0x10, 9, 3), + PIN_FIELD_BASE(173, 173, 3, 0x0000, 0x10, 9, 3), + PIN_FIELD_BASE(174, 174, 1, 0x0000, 0x10, 27, 3), + PIN_FIELD_BASE(175, 175, 1, 0x0000, 0x10, 27, 3), + PIN_FIELD_BASE(176, 176, 1, 0x0010, 0x10, 0, 3), + PIN_FIELD_BASE(177, 177, 1, 0x0010, 0x10, 3, 3), + PIN_FIELD_BASE(178, 178, 1, 0x0000, 0x10, 24, 3), + PIN_FIELD_BASE(179, 179, 1, 0x0000, 0x10, 24, 3), + PIN_FIELD_BASE(180, 180, 1, 0x0000, 0x10, 0, 3), + PIN_FIELD_BASE(181, 181, 1, 0x0000, 0x10, 3, 3), + PIN_FIELD_BASE(182, 182, 1, 0x0000, 0x10, 6, 3), + PIN_FIELD_BASE(183, 183, 1, 0x0000, 0x10, 9, 3), + PIN_FIELD_BASE(184, 184, 1, 0x0000, 0x10, 12, 3), + PIN_FIELD_BASE(185, 185, 1, 0x0000, 0x10, 15, 3), + PIN_FIELD_BASE(186, 186, 13, 0x0010, 0x10, 12, 3), + PIN_FIELD_BASE(187, 187, 13, 0x0010, 0x10, 12, 3), + PIN_FIELD_BASE(188, 188, 13, 0x0000, 0x10, 12, 3), + PIN_FIELD_BASE(189, 189, 13, 0x0000, 0x10, 27, 3), + PIN_FIELD_BASE(190, 190, 13, 0x0000, 0x10, 15, 3), + PIN_FIELD_BASE(191, 191, 13, 0x0010, 0x10, 0, 3), + PIN_FIELD_BASE(192, 192, 13, 0x0000, 0x10, 0, 3), + PIN_FIELD_BASE(193, 193, 13, 0x0010, 0x10, 15, 3), + PIN_FIELD_BASE(194, 194, 13, 0x0000, 0x10, 18, 3), + PIN_FIELD_BASE(195, 195, 13, 0x0010, 0x10, 3, 3), + PIN_FIELD_BASE(196, 196, 13, 0x0000, 0x10, 3, 3), + PIN_FIELD_BASE(197, 197, 13, 0x0010, 0x10, 18, 3), + PIN_FIELD_BASE(198, 198, 13, 0x0000, 0x10, 21, 3), + PIN_FIELD_BASE(199, 199, 13, 0x0010, 0x10, 6, 3), + PIN_FIELD_BASE(200, 
200, 13, 0x0010, 0x10, 27, 3), + PIN_FIELD_BASE(201, 201, 13, 0x0020, 0x10, 6, 3), + PIN_FIELD_BASE(202, 202, 13, 0x0000, 0x10, 24, 3), + PIN_FIELD_BASE(203, 203, 13, 0x0010, 0x10, 9, 3), + PIN_FIELD_BASE(204, 204, 13, 0x0000, 0x10, 6, 3), + PIN_FIELD_BASE(205, 205, 13, 0x0000, 0x10, 9, 3), + PIN_FIELD_BASE(206, 206, 13, 0x0010, 0x10, 24, 3), + PIN_FIELD_BASE(207, 207, 13, 0x0010, 0x10, 21, 3), + PIN_FIELD_BASE(208, 208, 13, 0x0010, 0x10, 21, 3), + PIN_FIELD_BASE(209, 209, 13, 0x0010, 0x10, 21, 3), + PIN_FIELD_BASE(210, 210, 14, 0x0000, 0x10, 0, 3), + PIN_FIELD_BASE(211, 211, 14, 0x0000, 0x10, 3, 3), + PIN_FIELD_BASE(212, 212, 14, 0x0000, 0x10, 6, 3), + PIN_FIELD_BASE(213, 213, 14, 0x0000, 0x10, 9, 3), + PIN_FIELD_BASE(214, 214, 13, 0x0020, 0x10, 0, 3), + PIN_FIELD_BASE(215, 215, 13, 0x0020, 0x10, 3, 3), + PIN_FIELD_BASE(216, 216, 14, 0x0010, 0x10, 6, 3), + PIN_FIELD_BASE(217, 217, 14, 0x0010, 0x10, 6, 3), + PIN_FIELD_BASE(218, 218, 14, 0x0000, 0x10, 15, 3), + PIN_FIELD_BASE(219, 219, 14, 0x0000, 0x10, 12, 3), + PIN_FIELD_BASE(220, 220, 14, 0x0020, 0x10, 3, 3), + PIN_FIELD_BASE(221, 221, 14, 0x0020, 0x10, 6, 3), + PIN_FIELD_BASE(222, 222, 14, 0x0020, 0x10, 12, 3), + PIN_FIELD_BASE(223, 223, 14, 0x0020, 0x10, 9, 3), + PIN_FIELD_BASE(224, 224, 14, 0x0000, 0x10, 18, 3), + PIN_FIELD_BASE(225, 225, 14, 0x0000, 0x10, 21, 3), + PIN_FIELD_BASE(226, 226, 14, 0x0000, 0x10, 24, 3), + PIN_FIELD_BASE(227, 227, 14, 0x0000, 0x10, 27, 3), + PIN_FIELD_BASE(228, 228, 14, 0x0010, 0x10, 0, 3), + PIN_FIELD_BASE(229, 229, 14, 0x0010, 0x10, 3, 3), + PIN_FIELD_BASE(230, 230, 15, 0x0000, 0x10, 0, 3), + PIN_FIELD_BASE(231, 231, 15, 0x0000, 0x10, 0, 3), + PIN_FIELD_BASE(232, 232, 15, 0x0000, 0x10, 0, 3), + PIN_FIELD_BASE(233, 233, 15, 0x0000, 0x10, 3, 3), + PIN_FIELD_BASE(234, 234, 15, 0x0000, 0x10, 3, 3), + PIN_FIELD_BASE(235, 235, 15, 0x0000, 0x10, 3, 3), + PIN_FIELD_BASE(236, 236, 15, 0x0000, 0x10, 3, 3), + PIN_FIELD_BASE(237, 237, 15, 0x0000, 0x10, 6, 3), + PIN_FIELD_BASE(238, 238, 15, 0x0000, 0x10, 6, 3), + PIN_FIELD_BASE(239, 239, 15, 0x0000, 0x10, 6, 3), + PIN_FIELD_BASE(240, 240, 15, 0x0000, 0x10, 6, 3), + PIN_FIELD_BASE(241, 241, 15, 0x0000, 0x10, 9, 3), + PIN_FIELD_BASE(242, 242, 15, 0x0000, 0x10, 9, 3), + PIN_FIELD_BASE(243, 243, 15, 0x0000, 0x10, 9, 3), + PIN_FIELD_BASE(244, 244, 15, 0x0000, 0x10, 9, 3), + PIN_FIELD_BASE(245, 245, 15, 0x0000, 0x10, 12, 3), + PIN_FIELD_BASE(246, 246, 15, 0x0000, 0x10, 15, 3), + PIN_FIELD_BASE(247, 247, 15, 0x0000, 0x10, 15, 3), + PIN_FIELD_BASE(248, 248, 15, 0x0000, 0x10, 12, 3), + PIN_FIELD_BASE(249, 249, 15, 0x0000, 0x10, 12, 3), + PIN_FIELD_BASE(250, 250, 15, 0x0000, 0x10, 12, 3), + PIN_FIELD_BASE(251, 251, 3, 0x0000, 0x10, 9, 3), + PIN_FIELD_BASE(252, 252, 3, 0x0000, 0x10, 12, 3), + PIN_FIELD_BASE(253, 253, 3, 0x0000, 0x10, 12, 3), + PIN_FIELD_BASE(254, 254, 3, 0x0000, 0x10, 12, 3), + PIN_FIELD_BASE(255, 255, 3, 0x0000, 0x10, 12, 3), + PIN_FIELD_BASE(256, 256, 3, 0x0000, 0x10, 15, 3), + PIN_FIELD_BASE(257, 257, 3, 0x0000, 0x10, 15, 3), + PIN_FIELD_BASE(258, 258, 3, 0x0000, 0x10, 15, 3), + PIN_FIELD_BASE(259, 259, 14, 0x0010, 0x10, 9, 3), + PIN_FIELD_BASE(260, 260, 14, 0x0010, 0x10, 12, 3), + PIN_FIELD_BASE(261, 261, 14, 0x0010, 0x10, 15, 3), + PIN_FIELD_BASE(262, 262, 14, 0x0010, 0x10, 18, 3), + PIN_FIELD_BASE(263, 263, 14, 0x0010, 0x10, 21, 3), + PIN_FIELD_BASE(264, 264, 14, 0x0010, 0x10, 24, 3), + PIN_FIELD_BASE(265, 265, 14, 0x0010, 0x10, 27, 3), + PIN_FIELD_BASE(266, 266, 14, 0x0020, 0x10, 0, 3), + PIN_FIELD_BASE(267, 267, 15, 0x0000, 0x10, 24, 3), + 
PIN_FIELD_BASE(268, 268, 15, 0x0000, 0x10, 27, 3), + PIN_FIELD_BASE(269, 269, 15, 0x0000, 0x10, 18, 3), + PIN_FIELD_BASE(270, 270, 15, 0x0000, 0x10, 21, 3), +}; + +static const struct mtk_pin_field_calc mt8196_pin_drv_adv_range[] = { + PIN_FIELD_BASE(46, 46, 8, 0x0030, 0x10, 0, 3), + PIN_FIELD_BASE(47, 47, 8, 0x0030, 0x10, 9, 3), + PIN_FIELD_BASE(48, 48, 8, 0x0030, 0x10, 3, 3), + PIN_FIELD_BASE(49, 49, 8, 0x0030, 0x10, 12, 3), + PIN_FIELD_BASE(50, 50, 8, 0x0030, 0x10, 6, 3), + PIN_FIELD_BASE(51, 51, 8, 0x0030, 0x10, 15, 3), + PIN_FIELD_BASE(52, 52, 9, 0x0030, 0x10, 0, 3), + PIN_FIELD_BASE(53, 53, 9, 0x0030, 0x10, 3, 3), + PIN_FIELD_BASE(75, 75, 10, 0x0020, 0x10, 0, 5), + PIN_FIELD_BASE(76, 76, 10, 0x0020, 0x10, 5, 5), + PIN_FIELD_BASE(77, 77, 10, 0x0020, 0x10, 10, 5), + PIN_FIELD_BASE(78, 78, 10, 0x0020, 0x10, 15, 5), + PIN_FIELD_BASE(99, 99, 12, 0x0020, 0x10, 0, 3), + PIN_FIELD_BASE(100, 100, 12, 0x0020, 0x10, 9, 3), + PIN_FIELD_BASE(101, 101, 12, 0x0020, 0x10, 3, 3), + PIN_FIELD_BASE(102, 102, 12, 0x0020, 0x10, 12, 3), + PIN_FIELD_BASE(104, 104, 12, 0x0020, 0x10, 6, 3), + PIN_FIELD_BASE(105, 105, 12, 0x0020, 0x10, 15, 3), + PIN_FIELD_BASE(123, 123, 6, 0x0020, 0x10, 0, 3), + PIN_FIELD_BASE(124, 124, 6, 0x0020, 0x10, 3, 3), + PIN_FIELD_BASE(164, 164, 3, 0x0020, 0x10, 0, 3), + PIN_FIELD_BASE(165, 165, 3, 0x0020, 0x10, 6, 3), + PIN_FIELD_BASE(166, 166, 3, 0x0020, 0x10, 3, 3), + PIN_FIELD_BASE(167, 167, 3, 0x0020, 0x10, 9, 3), + PIN_FIELD_BASE(168, 168, 3, 0x0020, 0x10, 12, 3), + PIN_FIELD_BASE(170, 170, 3, 0x0020, 0x10, 15, 3), + PIN_FIELD_BASE(176, 176, 1, 0x0020, 0x10, 0, 3), + PIN_FIELD_BASE(177, 177, 1, 0x0020, 0x10, 3, 3), + PIN_FIELD_BASE(188, 188, 13, 0x0040, 0x10, 0, 3), + PIN_FIELD_BASE(189, 189, 13, 0x0040, 0x10, 15, 3), + PIN_FIELD_BASE(190, 190, 13, 0x0040, 0x10, 3, 3), + PIN_FIELD_BASE(191, 191, 13, 0x0040, 0x10, 18, 3), + PIN_FIELD_BASE(194, 194, 13, 0x0040, 0x10, 6, 3), + PIN_FIELD_BASE(195, 195, 13, 0x0040, 0x10, 21, 3), + PIN_FIELD_BASE(198, 198, 13, 0x0040, 0x10, 9, 3), + PIN_FIELD_BASE(199, 199, 13, 0x0040, 0x10, 24, 3), + PIN_FIELD_BASE(200, 200, 13, 0x0050, 0x10, 0, 3), + PIN_FIELD_BASE(201, 201, 13, 0x0050, 0x10, 9, 3), + PIN_FIELD_BASE(202, 202, 13, 0x0040, 0x10, 12, 3), + PIN_FIELD_BASE(203, 203, 13, 0x0040, 0x10, 27, 3), + PIN_FIELD_BASE(214, 214, 13, 0x0050, 0x10, 3, 3), + PIN_FIELD_BASE(215, 215, 13, 0x0050, 0x10, 6, 3), +}; + +static const struct mtk_pin_field_calc mt8196_pin_rsel_range[] = { + PIN_FIELD_BASE(46, 46, 8, 0x00c0, 0x10, 0, 3), + PIN_FIELD_BASE(47, 47, 8, 0x00c0, 0x10, 9, 3), + PIN_FIELD_BASE(48, 48, 8, 0x00c0, 0x10, 3, 3), + PIN_FIELD_BASE(49, 49, 8, 0x00c0, 0x10, 12, 3), + PIN_FIELD_BASE(50, 50, 8, 0x00c0, 0x10, 6, 3), + PIN_FIELD_BASE(51, 51, 8, 0x00c0, 0x10, 15, 3), + PIN_FIELD_BASE(52, 52, 9, 0x0110, 0x10, 0, 3), + PIN_FIELD_BASE(53, 53, 9, 0x0110, 0x10, 3, 3), + PIN_FIELD_BASE(99, 99, 12, 0x00b0, 0x10, 0, 3), + PIN_FIELD_BASE(100, 100, 12, 0x00b0, 0x10, 9, 3), + PIN_FIELD_BASE(101, 101, 12, 0x00b0, 0x10, 3, 3), + PIN_FIELD_BASE(102, 102, 12, 0x00b0, 0x10, 12, 3), + PIN_FIELD_BASE(104, 104, 12, 0x00b0, 0x10, 6, 3), + PIN_FIELD_BASE(105, 105, 12, 0x00b0, 0x10, 15, 3), + PIN_FIELD_BASE(123, 123, 6, 0x0100, 0x10, 0, 3), + PIN_FIELD_BASE(124, 124, 6, 0x0100, 0x10, 3, 3), + PIN_FIELD_BASE(164, 164, 3, 0x00b0, 0x10, 0, 3), + PIN_FIELD_BASE(165, 165, 3, 0x00b0, 0x10, 6, 3), + PIN_FIELD_BASE(166, 166, 3, 0x00b0, 0x10, 3, 3), + PIN_FIELD_BASE(167, 167, 3, 0x00b0, 0x10, 9, 3), + PIN_FIELD_BASE(168, 168, 3, 0x00b0, 0x10, 12, 3), + PIN_FIELD_BASE(170, 170, 
3, 0x00b0, 0x10, 15, 3), + PIN_FIELD_BASE(176, 176, 1, 0x00b0, 0x10, 0, 3), + PIN_FIELD_BASE(177, 177, 1, 0x00b0, 0x10, 3, 3), + PIN_FIELD_BASE(188, 188, 13, 0x00f0, 0x10, 0, 3), + PIN_FIELD_BASE(189, 189, 13, 0x00f0, 0x10, 15, 3), + PIN_FIELD_BASE(190, 190, 13, 0x00f0, 0x10, 3, 3), + PIN_FIELD_BASE(191, 191, 13, 0x00f0, 0x10, 18, 3), + PIN_FIELD_BASE(194, 194, 13, 0x00f0, 0x10, 6, 3), + PIN_FIELD_BASE(195, 195, 13, 0x00f0, 0x10, 21, 3), + PIN_FIELD_BASE(198, 198, 13, 0x00f0, 0x10, 9, 3), + PIN_FIELD_BASE(199, 199, 13, 0x00f0, 0x10, 24, 3), + PIN_FIELD_BASE(200, 200, 13, 0x0100, 0x10, 0, 3), + PIN_FIELD_BASE(201, 201, 13, 0x0100, 0x10, 9, 3), + PIN_FIELD_BASE(202, 202, 13, 0x00f0, 0x10, 12, 3), + PIN_FIELD_BASE(203, 203, 13, 0x00f0, 0x10, 27, 3), + PIN_FIELD_BASE(214, 214, 13, 0x0100, 0x10, 3, 3), + PIN_FIELD_BASE(215, 215, 13, 0x0100, 0x10, 6, 3), +}; + +static const struct mtk_pin_rsel mt8196_pin_rsel_val_range[] = { + PIN_RSEL(46, 53, 0x0, 75000, 75000), + PIN_RSEL(46, 53, 0x1, 10000, 5000), + PIN_RSEL(46, 53, 0x2, 5000, 75000), + PIN_RSEL(46, 53, 0x3, 4000, 5000), + PIN_RSEL(46, 53, 0x4, 3000, 75000), + PIN_RSEL(46, 53, 0x5, 2000, 5000), + PIN_RSEL(46, 53, 0x6, 1500, 75000), + PIN_RSEL(46, 53, 0x7, 1000, 5000), + PIN_RSEL(99, 102, 0x0, 75000, 75000), + PIN_RSEL(99, 102, 0x1, 10000, 5000), + PIN_RSEL(99, 102, 0x2, 5000, 75000), + PIN_RSEL(99, 102, 0x3, 4000, 5000), + PIN_RSEL(99, 102, 0x4, 3000, 75000), + PIN_RSEL(99, 102, 0x5, 2000, 5000), + PIN_RSEL(99, 102, 0x6, 1500, 75000), + PIN_RSEL(99, 102, 0x7, 1000, 5000), + PIN_RSEL(104, 105, 0x0, 75000, 75000), + PIN_RSEL(104, 105, 0x1, 10000, 5000), + PIN_RSEL(104, 105, 0x2, 5000, 75000), + PIN_RSEL(104, 105, 0x3, 4000, 5000), + PIN_RSEL(104, 105, 0x4, 3000, 75000), + PIN_RSEL(104, 105, 0x5, 2000, 5000), + PIN_RSEL(104, 105, 0x6, 1500, 75000), + PIN_RSEL(104, 105, 0x7, 1000, 5000), + PIN_RSEL(123, 124, 0x0, 75000, 75000), + PIN_RSEL(123, 124, 0x1, 10000, 5000), + PIN_RSEL(123, 124, 0x2, 5000, 75000), + PIN_RSEL(123, 124, 0x3, 4000, 5000), + PIN_RSEL(123, 124, 0x4, 3000, 75000), + PIN_RSEL(123, 124, 0x5, 2000, 5000), + PIN_RSEL(123, 124, 0x6, 1500, 75000), + PIN_RSEL(123, 124, 0x7, 1000, 5000), + PIN_RSEL(164, 168, 0x0, 75000, 75000), + PIN_RSEL(164, 168, 0x1, 10000, 5000), + PIN_RSEL(164, 168, 0x2, 5000, 75000), + PIN_RSEL(164, 168, 0x3, 4000, 5000), + PIN_RSEL(164, 168, 0x4, 3000, 75000), + PIN_RSEL(164, 168, 0x5, 2000, 5000), + PIN_RSEL(164, 168, 0x6, 1500, 75000), + PIN_RSEL(164, 168, 0x7, 1000, 5000), + PIN_RSEL(170, 170, 0x0, 75000, 75000), + PIN_RSEL(170, 170, 0x1, 10000, 5000), + PIN_RSEL(170, 170, 0x2, 5000, 75000), + PIN_RSEL(170, 170, 0x3, 4000, 5000), + PIN_RSEL(170, 170, 0x4, 3000, 75000), + PIN_RSEL(170, 170, 0x5, 2000, 5000), + PIN_RSEL(170, 170, 0x6, 1500, 75000), + PIN_RSEL(170, 170, 0x7, 1000, 5000), + PIN_RSEL(176, 177, 0x0, 75000, 75000), + PIN_RSEL(176, 177, 0x1, 10000, 5000), + PIN_RSEL(176, 177, 0x2, 5000, 75000), + PIN_RSEL(176, 177, 0x3, 4000, 5000), + PIN_RSEL(176, 177, 0x4, 3000, 75000), + PIN_RSEL(176, 177, 0x5, 2000, 5000), + PIN_RSEL(176, 177, 0x6, 1500, 75000), + PIN_RSEL(176, 177, 0x7, 1000, 5000), + PIN_RSEL(188, 191, 0x0, 75000, 75000), + PIN_RSEL(188, 191, 0x1, 10000, 5000), + PIN_RSEL(188, 191, 0x2, 5000, 75000), + PIN_RSEL(188, 191, 0x3, 4000, 5000), + PIN_RSEL(188, 191, 0x4, 3000, 75000), + PIN_RSEL(188, 191, 0x5, 2000, 5000), + PIN_RSEL(188, 191, 0x6, 1500, 75000), + PIN_RSEL(188, 191, 0x7, 1000, 5000), + PIN_RSEL(194, 195, 0x0, 75000, 75000), + PIN_RSEL(194, 195, 0x1, 10000, 5000), + PIN_RSEL(194, 195, 
0x2, 5000, 75000), + PIN_RSEL(194, 195, 0x3, 4000, 5000), + PIN_RSEL(194, 195, 0x4, 3000, 75000), + PIN_RSEL(194, 195, 0x5, 2000, 5000), + PIN_RSEL(194, 195, 0x6, 1500, 75000), + PIN_RSEL(194, 195, 0x7, 1000, 5000), + PIN_RSEL(198, 203, 0x0, 75000, 75000), + PIN_RSEL(198, 203, 0x1, 10000, 5000), + PIN_RSEL(198, 203, 0x2, 5000, 75000), + PIN_RSEL(198, 203, 0x3, 4000, 5000), + PIN_RSEL(198, 203, 0x4, 3000, 75000), + PIN_RSEL(198, 203, 0x5, 2000, 5000), + PIN_RSEL(198, 203, 0x6, 1500, 75000), + PIN_RSEL(198, 203, 0x7, 1000, 5000), + PIN_RSEL(214, 215, 0x0, 75000, 75000), + PIN_RSEL(214, 215, 0x1, 10000, 5000), + PIN_RSEL(214, 215, 0x2, 5000, 75000), + PIN_RSEL(214, 215, 0x3, 4000, 5000), + PIN_RSEL(214, 215, 0x4, 3000, 75000), + PIN_RSEL(214, 215, 0x5, 2000, 5000), + PIN_RSEL(214, 215, 0x6, 1500, 75000), + PIN_RSEL(214, 215, 0x7, 1000, 5000), +}; + +static const unsigned int mt8196_pull_type[] = { + MTK_PULL_PU_PD_TYPE,/*0*/ MTK_PULL_PU_PD_TYPE,/*1*/ + MTK_PULL_PU_PD_TYPE,/*2*/ MTK_PULL_PU_PD_TYPE,/*3*/ + MTK_PULL_PU_PD_TYPE,/*4*/ MTK_PULL_PU_PD_TYPE,/*5*/ + MTK_PULL_PU_PD_TYPE,/*6*/ MTK_PULL_PU_PD_TYPE,/*7*/ + MTK_PULL_PU_PD_TYPE,/*8*/ MTK_PULL_PU_PD_TYPE,/*9*/ + MTK_PULL_PU_PD_TYPE,/*10*/ MTK_PULL_PU_PD_TYPE,/*11*/ + MTK_PULL_PU_PD_TYPE,/*12*/ MTK_PULL_PU_PD_TYPE,/*13*/ + MTK_PULL_PU_PD_TYPE,/*14*/ MTK_PULL_PU_PD_TYPE,/*15*/ + MTK_PULL_PU_PD_TYPE,/*16*/ MTK_PULL_PU_PD_TYPE,/*17*/ + MTK_PULL_PU_PD_TYPE,/*18*/ MTK_PULL_PU_PD_TYPE,/*19*/ + MTK_PULL_PU_PD_TYPE,/*20*/ MTK_PULL_PU_PD_TYPE,/*21*/ + MTK_PULL_PU_PD_TYPE,/*22*/ MTK_PULL_PU_PD_TYPE,/*23*/ + MTK_PULL_PU_PD_TYPE,/*24*/ MTK_PULL_PU_PD_TYPE,/*25*/ + MTK_PULL_PU_PD_TYPE,/*26*/ MTK_PULL_PU_PD_TYPE,/*27*/ + MTK_PULL_PU_PD_TYPE,/*28*/ MTK_PULL_PU_PD_TYPE,/*29*/ + MTK_PULL_PU_PD_TYPE,/*30*/ MTK_PULL_PU_PD_TYPE,/*31*/ + MTK_PULL_PU_PD_TYPE,/*32*/ MTK_PULL_PU_PD_TYPE,/*33*/ + MTK_PULL_PU_PD_TYPE,/*34*/ MTK_PULL_PU_PD_TYPE,/*35*/ + MTK_PULL_PU_PD_TYPE,/*36*/ MTK_PULL_PU_PD_TYPE,/*37*/ + MTK_PULL_PU_PD_TYPE,/*38*/ MTK_PULL_PU_PD_TYPE,/*39*/ + MTK_PULL_PU_PD_TYPE,/*40*/ MTK_PULL_PU_PD_TYPE,/*41*/ + MTK_PULL_PU_PD_TYPE,/*42*/ MTK_PULL_PU_PD_TYPE,/*43*/ + MTK_PULL_PU_PD_TYPE,/*44*/ MTK_PULL_PU_PD_TYPE,/*45*/ + MTK_PULL_PU_PD_RSEL_TYPE,/*46*/ MTK_PULL_PU_PD_RSEL_TYPE,/*47*/ + MTK_PULL_PU_PD_RSEL_TYPE,/*48*/ MTK_PULL_PU_PD_RSEL_TYPE,/*49*/ + MTK_PULL_PU_PD_RSEL_TYPE,/*50*/ MTK_PULL_PU_PD_RSEL_TYPE,/*51*/ + MTK_PULL_PU_PD_RSEL_TYPE,/*52*/ MTK_PULL_PU_PD_RSEL_TYPE,/*53*/ + MTK_PULL_PU_PD_TYPE,/*54*/ MTK_PULL_PU_PD_TYPE,/*55*/ + MTK_PULL_PU_PD_TYPE,/*56*/ MTK_PULL_PU_PD_TYPE,/*57*/ + MTK_PULL_PU_PD_TYPE,/*58*/ MTK_PULL_PU_PD_TYPE,/*59*/ + MTK_PULL_PUPD_R1R0_TYPE,/*60*/ MTK_PULL_PU_PD_TYPE,/*61*/ + MTK_PULL_PU_PD_TYPE,/*62*/ MTK_PULL_PU_PD_TYPE,/*63*/ + MTK_PULL_PU_PD_TYPE,/*64*/ MTK_PULL_PU_PD_TYPE,/*65*/ + MTK_PULL_PU_PD_TYPE,/*66*/ MTK_PULL_PU_PD_TYPE,/*67*/ + MTK_PULL_PU_PD_TYPE,/*68*/ MTK_PULL_PU_PD_TYPE,/*69*/ + MTK_PULL_PU_PD_TYPE,/*70*/ MTK_PULL_PU_PD_TYPE,/*71*/ + MTK_PULL_PU_PD_TYPE,/*72*/ MTK_PULL_PU_PD_TYPE,/*73*/ + MTK_PULL_PU_PD_TYPE,/*74*/ MTK_PULL_PU_PD_TYPE,/*75*/ + MTK_PULL_PU_PD_TYPE,/*76*/ MTK_PULL_PU_PD_TYPE,/*77*/ + MTK_PULL_PU_PD_TYPE,/*78*/ MTK_PULL_PU_PD_TYPE,/*79*/ + MTK_PULL_PU_PD_TYPE,/*80*/ MTK_PULL_PU_PD_TYPE,/*81*/ + MTK_PULL_PU_PD_TYPE,/*82*/ MTK_PULL_PU_PD_TYPE,/*83*/ + MTK_PULL_PU_PD_TYPE,/*84*/ MTK_PULL_PU_PD_TYPE,/*85*/ + MTK_PULL_PU_PD_TYPE,/*86*/ MTK_PULL_PU_PD_TYPE,/*87*/ + MTK_PULL_PU_PD_TYPE,/*88*/ MTK_PULL_PU_PD_TYPE,/*89*/ + MTK_PULL_PU_PD_TYPE,/*90*/ MTK_PULL_PU_PD_TYPE,/*91*/ + MTK_PULL_PU_PD_TYPE,/*92*/ 
MTK_PULL_PU_PD_TYPE,/*93*/ + MTK_PULL_PU_PD_TYPE,/*94*/ MTK_PULL_PU_PD_TYPE,/*95*/ + MTK_PULL_PU_PD_TYPE,/*96*/ MTK_PULL_PU_PD_TYPE,/*97*/ + MTK_PULL_PU_PD_TYPE,/*98*/ MTK_PULL_PU_PD_RSEL_TYPE,/*99*/ + MTK_PULL_PU_PD_RSEL_TYPE,/*100*/ MTK_PULL_PU_PD_RSEL_TYPE,/*101*/ + MTK_PULL_PU_PD_RSEL_TYPE,/*102*/ MTK_PULL_PU_PD_TYPE,/*103*/ + MTK_PULL_PU_PD_RSEL_TYPE,/*104*/ MTK_PULL_PU_PD_RSEL_TYPE,/*105*/ + MTK_PULL_PU_PD_TYPE,/*106*/ MTK_PULL_PU_PD_TYPE,/*107*/ + MTK_PULL_PU_PD_TYPE,/*108*/ MTK_PULL_PU_PD_TYPE,/*109*/ + MTK_PULL_PU_PD_TYPE,/*110*/ MTK_PULL_PU_PD_TYPE,/*111*/ + MTK_PULL_PU_PD_TYPE,/*112*/ MTK_PULL_PU_PD_TYPE,/*113*/ + MTK_PULL_PU_PD_TYPE,/*114*/ MTK_PULL_PU_PD_TYPE,/*115*/ + MTK_PULL_PU_PD_TYPE,/*116*/ MTK_PULL_PU_PD_TYPE,/*117*/ + MTK_PULL_PU_PD_TYPE,/*118*/ MTK_PULL_PU_PD_TYPE,/*119*/ + MTK_PULL_PU_PD_TYPE,/*120*/ MTK_PULL_PU_PD_TYPE,/*121*/ + MTK_PULL_PU_PD_TYPE,/*122*/ MTK_PULL_PU_PD_RSEL_TYPE,/*123*/ + MTK_PULL_PU_PD_RSEL_TYPE,/*124*/ MTK_PULL_PUPD_R1R0_TYPE,/*125*/ + MTK_PULL_PUPD_R1R0_TYPE,/*126*/ MTK_PULL_PUPD_R1R0_TYPE,/*127*/ + MTK_PULL_PUPD_R1R0_TYPE,/*128*/ MTK_PULL_PUPD_R1R0_TYPE,/*129*/ + MTK_PULL_PUPD_R1R0_TYPE,/*130*/ MTK_PULL_PUPD_R1R0_TYPE,/*131*/ + MTK_PULL_PUPD_R1R0_TYPE,/*132*/ MTK_PULL_PUPD_R1R0_TYPE,/*133*/ + MTK_PULL_PUPD_R1R0_TYPE,/*134*/ MTK_PULL_PUPD_R1R0_TYPE,/*135*/ + MTK_PULL_PUPD_R1R0_TYPE,/*136*/ MTK_PULL_PUPD_R1R0_TYPE,/*137*/ + MTK_PULL_PUPD_R1R0_TYPE,/*138*/ MTK_PULL_PUPD_R1R0_TYPE,/*139*/ + MTK_PULL_PUPD_R1R0_TYPE,/*140*/ MTK_PULL_PUPD_R1R0_TYPE,/*141*/ + MTK_PULL_PUPD_R1R0_TYPE,/*142*/ MTK_PULL_PUPD_R1R0_TYPE,/*143*/ + MTK_PULL_PUPD_R1R0_TYPE,/*144*/ MTK_PULL_PUPD_R1R0_TYPE,/*145*/ + MTK_PULL_PUPD_R1R0_TYPE,/*146*/ MTK_PULL_PUPD_R1R0_TYPE,/*147*/ + MTK_PULL_PUPD_R1R0_TYPE,/*148*/ MTK_PULL_PUPD_R1R0_TYPE,/*149*/ + MTK_PULL_PUPD_R1R0_TYPE,/*150*/ MTK_PULL_PUPD_R1R0_TYPE,/*151*/ + MTK_PULL_PUPD_R1R0_TYPE,/*152*/ MTK_PULL_PUPD_R1R0_TYPE,/*153*/ + MTK_PULL_PUPD_R1R0_TYPE,/*154*/ MTK_PULL_PUPD_R1R0_TYPE,/*155*/ + MTK_PULL_PUPD_R1R0_TYPE,/*156*/ MTK_PULL_PU_PD_TYPE,/*157*/ + MTK_PULL_PU_PD_TYPE,/*158*/ MTK_PULL_PU_PD_TYPE,/*159*/ + MTK_PULL_PU_PD_TYPE,/*160*/ MTK_PULL_PU_PD_TYPE,/*161*/ + MTK_PULL_PU_PD_TYPE,/*162*/ MTK_PULL_PU_PD_TYPE,/*163*/ + MTK_PULL_PU_PD_RSEL_TYPE,/*164*/ MTK_PULL_PU_PD_RSEL_TYPE,/*165*/ + MTK_PULL_PU_PD_RSEL_TYPE,/*166*/ MTK_PULL_PU_PD_RSEL_TYPE,/*167*/ + MTK_PULL_PU_PD_RSEL_TYPE,/*168*/ MTK_PULL_PU_PD_TYPE,/*169*/ + MTK_PULL_PU_PD_RSEL_TYPE,/*170*/ MTK_PULL_PU_PD_TYPE,/*171*/ + MTK_PULL_PU_PD_TYPE,/*172*/ MTK_PULL_PU_PD_TYPE,/*173*/ + MTK_PULL_PU_PD_TYPE,/*174*/ MTK_PULL_PU_PD_TYPE,/*175*/ + MTK_PULL_PU_PD_RSEL_TYPE,/*176*/ MTK_PULL_PU_PD_RSEL_TYPE,/*177*/ + MTK_PULL_PU_PD_TYPE,/*178*/ MTK_PULL_PU_PD_TYPE,/*179*/ + MTK_PULL_PU_PD_TYPE,/*180*/ MTK_PULL_PU_PD_TYPE,/*181*/ + MTK_PULL_PU_PD_TYPE,/*182*/ MTK_PULL_PU_PD_TYPE,/*183*/ + MTK_PULL_PU_PD_TYPE,/*184*/ MTK_PULL_PU_PD_TYPE,/*185*/ + MTK_PULL_PU_PD_TYPE,/*186*/ MTK_PULL_PU_PD_TYPE,/*187*/ + MTK_PULL_PU_PD_RSEL_TYPE,/*188*/ MTK_PULL_PU_PD_RSEL_TYPE,/*189*/ + MTK_PULL_PU_PD_RSEL_TYPE,/*190*/ MTK_PULL_PU_PD_RSEL_TYPE,/*191*/ + MTK_PULL_PU_PD_TYPE,/*192*/ MTK_PULL_PU_PD_TYPE,/*193*/ + MTK_PULL_PU_PD_RSEL_TYPE,/*194*/ MTK_PULL_PU_PD_RSEL_TYPE,/*195*/ + MTK_PULL_PU_PD_TYPE,/*196*/ MTK_PULL_PU_PD_TYPE,/*197*/ + MTK_PULL_PU_PD_RSEL_TYPE,/*198*/ MTK_PULL_PU_PD_RSEL_TYPE,/*199*/ + MTK_PULL_PU_PD_RSEL_TYPE,/*200*/ MTK_PULL_PU_PD_RSEL_TYPE,/*201*/ + MTK_PULL_PU_PD_RSEL_TYPE,/*202*/ MTK_PULL_PU_PD_RSEL_TYPE,/*203*/ + MTK_PULL_PU_PD_TYPE,/*204*/ MTK_PULL_PU_PD_TYPE,/*205*/ + 
MTK_PULL_PU_PD_TYPE,/*206*/ MTK_PULL_PU_PD_TYPE,/*207*/ + MTK_PULL_PU_PD_TYPE,/*208*/ MTK_PULL_PU_PD_TYPE,/*209*/ + MTK_PULL_PU_PD_TYPE,/*210*/ MTK_PULL_PU_PD_TYPE,/*211*/ + MTK_PULL_PU_PD_TYPE,/*212*/ MTK_PULL_PU_PD_TYPE,/*213*/ + MTK_PULL_PU_PD_RSEL_TYPE,/*214*/ MTK_PULL_PU_PD_RSEL_TYPE,/*215*/ + MTK_PULL_PU_PD_TYPE,/*216*/ MTK_PULL_PUPD_R1R0_TYPE,/*217*/ + MTK_PULL_PUPD_R1R0_TYPE,/*218*/ MTK_PULL_PUPD_R1R0_TYPE,/*219*/ + MTK_PULL_PU_PD_TYPE,/*220*/ MTK_PULL_PU_PD_TYPE,/*221*/ + MTK_PULL_PU_PD_TYPE,/*222*/ MTK_PULL_PU_PD_TYPE,/*223*/ + MTK_PULL_PUPD_R1R0_TYPE,/*224*/ MTK_PULL_PUPD_R1R0_TYPE,/*225*/ + MTK_PULL_PUPD_R1R0_TYPE,/*226*/ MTK_PULL_PUPD_R1R0_TYPE,/*227*/ + MTK_PULL_PUPD_R1R0_TYPE,/*228*/ MTK_PULL_PUPD_R1R0_TYPE,/*229*/ + MTK_PULL_PU_PD_TYPE,/*230*/ MTK_PULL_PU_PD_TYPE,/*231*/ + MTK_PULL_PU_PD_TYPE,/*232*/ MTK_PULL_PU_PD_TYPE,/*233*/ + MTK_PULL_PU_PD_TYPE,/*234*/ MTK_PULL_PU_PD_TYPE,/*235*/ + MTK_PULL_PU_PD_TYPE,/*236*/ MTK_PULL_PU_PD_TYPE,/*237*/ + MTK_PULL_PU_PD_TYPE,/*238*/ MTK_PULL_PU_PD_TYPE,/*239*/ + MTK_PULL_PU_PD_TYPE,/*240*/ MTK_PULL_PU_PD_TYPE,/*241*/ + MTK_PULL_PU_PD_TYPE,/*242*/ MTK_PULL_PU_PD_TYPE,/*243*/ + MTK_PULL_PU_PD_TYPE,/*244*/ MTK_PULL_PU_PD_TYPE,/*245*/ + MTK_PULL_PU_PD_TYPE,/*246*/ MTK_PULL_PU_PD_TYPE,/*247*/ + MTK_PULL_PU_PD_TYPE,/*248*/ MTK_PULL_PU_PD_TYPE,/*249*/ + MTK_PULL_PU_PD_TYPE,/*250*/ MTK_PULL_PU_PD_TYPE,/*251*/ + MTK_PULL_PU_PD_TYPE,/*252*/ MTK_PULL_PU_PD_TYPE,/*253*/ + MTK_PULL_PU_PD_TYPE,/*254*/ MTK_PULL_PU_PD_TYPE,/*255*/ + MTK_PULL_PU_PD_TYPE,/*256*/ MTK_PULL_PU_PD_TYPE,/*257*/ + MTK_PULL_PU_PD_TYPE,/*258*/ MTK_PULL_PUPD_R1R0_TYPE,/*259*/ + MTK_PULL_PUPD_R1R0_TYPE,/*260*/ MTK_PULL_PUPD_R1R0_TYPE,/*261*/ + MTK_PULL_PUPD_R1R0_TYPE,/*262*/ MTK_PULL_PUPD_R1R0_TYPE,/*263*/ + MTK_PULL_PUPD_R1R0_TYPE,/*264*/ MTK_PULL_PUPD_R1R0_TYPE,/*265*/ + MTK_PULL_PUPD_R1R0_TYPE,/*266*/ MTK_PULL_PUPD_R1R0_TYPE,/*267*/ + MTK_PULL_PUPD_R1R0_TYPE,/*268*/ MTK_PULL_PUPD_R1R0_TYPE,/*269*/ + MTK_PULL_PUPD_R1R0_TYPE,/*270*/ +}; + +static const struct mtk_pin_reg_calc mt8196_reg_cals[PINCTRL_PIN_REG_MAX] = { + [PINCTRL_PIN_REG_MODE] = MTK_RANGE(mt8196_pin_mode_range), + [PINCTRL_PIN_REG_DIR] = MTK_RANGE(mt8196_pin_dir_range), + [PINCTRL_PIN_REG_DI] = MTK_RANGE(mt8196_pin_di_range), + [PINCTRL_PIN_REG_DO] = MTK_RANGE(mt8196_pin_do_range), + [PINCTRL_PIN_REG_SMT] = MTK_RANGE(mt8196_pin_smt_range), + [PINCTRL_PIN_REG_IES] = MTK_RANGE(mt8196_pin_ies_range), + [PINCTRL_PIN_REG_PUPD] = MTK_RANGE(mt8196_pin_pupd_range), + [PINCTRL_PIN_REG_R0] = MTK_RANGE(mt8196_pin_r0_range), + [PINCTRL_PIN_REG_R1] = MTK_RANGE(mt8196_pin_r1_range), + [PINCTRL_PIN_REG_PU] = MTK_RANGE(mt8196_pin_pu_range), + [PINCTRL_PIN_REG_PD] = MTK_RANGE(mt8196_pin_pd_range), + [PINCTRL_PIN_REG_DRV] = MTK_RANGE(mt8196_pin_drv_range), + [PINCTRL_PIN_REG_DRV_ADV] = MTK_RANGE(mt8196_pin_drv_adv_range), + [PINCTRL_PIN_REG_RSEL] = MTK_RANGE(mt8196_pin_rsel_range), +}; + +static const char * const mt8196_pinctrl_register_base_names[] = { + "iocfg0", "iocfg_rt", "iocfg_rm1", "iocfg_rm2", + "iocfg_rb", "iocfg_bm1", "iocfg_bm2", "iocfg_bm3", + "iocfg_lt", "iocfg_lm1", "iocfg_lm2", "iocfg_lb1", + "iocfg_lb2", "iocfg_tm1", "iocfg_tm2", "iocfg_tm3", +}; + +static const struct mtk_eint_hw mt8196_eint_hw = { + .port_mask = 0xf, + .ports = 3, + .ap_num = 293, + .db_cnt = 32, + .db_time = debounce_time_mt6765, +}; + +static const struct mtk_pin_soc mt8196_data = { + .reg_cal = mt8196_reg_cals, + .pins = mtk_pins_mt8196, + .npins = ARRAY_SIZE(mtk_pins_mt8196), + .ngrps = ARRAY_SIZE(mtk_pins_mt8196), + .eint_hw = 
&mt8196_eint_hw, + .eint_pin = eint_pins_mt8196, + .nfuncs = 8, + .gpio_m = 0, + .base_names = mt8196_pinctrl_register_base_names, + .nbase_names = ARRAY_SIZE(mt8196_pinctrl_register_base_names), + .pull_type = mt8196_pull_type, + .pin_rsel = mt8196_pin_rsel_val_range, + .npin_rsel = ARRAY_SIZE(mt8196_pin_rsel_val_range), + .bias_set_combo = mtk_pinconf_bias_set_combo, + .bias_get_combo = mtk_pinconf_bias_get_combo, + .drive_set = mtk_pinconf_drive_set_rev1, + .drive_get = mtk_pinconf_drive_get_rev1, + .adv_drive_get = mtk_pinconf_adv_drive_get_raw, + .adv_drive_set = mtk_pinconf_adv_drive_set_raw, +}; + +static const struct of_device_id mt8196_pinctrl_of_match[] = { + { .compatible = "mediatek,mt8196-pinctrl", .data = &mt8196_data }, + { /* sentinel */ } +}; + +static struct platform_driver mt8196_pinctrl_driver = { + .driver = { + .name = "mt8196-pinctrl", + .of_match_table = mt8196_pinctrl_of_match, + .pm = pm_sleep_ptr(&mtk_paris_pinctrl_pm_ops), + }, + .probe = mtk_paris_pinctrl_probe, +}; + +static int __init mt8196_pinctrl_init(void) +{ + return platform_driver_register(&mt8196_pinctrl_driver); +} +arch_initcall(mt8196_pinctrl_init); + +MODULE_DESCRIPTION("MediaTek MT8196 Pinctrl Driver"); diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c index d1556b75d9ef..4918d38abfc2 100644 --- a/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c +++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c @@ -381,10 +381,13 @@ int mtk_build_eint(struct mtk_pinctrl *hw, struct platform_device *pdev) return -ENOMEM; count_reg_names = of_property_count_strings(np, "reg-names"); - if (count_reg_names < hw->soc->nbase_names) + if (count_reg_names < 0) + return -EINVAL; + + hw->eint->nbase = count_reg_names - (int)hw->soc->nbase_names; + if (hw->eint->nbase <= 0) return -EINVAL; - hw->eint->nbase = count_reg_names - hw->soc->nbase_names; hw->eint->base = devm_kmalloc_array(&pdev->dev, hw->eint->nbase, sizeof(*hw->eint->base), GFP_KERNEL | __GFP_ZERO); if (!hw->eint->base) { @@ -416,7 +419,7 @@ int mtk_build_eint(struct mtk_pinctrl *hw, struct platform_device *pdev) hw->eint->pctl = hw; hw->eint->gpio_xlate = &mtk_eint_xt; - ret = mtk_eint_do_init(hw->eint); + ret = mtk_eint_do_init(hw->eint, hw->soc->eint_pin); if (ret) goto err_free_eint; diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c index 8596f3541265..a4cb6d511fcd 100644 --- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c +++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c @@ -86,7 +86,7 @@ static int mtk_pmx_gpio_set_direction(struct pinctrl_dev *pctldev, return 0; } -static void mtk_gpio_set(struct gpio_chip *chip, unsigned offset, int value) +static int mtk_gpio_set(struct gpio_chip *chip, unsigned int offset, int value) { unsigned int reg_addr; unsigned int bit; @@ -100,7 +100,7 @@ static void mtk_gpio_set(struct gpio_chip *chip, unsigned offset, int value) else reg_addr = CLR_ADDR(reg_addr, pctl); - regmap_write(mtk_get_regmap(pctl, offset), reg_addr, bit); + return regmap_write(mtk_get_regmap(pctl, offset), reg_addr, bit); } static int mtk_pconf_set_ies_smt(struct mtk_pinctrl *pctl, unsigned pin, @@ -809,7 +809,12 @@ static const struct pinmux_ops mtk_pmx_ops = { static int mtk_gpio_direction_output(struct gpio_chip *chip, unsigned offset, int value) { - mtk_gpio_set(chip, offset, value); + int ret; + + ret = mtk_gpio_set(chip, offset, value); + if (ret) + return ret; + return pinctrl_gpio_direction_output(chip, offset); } @@ 
-893,7 +898,7 @@ static const struct gpio_chip mtk_gpio_chip = { .direction_input = pinctrl_gpio_direction_input, .direction_output = mtk_gpio_direction_output, .get = mtk_gpio_get, - .set = mtk_gpio_set, + .set_rv = mtk_gpio_set, .to_irq = mtk_gpio_to_irq, .set_config = mtk_gpio_set_config, }; @@ -1039,7 +1044,7 @@ static int mtk_eint_init(struct mtk_pinctrl *pctl, struct platform_device *pdev) pctl->eint->pctl = pctl; pctl->eint->gpio_xlate = &mtk_eint_xt; - return mtk_eint_do_init(pctl->eint); + return mtk_eint_do_init(pctl->eint, NULL); } /* This is used as a common probe function */ diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-mt6893.h b/drivers/pinctrl/mediatek/pinctrl-mtk-mt6893.h new file mode 100644 index 000000000000..0d3bb16411f8 --- /dev/null +++ b/drivers/pinctrl/mediatek/pinctrl-mtk-mt6893.h @@ -0,0 +1,2283 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2019 MediaTek Inc. + * Copyright (C) 2024 Collabora Ltd. + * + * Author: Andy Teng <andy.teng@mediatek.com> + */ + +#ifndef __PINCTRL_MTK_MT6893_H +#define __PINCTRL_MTK_MT6893_H + +#include "pinctrl-paris.h" + +static const struct mtk_pin_desc mtk_pins_mt6893[] = { + MTK_PIN( + 0, "GPIO0", + MTK_EINT_FUNCTION(0, 0), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO0"), + MTK_FUNCTION(1, "SPI6_CLK"), + MTK_FUNCTION(2, "I2S5_MCK"), + MTK_FUNCTION(3, "PWM_0"), + MTK_FUNCTION(4, "MD_INT0"), + MTK_FUNCTION(5, "TP_GPIO0_AO") + ), + MTK_PIN( + 1, "GPIO1", + MTK_EINT_FUNCTION(0, 1), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO1"), + MTK_FUNCTION(1, "SPI6_CSB"), + MTK_FUNCTION(2, "I2S5_BCK"), + MTK_FUNCTION(3, "PWM_1"), + MTK_FUNCTION(4, "MD_INT1_C2K_UIM0_HOT_PLUG"), + MTK_FUNCTION(5, "TP_GPIO1_AO") + ), + MTK_PIN( + 2, "GPIO2", + MTK_EINT_FUNCTION(0, 2), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO2"), + MTK_FUNCTION(1, "SPI6_MI"), + MTK_FUNCTION(2, "I2S5_LRCK"), + MTK_FUNCTION(3, "PWM_2"), + MTK_FUNCTION(4, "MD_INT2_C2K_UIM1_HOT_PLUG"), + MTK_FUNCTION(5, "TP_GPIO2_AO") + ), + MTK_PIN( + 3, "GPIO3", + MTK_EINT_FUNCTION(0, 3), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO3"), + MTK_FUNCTION(1, "SPI6_MO"), + MTK_FUNCTION(2, "I2S5_DO"), + MTK_FUNCTION(3, "PWM_3"), + MTK_FUNCTION(4, "CLKM0"), + MTK_FUNCTION(5, "TP_GPIO3_AO") + ), + MTK_PIN( + 4, "GPIO4", + MTK_EINT_FUNCTION(0, 4), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO4"), + MTK_FUNCTION(1, "SPI7_A_CLK"), + MTK_FUNCTION(2, "I2S2_MCK"), + MTK_FUNCTION(3, "DMIC1_CLK"), + MTK_FUNCTION(4, "PCM1_DI"), + MTK_FUNCTION(5, "TP_GPIO4_AO") + ), + MTK_PIN( + 5, "GPIO5", + MTK_EINT_FUNCTION(0, 5), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO5"), + MTK_FUNCTION(1, "SPI7_A_CSB"), + MTK_FUNCTION(2, "I2S2_BCK"), + MTK_FUNCTION(3, "DMIC1_DAT"), + MTK_FUNCTION(4, "PCM1_CLK"), + MTK_FUNCTION(5, "TP_GPIO5_AO") + ), + MTK_PIN( + 6, "GPIO6", + MTK_EINT_FUNCTION(0, 6), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO6"), + MTK_FUNCTION(1, "SPI7_A_MI"), + MTK_FUNCTION(2, "I2S2_LRCK"), + MTK_FUNCTION(3, "DMIC_CLK"), + MTK_FUNCTION(4, "PCM1_SYNC"), + MTK_FUNCTION(5, "TP_GPIO6_AO"), + MTK_FUNCTION(6, "CONN_TCXOENA_REQ") + ), + MTK_PIN( + 7, "GPIO7", + MTK_EINT_FUNCTION(0, 7), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO7"), + MTK_FUNCTION(1, "SPI7_A_MO"), + MTK_FUNCTION(2, "I2S2_DI"), + MTK_FUNCTION(3, "DMIC_DAT"), + MTK_FUNCTION(4, "PCM1_DO0"), + MTK_FUNCTION(5, "TP_GPIO7_AO"), + MTK_FUNCTION(6, "WIFI_TXD") + ), + MTK_PIN( + 8, "GPIO8", + MTK_EINT_FUNCTION(0, 8), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO8"), + MTK_FUNCTION(1, "SRCLKENAI1"), + MTK_FUNCTION(2, "I2S2_DI2"), + MTK_FUNCTION(3, "KPCOL2"), + MTK_FUNCTION(4, "PCM1_DO1"), + MTK_FUNCTION(5, "CLKM1"), + 
MTK_FUNCTION(6, "CONN_BT_TXD") + ), + MTK_PIN( + 9, "GPIO9", + MTK_EINT_FUNCTION(0, 9), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO9"), + MTK_FUNCTION(1, "SRCLKENAI0"), + MTK_FUNCTION(2, "DVFSRC_EXT_REQ"), + MTK_FUNCTION(3, "KPROW2"), + MTK_FUNCTION(4, "PCM1_DO2"), + MTK_FUNCTION(5, "CLKM3"), + MTK_FUNCTION(6, "CMMCLK4") + ), + MTK_PIN( + 10, "GPIO10", + MTK_EINT_FUNCTION(0, 10), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO10"), + MTK_FUNCTION(1, "MSDC1_CLK_A"), + MTK_FUNCTION(2, "SPI4_B_CLK"), + MTK_FUNCTION(3, "I2S8_MCK"), + MTK_FUNCTION(4, "DSI1_TE"), + MTK_FUNCTION(5, "MD_INT0"), + MTK_FUNCTION(6, "TP_GPIO0_AO") + ), + MTK_PIN( + 11, "GPIO11", + MTK_EINT_FUNCTION(0, 11), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO11"), + MTK_FUNCTION(1, "MSDC1_CMD_A"), + MTK_FUNCTION(2, "SPI4_B_CSB"), + MTK_FUNCTION(3, "I2S8_BCK"), + MTK_FUNCTION(4, "LCM1_RST"), + MTK_FUNCTION(5, "MD_INT1_C2K_UIM0_HOT_PLUG"), + MTK_FUNCTION(6, "TP_GPIO1_AO") + ), + MTK_PIN( + 12, "GPIO12", + MTK_EINT_FUNCTION(0, 12), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO12"), + MTK_FUNCTION(1, "MSDC1_DAT3_A"), + MTK_FUNCTION(2, "SPI4_B_MI"), + MTK_FUNCTION(3, "I2S8_LRCK"), + MTK_FUNCTION(4, "DMIC1_CLK"), + MTK_FUNCTION(5, "MD_INT2_C2K_UIM1_HOT_PLUG"), + MTK_FUNCTION(6, "TP_GPIO2_AO") + ), + MTK_PIN( + 13, "GPIO13", + MTK_EINT_FUNCTION(0, 13), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO13"), + MTK_FUNCTION(1, "MSDC1_DAT0_A"), + MTK_FUNCTION(2, "SPI4_B_MO"), + MTK_FUNCTION(3, "I2S8_DI"), + MTK_FUNCTION(4, "DMIC1_DAT"), + MTK_FUNCTION(5, "ANT_SEL10"), + MTK_FUNCTION(6, "TP_GPIO3_AO") + ), + MTK_PIN( + 14, "GPIO14", + MTK_EINT_FUNCTION(0, 14), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO14"), + MTK_FUNCTION(1, "MSDC1_DAT2_A"), + MTK_FUNCTION(2, "SPI5_C_CLK"), + MTK_FUNCTION(3, "I2S9_MCK"), + MTK_FUNCTION(4, "IDDIG"), + MTK_FUNCTION(5, "ANT_SEL11"), + MTK_FUNCTION(6, "TP_GPIO4_AO") + ), + MTK_PIN( + 15, "GPIO15", + MTK_EINT_FUNCTION(0, 15), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO15"), + MTK_FUNCTION(1, "MSDC1_DAT1_A"), + MTK_FUNCTION(2, "SPI5_C_CSB"), + MTK_FUNCTION(3, "I2S9_BCK"), + MTK_FUNCTION(4, "USB_DRVVBUS"), + MTK_FUNCTION(5, "ANT_SEL12"), + MTK_FUNCTION(6, "TP_GPIO5_AO") + ), + MTK_PIN( + 16, "GPIO16", + MTK_EINT_FUNCTION(0, 16), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO16"), + MTK_FUNCTION(1, "SRCLKENAI1"), + MTK_FUNCTION(2, "SPI5_C_MI"), + MTK_FUNCTION(3, "I2S9_LRCK"), + MTK_FUNCTION(4, "KPCOL2"), + MTK_FUNCTION(5, "GPS_L1_ELNA_EN"), + MTK_FUNCTION(6, "TP_GPIO6_AO"), + MTK_FUNCTION(7, "DBG_MON_A30") + ), + MTK_PIN( + 17, "GPIO17", + MTK_EINT_FUNCTION(0, 17), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO17"), + MTK_FUNCTION(1, "SRCLKENAI0"), + MTK_FUNCTION(2, "SPI5_C_MO"), + MTK_FUNCTION(3, "I2S9_DO"), + MTK_FUNCTION(4, "KPROW2"), + MTK_FUNCTION(5, "GPS_L5_ELNA_EN"), + MTK_FUNCTION(6, "TP_GPIO7_AO"), + MTK_FUNCTION(7, "DBG_MON_A31") + ), + MTK_PIN( + 18, "GPIO18", + MTK_EINT_FUNCTION(0, 18), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO18"), + MTK_FUNCTION(1, "DP_TX_HPD"), + MTK_FUNCTION(2, "SPI4_C_MI"), + MTK_FUNCTION(3, "SPI1_B_MI"), + MTK_FUNCTION(4, "GPS_L1_ELNA_EN"), + MTK_FUNCTION(5, "ANT_SEL10"), + MTK_FUNCTION(6, "MD_INT0") + ), + MTK_PIN( + 19, "GPIO19", + MTK_EINT_FUNCTION(0, 19), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO19"), + MTK_FUNCTION(1, "SRCLKENAI1"), + MTK_FUNCTION(2, "SPI4_C_MO"), + MTK_FUNCTION(3, "SPI1_B_MO"), + MTK_FUNCTION(4, "GPS_L5_ELNA_EN"), + MTK_FUNCTION(5, "ANT_SEL11"), + MTK_FUNCTION(6, "MD_INT1_C2K_UIM0_HOT_PLUG") + ), + MTK_PIN( + 20, "GPIO20", + MTK_EINT_FUNCTION(0, 20), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO20"), + MTK_FUNCTION(1, "SRCLKENAI0"), + MTK_FUNCTION(2, "SPI4_C_CLK"), 
+ MTK_FUNCTION(3, "SPI1_B_CLK"), + MTK_FUNCTION(4, "PWM_3"), + MTK_FUNCTION(5, "ANT_SEL12"), + MTK_FUNCTION(6, "MD_INT2_C2K_UIM1_HOT_PLUG") + ), + MTK_PIN( + 21, "GPIO21", + MTK_EINT_FUNCTION(0, 21), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO21"), + MTK_FUNCTION(1, "DP_TX_HPD"), + MTK_FUNCTION(2, "SPI4_C_CSB"), + MTK_FUNCTION(3, "SPI1_B_CSB"), + MTK_FUNCTION(4, "I2S7_MCK"), + MTK_FUNCTION(5, "I2S9_MCK"), + MTK_FUNCTION(6, "IDDIG") + ), + MTK_PIN( + 22, "GPIO22", + MTK_EINT_FUNCTION(0, 22), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO22"), + MTK_FUNCTION(1, "LCM1_RST"), + MTK_FUNCTION(2, "SPI0_C_CLK"), + MTK_FUNCTION(3, "SPI7_B_CLK"), + MTK_FUNCTION(4, "I2S7_BCK"), + MTK_FUNCTION(5, "I2S9_BCK"), + MTK_FUNCTION(6, "SCL13") + ), + MTK_PIN( + 23, "GPIO23", + MTK_EINT_FUNCTION(0, 23), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO23"), + MTK_FUNCTION(1, "DSI1_TE"), + MTK_FUNCTION(2, "SPI0_C_CSB"), + MTK_FUNCTION(3, "SPI7_B_CSB"), + MTK_FUNCTION(4, "I2S7_LRCK"), + MTK_FUNCTION(5, "I2S9_LRCK"), + MTK_FUNCTION(6, "SDA13") + ), + MTK_PIN( + 24, "GPIO24", + MTK_EINT_FUNCTION(0, 24), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO24"), + MTK_FUNCTION(1, "SRCLKENAI1"), + MTK_FUNCTION(2, "SPI0_C_MI"), + MTK_FUNCTION(3, "SPI7_B_MI"), + MTK_FUNCTION(4, "I2S6_DI"), + MTK_FUNCTION(5, "I2S8_DI"), + MTK_FUNCTION(6, "SCL_6306") + ), + MTK_PIN( + 25, "GPIO25", + MTK_EINT_FUNCTION(0, 25), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO25"), + MTK_FUNCTION(1, "SRCLKENAI0"), + MTK_FUNCTION(2, "SPI0_C_MO"), + MTK_FUNCTION(3, "SPI7_B_MO"), + MTK_FUNCTION(4, "I2S7_DO"), + MTK_FUNCTION(5, "I2S9_DO"), + MTK_FUNCTION(6, "SDA_6306") + ), + MTK_PIN( + 26, "GPIO26", + MTK_EINT_FUNCTION(0, 26), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO26"), + MTK_FUNCTION(1, "PWM_2"), + MTK_FUNCTION(2, "CLKM0"), + MTK_FUNCTION(3, "USB_DRVVBUS") + ), + MTK_PIN( + 27, "GPIO27", + MTK_EINT_FUNCTION(0, 27), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO27"), + MTK_FUNCTION(1, "PWM_3"), + MTK_FUNCTION(2, "CLKM1") + ), + MTK_PIN( + 28, "GPIO28", + MTK_EINT_FUNCTION(0, 28), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO28"), + MTK_FUNCTION(1, "PWM_0"), + MTK_FUNCTION(2, "CLKM2") + ), + MTK_PIN( + 29, "GPIO29", + MTK_EINT_FUNCTION(0, 29), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO29"), + MTK_FUNCTION(1, "PWM_1"), + MTK_FUNCTION(2, "CLKM3"), + MTK_FUNCTION(3, "DSI1_TE") + ), + MTK_PIN( + 30, "GPIO30", + MTK_EINT_FUNCTION(0, 30), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO30"), + MTK_FUNCTION(1, "PWM_2"), + MTK_FUNCTION(2, "CLKM0"), + MTK_FUNCTION(3, "LCM1_RST") + ), + MTK_PIN( + 31, "GPIO31", + MTK_EINT_FUNCTION(0, 31), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO31"), + MTK_FUNCTION(1, "I2S3_MCK"), + MTK_FUNCTION(2, "I2S1_MCK"), + MTK_FUNCTION(3, "I2S5_MCK"), + MTK_FUNCTION(4, "SRCLKENAI0"), + MTK_FUNCTION(5, "I2S0_MCK") + ), + MTK_PIN( + 32, "GPIO32", + MTK_EINT_FUNCTION(0, 32), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO32"), + MTK_FUNCTION(1, "I2S3_BCK"), + MTK_FUNCTION(2, "I2S1_BCK"), + MTK_FUNCTION(3, "I2S5_BCK"), + MTK_FUNCTION(4, "PCM0_CLK"), + MTK_FUNCTION(5, "I2S0_BCK") + ), + MTK_PIN( + 33, "GPIO33", + MTK_EINT_FUNCTION(0, 33), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO33"), + MTK_FUNCTION(1, "I2S3_LRCK"), + MTK_FUNCTION(2, "I2S1_LRCK"), + MTK_FUNCTION(3, "I2S5_LRCK"), + MTK_FUNCTION(4, "PCM0_SYNC"), + MTK_FUNCTION(5, "I2S0_LRCK") + ), + MTK_PIN( + 34, "GPIO34", + MTK_EINT_FUNCTION(0, 34), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO34"), + MTK_FUNCTION(1, "I2S0_DI"), + MTK_FUNCTION(2, "I2S2_DI"), + MTK_FUNCTION(3, "I2S2_DI2"), + MTK_FUNCTION(4, "PCM0_DI"), + MTK_FUNCTION(5, "I2S0_DI") + ), + MTK_PIN( + 35, "GPIO35", + MTK_EINT_FUNCTION(0, 35), + DRV_GRP4, + 
MTK_FUNCTION(0, "GPIO35"), + MTK_FUNCTION(1, "I2S3_DO"), + MTK_FUNCTION(2, "I2S1_DO"), + MTK_FUNCTION(3, "I2S5_DO"), + MTK_FUNCTION(4, "PCM0_DO") + ), + MTK_PIN( + 36, "GPIO36", + MTK_EINT_FUNCTION(0, 36), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO36"), + MTK_FUNCTION(1, "SPI5_A_CLK"), + MTK_FUNCTION(2, "DMIC1_CLK"), + MTK_FUNCTION(3, "IDDIG"), + MTK_FUNCTION(4, "MD_URXD0"), + MTK_FUNCTION(5, "UCTS0"), + MTK_FUNCTION(6, "URXD1"), + MTK_FUNCTION(7, "DBG_MON_A0") + ), + MTK_PIN( + 37, "GPIO37", + MTK_EINT_FUNCTION(0, 37), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO37"), + MTK_FUNCTION(1, "SPI5_A_CSB"), + MTK_FUNCTION(2, "DMIC1_DAT"), + MTK_FUNCTION(3, "USB_DRVVBUS"), + MTK_FUNCTION(4, "MD_UTXD0"), + MTK_FUNCTION(5, "URTS0"), + MTK_FUNCTION(6, "UTXD1"), + MTK_FUNCTION(7, "DBG_MON_A1") + ), + MTK_PIN( + 38, "GPIO38", + MTK_EINT_FUNCTION(0, 38), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO38"), + MTK_FUNCTION(1, "SPI5_A_MI"), + MTK_FUNCTION(2, "DMIC_CLK"), + MTK_FUNCTION(3, "DSI1_TE"), + MTK_FUNCTION(4, "MD_URXD1"), + MTK_FUNCTION(5, "URXD0"), + MTK_FUNCTION(6, "UCTS1"), + MTK_FUNCTION(7, "DBG_MON_A2") + ), + MTK_PIN( + 39, "GPIO39", + MTK_EINT_FUNCTION(0, 39), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO39"), + MTK_FUNCTION(1, "SPI5_A_MO"), + MTK_FUNCTION(2, "DMIC_DAT"), + MTK_FUNCTION(3, "LCM1_RST"), + MTK_FUNCTION(4, "MD_UTXD1"), + MTK_FUNCTION(5, "UTXD0"), + MTK_FUNCTION(6, "URTS1"), + MTK_FUNCTION(7, "DBG_MON_A3") + ), + MTK_PIN( + 40, "GPIO40", + MTK_EINT_FUNCTION(0, 40), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO40"), + MTK_FUNCTION(1, "DISP_PWM"), + MTK_FUNCTION(7, "DBG_MON_A6") + ), + MTK_PIN( + 41, "GPIO41", + MTK_EINT_FUNCTION(0, 41), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO41"), + MTK_FUNCTION(1, "DSI_TE") + ), + MTK_PIN( + 42, "GPIO42", + MTK_EINT_FUNCTION(0, 42), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO42"), + MTK_FUNCTION(1, "LCM_RST") + ), + MTK_PIN( + 43, "GPIO43", + MTK_EINT_FUNCTION(0, 43), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO43"), + MTK_FUNCTION(1, "MD_INT1_C2K_UIM0_HOT_PLUG"), + MTK_FUNCTION(2, "MD_INT2_C2K_UIM1_HOT_PLUG"), + MTK_FUNCTION(3, "SCL_6306"), + MTK_FUNCTION(4, "ADSP_URXD0"), + MTK_FUNCTION(5, "PTA_RXD"), + MTK_FUNCTION(6, "SSPM_URXD_AO"), + MTK_FUNCTION(7, "DBG_MON_A4") + ), + MTK_PIN( + 44, "GPIO44", + MTK_EINT_FUNCTION(0, 44), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO44"), + MTK_FUNCTION(1, "MD_INT2_C2K_UIM1_HOT_PLUG"), + MTK_FUNCTION(2, "MD_INT1_C2K_UIM0_HOT_PLUG"), + MTK_FUNCTION(3, "SDA_6306"), + MTK_FUNCTION(4, "ADSP_UTXD0"), + MTK_FUNCTION(5, "PTA_TXD"), + MTK_FUNCTION(6, "SSPM_UTXD_AO"), + MTK_FUNCTION(7, "DBG_MON_A5") + ), + MTK_PIN( + 45, "GPIO45", + MTK_EINT_FUNCTION(0, 45), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO45"), + MTK_FUNCTION(1, "MD1_SIM2_SCLK"), + MTK_FUNCTION(2, "MD1_SIM1_SCLK"), + MTK_FUNCTION(3, "MCUPM_JTAG_TDI"), + MTK_FUNCTION(4, "APU_JTAG_TDI"), + MTK_FUNCTION(5, "CCU_JTAG_TDI"), + MTK_FUNCTION(6, "LVTS_SCK") + ), + MTK_PIN( + 46, "GPIO46", + MTK_EINT_FUNCTION(0, 46), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO46"), + MTK_FUNCTION(1, "MD1_SIM2_SRST"), + MTK_FUNCTION(2, "MD1_SIM1_SRST"), + MTK_FUNCTION(3, "MCUPM_JTAG_TMS"), + MTK_FUNCTION(4, "APU_JTAG_TMS"), + MTK_FUNCTION(5, "CCU_JTAG_TMS"), + MTK_FUNCTION(6, "LVTS_SDI") + ), + MTK_PIN( + 47, "GPIO47", + MTK_EINT_FUNCTION(0, 47), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO47"), + MTK_FUNCTION(1, "MD1_SIM2_SIO"), + MTK_FUNCTION(2, "MD1_SIM1_SIO"), + MTK_FUNCTION(3, "MCUPM_JTAG_TDO"), + MTK_FUNCTION(4, "APU_JTAG_TDO"), + MTK_FUNCTION(5, "CCU_JTAG_TDO"), + MTK_FUNCTION(6, "LVTS_SCF") + ), + MTK_PIN( + 48, "GPIO48", + MTK_EINT_FUNCTION(0, 48), + DRV_GRP2, + 
MTK_FUNCTION(0, "GPIO48"), + MTK_FUNCTION(1, "MD1_SIM1_SIO"), + MTK_FUNCTION(2, "MD1_SIM2_SIO"), + MTK_FUNCTION(3, "MCUPM_JTAG_TRSTN"), + MTK_FUNCTION(4, "APU_JTAG_TRST"), + MTK_FUNCTION(5, "CCU_JTAG_TRST"), + MTK_FUNCTION(6, "LVTS_FOUT") + ), + MTK_PIN( + 49, "GPIO49", + MTK_EINT_FUNCTION(0, 49), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO49"), + MTK_FUNCTION(1, "MD1_SIM1_SRST"), + MTK_FUNCTION(2, "MD1_SIM2_SRST"), + MTK_FUNCTION(3, "MCUPM_JTAG_TCK"), + MTK_FUNCTION(4, "APU_JTAG_TCK"), + MTK_FUNCTION(5, "CCU_JTAG_TCK"), + MTK_FUNCTION(6, "LVTS_SDO") + ), + MTK_PIN( + 50, "GPIO50", + MTK_EINT_FUNCTION(0, 50), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO50"), + MTK_FUNCTION(1, "MD1_SIM1_SCLK"), + MTK_FUNCTION(2, "MD1_SIM2_SCLK"), + MTK_FUNCTION(6, "LVTS_26M") + ), + MTK_PIN( + 51, "GPIO51", + MTK_EINT_FUNCTION(0, 51), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO51"), + MTK_FUNCTION(1, "MSDC1_CLK"), + MTK_FUNCTION(2, "PCM1_CLK"), + MTK_FUNCTION(3, "VPU_UDI_TCK"), + MTK_FUNCTION(4, "UDI_TCK"), + MTK_FUNCTION(5, "IPU_JTAG_TCK"), + MTK_FUNCTION(6, "SSPM_JTAG_TCK"), + MTK_FUNCTION(7, "JTCK_SEL3") + ), + MTK_PIN( + 52, "GPIO52", + MTK_EINT_FUNCTION(0, 52), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO52"), + MTK_FUNCTION(1, "MSDC1_CMD"), + MTK_FUNCTION(2, "PCM1_SYNC"), + MTK_FUNCTION(3, "VPU_UDI_TMS"), + MTK_FUNCTION(4, "UDI_TMS"), + MTK_FUNCTION(5, "IPU_JTAG_TMS"), + MTK_FUNCTION(6, "SSPM_JTAG_TMS"), + MTK_FUNCTION(7, "JTMS_SEL3") + ), + MTK_PIN( + 53, "GPIO53", + MTK_EINT_FUNCTION(0, 53), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO53"), + MTK_FUNCTION(1, "MSDC1_DAT3"), + MTK_FUNCTION(2, "PCM1_DI") + ), + MTK_PIN( + 54, "GPIO54", + MTK_EINT_FUNCTION(0, 54), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO54"), + MTK_FUNCTION(1, "MSDC1_DAT0"), + MTK_FUNCTION(2, "PCM1_DO0"), + MTK_FUNCTION(3, "VPU_UDI_TDI"), + MTK_FUNCTION(4, "UDI_TDI"), + MTK_FUNCTION(5, "IPU_JTAG_TDI"), + MTK_FUNCTION(6, "SSPM_JTAG_TDI"), + MTK_FUNCTION(7, "JTDI_SEL3") + ), + MTK_PIN( + 55, "GPIO55", + MTK_EINT_FUNCTION(0, 55), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO55"), + MTK_FUNCTION(1, "MSDC1_DAT2"), + MTK_FUNCTION(2, "PCM1_DO2"), + MTK_FUNCTION(3, "VPU_UDI_NTRST"), + MTK_FUNCTION(4, "UDI_NTRST"), + MTK_FUNCTION(5, "IPU_JTAG_TRST"), + MTK_FUNCTION(6, "SSPM_JTAG_TRSTN"), + MTK_FUNCTION(7, "JTRSTN_SEL3") + ), + MTK_PIN( + 56, "GPIO56", + MTK_EINT_FUNCTION(0, 56), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO56"), + MTK_FUNCTION(1, "MSDC1_DAT1"), + MTK_FUNCTION(2, "PCM1_DO1"), + MTK_FUNCTION(3, "VPU_UDI_TDO"), + MTK_FUNCTION(4, "UDI_TDO"), + MTK_FUNCTION(5, "IPU_JTAG_TDO"), + MTK_FUNCTION(6, "SSPM_JTAG_TDO"), + MTK_FUNCTION(7, "JTDO_SEL3") + ), + MTK_PIN( + 57, "GPIO57", + MTK_EINT_FUNCTION(0, 57), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO57"), + MTK_FUNCTION(1, "MIPI2_D_SCLK"), + MTK_FUNCTION(7, "DBG_MON_A14") + ), + MTK_PIN( + 58, "GPIO58", + MTK_EINT_FUNCTION(0, 58), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO58"), + MTK_FUNCTION(1, "MIPI2_D_SDATA"), + MTK_FUNCTION(7, "DBG_MON_A15") + ), + MTK_PIN( + 59, "GPIO59", + MTK_EINT_FUNCTION(0, 59), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO59"), + MTK_FUNCTION(1, "MIPI_M_SCLK"), + MTK_FUNCTION(7, "DBG_MON_A17") + ), + MTK_PIN( + 60, "GPIO60", + MTK_EINT_FUNCTION(0, 60), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO60"), + MTK_FUNCTION(1, "MIPI_M_SDATA"), + MTK_FUNCTION(7, "DBG_MON_A18") + ), + MTK_PIN( + 61, "GPIO61", + MTK_EINT_FUNCTION(0, 61), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO61"), + MTK_FUNCTION(1, "MD_UCNT_A_TGL"), + MTK_FUNCTION(7, "DBG_MON_A16") + ), + MTK_PIN( + 62, "GPIO62", + MTK_EINT_FUNCTION(0, 62), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO62"), + MTK_FUNCTION(1, 
"DIGRF_IRQ") + ), + MTK_PIN( + 63, "GPIO63", + MTK_EINT_FUNCTION(0, 63), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO63"), + MTK_FUNCTION(1, "BPI_BUS0"), + MTK_FUNCTION(7, "DBG_MON_A19") + ), + MTK_PIN( + 64, "GPIO64", + MTK_EINT_FUNCTION(0, 64), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO64"), + MTK_FUNCTION(1, "BPI_BUS1"), + MTK_FUNCTION(7, "DBG_MON_A20") + ), + MTK_PIN( + 65, "GPIO65", + MTK_EINT_FUNCTION(0, 65), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO65"), + MTK_FUNCTION(1, "BPI_BUS2"), + MTK_FUNCTION(7, "DBG_MON_A21") + ), + MTK_PIN( + 66, "GPIO66", + MTK_EINT_FUNCTION(0, 66), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO66"), + MTK_FUNCTION(1, "BPI_BUS3"), + MTK_FUNCTION(7, "DBG_MON_A22") + ), + MTK_PIN( + 67, "GPIO67", + MTK_EINT_FUNCTION(0, 67), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO67"), + MTK_FUNCTION(1, "BPI_BUS4") + ), + MTK_PIN( + 68, "GPIO68", + MTK_EINT_FUNCTION(0, 68), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO68"), + MTK_FUNCTION(1, "BPI_BUS5") + ), + MTK_PIN( + 69, "GPIO69", + MTK_EINT_FUNCTION(0, 69), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO69"), + MTK_FUNCTION(1, "BPI_BUS6"), + MTK_FUNCTION(2, "CONN_BPI_BUS6") + ), + MTK_PIN( + 70, "GPIO70", + MTK_EINT_FUNCTION(0, 70), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO70"), + MTK_FUNCTION(1, "BPI_BUS7"), + MTK_FUNCTION(2, "CONN_BPI_BUS7") + ), + MTK_PIN( + 71, "GPIO71", + MTK_EINT_FUNCTION(0, 71), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO71"), + MTK_FUNCTION(1, "BPI_BUS8"), + MTK_FUNCTION(2, "CONN_BPI_BUS8") + ), + MTK_PIN( + 72, "GPIO72", + MTK_EINT_FUNCTION(0, 72), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO72"), + MTK_FUNCTION(1, "BPI_BUS9"), + MTK_FUNCTION(2, "CONN_BPI_BUS9") + ), + MTK_PIN( + 73, "GPIO73", + MTK_EINT_FUNCTION(0, 73), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO73"), + MTK_FUNCTION(1, "BPI_BUS10"), + MTK_FUNCTION(2, "CONN_BPI_BUS10") + ), + MTK_PIN( + 74, "GPIO74", + MTK_EINT_FUNCTION(0, 74), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO74"), + MTK_FUNCTION(1, "BPI_BUS11_OLAT0"), + MTK_FUNCTION(2, "CONN_BPI_BUS11_OLAT0") + ), + MTK_PIN( + 75, "GPIO75", + MTK_EINT_FUNCTION(0, 75), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO75"), + MTK_FUNCTION(1, "BPI_BUS12_OLAT1"), + MTK_FUNCTION(2, "CONN_BPI_BUS12_OLAT1") + ), + MTK_PIN( + 76, "GPIO76", + MTK_EINT_FUNCTION(0, 76), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO76"), + MTK_FUNCTION(1, "BPI_BUS13_OLAT2"), + MTK_FUNCTION(2, "CONN_BPI_BUS13_OLAT2") + ), + MTK_PIN( + 77, "GPIO77", + MTK_EINT_FUNCTION(0, 77), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO77"), + MTK_FUNCTION(1, "BPI_BUS14_OLAT3"), + MTK_FUNCTION(2, "CONN_BPI_BUS14_OLAT3") + ), + MTK_PIN( + 78, "GPIO78", + MTK_EINT_FUNCTION(0, 78), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO78"), + MTK_FUNCTION(1, "BPI_BUS15_OLAT4"), + MTK_FUNCTION(2, "CONN_BPI_BUS15_OLAT4"), + MTK_FUNCTION(7, "DBG_MON_A7") + ), + MTK_PIN( + 79, "GPIO79", + MTK_EINT_FUNCTION(0, 79), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO79"), + MTK_FUNCTION(1, "BPI_BUS16_OLAT5"), + MTK_FUNCTION(2, "CONN_BPI_BUS16_OLAT5"), + MTK_FUNCTION(7, "DBG_MON_A8") + ), + MTK_PIN( + 80, "GPIO80", + MTK_EINT_FUNCTION(0, 80), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO80"), + MTK_FUNCTION(1, "BPI_BUS17_ANT0"), + MTK_FUNCTION(2, "CONN_BPI_BUS17_ANT0"), + MTK_FUNCTION(7, "DBG_MON_A9") + ), + MTK_PIN( + 81, "GPIO81", + MTK_EINT_FUNCTION(0, 81), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO81"), + MTK_FUNCTION(1, "BPI_BUS18_ANT1"), + MTK_FUNCTION(2, "CONN_BPI_BUS18_ANT1"), + MTK_FUNCTION(7, "DBG_MON_A10") + ), + MTK_PIN( + 82, "GPIO82", + MTK_EINT_FUNCTION(0, 82), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO82"), + MTK_FUNCTION(1, "BPI_BUS19_ANT2"), + MTK_FUNCTION(2, "CONN_BPI_BUS19_ANT2"), + MTK_FUNCTION(7, 
"DBG_MON_A11") + ), + MTK_PIN( + 83, "GPIO83", + MTK_EINT_FUNCTION(0, 83), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO83"), + MTK_FUNCTION(1, "BPI_BUS20_ANT3"), + MTK_FUNCTION(2, "CONN_BPI_BUS20_ANT3"), + MTK_FUNCTION(7, "DBG_MON_A12") + ), + MTK_PIN( + 84, "GPIO84", + MTK_EINT_FUNCTION(0, 84), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO84"), + MTK_FUNCTION(1, "BPI_BUS21_ANT4"), + MTK_FUNCTION(2, "CONN_BPI_BUS21_ANT4"), + MTK_FUNCTION(7, "DBG_MON_A13") + ), + MTK_PIN( + 85, "GPIO85", + MTK_EINT_FUNCTION(0, 85), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO85"), + MTK_FUNCTION(1, "MIPI1_D_SCLK"), + MTK_FUNCTION(2, "CONN_MIPI1_SCLK") + ), + MTK_PIN( + 86, "GPIO86", + MTK_EINT_FUNCTION(0, 86), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO86"), + MTK_FUNCTION(1, "MIPI1_D_SDATA"), + MTK_FUNCTION(2, "CONN_MIPI1_SDATA") + ), + MTK_PIN( + 87, "GPIO87", + MTK_EINT_FUNCTION(0, 87), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO87"), + MTK_FUNCTION(1, "MIPI0_D_SCLK"), + MTK_FUNCTION(2, "CONN_MIPI0_SCLK") + ), + MTK_PIN( + 88, "GPIO88", + MTK_EINT_FUNCTION(0, 88), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO88"), + MTK_FUNCTION(1, "MIPI0_D_SDATA"), + MTK_FUNCTION(2, "CONN_MIPI0_SDATA") + ), + MTK_PIN( + 89, "GPIO89", + MTK_EINT_FUNCTION(0, 89), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO89"), + MTK_FUNCTION(1, "SPMI_SCL"), + MTK_FUNCTION(2, "SCL10") + ), + MTK_PIN( + 90, "GPIO90", + MTK_EINT_FUNCTION(0, 90), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO90"), + MTK_FUNCTION(1, "SPMI_SDA"), + MTK_FUNCTION(2, "SDA10") + ), + MTK_PIN( + 91, "GPIO91", + MTK_EINT_FUNCTION(0, 91), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO91"), + MTK_FUNCTION(1, "AP_GOOD") + ), + MTK_PIN( + 92, "GPIO92", + MTK_EINT_FUNCTION(0, 92), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO92"), + MTK_FUNCTION(1, "URXD0"), + MTK_FUNCTION(2, "MD_URXD0"), + MTK_FUNCTION(3, "MD_URXD1"), + MTK_FUNCTION(4, "SSPM_URXD_AO"), + MTK_FUNCTION(5, "CONN_BGF_UART0_RXD") + ), + MTK_PIN( + 93, "GPIO93", + MTK_EINT_FUNCTION(0, 93), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO93"), + MTK_FUNCTION(1, "UTXD0"), + MTK_FUNCTION(2, "MD_UTXD0"), + MTK_FUNCTION(3, "MD_UTXD1"), + MTK_FUNCTION(4, "SSPM_UTXD_AO"), + MTK_FUNCTION(5, "CONN_BGF_UART0_TXD"), + MTK_FUNCTION(6, "WIFI_TXD") + ), + MTK_PIN( + 94, "GPIO94", + MTK_EINT_FUNCTION(0, 94), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO94"), + MTK_FUNCTION(1, "URXD1"), + MTK_FUNCTION(2, "ADSP_URXD0"), + MTK_FUNCTION(3, "MD32_0_RXD"), + MTK_FUNCTION(4, "SSPM_URXD_AO"), + MTK_FUNCTION(5, "TP_URXD1_AO"), + MTK_FUNCTION(6, "TP_URXD2_AO"), + MTK_FUNCTION(7, "MBISTREADEN_TRIGGER") + ), + MTK_PIN( + 95, "GPIO95", + MTK_EINT_FUNCTION(0, 95), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO95"), + MTK_FUNCTION(1, "UTXD1"), + MTK_FUNCTION(2, "ADSP_UTXD0"), + MTK_FUNCTION(3, "MD32_0_TXD"), + MTK_FUNCTION(4, "SSPM_UTXD_AO"), + MTK_FUNCTION(5, "TP_UTXD1_AO"), + MTK_FUNCTION(6, "TP_UTXD2_AO"), + MTK_FUNCTION(7, "MBISTWRITEEN_TRIGGER") + ), + MTK_PIN( + 96, "GPIO96", + MTK_EINT_FUNCTION(0, 96), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO96"), + MTK_FUNCTION(1, "TDM_LRCK"), + MTK_FUNCTION(2, "I2S7_LRCK"), + MTK_FUNCTION(3, "I2S9_LRCK"), + MTK_FUNCTION(4, "SPI4_A_CLK"), + MTK_FUNCTION(5, "ADSP_JTAG0_TDI"), + MTK_FUNCTION(6, "CONN_BGF_DSP_L1_JDI"), + MTK_FUNCTION(7, "IO_JTAG_TDI") + ), + MTK_PIN( + 97, "GPIO97", + MTK_EINT_FUNCTION(0, 97), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO97"), + MTK_FUNCTION(1, "TDM_BCK"), + MTK_FUNCTION(2, "I2S7_BCK"), + MTK_FUNCTION(3, "I2S9_BCK"), + MTK_FUNCTION(4, "SPI4_A_CSB"), + MTK_FUNCTION(5, "ADSP_JTAG0_TRSTN"), + MTK_FUNCTION(6, "CONN_BGF_DSP_L1_JINTP"), + MTK_FUNCTION(7, "IO_JTAG_TRSTN") + ), + MTK_PIN( + 98, "GPIO98", + 
MTK_EINT_FUNCTION(0, 98), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO98"), + MTK_FUNCTION(1, "TDM_MCK"), + MTK_FUNCTION(2, "I2S7_MCK"), + MTK_FUNCTION(3, "I2S9_MCK"), + MTK_FUNCTION(4, "SPI4_A_MI"), + MTK_FUNCTION(5, "ADSP_JTAG0_TCK"), + MTK_FUNCTION(6, "CONN_BGF_DSP_L1_JCK"), + MTK_FUNCTION(7, "IO_JTAG_TCK") + ), + MTK_PIN( + 99, "GPIO99", + MTK_EINT_FUNCTION(0, 99), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO99"), + MTK_FUNCTION(1, "TDM_DATA0"), + MTK_FUNCTION(2, "I2S6_DI"), + MTK_FUNCTION(3, "I2S8_DI"), + MTK_FUNCTION(4, "SPI4_A_MO"), + MTK_FUNCTION(5, "ADSP_JTAG0_TDO"), + MTK_FUNCTION(6, "CONN_BGF_DSP_L1_JDO"), + MTK_FUNCTION(7, "IO_JTAG_TDO") + ), + MTK_PIN( + 100, "GPIO100", + MTK_EINT_FUNCTION(0, 100), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO100"), + MTK_FUNCTION(1, "TDM_DATA1"), + MTK_FUNCTION(2, "I2S7_DO"), + MTK_FUNCTION(3, "I2S9_DO"), + MTK_FUNCTION(4, "DP_TX_HPD"), + MTK_FUNCTION(5, "ADSP_JTAG0_TMS"), + MTK_FUNCTION(6, "CONN_BGF_DSP_L1_JMS"), + MTK_FUNCTION(7, "IO_JTAG_TMS") + ), + MTK_PIN( + 101, "GPIO101", + MTK_EINT_FUNCTION(0, 101), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO101"), + MTK_FUNCTION(1, "TDM_DATA2"), + MTK_FUNCTION(2, "DMIC1_CLK"), + MTK_FUNCTION(3, "SRCLKENAI0"), + MTK_FUNCTION(4, "SPI5_B_CLK"), + MTK_FUNCTION(5, "CLKM0"), + MTK_FUNCTION(7, "DAP_MD32_SWD") + ), + MTK_PIN( + 102, "GPIO102", + MTK_EINT_FUNCTION(0, 102), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO102"), + MTK_FUNCTION(1, "TDM_DATA3"), + MTK_FUNCTION(2, "DMIC1_DAT"), + MTK_FUNCTION(3, "SRCLKENAI1"), + MTK_FUNCTION(4, "SPI5_B_CSB"), + MTK_FUNCTION(5, "DP_TX_HPD"), + MTK_FUNCTION(6, "DVFSRC_EXT_REQ"), + MTK_FUNCTION(7, "DAP_MD32_SWCK") + ), + MTK_PIN( + 103, "GPIO103", + MTK_EINT_FUNCTION(0, 103), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO103"), + MTK_FUNCTION(1, "SPI0_A_MI"), + MTK_FUNCTION(2, "SCP_SPI0_MI"), + MTK_FUNCTION(5, "DFD_TDO"), + MTK_FUNCTION(6, "SPM_JTAG_TDO"), + MTK_FUNCTION(7, "JTDO_SEL1") + ), + MTK_PIN( + 104, "GPIO104", + MTK_EINT_FUNCTION(0, 104), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO104"), + MTK_FUNCTION(1, "SPI0_A_CSB"), + MTK_FUNCTION(2, "SCP_SPI0_CS"), + MTK_FUNCTION(5, "DFD_TMS"), + MTK_FUNCTION(6, "SPM_JTAG_TMS"), + MTK_FUNCTION(7, "JTMS_SEL1") + ), + MTK_PIN( + 105, "GPIO105", + MTK_EINT_FUNCTION(0, 105), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO105"), + MTK_FUNCTION(1, "SPI0_A_MO"), + MTK_FUNCTION(2, "SCP_SPI0_MO"), + MTK_FUNCTION(3, "SCP_SDA0"), + MTK_FUNCTION(5, "DFD_TDI"), + MTK_FUNCTION(6, "SPM_JTAG_TDI"), + MTK_FUNCTION(7, "JTDI_SEL1") + ), + MTK_PIN( + 106, "GPIO106", + MTK_EINT_FUNCTION(0, 106), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO106"), + MTK_FUNCTION(1, "SPI0_A_CLK"), + MTK_FUNCTION(2, "SCP_SPI0_CK"), + MTK_FUNCTION(3, "SCP_SCL0"), + MTK_FUNCTION(5, "DFD_TCK_XI"), + MTK_FUNCTION(6, "SPM_JTAG_TCK"), + MTK_FUNCTION(7, "JTCK_SEL1") + ), + MTK_PIN( + 107, "GPIO107", + MTK_EINT_FUNCTION(0, 107), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO107"), + MTK_FUNCTION(1, "DMIC_CLK"), + MTK_FUNCTION(2, "PWM_0"), + MTK_FUNCTION(3, "CLKM2"), + MTK_FUNCTION(4, "SPI5_B_MI"), + MTK_FUNCTION(6, "SPM_JTAG_TRSTN"), + MTK_FUNCTION(7, "JTRSTN_SEL1") + ), + MTK_PIN( + 108, "GPIO108", + MTK_EINT_FUNCTION(0, 108), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO108"), + MTK_FUNCTION(1, "DMIC_DAT"), + MTK_FUNCTION(2, "PWM_1"), + MTK_FUNCTION(3, "CLKM3"), + MTK_FUNCTION(4, "SPI5_B_MO"), + MTK_FUNCTION(7, "DAP_SONIC_SWD") + ), + MTK_PIN( + 109, "GPIO109", + MTK_EINT_FUNCTION(0, 109), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO109"), + MTK_FUNCTION(1, "I2S1_MCK"), + MTK_FUNCTION(2, "I2S3_MCK"), + MTK_FUNCTION(3, "I2S2_MCK"), + MTK_FUNCTION(4, "DP_TX_HPD"), + MTK_FUNCTION(5, 
"I2S2_MCK"), + MTK_FUNCTION(6, "SRCLKENAI0"), + MTK_FUNCTION(7, "DAP_SONIC_SWCK") + ), + MTK_PIN( + 110, "GPIO110", + MTK_EINT_FUNCTION(0, 110), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO110"), + MTK_FUNCTION(1, "I2S1_BCK"), + MTK_FUNCTION(2, "I2S3_BCK"), + MTK_FUNCTION(3, "I2S2_BCK"), + MTK_FUNCTION(4, "PCM0_CLK"), + MTK_FUNCTION(5, "I2S2_BCK"), + MTK_FUNCTION(6, "CONN_BGF_MCU_TDO") + ), + MTK_PIN( + 111, "GPIO111", + MTK_EINT_FUNCTION(0, 111), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO111"), + MTK_FUNCTION(1, "I2S1_LRCK"), + MTK_FUNCTION(2, "I2S3_LRCK"), + MTK_FUNCTION(3, "I2S2_LRCK"), + MTK_FUNCTION(4, "PCM0_SYNC"), + MTK_FUNCTION(5, "I2S2_LRCK"), + MTK_FUNCTION(6, "CONN_BGF_MCU_TDI") + ), + MTK_PIN( + 112, "GPIO112", + MTK_EINT_FUNCTION(0, 112), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO112"), + MTK_FUNCTION(1, "I2S2_DI"), + MTK_FUNCTION(2, "I2S0_DI"), + MTK_FUNCTION(3, "I2S2_DI2"), + MTK_FUNCTION(4, "PCM0_DI"), + MTK_FUNCTION(5, "I2S2_DI"), + MTK_FUNCTION(6, "CONN_BGF_MCU_TMS") + ), + MTK_PIN( + 113, "GPIO113", + MTK_EINT_FUNCTION(0, 113), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO113"), + MTK_FUNCTION(1, "I2S1_DO"), + MTK_FUNCTION(2, "I2S3_DO"), + MTK_FUNCTION(3, "I2S5_DO"), + MTK_FUNCTION(4, "PCM0_DO"), + MTK_FUNCTION(5, "I2S2_DI2"), + MTK_FUNCTION(6, "CONN_BGF_MCU_TCK") + ), + MTK_PIN( + 114, "GPIO114", + MTK_EINT_FUNCTION(0, 114), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO114"), + MTK_FUNCTION(1, "SPI2_MI"), + MTK_FUNCTION(2, "SCP_SPI2_MI"), + MTK_FUNCTION(6, "CONN_BGF_MCU_TRST_B") + ), + MTK_PIN( + 115, "GPIO115", + MTK_EINT_FUNCTION(0, 115), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO115"), + MTK_FUNCTION(1, "SPI2_CSB"), + MTK_FUNCTION(2, "SCP_SPI2_CS"), + MTK_FUNCTION(6, "CONN_BGF_MCU_DBGI_N") + ), + MTK_PIN( + 116, "GPIO116", + MTK_EINT_FUNCTION(0, 116), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO116"), + MTK_FUNCTION(1, "SPI2_MO"), + MTK_FUNCTION(2, "SCP_SPI2_MO"), + MTK_FUNCTION(3, "SCP_SDA1"), + MTK_FUNCTION(6, "CONN_BGF_MCU_DBGACK_N") + ), + MTK_PIN( + 117, "GPIO117", + MTK_EINT_FUNCTION(0, 117), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO117"), + MTK_FUNCTION(1, "SPI2_CLK"), + MTK_FUNCTION(2, "SCP_SPI2_CK"), + MTK_FUNCTION(3, "SCP_SCL1") + ), + MTK_PIN( + 118, "GPIO118", + MTK_EINT_FUNCTION(0, 118), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO118"), + MTK_FUNCTION(1, "SCL1"), + MTK_FUNCTION(2, "SCP_SCL0"), + MTK_FUNCTION(3, "SCP_SCL1") + ), + MTK_PIN( + 119, "GPIO119", + MTK_EINT_FUNCTION(0, 119), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO119"), + MTK_FUNCTION(1, "SDA1"), + MTK_FUNCTION(2, "SCP_SDA0"), + MTK_FUNCTION(3, "SCP_SDA1") + ), + MTK_PIN( + 120, "GPIO120", + MTK_EINT_FUNCTION(0, 120), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO120"), + MTK_FUNCTION(1, "SCL9") + ), + MTK_PIN( + 121, "GPIO121", + MTK_EINT_FUNCTION(0, 121), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO121"), + MTK_FUNCTION(1, "SDA9") + ), + MTK_PIN( + 122, "GPIO122", + MTK_EINT_FUNCTION(0, 122), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO122"), + MTK_FUNCTION(1, "SCL8") + ), + MTK_PIN( + 123, "GPIO123", + MTK_EINT_FUNCTION(0, 123), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO123"), + MTK_FUNCTION(1, "SDA8") + ), + MTK_PIN( + 124, "GPIO124", + MTK_EINT_FUNCTION(0, 124), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO124"), + MTK_FUNCTION(1, "SCL7"), + MTK_FUNCTION(2, "DMIC1_CLK") + ), + MTK_PIN( + 125, "GPIO125", + MTK_EINT_FUNCTION(0, 125), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO125"), + MTK_FUNCTION(1, "SDA7"), + MTK_FUNCTION(2, "DMIC1_DAT") + ), + MTK_PIN( + 126, "GPIO126", + MTK_EINT_FUNCTION(0, 126), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO126"), + MTK_FUNCTION(1, "CMFLASH0"), + MTK_FUNCTION(2, "PWM_2"), + MTK_FUNCTION(3, 
"TP_UCTS1_AO"), + MTK_FUNCTION(4, "UCTS0"), + MTK_FUNCTION(5, "SCL11"), + MTK_FUNCTION(6, "MD32_1_GPIO0") + ), + MTK_PIN( + 127, "GPIO127", + MTK_EINT_FUNCTION(0, 127), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO127"), + MTK_FUNCTION(1, "CMFLASH1"), + MTK_FUNCTION(2, "PWM_3"), + MTK_FUNCTION(3, "TP_URTS1_AO"), + MTK_FUNCTION(4, "URTS0"), + MTK_FUNCTION(5, "SDA11"), + MTK_FUNCTION(6, "MD32_1_GPIO1") + ), + MTK_PIN( + 128, "GPIO128", + MTK_EINT_FUNCTION(0, 128), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO128"), + MTK_FUNCTION(1, "CMFLASH2"), + MTK_FUNCTION(2, "PWM_0"), + MTK_FUNCTION(3, "TP_UCTS2_AO"), + MTK_FUNCTION(4, "UCTS1"), + MTK_FUNCTION(5, "SCL12"), + MTK_FUNCTION(6, "MD32_1_GPIO2") + ), + MTK_PIN( + 129, "GPIO129", + MTK_EINT_FUNCTION(0, 129), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO129"), + MTK_FUNCTION(1, "CMFLASH3"), + MTK_FUNCTION(2, "PWM_1"), + MTK_FUNCTION(3, "TP_URTS2_AO"), + MTK_FUNCTION(4, "URTS1"), + MTK_FUNCTION(5, "SDA12") + ), + MTK_PIN( + 130, "GPIO130", + MTK_EINT_FUNCTION(0, 130), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO130"), + MTK_FUNCTION(1, "CMVREF0"), + MTK_FUNCTION(2, "ANT_SEL10"), + MTK_FUNCTION(3, "SCP_JTAG0_TDO"), + MTK_FUNCTION(4, "MD32_0_JTAG_TDO"), + MTK_FUNCTION(5, "SCL11"), + MTK_FUNCTION(6, "CONN_WF_MCU_TDO"), + MTK_FUNCTION(7, "DBG_MON_A23") + ), + MTK_PIN( + 131, "GPIO131", + MTK_EINT_FUNCTION(0, 131), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO131"), + MTK_FUNCTION(1, "CMVREF1"), + MTK_FUNCTION(2, "ANT_SEL11"), + MTK_FUNCTION(3, "SCP_JTAG0_TDI"), + MTK_FUNCTION(4, "MD32_0_JTAG_TDI"), + MTK_FUNCTION(5, "SDA11"), + MTK_FUNCTION(6, "CONN_WF_MCU_TDI"), + MTK_FUNCTION(7, "DBG_MON_A26") + ), + MTK_PIN( + 132, "GPIO132", + MTK_EINT_FUNCTION(0, 132), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO132"), + MTK_FUNCTION(1, "CMVREF2"), + MTK_FUNCTION(2, "ANT_SEL12"), + MTK_FUNCTION(3, "SCP_JTAG0_TMS"), + MTK_FUNCTION(4, "MD32_0_JTAG_TMS"), + MTK_FUNCTION(6, "CONN_WF_MCU_TMS"), + MTK_FUNCTION(7, "DBG_MON_A28") + ), + MTK_PIN( + 133, "GPIO133", + MTK_EINT_FUNCTION(0, 133), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO133"), + MTK_FUNCTION(1, "CMVREF3"), + MTK_FUNCTION(2, "GPS_L1_ELNA_EN"), + MTK_FUNCTION(3, "SCP_JTAG0_TCK"), + MTK_FUNCTION(4, "MD32_0_JTAG_TCK"), + MTK_FUNCTION(6, "CONN_WF_MCU_TCK"), + MTK_FUNCTION(7, "DBG_MON_A24") + ), + MTK_PIN( + 134, "GPIO134", + MTK_EINT_FUNCTION(0, 134), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO134"), + MTK_FUNCTION(1, "CMVREF4"), + MTK_FUNCTION(2, "GPS_L5_ELNA_EN"), + MTK_FUNCTION(3, "SCP_JTAG0_TRSTN"), + MTK_FUNCTION(4, "MD32_0_JTAG_TRST"), + MTK_FUNCTION(6, "CONN_WF_MCU_TRST_B"), + MTK_FUNCTION(7, "DBG_MON_A27") + ), + MTK_PIN( + 135, "GPIO135", + MTK_EINT_FUNCTION(0, 135), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO135"), + MTK_FUNCTION(1, "PWM_0"), + MTK_FUNCTION(2, "SRCLKENAI1"), + MTK_FUNCTION(3, "MD_URXD0"), + MTK_FUNCTION(4, "MD32_0_RXD"), + MTK_FUNCTION(5, "CONN_TCXOENA_REQ"), + MTK_FUNCTION(6, "CONN_WF_MCU_DBGI_N"), + MTK_FUNCTION(7, "DBG_MON_A29") + ), + MTK_PIN( + 136, "GPIO136", + MTK_EINT_FUNCTION(0, 136), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO136"), + MTK_FUNCTION(1, "CMMCLK3"), + MTK_FUNCTION(2, "CLKM1"), + MTK_FUNCTION(3, "MD_UTXD0"), + MTK_FUNCTION(4, "MD32_0_TXD"), + MTK_FUNCTION(5, "CONN_BT_TXD"), + MTK_FUNCTION(6, "CONN_WF_MCU_DBGACK_N"), + MTK_FUNCTION(7, "DBG_MON_A25") + ), + MTK_PIN( + 137, "GPIO137", + MTK_EINT_FUNCTION(0, 137), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO137"), + MTK_FUNCTION(1, "CMMCLK4"), + MTK_FUNCTION(2, "CLKM2"), + MTK_FUNCTION(3, "MD_URXD1"), + MTK_FUNCTION(4, "MD32_1_RXD"), + MTK_FUNCTION(5, "ILDO_DOUT0"), + MTK_FUNCTION(6, "CONN_BGF_UART0_RXD") + ), + 
MTK_PIN( + 138, "GPIO138", + MTK_EINT_FUNCTION(0, 138), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO138"), + MTK_FUNCTION(1, "CMMCLK5"), + MTK_FUNCTION(2, "CLKM3"), + MTK_FUNCTION(3, "MD_UTXD1"), + MTK_FUNCTION(4, "MD32_1_TXD"), + MTK_FUNCTION(5, "ILDO_DOUT1"), + MTK_FUNCTION(6, "CONN_BGF_UART0_TXD") + ), + MTK_PIN( + 139, "GPIO139", + MTK_EINT_FUNCTION(0, 139), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO139"), + MTK_FUNCTION(1, "SCL4") + ), + MTK_PIN( + 140, "GPIO140", + MTK_EINT_FUNCTION(0, 140), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO140"), + MTK_FUNCTION(1, "SDA4") + ), + MTK_PIN( + 141, "GPIO141", + MTK_EINT_FUNCTION(0, 141), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO141"), + MTK_FUNCTION(1, "SCL2") + ), + MTK_PIN( + 142, "GPIO142", + MTK_EINT_FUNCTION(0, 142), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO142"), + MTK_FUNCTION(1, "SDA2") + ), + MTK_PIN( + 143, "GPIO143", + MTK_EINT_FUNCTION(0, 143), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO143"), + MTK_FUNCTION(1, "CMVREF0"), + MTK_FUNCTION(2, "SPI3_CLK"), + MTK_FUNCTION(3, "ADSP_JTAG1_TDO"), + MTK_FUNCTION(4, "SCP_JTAG1_TDO"), + MTK_FUNCTION(5, "MD32_1_JTAG_TDO"), + MTK_FUNCTION(6, "CONN_BGF_DSP_L5_JDO") + ), + MTK_PIN( + 144, "GPIO144", + MTK_EINT_FUNCTION(0, 144), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO144"), + MTK_FUNCTION(1, "CMVREF1"), + MTK_FUNCTION(2, "SPI3_CSB"), + MTK_FUNCTION(3, "ADSP_JTAG1_TDI"), + MTK_FUNCTION(4, "SCP_JTAG1_TDI"), + MTK_FUNCTION(5, "MD32_1_JTAG_TDI"), + MTK_FUNCTION(6, "CONN_BGF_DSP_L5_JDI") + ), + MTK_PIN( + 145, "GPIO145", + MTK_EINT_FUNCTION(0, 145), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO145"), + MTK_FUNCTION(1, "CMVREF2"), + MTK_FUNCTION(2, "SPI3_MI"), + MTK_FUNCTION(3, "ADSP_JTAG1_TMS"), + MTK_FUNCTION(4, "SCP_JTAG1_TMS"), + MTK_FUNCTION(5, "MD32_1_JTAG_TMS"), + MTK_FUNCTION(6, "CONN_BGF_DSP_L5_JMS") + ), + MTK_PIN( + 146, "GPIO146", + MTK_EINT_FUNCTION(0, 146), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO146"), + MTK_FUNCTION(1, "CMVREF3"), + MTK_FUNCTION(2, "SPI3_MO"), + MTK_FUNCTION(3, "ADSP_JTAG1_TCK"), + MTK_FUNCTION(4, "SCP_JTAG1_TCK"), + MTK_FUNCTION(5, "MD32_1_JTAG_TCK"), + MTK_FUNCTION(6, "CONN_BGF_DSP_L5_JCK") + ), + MTK_PIN( + 147, "GPIO147", + MTK_EINT_FUNCTION(0, 147), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO147"), + MTK_FUNCTION(1, "CMVREF4"), + MTK_FUNCTION(2, "EXT_FRAME_SYNC"), + MTK_FUNCTION(3, "ADSP_JTAG1_TRSTN"), + MTK_FUNCTION(4, "SCP_JTAG1_TRSTN"), + MTK_FUNCTION(5, "MD32_1_JTAG_TRST"), + MTK_FUNCTION(6, "CONN_BGF_DSP_L5_JINTP") + ), + MTK_PIN( + 148, "GPIO148", + MTK_EINT_FUNCTION(0, 148), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO148"), + MTK_FUNCTION(1, "PWM_1"), + MTK_FUNCTION(2, "AGPS_SYNC"), + MTK_FUNCTION(3, "CMMCLK5"), + MTK_FUNCTION(6, "CONN_WF_MCU_AICE_TMSC") + ), + MTK_PIN( + 149, "GPIO149", + MTK_EINT_FUNCTION(0, 149), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO149"), + MTK_FUNCTION(1, "CMMCLK0"), + MTK_FUNCTION(2, "CLKM0"), + MTK_FUNCTION(3, "MD32_0_GPIO0"), + MTK_FUNCTION(6, "CONN_WF_MCU_AICE_TCKC") + ), + MTK_PIN( + 150, "GPIO150", + MTK_EINT_FUNCTION(0, 150), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO150"), + MTK_FUNCTION(1, "CMMCLK1"), + MTK_FUNCTION(2, "CLKM1"), + MTK_FUNCTION(3, "MD32_0_GPIO1"), + MTK_FUNCTION(6, "CONN_BGF_MCU_AICE_TMSC") + ), + MTK_PIN( + 151, "GPIO151", + MTK_EINT_FUNCTION(0, 151), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO151"), + MTK_FUNCTION(1, "CMMCLK2"), + MTK_FUNCTION(2, "CLKM2"), + MTK_FUNCTION(3, "MD32_0_GPIO2"), + MTK_FUNCTION(6, "CONN_BGF_MCU_AICE_TCKC") + ), + MTK_PIN( + 152, "GPIO152", + MTK_EINT_FUNCTION(0, 152), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO152"), + MTK_FUNCTION(1, "KPROW1"), + MTK_FUNCTION(2, "PWM_2"), + 
MTK_FUNCTION(3, "IDDIG"), + MTK_FUNCTION(4, "DP_TX_HPD"), + MTK_FUNCTION(5, "DSI1_TE"), + MTK_FUNCTION(6, "MBISTREADEN_TRIGGER"), + MTK_FUNCTION(7, "DBG_MON_B2") + ), + MTK_PIN( + 153, "GPIO153", + MTK_EINT_FUNCTION(0, 153), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO153"), + MTK_FUNCTION(1, "KPROW0"), + MTK_FUNCTION(7, "DBG_MON_B1") + ), + MTK_PIN( + 154, "GPIO154", + MTK_EINT_FUNCTION(0, 154), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO154"), + MTK_FUNCTION(1, "KPCOL0"), + MTK_FUNCTION(7, "DBG_MON_A32") + ), + MTK_PIN( + 155, "GPIO155", + MTK_EINT_FUNCTION(0, 155), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO155"), + MTK_FUNCTION(1, "KPCOL1"), + MTK_FUNCTION(2, "PWM_3"), + MTK_FUNCTION(3, "USB_DRVVBUS"), + MTK_FUNCTION(4, "CONN_TCXOENA_REQ"), + MTK_FUNCTION(5, "LCM1_RST"), + MTK_FUNCTION(6, "MBISTWRITEEN_TRIGGER"), + MTK_FUNCTION(7, "DBG_MON_B0") + ), + MTK_PIN( + 156, "GPIO156", + MTK_EINT_FUNCTION(0, 156), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO156"), + MTK_FUNCTION(1, "SPI1_A_CLK"), + MTK_FUNCTION(2, "SCP_SPI1_A_CK"), + MTK_FUNCTION(3, "MRG_CLK"), + MTK_FUNCTION(4, "AGPS_SYNC"), + MTK_FUNCTION(5, "SCL12"), + MTK_FUNCTION(7, "DBG_MON_B3") + ), + MTK_PIN( + 157, "GPIO157", + MTK_EINT_FUNCTION(0, 157), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO157"), + MTK_FUNCTION(1, "SPI1_A_CSB"), + MTK_FUNCTION(2, "SCP_SPI1_A_CS"), + MTK_FUNCTION(3, "MRG_SYNC"), + MTK_FUNCTION(4, "EXT_FRAME_SYNC"), + MTK_FUNCTION(5, "SDA12"), + MTK_FUNCTION(7, "DBG_MON_B4") + ), + MTK_PIN( + 158, "GPIO158", + MTK_EINT_FUNCTION(0, 158), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO158"), + MTK_FUNCTION(1, "SPI1_A_MI"), + MTK_FUNCTION(2, "SCP_SPI1_A_MI"), + MTK_FUNCTION(3, "MRG_DI"), + MTK_FUNCTION(4, "PTA_RXD"), + MTK_FUNCTION(5, "SCL13"), + MTK_FUNCTION(7, "DBG_MON_B5") + ), + MTK_PIN( + 159, "GPIO159", + MTK_EINT_FUNCTION(0, 159), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO159"), + MTK_FUNCTION(1, "SPI1_A_MO"), + MTK_FUNCTION(2, "SCP_SPI1_A_MO"), + MTK_FUNCTION(3, "MRG_DO"), + MTK_FUNCTION(4, "PTA_TXD"), + MTK_FUNCTION(5, "SDA13"), + MTK_FUNCTION(7, "DBG_MON_B6") + ), + MTK_PIN( + 160, "GPIO160", + MTK_EINT_FUNCTION(0, 160), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO160"), + MTK_FUNCTION(1, "SCL3"), + MTK_FUNCTION(2, "SCP_SCL0"), + MTK_FUNCTION(3, "SCP_SCL1") + ), + MTK_PIN( + 161, "GPIO161", + MTK_EINT_FUNCTION(0, 161), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO161"), + MTK_FUNCTION(1, "SDA3"), + MTK_FUNCTION(2, "SCP_SDA0"), + MTK_FUNCTION(3, "SCP_SDA1") + ), + MTK_PIN( + 162, "GPIO162", + MTK_EINT_FUNCTION(0, 162), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO162"), + MTK_FUNCTION(1, "ANT_SEL0"), + MTK_FUNCTION(2, "GPS_L1_ELNA_EN"), + MTK_FUNCTION(7, "DBG_MON_B7") + ), + MTK_PIN( + 163, "GPIO163", + MTK_EINT_FUNCTION(0, 163), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO163"), + MTK_FUNCTION(1, "ANT_SEL1"), + MTK_FUNCTION(2, "GPS_L5_ELNA_EN"), + MTK_FUNCTION(7, "DBG_MON_B8") + ), + MTK_PIN( + 164, "GPIO164", + MTK_EINT_FUNCTION(0, 164), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO164"), + MTK_FUNCTION(1, "ANT_SEL2"), + MTK_FUNCTION(2, "SCP_SPI1_B_CK"), + MTK_FUNCTION(3, "TP_URXD1_AO"), + MTK_FUNCTION(5, "UCTS0"), + MTK_FUNCTION(7, "DBG_MON_B9") + ), + MTK_PIN( + 165, "GPIO165", + MTK_EINT_FUNCTION(0, 165), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO165"), + MTK_FUNCTION(1, "ANT_SEL3"), + MTK_FUNCTION(2, "SCP_SPI1_B_CS"), + MTK_FUNCTION(3, "TP_UTXD1_AO"), + MTK_FUNCTION(4, "CONN_TCXOENA_REQ"), + MTK_FUNCTION(5, "URTS0"), + MTK_FUNCTION(7, "DBG_MON_B10") + ), + MTK_PIN( + 166, "GPIO166", + MTK_EINT_FUNCTION(0, 166), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO166"), + MTK_FUNCTION(1, "ANT_SEL4"), + MTK_FUNCTION(2, 
"SCP_SPI1_B_MI"), + MTK_FUNCTION(3, "TP_URXD2_AO"), + MTK_FUNCTION(4, "SRCLKENAI1"), + MTK_FUNCTION(5, "UCTS1"), + MTK_FUNCTION(7, "DBG_MON_B11") + ), + MTK_PIN( + 167, "GPIO167", + MTK_EINT_FUNCTION(0, 167), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO167"), + MTK_FUNCTION(1, "ANT_SEL5"), + MTK_FUNCTION(2, "SCP_SPI1_B_MO"), + MTK_FUNCTION(3, "TP_UTXD2_AO"), + MTK_FUNCTION(4, "SRCLKENAI0"), + MTK_FUNCTION(5, "URTS1"), + MTK_FUNCTION(7, "DBG_MON_B12") + ), + MTK_PIN( + 168, "GPIO168", + MTK_EINT_FUNCTION(0, 168), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO168"), + MTK_FUNCTION(1, "ANT_SEL6"), + MTK_FUNCTION(2, "SPI0_B_CLK"), + MTK_FUNCTION(3, "TP_UCTS1_AO"), + MTK_FUNCTION(4, "KPCOL2"), + MTK_FUNCTION(5, "MD_UCTS0"), + MTK_FUNCTION(6, "SCL12"), + MTK_FUNCTION(7, "DBG_MON_B13") + ), + MTK_PIN( + 169, "GPIO169", + MTK_EINT_FUNCTION(0, 169), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO169"), + MTK_FUNCTION(1, "ANT_SEL7"), + MTK_FUNCTION(2, "SPI0_B_CSB"), + MTK_FUNCTION(3, "TP_URTS1_AO"), + MTK_FUNCTION(4, "KPROW2"), + MTK_FUNCTION(5, "MD_URTS0"), + MTK_FUNCTION(6, "SDA12"), + MTK_FUNCTION(7, "DBG_MON_B14") + ), + MTK_PIN( + 170, "GPIO170", + MTK_EINT_FUNCTION(0, 170), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO170"), + MTK_FUNCTION(1, "ANT_SEL8"), + MTK_FUNCTION(2, "SPI0_B_MI"), + MTK_FUNCTION(3, "TP_UCTS2_AO"), + MTK_FUNCTION(4, "SRCLKENAI1"), + MTK_FUNCTION(5, "MD_UCTS1"), + MTK_FUNCTION(6, "SCL13") + ), + MTK_PIN( + 171, "GPIO171", + MTK_EINT_FUNCTION(0, 171), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO171"), + MTK_FUNCTION(1, "ANT_SEL9"), + MTK_FUNCTION(2, "SPI0_B_MO"), + MTK_FUNCTION(3, "TP_URTS2_AO"), + MTK_FUNCTION(4, "SRCLKENAI0"), + MTK_FUNCTION(5, "MD_URTS1"), + MTK_FUNCTION(6, "SDA13") + ), + MTK_PIN( + 172, "GPIO172", + MTK_EINT_FUNCTION(0, 172), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO172"), + MTK_FUNCTION(1, "CONN_TOP_CLK"), + MTK_FUNCTION(2, "AUXIF_CLK0"), + MTK_FUNCTION(7, "DBG_MON_B18") + ), + MTK_PIN( + 173, "GPIO173", + MTK_EINT_FUNCTION(0, 173), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO173"), + MTK_FUNCTION(1, "CONN_TOP_DATA"), + MTK_FUNCTION(2, "AUXIF_ST0"), + MTK_FUNCTION(7, "DBG_MON_B19") + ), + MTK_PIN( + 174, "GPIO174", + MTK_EINT_FUNCTION(0, 174), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO174"), + MTK_FUNCTION(1, "CONN_HRST_B"), + MTK_FUNCTION(7, "DBG_MON_B17") + ), + MTK_PIN( + 175, "GPIO175", + MTK_EINT_FUNCTION(0, 175), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO175"), + MTK_FUNCTION(1, "CONN_WB_PTA"), + MTK_FUNCTION(7, "DBG_MON_B20") + ), + MTK_PIN( + 176, "GPIO176", + MTK_EINT_FUNCTION(0, 176), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO176"), + MTK_FUNCTION(1, "CONN_BT_CLK"), + MTK_FUNCTION(2, "AUXIF_CLK1"), + MTK_FUNCTION(7, "DBG_MON_B15") + ), + MTK_PIN( + 177, "GPIO177", + MTK_EINT_FUNCTION(0, 177), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO177"), + MTK_FUNCTION(1, "CONN_BT_DATA"), + MTK_FUNCTION(2, "AUXIF_ST1"), + MTK_FUNCTION(7, "DBG_MON_B16") + ), + MTK_PIN( + 178, "GPIO178", + MTK_EINT_FUNCTION(0, 178), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO178"), + MTK_FUNCTION(1, "CONN_WF_CTRL0"), + MTK_FUNCTION(7, "DBG_MON_B21") + ), + MTK_PIN( + 179, "GPIO179", + MTK_EINT_FUNCTION(0, 179), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO179"), + MTK_FUNCTION(1, "CONN_WF_CTRL1"), + MTK_FUNCTION(2, "UFS_MPHY_SCL"), + MTK_FUNCTION(7, "DBG_MON_B22") + ), + MTK_PIN( + 180, "GPIO180", + MTK_EINT_FUNCTION(0, 180), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO180"), + MTK_FUNCTION(1, "CONN_WF_CTRL2"), + MTK_FUNCTION(2, "UFS_MPHY_SDA"), + MTK_FUNCTION(7, "DBG_MON_B23") + ), + MTK_PIN( + 181, "GPIO181", + MTK_EINT_FUNCTION(0, 181), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO181"), + 
MTK_FUNCTION(1, "CONN_WF_CTRL3"), + MTK_FUNCTION(2, "UFS_UNIPRO_SDA") + ), + MTK_PIN( + 182, "GPIO182", + MTK_EINT_FUNCTION(0, 182), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO182"), + MTK_FUNCTION(1, "CONN_WF_CTRL4"), + MTK_FUNCTION(2, "UFS_UNIPRO_SCL") + ), + MTK_PIN( + 183, "GPIO183", + MTK_EINT_FUNCTION(0, 183), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO183"), + MTK_FUNCTION(1, "MSDC0_CMD") + ), + MTK_PIN( + 184, "GPIO184", + MTK_EINT_FUNCTION(0, 184), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO184"), + MTK_FUNCTION(1, "MSDC0_DAT0") + ), + MTK_PIN( + 185, "GPIO185", + MTK_EINT_FUNCTION(0, 185), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO185"), + MTK_FUNCTION(1, "MSDC0_DAT2") + ), + MTK_PIN( + 186, "GPIO186", + MTK_EINT_FUNCTION(0, 186), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO186"), + MTK_FUNCTION(1, "MSDC0_DAT4") + ), + MTK_PIN( + 187, "GPIO187", + MTK_EINT_FUNCTION(0, 187), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO187"), + MTK_FUNCTION(1, "MSDC0_DAT6") + ), + MTK_PIN( + 188, "GPIO188", + MTK_EINT_FUNCTION(0, 188), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO188"), + MTK_FUNCTION(1, "MSDC0_DAT1") + ), + MTK_PIN( + 189, "GPIO189", + MTK_EINT_FUNCTION(0, 189), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO189"), + MTK_FUNCTION(1, "MSDC0_DAT5") + ), + MTK_PIN( + 190, "GPIO190", + MTK_EINT_FUNCTION(0, 190), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO190"), + MTK_FUNCTION(1, "MSDC0_DAT7") + ), + MTK_PIN( + 191, "GPIO191", + MTK_EINT_FUNCTION(0, 191), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO191"), + MTK_FUNCTION(1, "MSDC0_DSL"), + MTK_FUNCTION(2, "GPS_L1_ELNA_EN"), + MTK_FUNCTION(3, "IDDIG"), + MTK_FUNCTION(4, "DMIC_CLK"), + MTK_FUNCTION(5, "DSI1_TE") + ), + MTK_PIN( + 192, "GPIO192", + MTK_EINT_FUNCTION(0, 192), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO192"), + MTK_FUNCTION(1, "MSDC0_CLK"), + MTK_FUNCTION(2, "GPS_L5_ELNA_EN"), + MTK_FUNCTION(3, "USB_DRVVBUS"), + MTK_FUNCTION(4, "DMIC_DAT"), + MTK_FUNCTION(5, "LCM1_RST") + ), + MTK_PIN( + 193, "GPIO193", + MTK_EINT_FUNCTION(0, 193), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO193"), + MTK_FUNCTION(1, "MSDC0_DAT3") + ), + MTK_PIN( + 194, "GPIO194", + MTK_EINT_FUNCTION(0, 194), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO194"), + MTK_FUNCTION(1, "MSDC0_RSTB") + ), + MTK_PIN( + 195, "GPIO195", + MTK_EINT_FUNCTION(0, 195), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO195"), + MTK_FUNCTION(1, "SCP_VREQ_VAO"), + MTK_FUNCTION(2, "DVFSRC_EXT_REQ") + ), + MTK_PIN( + 196, "GPIO196", + MTK_EINT_FUNCTION(0, 196), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO196"), + MTK_FUNCTION(1, "AUD_DAT_MOSI2"), + MTK_FUNCTION(7, "DBG_MON_B27") + ), + MTK_PIN( + 197, "GPIO197", + MTK_EINT_FUNCTION(0, 197), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO197"), + MTK_FUNCTION(1, "AUD_NLE_MOSI1"), + MTK_FUNCTION(2, "AUD_CLK_MISO"), + MTK_FUNCTION(3, "I2S2_MCK"), + MTK_FUNCTION(4, "I2S6_MCK"), + MTK_FUNCTION(5, "I2S8_MCK"), + MTK_FUNCTION(6, "UFS_UNIPRO_SDA"), + MTK_FUNCTION(7, "DBG_MON_B28") + ), + MTK_PIN( + 198, "GPIO198", + MTK_EINT_FUNCTION(0, 198), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO198"), + MTK_FUNCTION(1, "AUD_NLE_MOSI0"), + MTK_FUNCTION(2, "AUD_SYNC_MISO"), + MTK_FUNCTION(3, "I2S2_BCK"), + MTK_FUNCTION(4, "I2S6_BCK"), + MTK_FUNCTION(5, "I2S8_BCK"), + MTK_FUNCTION(7, "DBG_MON_B29") + ), + MTK_PIN( + 199, "GPIO199", + MTK_EINT_FUNCTION(0, 199), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO199"), + MTK_FUNCTION(1, "AUD_DAT_MISO2"), + MTK_FUNCTION(3, "I2S2_DI2"), + MTK_FUNCTION(7, "DBG_MON_B32") + ), + MTK_PIN( + 200, "GPIO200", + MTK_EINT_FUNCTION(0, 200), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO200"), + MTK_FUNCTION(1, "SCL6"), + MTK_FUNCTION(2, "SCP_SCL0"), + MTK_FUNCTION(3, "SCP_SCL1"), + 
MTK_FUNCTION(4, "SCL_6306") + ), + MTK_PIN( + 201, "GPIO201", + MTK_EINT_FUNCTION(0, 201), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO201"), + MTK_FUNCTION(1, "SDA6"), + MTK_FUNCTION(2, "SCP_SDA0"), + MTK_FUNCTION(3, "SCP_SDA1"), + MTK_FUNCTION(4, "SDA_6306") + ), + MTK_PIN( + 202, "GPIO202", + MTK_EINT_FUNCTION(0, 202), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO202"), + MTK_FUNCTION(1, "SCL5") + ), + MTK_PIN( + 203, "GPIO203", + MTK_EINT_FUNCTION(0, 203), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO203"), + MTK_FUNCTION(1, "SDA5") + ), + MTK_PIN( + 204, "GPIO204", + MTK_EINT_FUNCTION(0, 204), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO204"), + MTK_FUNCTION(1, "SCL0"), + MTK_FUNCTION(2, "SPI4_C_CLK"), + MTK_FUNCTION(3, "SPI7_B_CLK") + ), + MTK_PIN( + 205, "GPIO205", + MTK_EINT_FUNCTION(0, 205), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO205"), + MTK_FUNCTION(1, "SDA0"), + MTK_FUNCTION(2, "SPI4_C_CSB"), + MTK_FUNCTION(3, "SPI7_B_CSB") + ), + MTK_PIN( + 206, "GPIO206", + MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO206"), + MTK_FUNCTION(1, "SRCLKENA0") + ), + MTK_PIN( + 207, "GPIO207", + MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO207"), + MTK_FUNCTION(1, "SRCLKENA1") + ), + MTK_PIN( + 208, "GPIO208", + MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO208"), + MTK_FUNCTION(1, "WATCHDOG") + ), + MTK_PIN( + 209, "GPIO209", + MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO209"), + MTK_FUNCTION(1, "PWRAP_SPI0_MI"), + MTK_FUNCTION(2, "PWRAP_SPI0_MO") + ), + MTK_PIN( + 210, "GPIO210", + MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO210"), + MTK_FUNCTION(1, "PWRAP_SPI0_CSN") + ), + MTK_PIN( + 211, "GPIO211", + MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO211"), + MTK_FUNCTION(1, "PWRAP_SPI0_MO"), + MTK_FUNCTION(2, "PWRAP_SPI0_MI") + ), + MTK_PIN( + 212, "GPIO212", + MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO212"), + MTK_FUNCTION(1, "PWRAP_SPI0_CK") + ), + MTK_PIN( + 213, "GPIO213", + MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO213"), + MTK_FUNCTION(1, "RTC32K_CK") + ), + MTK_PIN( + 214, "GPIO214", + MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO214"), + MTK_FUNCTION(1, "AUD_CLK_MOSI"), + MTK_FUNCTION(3, "I2S1_MCK"), + MTK_FUNCTION(4, "I2S7_MCK"), + MTK_FUNCTION(5, "I2S9_MCK"), + MTK_FUNCTION(6, "UFS_UNIPRO_SCL") + ), + MTK_PIN( + 215, "GPIO215", + MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO215"), + MTK_FUNCTION(1, "AUD_SYNC_MOSI"), + MTK_FUNCTION(3, "I2S1_BCK"), + MTK_FUNCTION(4, "I2S7_BCK"), + MTK_FUNCTION(5, "I2S9_BCK"), + MTK_FUNCTION(7, "DBG_MON_B24") + ), + MTK_PIN( + 216, "GPIO216", + MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO216"), + MTK_FUNCTION(1, "AUD_DAT_MOSI0"), + MTK_FUNCTION(3, "I2S1_LRCK"), + MTK_FUNCTION(4, "I2S7_LRCK"), + MTK_FUNCTION(5, "I2S9_LRCK"), + MTK_FUNCTION(7, "DBG_MON_B25") + ), + MTK_PIN( + 217, "GPIO217", + MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO217"), + MTK_FUNCTION(1, "AUD_DAT_MOSI1"), + MTK_FUNCTION(3, "I2S1_DO"), + MTK_FUNCTION(4, "I2S7_DO"), + MTK_FUNCTION(5, "I2S9_DO"), + MTK_FUNCTION(6, "UFS_MPHY_SDA"), + MTK_FUNCTION(7, "DBG_MON_B26") + ), + MTK_PIN( + 218, "GPIO218", + 
MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO218"), + MTK_FUNCTION(1, "AUD_DAT_MISO0"), + MTK_FUNCTION(2, "VOW_DAT_MISO"), + MTK_FUNCTION(3, "I2S2_LRCK"), + MTK_FUNCTION(4, "I2S6_LRCK"), + MTK_FUNCTION(5, "I2S8_LRCK"), + MTK_FUNCTION(7, "DBG_MON_B30") + ), + MTK_PIN( + 219, "GPIO219", + MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO219"), + MTK_FUNCTION(1, "AUD_DAT_MISO1"), + MTK_FUNCTION(2, "VOW_CLK_MISO"), + MTK_FUNCTION(3, "I2S2_DI"), + MTK_FUNCTION(4, "I2S6_DI"), + MTK_FUNCTION(5, "I2S8_DI"), + MTK_FUNCTION(6, "UFS_MPHY_SCL"), + MTK_FUNCTION(7, "DBG_MON_B31") + ), + MTK_PIN( + 220, "GPIO220", + MTK_EINT_FUNCTION(0, 216), + DRV_GRP4, + MTK_FUNCTION(0, NULL) + ), + MTK_PIN( + 221, "GPIO221", + MTK_EINT_FUNCTION(0, 217), + DRV_GRP4, + MTK_FUNCTION(0, NULL) + ), + MTK_PIN( + 222, "GPIO222", + MTK_EINT_FUNCTION(0, 218), + DRV_GRP4, + MTK_FUNCTION(0, NULL) + ), + MTK_PIN( + 223, "GPIO223", + MTK_EINT_FUNCTION(0, 219), + DRV_GRP4, + MTK_FUNCTION(0, NULL) + ), + MTK_PIN( + 224, "GPIO224", + MTK_EINT_FUNCTION(0, 220), + DRV_GRP4, + MTK_FUNCTION(0, NULL) + ), + MTK_PIN( + 225, "GPIO225", + MTK_EINT_FUNCTION(0, 222), + DRV_GRP4, + MTK_FUNCTION(0, NULL) + ), + MTK_PIN( + 226, "GPIO226", + MTK_EINT_FUNCTION(0, 223), + DRV_GRP4, + MTK_FUNCTION(0, NULL) + ), +}; + +#endif /* __PINCTRL_MTK_MT6893_H */ diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-mt8196.h b/drivers/pinctrl/mediatek/pinctrl-mtk-mt8196.h new file mode 100644 index 000000000000..c2a7e239a234 --- /dev/null +++ b/drivers/pinctrl/mediatek/pinctrl-mtk-mt8196.h @@ -0,0 +1,3085 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2025 MediaTek Inc. + * Author: Guodong Liu <Guodong.Liu@mediatek.com> + */ + +#ifndef __PINCTRL_MTK_MT8196_H +#define __PINCTRL_MTK_MT8196_H + +#include "pinctrl-paris.h" +#define EINT_INVALID_BASE 0xff + +static const struct mtk_pin_desc mtk_pins_mt8196[] = { + MTK_PIN( + 0, "GPIO0", + MTK_EINT_FUNCTION(0, 0), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO0"), + MTK_FUNCTION(1, "DMIC1_CLK"), + MTK_FUNCTION(3, "SPI3_A_MO"), + MTK_FUNCTION(4, "FMI2S_B_LRCK"), + MTK_FUNCTION(5, "SCP_DMIC1_CLK"), + MTK_FUNCTION(6, "TP_GPIO14_AO") + ), + MTK_PIN( + 1, "GPIO1", + MTK_EINT_FUNCTION(0, 1), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO1"), + MTK_FUNCTION(1, "DMIC1_DAT"), + MTK_FUNCTION(2, "SRCLKENAI1"), + MTK_FUNCTION(3, "SPI3_A_MI"), + MTK_FUNCTION(4, "FMI2S_B_DI"), + MTK_FUNCTION(5, "SCP_DMIC1_DAT"), + MTK_FUNCTION(6, "TP_GPIO15_AO") + ), + MTK_PIN( + 2, "GPIO2", + MTK_EINT_FUNCTION(0, 2), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO2"), + MTK_FUNCTION(1, "PWM_VLP"), + MTK_FUNCTION(2, "DSI_HSYNC"), + MTK_FUNCTION(5, "RG_TSFDC_LDO_EN"), + MTK_FUNCTION(6, "TP_GPIO8_AO") + ), + MTK_PIN( + 3, "GPIO3", + MTK_EINT_FUNCTION(0, 3), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO3"), + MTK_FUNCTION(1, "MD_INT0"), + MTK_FUNCTION(2, "DSI1_HSYNC"), + MTK_FUNCTION(5, "DA_TSFDC_LDO_MODE"), + MTK_FUNCTION(6, "TP_GPIO9_AO") + ), + MTK_PIN( + 4, "GPIO4", + MTK_EINT_FUNCTION(0, 4), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO4"), + MTK_FUNCTION(1, "DISP_PWM1"), + MTK_FUNCTION(2, "MD32_0_GPIO0") + ), + MTK_PIN( + 5, "GPIO5", + MTK_EINT_FUNCTION(0, 5), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO5"), + MTK_FUNCTION(1, "LCM1_RST"), + MTK_FUNCTION(2, "SPI7_A_CLK") + ), + MTK_PIN( + 6, "GPIO6", + MTK_EINT_FUNCTION(0, 6), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO6"), + MTK_FUNCTION(1, "DSI1_TE"), + MTK_FUNCTION(2, "SPI7_A_CSB") + ), + MTK_PIN( + 7, "GPIO7", + MTK_EINT_FUNCTION(0, 7), + DRV_GRP4, + MTK_FUNCTION(0, 
"GPIO7"), + MTK_FUNCTION(2, "SPI7_A_MO"), + MTK_FUNCTION(3, "GPS_PPS0") + ), + MTK_PIN( + 8, "GPIO8", + MTK_EINT_FUNCTION(0, 8), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO8"), + MTK_FUNCTION(2, "SPI7_A_MI"), + MTK_FUNCTION(3, "EDP_TX_HPD") + ), + MTK_PIN( + 9, "GPIO9", + MTK_EINT_FUNCTION(0, 9), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO9"), + MTK_FUNCTION(3, "I2SIN1_LRCK"), + MTK_FUNCTION(7, "RG_TSFDC_LDO_REFSEL0") + ), + MTK_PIN( + 10, "GPIO10", + MTK_EINT_FUNCTION(0, 10), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO10"), + MTK_FUNCTION(3, "I2SOUT1_DO"), + MTK_FUNCTION(7, "RG_TSFDC_LDO_REFSEL1") + ), + MTK_PIN( + 11, "GPIO11", + MTK_EINT_FUNCTION(0, 11), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO11"), + MTK_FUNCTION(4, "FMI2S_B_BCK"), + MTK_FUNCTION(7, "DBG_MON_A30") + ), + MTK_PIN( + 12, "GPIO12", + MTK_EINT_FUNCTION(0, 12), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO12"), + MTK_FUNCTION(3, "I2SIN1_DI_B") + ), + MTK_PIN( + 13, "GPIO13", + MTK_EINT_FUNCTION(0, 13), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO13"), + MTK_FUNCTION(1, "EDP_TX_HPD"), + MTK_FUNCTION(2, "GPS_PPS1") + ), + MTK_PIN( + 14, "GPIO14", + MTK_EINT_FUNCTION(0, 14), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO14"), + MTK_FUNCTION(1, "SRCLKENA2"), + MTK_FUNCTION(2, "DSI2_TE"), + MTK_FUNCTION(3, "SPMI_P_TRIG_FLAG"), + MTK_FUNCTION(5, "MD_INT3"), + MTK_FUNCTION(6, "TP_GPIO8_AO") + ), + MTK_PIN( + 15, "GPIO15", + MTK_EINT_FUNCTION(0, 15), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO15"), + MTK_FUNCTION(1, "SRCLKENAI0"), + MTK_FUNCTION(2, "SPMI_M_TRIG_FLAG"), + MTK_FUNCTION(3, "UCTS0"), + MTK_FUNCTION(4, "MD_INT4"), + MTK_FUNCTION(5, "I2SOUT2_DO"), + MTK_FUNCTION(6, "TP_GPIO9_AO") + ), + MTK_PIN( + 16, "GPIO16", + MTK_EINT_FUNCTION(0, 16), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO16"), + MTK_FUNCTION(1, "SRCLKENAI1"), + MTK_FUNCTION(2, "DP_TX_HPD"), + MTK_FUNCTION(3, "URTS0"), + MTK_FUNCTION(4, "GPS_L5_ELNA_EN"), + MTK_FUNCTION(5, "KPROW2"), + MTK_FUNCTION(6, "TP_GPIO10_AO") + ), + MTK_PIN( + 17, "GPIO17", + MTK_EINT_FUNCTION(0, 17), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO17"), + MTK_FUNCTION(1, "MD_INT0"), + MTK_FUNCTION(2, "DP_OC_EN"), + MTK_FUNCTION(3, "UCTS1"), + MTK_FUNCTION(4, "MD_NTN_URXD1"), + MTK_FUNCTION(5, "KPCOL2"), + MTK_FUNCTION(6, "TP_GPIO11_AO") + ), + MTK_PIN( + 18, "GPIO18", + MTK_EINT_FUNCTION(0, 18), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO18"), + MTK_FUNCTION(1, "DMIC1_CLK"), + MTK_FUNCTION(2, "DP_RAUX_SBU1"), + MTK_FUNCTION(3, "URTS1"), + MTK_FUNCTION(4, "MD_NTN_UTXD1"), + MTK_FUNCTION(5, "I2SIN2_DI"), + MTK_FUNCTION(6, "TP_UTXD_GNSS_VLP") + ), + MTK_PIN( + 19, "GPIO19", + MTK_EINT_FUNCTION(0, 19), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO19"), + MTK_FUNCTION(1, "DMIC1_DAT"), + MTK_FUNCTION(2, "DP_RAUX_SBU2"), + MTK_FUNCTION(3, "CONN_TCXOENA_REQ"), + MTK_FUNCTION(4, "CLKM3_A"), + MTK_FUNCTION(5, "I2SIN2_BCK"), + MTK_FUNCTION(6, "TP_URXD_GNSS_VLP") + ), + MTK_PIN( + 20, "GPIO20", + MTK_EINT_FUNCTION(0, 20), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO20"), + MTK_FUNCTION(1, "IDDIG"), + MTK_FUNCTION(2, "LCM2_RST"), + MTK_FUNCTION(3, "GPS_PPS1"), + MTK_FUNCTION(4, "CLKM2_A") + ), + MTK_PIN( + 21, "GPIO21", + MTK_EINT_FUNCTION(0, 21), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO21"), + MTK_FUNCTION(1, "BPI_BUS11"), + MTK_FUNCTION(2, "PCIE_PERSTN_1P"), + MTK_FUNCTION(3, "DSI1_TE"), + MTK_FUNCTION(4, "DMIC_CLK"), + MTK_FUNCTION(5, "SCP_DMIC_CLK") + ), + MTK_PIN( + 22, "GPIO22", + MTK_EINT_FUNCTION(0, 22), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO22"), + MTK_FUNCTION(1, "BPI_BUS12"), + MTK_FUNCTION(2, "PCIE_CLKREQN_1P"), + MTK_FUNCTION(3, "DSI2_TE"), + MTK_FUNCTION(4, "DMIC_DAT"), + MTK_FUNCTION(5, "SCP_DMIC_DAT") + 
), + MTK_PIN( + 23, "GPIO23", + MTK_EINT_FUNCTION(0, 23), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO23"), + MTK_FUNCTION(1, "BPI_BUS13"), + MTK_FUNCTION(2, "PCIE_WAKEN_1P"), + MTK_FUNCTION(3, "DSI3_TE"), + MTK_FUNCTION(4, "DMIC1_CLK"), + MTK_FUNCTION(5, "SCP_DMIC1_CLK") + ), + MTK_PIN( + 24, "GPIO24", + MTK_EINT_FUNCTION(0, 24), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO24"), + MTK_FUNCTION(1, "BPI_BUS14"), + MTK_FUNCTION(2, "LCM1_RST"), + MTK_FUNCTION(3, "AGPS_SYNC"), + MTK_FUNCTION(4, "DMIC1_DAT"), + MTK_FUNCTION(5, "SCP_DMIC1_DAT"), + MTK_FUNCTION(6, "DISP_PWM1") + ), + MTK_PIN( + 25, "GPIO25", + MTK_EINT_FUNCTION(0, 25), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO25"), + MTK_FUNCTION(1, "BPI_BUS15"), + MTK_FUNCTION(2, "LCM2_RST"), + MTK_FUNCTION(3, "SRCLKENAI1"), + MTK_FUNCTION(4, "DMIC2_CLK"), + MTK_FUNCTION(6, "DISP_PWM2") + ), + MTK_PIN( + 26, "GPIO26", + MTK_EINT_FUNCTION(0, 26), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO26"), + MTK_FUNCTION(1, "BPI_BUS16"), + MTK_FUNCTION(2, "LCM3_RST"), + MTK_FUNCTION(4, "DMIC2_DAT"), + MTK_FUNCTION(6, "DISP_PWM3") + ), + MTK_PIN( + 27, "GPIO27", + MTK_EINT_FUNCTION(0, 27), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO27"), + MTK_FUNCTION(1, "BPI_BUS17"), + MTK_FUNCTION(2, "UTXD4"), + MTK_FUNCTION(6, "DISP_PWM4"), + MTK_FUNCTION(7, "DBG_MON_A20") + ), + MTK_PIN( + 28, "GPIO28", + MTK_EINT_FUNCTION(0, 28), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO28"), + MTK_FUNCTION(1, "BPI_BUS18"), + MTK_FUNCTION(2, "URXD4"), + MTK_FUNCTION(3, "SPI2_A_MI"), + MTK_FUNCTION(4, "CLKM0_A"), + MTK_FUNCTION(7, "DBG_MON_A21") + ), + MTK_PIN( + 29, "GPIO29", + MTK_EINT_FUNCTION(0, 29), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO29"), + MTK_FUNCTION(1, "BPI_BUS19"), + MTK_FUNCTION(2, "MD_NTN_UTXD1"), + MTK_FUNCTION(3, "SPI2_A_MO"), + MTK_FUNCTION(4, "CLKM1_A"), + MTK_FUNCTION(6, "UCTS4"), + MTK_FUNCTION(7, "DBG_MON_A17") + ), + MTK_PIN( + 30, "GPIO30", + MTK_EINT_FUNCTION(0, 30), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO30"), + MTK_FUNCTION(1, "BPI_BUS20"), + MTK_FUNCTION(2, "MD_NTN_URXD1"), + MTK_FUNCTION(3, "SPI2_A_CLK"), + MTK_FUNCTION(4, "CLKM2_A"), + MTK_FUNCTION(5, "DSI3_HSYNC"), + MTK_FUNCTION(6, "URTS4"), + MTK_FUNCTION(7, "DBG_MON_A18") + ), + MTK_PIN( + 31, "GPIO31", + MTK_EINT_FUNCTION(0, 31), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO31"), + MTK_FUNCTION(1, "BPI_BUS21"), + MTK_FUNCTION(3, "SPI2_A_CSB"), + MTK_FUNCTION(4, "CLKM3_A"), + MTK_FUNCTION(6, "EDP_TX_HPD"), + MTK_FUNCTION(7, "DBG_MON_A19") + ), + MTK_PIN( + 32, "GPIO32", + MTK_EINT_FUNCTION(0, 32), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO32"), + MTK_FUNCTION(1, "LCM4_RST"), + MTK_FUNCTION(2, "DP_TX_HPD"), + MTK_FUNCTION(3, "SSPM_JTAG_TCK_VLP"), + MTK_FUNCTION(4, "ADSP_JTAG0_TCK"), + MTK_FUNCTION(5, "SCP_JTAG0_TCK_VLP"), + MTK_FUNCTION(6, "SPU0_TCK"), + MTK_FUNCTION(7, "IO_JTAG_TCK") + ), + MTK_PIN( + 33, "GPIO33", + MTK_EINT_FUNCTION(0, 33), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO33"), + MTK_FUNCTION(1, "DSI4_TE"), + MTK_FUNCTION(2, "DP_OC_EN"), + MTK_FUNCTION(3, "SSPM_JTAG_TRSTN_VLP"), + MTK_FUNCTION(4, "ADSP_JTAG0_TRSTN"), + MTK_FUNCTION(5, "SCP_JTAG0_TRSTN_VLP"), + MTK_FUNCTION(6, "SPU0_NTRST"), + MTK_FUNCTION(7, "IO_JTAG_TRSTN") + ), + MTK_PIN( + 34, "GPIO34", + MTK_EINT_FUNCTION(0, 34), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO34"), + MTK_FUNCTION(1, "UCTS5"), + MTK_FUNCTION(2, "DP_RAUX_SBU1"), + MTK_FUNCTION(3, "SSPM_JTAG_TDI_VLP"), + MTK_FUNCTION(4, "ADSP_JTAG0_TDI"), + MTK_FUNCTION(5, "SCP_JTAG0_TDI_VLP"), + MTK_FUNCTION(6, "SPU0_TDI"), + MTK_FUNCTION(7, "IO_JTAG_TDI") + ), + MTK_PIN( + 35, "GPIO35", + MTK_EINT_FUNCTION(0, 35), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO35"), + 
MTK_FUNCTION(1, "URTS5"), + MTK_FUNCTION(2, "DP_RAUX_SBU2"), + MTK_FUNCTION(3, "SSPM_JTAG_TDO_VLP"), + MTK_FUNCTION(4, "ADSP_JTAG0_TDO"), + MTK_FUNCTION(5, "SCP_JTAG0_TDO_VLP"), + MTK_FUNCTION(6, "SPU0_TDO"), + MTK_FUNCTION(7, "IO_JTAG_TDO") + ), + MTK_PIN( + 36, "GPIO36", + MTK_EINT_FUNCTION(0, 36), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO36"), + MTK_FUNCTION(1, "UTXD5"), + MTK_FUNCTION(3, "SSPM_JTAG_TMS_VLP"), + MTK_FUNCTION(4, "ADSP_JTAG0_TMS"), + MTK_FUNCTION(5, "SCP_JTAG0_TMS_VLP"), + MTK_FUNCTION(6, "SPU0_TMS"), + MTK_FUNCTION(7, "IO_JTAG_TMS") + ), + MTK_PIN( + 37, "GPIO37", + MTK_EINT_FUNCTION(0, 37), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO37"), + MTK_FUNCTION(1, "URXD5"), + MTK_FUNCTION(3, "MD_INT3"), + MTK_FUNCTION(4, "CLKM0_B"), + MTK_FUNCTION(5, "TP_GPIO5_AO"), + MTK_FUNCTION(6, "SPU0_UTX"), + MTK_FUNCTION(7, "DAP_MD32_SWCK") + ), + MTK_PIN( + 38, "GPIO38", + MTK_EINT_FUNCTION(0, 38), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO38"), + MTK_FUNCTION(2, "SPMI_P_TRIG_FLAG"), + MTK_FUNCTION(3, "MD_INT4"), + MTK_FUNCTION(4, "CLKM1_B"), + MTK_FUNCTION(5, "TP_GPIO6_AO"), + MTK_FUNCTION(6, "SPU0_URX"), + MTK_FUNCTION(7, "DAP_MD32_SWD") + ), + MTK_PIN( + 39, "GPIO39", + MTK_EINT_FUNCTION(0, 39), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO39"), + MTK_FUNCTION(1, "I2S_MCK0"), + MTK_FUNCTION(3, "GPS_PPS0"), + MTK_FUNCTION(4, "CONN_TCXOENA_REQ"), + MTK_FUNCTION(7, "DBG_MON_B12") + ), + MTK_PIN( + 40, "GPIO40", + MTK_EINT_FUNCTION(0, 40), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO40"), + MTK_FUNCTION(1, "I2SIN6_0_BCK"), + MTK_FUNCTION(3, "SPI4_B_CLK"), + MTK_FUNCTION(4, "UCTS2"), + MTK_FUNCTION(5, "CCU1_UTXD"), + MTK_FUNCTION(7, "DBG_MON_B13") + ), + MTK_PIN( + 41, "GPIO41", + MTK_EINT_FUNCTION(0, 41), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO41"), + MTK_FUNCTION(1, "I2SIN6_0_LRCK"), + MTK_FUNCTION(3, "SPI4_B_CSB"), + MTK_FUNCTION(4, "URTS2"), + MTK_FUNCTION(5, "CCU1_URXD"), + MTK_FUNCTION(7, "DBG_MON_B14") + ), + MTK_PIN( + 42, "GPIO42", + MTK_EINT_FUNCTION(0, 42), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO42"), + MTK_FUNCTION(1, "I2SIN6_0_DI"), + MTK_FUNCTION(3, "SPI4_B_MI"), + MTK_FUNCTION(4, "URXD2"), + MTK_FUNCTION(5, "CCU1_URTS"), + MTK_FUNCTION(6, "MD32_0_RXD"), + MTK_FUNCTION(7, "DBG_MON_B15") + ), + MTK_PIN( + 43, "GPIO43", + MTK_EINT_FUNCTION(0, 43), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO43"), + MTK_FUNCTION(1, "I2SOUT6_0_DO"), + MTK_FUNCTION(3, "SPI4_B_MO"), + MTK_FUNCTION(4, "UTXD2"), + MTK_FUNCTION(5, "CCU1_UCTS"), + MTK_FUNCTION(6, "MD32_0_TXD"), + MTK_FUNCTION(7, "DBG_MON_B16") + ), + MTK_PIN( + 44, "GPIO44", + MTK_EINT_FUNCTION(0, 44), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO44"), + MTK_FUNCTION(1, "MD_INT1_C2K_UIM0_HOT_PLUG"), + MTK_FUNCTION(3, "SPI3_A_CLK"), + MTK_FUNCTION(6, "TP_GPIO10_AO") + ), + MTK_PIN( + 45, "GPIO45", + MTK_EINT_FUNCTION(0, 45), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO45"), + MTK_FUNCTION(1, "MD_INT2_C2K_UIM1_HOT_PLUG"), + MTK_FUNCTION(2, "DSI2_HSYNC"), + MTK_FUNCTION(3, "SPI3_A_CSB"), + MTK_FUNCTION(4, "PWM_VLP"), + MTK_FUNCTION(6, "TP_GPIO11_AO") + ), + MTK_PIN( + 46, "GPIO46", + MTK_EINT_FUNCTION(0, 46), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO46"), + MTK_FUNCTION(1, "SCP_SCL4"), + MTK_FUNCTION(2, "PWM_VLP"), + MTK_FUNCTION(4, "SCP_ILDO_DTEST1_VLP"), + MTK_FUNCTION(5, "UFS_MPHY_SCL"), + MTK_FUNCTION(6, "TP_GPIO0_AO") + ), + MTK_PIN( + 47, "GPIO47", + MTK_EINT_FUNCTION(0, 47), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO47"), + MTK_FUNCTION(1, "SCP_SDA4"), + MTK_FUNCTION(4, "SCP_ILDO_DTEST2_VLP"), + MTK_FUNCTION(5, "UFS_MPHY_SDA"), + MTK_FUNCTION(6, "TP_GPIO1_AO") + ), + MTK_PIN( + 48, "GPIO48", + MTK_EINT_FUNCTION(0, 
48), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO48"), + MTK_FUNCTION(1, "SCP_SCL5"), + MTK_FUNCTION(2, "PWM_VLP"), + MTK_FUNCTION(3, "CCU0_UTXD"), + MTK_FUNCTION(4, "SCP_ILDO_DTEST3_VLP"), + MTK_FUNCTION(6, "TP_GPIO2_AO") + ), + MTK_PIN( + 49, "GPIO49", + MTK_EINT_FUNCTION(0, 49), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO49"), + MTK_FUNCTION(1, "SCP_SDA5"), + MTK_FUNCTION(3, "CCU0_URXD"), + MTK_FUNCTION(4, "SCP_ILDO_DTEST4_VLP"), + MTK_FUNCTION(6, "TP_GPIO3_AO") + ), + MTK_PIN( + 50, "GPIO50", + MTK_EINT_FUNCTION(0, 50), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO50"), + MTK_FUNCTION(1, "SCP_SCL6"), + MTK_FUNCTION(2, "PWM_VLP"), + MTK_FUNCTION(3, "CCU0_URTS"), + MTK_FUNCTION(4, "DSI_HSYNC"), + MTK_FUNCTION(6, "TP_GPIO4_AO") + ), + MTK_PIN( + 51, "GPIO51", + MTK_EINT_FUNCTION(0, 51), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO51"), + MTK_FUNCTION(1, "SCP_SDA6"), + MTK_FUNCTION(3, "CCU0_UCTS"), + MTK_FUNCTION(4, "DSI1_HSYNC"), + MTK_FUNCTION(6, "TP_GPIO5_AO") + ), + MTK_PIN( + 52, "GPIO52", + MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO52"), + MTK_FUNCTION(1, "SCP_SCL1"), + MTK_FUNCTION(3, "TDM_DATA2") + ), + MTK_PIN( + 53, "GPIO53", + MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO53"), + MTK_FUNCTION(1, "SCP_SDA1"), + MTK_FUNCTION(3, "TDM_DATA3") + ), + MTK_PIN( + 54, "GPIO54", + MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO54"), + MTK_FUNCTION(1, "AUD_CLK_MOSI"), + MTK_FUNCTION(3, "TDM_MCK") + ), + MTK_PIN( + 55, "GPIO55", + MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO55"), + MTK_FUNCTION(1, "AUD_CLK_MISO"), + MTK_FUNCTION(2, "I2SOUT2_BCK"), + MTK_FUNCTION(3, "TDM_BCK") + ), + MTK_PIN( + 56, "GPIO56", + MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO56"), + MTK_FUNCTION(1, "AUD_DAT_MOSI0"), + MTK_FUNCTION(2, "I2SOUT2_LRCK"), + MTK_FUNCTION(3, "TDM_LRCK") + ), + MTK_PIN( + 57, "GPIO57", + MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO57"), + MTK_FUNCTION(1, "AUD_DAT_MOSI1"), + MTK_FUNCTION(2, "I2SOUT2_DO"), + MTK_FUNCTION(3, "TDM_DATA0") + ), + MTK_PIN( + 58, "GPIO58", + MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO58"), + MTK_FUNCTION(1, "AUD_DAT_MISO0"), + MTK_FUNCTION(3, "TDM_DATA1") + ), + MTK_PIN( + 59, "GPIO59", + MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO59"), + MTK_FUNCTION(1, "AUD_DAT_MISO1"), + MTK_FUNCTION(3, "I2SIN1_BCK") + ), + MTK_PIN( + 60, "GPIO60", + MTK_EINT_FUNCTION(0, 60), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO60"), + MTK_FUNCTION(1, "KPCOL0"), + MTK_FUNCTION(6, "TP_GPIO13_AO") + ), + MTK_PIN( + 61, "GPIO61", + MTK_EINT_FUNCTION(0, 61), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO61"), + MTK_FUNCTION(1, "MCU_M_PMIC_POC_I") + ), + MTK_PIN( + 62, "GPIO62", + MTK_EINT_FUNCTION(0, 62), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO62"), + MTK_FUNCTION(1, "MCU_B_PMIC_POC_I") + ), + MTK_PIN( + 63, "GPIO63", + MTK_EINT_FUNCTION(0, 63), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO63"), + MTK_FUNCTION(1, "MFG_PMIC_POC_I") + ), + MTK_PIN( + 64, "GPIO64", + MTK_EINT_FUNCTION(0, 64), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO64"), + MTK_FUNCTION(1, "PRE_UVLO") + ), + MTK_PIN( + 65, "GPIO65", + MTK_EINT_FUNCTION(0, 65), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO65"), + MTK_FUNCTION(1, "DPM2PMIC"), + MTK_FUNCTION(2, "SRCLKENA1") + ), + MTK_PIN( + 66, "GPIO66", + MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT), + 
DRV_GRP4, + MTK_FUNCTION(0, "GPIO66"), + MTK_FUNCTION(1, "WATCHDOG") + ), + MTK_PIN( + 67, "GPIO67", + MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO67"), + MTK_FUNCTION(1, "SRCLKENA0") + ), + MTK_PIN( + 68, "GPIO68", + MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO68"), + MTK_FUNCTION(1, "SCP_VREQ_VAO") + ), + MTK_PIN( + 69, "GPIO69", + MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO69"), + MTK_FUNCTION(1, "RTC32K_CK") + ), + MTK_PIN( + 70, "GPIO70", + MTK_EINT_FUNCTION(0, 70), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO70"), + MTK_FUNCTION(1, "CMFLASH0") + ), + MTK_PIN( + 71, "GPIO71", + MTK_EINT_FUNCTION(0, 71), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO71") + ), + MTK_PIN( + 72, "GPIO72", + MTK_EINT_FUNCTION(0, 72), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO72") + ), + MTK_PIN( + 73, "GPIO73", + MTK_EINT_FUNCTION(0, 73), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO73") + ), + MTK_PIN( + 74, "GPIO74", + MTK_EINT_FUNCTION(0, 74), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO74"), + MTK_FUNCTION(1, "DCXO_FPM_LPM") + ), + MTK_PIN( + 75, "GPIO75", + MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO75"), + MTK_FUNCTION(1, "SPMI_M_SCL") + ), + MTK_PIN( + 76, "GPIO76", + MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO76"), + MTK_FUNCTION(1, "SPMI_M_SDA") + ), + MTK_PIN( + 77, "GPIO77", + MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO77"), + MTK_FUNCTION(1, "SPMI_P_SCL") + ), + MTK_PIN( + 78, "GPIO78", + MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO78"), + MTK_FUNCTION(1, "SPMI_P_SDA") + ), + MTK_PIN( + 79, "GPIO79", + MTK_EINT_FUNCTION(0, 79), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO79"), + MTK_FUNCTION(1, "CMMCLK0"), + MTK_FUNCTION(2, "MD_INT4") + ), + MTK_PIN( + 80, "GPIO80", + MTK_EINT_FUNCTION(0, 80), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO80"), + MTK_FUNCTION(1, "CMMCLK1") + ), + MTK_PIN( + 81, "GPIO81", + MTK_EINT_FUNCTION(0, 81), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO81"), + MTK_FUNCTION(1, "SCP_SPI0_CK"), + MTK_FUNCTION(2, "SPI6_B_CLK"), + MTK_FUNCTION(3, "PWM_VLP"), + MTK_FUNCTION(4, "I2SOUT5_BCK"), + MTK_FUNCTION(6, "TP_GPIO0_AO") + ), + MTK_PIN( + 82, "GPIO82", + MTK_EINT_FUNCTION(0, 82), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO82"), + MTK_FUNCTION(1, "SCP_SPI0_CS"), + MTK_FUNCTION(2, "SPI6_B_CSB"), + MTK_FUNCTION(4, "I2SOUT5_LRCK"), + MTK_FUNCTION(6, "TP_GPIO1_AO") + ), + MTK_PIN( + 83, "GPIO83", + MTK_EINT_FUNCTION(0, 83), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO83"), + MTK_FUNCTION(1, "SCP_SPI0_MO"), + MTK_FUNCTION(2, "SPI6_B_MO"), + MTK_FUNCTION(4, "I2SOUT5_DATA0"), + MTK_FUNCTION(6, "TP_GPIO2_AO") + ), + MTK_PIN( + 84, "GPIO84", + MTK_EINT_FUNCTION(0, 84), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO84"), + MTK_FUNCTION(1, "SCP_SPI0_MI"), + MTK_FUNCTION(2, "SPI6_B_MI"), + MTK_FUNCTION(4, "I2SOUT5_DATA1"), + MTK_FUNCTION(6, "TP_GPIO3_AO") + ), + MTK_PIN( + 85, "GPIO85", + MTK_EINT_FUNCTION(0, 85), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO85"), + MTK_FUNCTION(1, "SCP_SPI1_CK"), + MTK_FUNCTION(2, "SPI7_B_CLK"), + MTK_FUNCTION(4, "I2SIN5_DATA0"), + MTK_FUNCTION(5, "PWM_VLP"), + MTK_FUNCTION(6, "TP_GPIO4_AO") + ), + MTK_PIN( + 86, "GPIO86", + MTK_EINT_FUNCTION(0, 86), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO86"), + MTK_FUNCTION(1, "SCP_SPI1_CS"), + MTK_FUNCTION(2, "SPI7_B_CSB"), + MTK_FUNCTION(4, "I2SIN5_DATA1"), + MTK_FUNCTION(6, "TP_GPIO5_AO") + ), + MTK_PIN( + 87, "GPIO87", + 
MTK_EINT_FUNCTION(0, 87), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO87"), + MTK_FUNCTION(1, "SCP_SPI1_MO"), + MTK_FUNCTION(2, "SPI7_B_MO"), + MTK_FUNCTION(4, "I2SIN5_BCK"), + MTK_FUNCTION(6, "TP_GPIO6_AO") + ), + MTK_PIN( + 88, "GPIO88", + MTK_EINT_FUNCTION(0, 88), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO88"), + MTK_FUNCTION(1, "SCP_SPI1_MI"), + MTK_FUNCTION(2, "SPI7_B_MI"), + MTK_FUNCTION(4, "I2SIN5_LRCK"), + MTK_FUNCTION(6, "TP_GPIO7_AO") + ), + MTK_PIN( + 89, "GPIO89", + MTK_EINT_FUNCTION(0, 89), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO89"), + MTK_FUNCTION(1, "DSI_TE"), + MTK_FUNCTION(2, "DSI1_TE"), + MTK_FUNCTION(7, "DBG_MON_B30") + ), + MTK_PIN( + 90, "GPIO90", + MTK_EINT_FUNCTION(0, 90), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO90"), + MTK_FUNCTION(1, "LCM_RST"), + MTK_FUNCTION(2, "LCM1_RST"), + MTK_FUNCTION(7, "DBG_MON_B31") + ), + MTK_PIN( + 91, "GPIO91", + MTK_EINT_FUNCTION(0, 91), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO91"), + MTK_FUNCTION(1, "CMFLASH2"), + MTK_FUNCTION(2, "SF_D0"), + MTK_FUNCTION(3, "SRCLKENAI1"), + MTK_FUNCTION(5, "KPCOL2"), + MTK_FUNCTION(6, "TP_GPIO11_AO") + ), + MTK_PIN( + 92, "GPIO92", + MTK_EINT_FUNCTION(0, 92), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO92"), + MTK_FUNCTION(1, "CMFLASH3"), + MTK_FUNCTION(2, "SF_D1"), + MTK_FUNCTION(4, "DISP_PWM1"), + MTK_FUNCTION(6, "TP_GPIO12_AO") + ), + MTK_PIN( + 93, "GPIO93", + MTK_EINT_FUNCTION(0, 93), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO93"), + MTK_FUNCTION(1, "CMFLASH1"), + MTK_FUNCTION(2, "SF_D2"), + MTK_FUNCTION(3, "SRCLKENAI0"), + MTK_FUNCTION(5, "KPROW2"), + MTK_FUNCTION(6, "TP_GPIO13_AO") + ), + MTK_PIN( + 94, "GPIO94", + MTK_EINT_FUNCTION(0, 94), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO94"), + MTK_FUNCTION(1, "I2S_MCK1"), + MTK_FUNCTION(2, "SF_D3"), + MTK_FUNCTION(4, "MD32_0_GPIO0"), + MTK_FUNCTION(5, "CLKM0_A"), + MTK_FUNCTION(6, "TP_GPIO14_AO"), + MTK_FUNCTION(7, "DBG_MON_B18") + ), + MTK_PIN( + 95, "GPIO95", + MTK_EINT_FUNCTION(0, 95), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO95"), + MTK_FUNCTION(1, "I2SIN1_BCK"), + MTK_FUNCTION(2, "I2SIN4_BCK"), + MTK_FUNCTION(3, "SPI6_A_CLK"), + MTK_FUNCTION(4, "MD32_1_GPIO0"), + MTK_FUNCTION(5, "CLKM1_A"), + MTK_FUNCTION(6, "TP_GPIO15_AO"), + MTK_FUNCTION(7, "DBG_MON_B19") + ), + MTK_PIN( + 96, "GPIO96", + MTK_EINT_FUNCTION(0, 96), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO96"), + MTK_FUNCTION(1, "I2SIN1_LRCK"), + MTK_FUNCTION(2, "I2SIN4_LRCK"), + MTK_FUNCTION(3, "SPI6_A_CSB"), + MTK_FUNCTION(4, "MD32_2_GPIO0"), + MTK_FUNCTION(5, "CLKM2_A"), + MTK_FUNCTION(7, "DBG_MON_B20") + ), + MTK_PIN( + 97, "GPIO97", + MTK_EINT_FUNCTION(0, 97), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO97"), + MTK_FUNCTION(1, "I2SIN1_DI_A"), + MTK_FUNCTION(2, "I2SIN4_DATA0"), + MTK_FUNCTION(3, "SPI6_A_MO"), + MTK_FUNCTION(4, "MD32_3_GPIO0"), + MTK_FUNCTION(5, "CLKM3_A"), + MTK_FUNCTION(7, "DBG_MON_B21") + ), + MTK_PIN( + 98, "GPIO98", + MTK_EINT_FUNCTION(0, 98), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO98"), + MTK_FUNCTION(1, "I2SOUT1_DO"), + MTK_FUNCTION(2, "I2SOUT4_DATA0"), + MTK_FUNCTION(3, "SPI6_A_MI"), + MTK_FUNCTION(7, "DBG_MON_B22") + ), + MTK_PIN( + 99, "GPIO99", + MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO99"), + MTK_FUNCTION(1, "SCL0"), + MTK_FUNCTION(2, "LCM2_RST"), + MTK_FUNCTION(3, "AUD_DAC_26M_CLK"), + MTK_FUNCTION(4, "SPU0_SCL"), + MTK_FUNCTION(7, "DBG_MON_B24") + ), + MTK_PIN( + 100, "GPIO100", + MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO100"), + MTK_FUNCTION(1, "SDA0"), + MTK_FUNCTION(2, "DSI2_TE"), + MTK_FUNCTION(4, "SPU0_SDA"), + MTK_FUNCTION(7, 
"DBG_MON_B25") + ), + MTK_PIN( + 101, "GPIO101", + MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO101"), + MTK_FUNCTION(1, "SCL10"), + MTK_FUNCTION(2, "SF_CS"), + MTK_FUNCTION(3, "SCP_DMIC1_CLK"), + MTK_FUNCTION(4, "I2SIN5_DATA2"), + MTK_FUNCTION(5, "SCP_SCL_OIS"), + MTK_FUNCTION(6, "TP_GPIO10_AO"), + MTK_FUNCTION(7, "DBG_MON_B28") + ), + MTK_PIN( + 102, "GPIO102", + MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO102"), + MTK_FUNCTION(1, "SDA10"), + MTK_FUNCTION(2, "SF_CK"), + MTK_FUNCTION(3, "SCP_DMIC1_DAT"), + MTK_FUNCTION(4, "I2SIN5_DATA3"), + MTK_FUNCTION(5, "SCP_SDA_OIS"), + MTK_FUNCTION(6, "TP_GPIO11_AO"), + MTK_FUNCTION(7, "DBG_MON_B29") + ), + MTK_PIN( + 103, "GPIO103", + MTK_EINT_FUNCTION(0, 103), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO103"), + MTK_FUNCTION(1, "DISP_PWM"), + MTK_FUNCTION(2, "DSI1_TE"), + MTK_FUNCTION(5, "I2S_MCK0"), + MTK_FUNCTION(7, "DBG_MON_B23") + ), + MTK_PIN( + 104, "GPIO104", + MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO104"), + MTK_FUNCTION(1, "SCL6"), + MTK_FUNCTION(2, "SPU1_SCL"), + MTK_FUNCTION(3, "AUD_DAC_26M_CLK"), + MTK_FUNCTION(4, "USB_DRVVBUS_2P"), + MTK_FUNCTION(5, "I2S_MCK1"), + MTK_FUNCTION(6, "IDDIG_2P"), + MTK_FUNCTION(7, "DBG_MON_B26") + ), + MTK_PIN( + 105, "GPIO105", + MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO105"), + MTK_FUNCTION(1, "SDA6"), + MTK_FUNCTION(2, "SPU1_SDA"), + MTK_FUNCTION(3, "DISP_PWM2"), + MTK_FUNCTION(4, "VBUSVALID_2P"), + MTK_FUNCTION(5, "I2S_MCK2"), + MTK_FUNCTION(6, "VBUSVALID_3P"), + MTK_FUNCTION(7, "DBG_MON_B27") + ), + MTK_PIN( + 106, "GPIO106", + MTK_EINT_FUNCTION(0, 106), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO106"), + MTK_FUNCTION(1, "SCP_SPI3_CK"), + MTK_FUNCTION(2, "SPI3_B_CLK"), + MTK_FUNCTION(3, "MD_UTXD0"), + MTK_FUNCTION(4, "TP_UTXD1_VLP"), + MTK_FUNCTION(5, "CONN_BG_GPS_MCU_UART0_TXD"), + MTK_FUNCTION(6, "TP_GPIO6_AO"), + MTK_FUNCTION(7, "DBG_MON_B0") + ), + MTK_PIN( + 107, "GPIO107", + MTK_EINT_FUNCTION(0, 107), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO107"), + MTK_FUNCTION(1, "SCP_SPI3_CS"), + MTK_FUNCTION(2, "SPI3_B_CSB"), + MTK_FUNCTION(3, "MD_URXD0"), + MTK_FUNCTION(4, "TP_URXD1_VLP"), + MTK_FUNCTION(5, "CONN_BG_GPS_MCU_UART0_RXD"), + MTK_FUNCTION(6, "TP_GPIO7_AO"), + MTK_FUNCTION(7, "DBG_MON_B1") + ), + MTK_PIN( + 108, "GPIO108", + MTK_EINT_FUNCTION(0, 108), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO108"), + MTK_FUNCTION(1, "SCP_SPI3_MO"), + MTK_FUNCTION(2, "SPI3_B_MO"), + MTK_FUNCTION(3, "MD_UTXD1"), + MTK_FUNCTION(4, "MD32PCM_UTXD_AO_VLP"), + MTK_FUNCTION(5, "CONN_BG_GPS_MCU_UART1_TXD"), + MTK_FUNCTION(6, "TP_GPIO8_AO"), + MTK_FUNCTION(7, "DBG_MON_B2") + ), + MTK_PIN( + 109, "GPIO109", + MTK_EINT_FUNCTION(0, 109), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO109"), + MTK_FUNCTION(1, "SCP_SPI3_MI"), + MTK_FUNCTION(2, "SPI3_B_MI"), + MTK_FUNCTION(3, "MD_URXD1"), + MTK_FUNCTION(4, "MD32PCM_URXD_AO_VLP"), + MTK_FUNCTION(5, "CONN_BG_GPS_MCU_UART1_RXD"), + MTK_FUNCTION(6, "TP_GPIO9_AO"), + MTK_FUNCTION(7, "DBG_MON_B3") + ), + MTK_PIN( + 110, "GPIO110", + MTK_EINT_FUNCTION(0, 110), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO110"), + MTK_FUNCTION(1, "SPI1_CLK"), + MTK_FUNCTION(2, "PWM_0"), + MTK_FUNCTION(3, "MD_UCTS0"), + MTK_FUNCTION(4, "TP_UCTS1_VLP"), + MTK_FUNCTION(6, "SPU0_GPIO_O"), + MTK_FUNCTION(7, "DBG_MON_B4") + ), + MTK_PIN( + 111, "GPIO111", + MTK_EINT_FUNCTION(0, 111), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO111"), + MTK_FUNCTION(1, "SPI1_CSB"), + MTK_FUNCTION(2, 
"PWM_1"), + MTK_FUNCTION(3, "MD_URTS0"), + MTK_FUNCTION(4, "TP_URTS1_VLP"), + MTK_FUNCTION(6, "SPU0_GPIO_I"), + MTK_FUNCTION(7, "DBG_MON_B5") + ), + MTK_PIN( + 112, "GPIO112", + MTK_EINT_FUNCTION(0, 112), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO112"), + MTK_FUNCTION(1, "SPI1_MO"), + MTK_FUNCTION(2, "PWM_2"), + MTK_FUNCTION(3, "MD_UCTS1"), + MTK_FUNCTION(6, "SPU1_GPIO_O"), + MTK_FUNCTION(7, "DBG_MON_B6") + ), + MTK_PIN( + 113, "GPIO113", + MTK_EINT_FUNCTION(0, 113), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO113"), + MTK_FUNCTION(1, "SPI1_MI"), + MTK_FUNCTION(2, "PWM_3"), + MTK_FUNCTION(3, "MD_URTS1"), + MTK_FUNCTION(6, "SPU1_GPIO_I"), + MTK_FUNCTION(7, "DBG_MON_B7") + ), + MTK_PIN( + 114, "GPIO114", + MTK_EINT_FUNCTION(0, 114), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO114"), + MTK_FUNCTION(1, "SPI0_SPU_CLK"), + MTK_FUNCTION(2, "SPI4_A_CLK"), + MTK_FUNCTION(5, "CONN_BG_GPS_MCU_DBG_UART_TX"), + MTK_FUNCTION(7, "DBG_MON_B8") + ), + MTK_PIN( + 115, "GPIO115", + MTK_EINT_FUNCTION(0, 115), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO115"), + MTK_FUNCTION(1, "SPI0_SPU_CSB"), + MTK_FUNCTION(2, "SPI4_A_CSB"), + MTK_FUNCTION(7, "DBG_MON_B9") + ), + MTK_PIN( + 116, "GPIO116", + MTK_EINT_FUNCTION(0, 116), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO116"), + MTK_FUNCTION(1, "SPI0_SPU_MO"), + MTK_FUNCTION(2, "SPI4_A_MO"), + MTK_FUNCTION(3, "LCM1_RST"), + MTK_FUNCTION(7, "DBG_MON_B10") + ), + MTK_PIN( + 117, "GPIO117", + MTK_EINT_FUNCTION(0, 117), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO117"), + MTK_FUNCTION(1, "SPI0_SPU_MI"), + MTK_FUNCTION(2, "SPI4_A_MI"), + MTK_FUNCTION(3, "DSI1_TE"), + MTK_FUNCTION(7, "DBG_MON_B11") + ), + MTK_PIN( + 118, "GPIO118", + MTK_EINT_FUNCTION(0, 118), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO118"), + MTK_FUNCTION(1, "SPI5_CLK"), + MTK_FUNCTION(2, "USB_DRVVBUS"), + MTK_FUNCTION(3, "DP_TX_HPD"), + MTK_FUNCTION(4, "AD_ILDO_DTEST0") + ), + MTK_PIN( + 119, "GPIO119", + MTK_EINT_FUNCTION(0, 119), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO119"), + MTK_FUNCTION(1, "SPI5_CSB"), + MTK_FUNCTION(2, "VBUSVALID"), + MTK_FUNCTION(3, "DP_OC_EN"), + MTK_FUNCTION(4, "AD_ILDO_DTEST1") + ), + MTK_PIN( + 120, "GPIO120", + MTK_EINT_FUNCTION(0, 120), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO120"), + MTK_FUNCTION(1, "SPI5_MO"), + MTK_FUNCTION(2, "LCM2_RST"), + MTK_FUNCTION(3, "DP_RAUX_SBU1"), + MTK_FUNCTION(4, "AD_ILDO_DTEST2"), + MTK_FUNCTION(6, "IDDIG_3P") + ), + MTK_PIN( + 121, "GPIO121", + MTK_EINT_FUNCTION(0, 121), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO121"), + MTK_FUNCTION(1, "SPI5_MI"), + MTK_FUNCTION(2, "DSI2_TE"), + MTK_FUNCTION(3, "DP_RAUX_SBU2"), + MTK_FUNCTION(4, "AD_ILDO_DTEST3"), + MTK_FUNCTION(6, "USB_DRVVBUS_3P"), + MTK_FUNCTION(7, "DBG_MON_B17") + ), + MTK_PIN( + 122, "GPIO122", + MTK_EINT_FUNCTION(0, 122), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO122"), + MTK_FUNCTION(1, "AP_GOOD"), + MTK_FUNCTION(2, "CONN_TCXOENA_REQ") + ), + MTK_PIN( + 123, "GPIO123", + MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO123"), + MTK_FUNCTION(1, "SCL3"), + MTK_FUNCTION(5, "I2SIN2_LRCK"), + MTK_FUNCTION(6, "TP_UTXD_MD_VCORE") + ), + MTK_PIN( + 124, "GPIO124", + MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO124"), + MTK_FUNCTION(1, "SDA3"), + MTK_FUNCTION(6, "TP_URXD_MD_VCORE") + ), + MTK_PIN( + 125, "GPIO125", + MTK_EINT_FUNCTION(0, 125), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO125"), + MTK_FUNCTION(1, "MSDC1_CLK"), + MTK_FUNCTION(2, "MD1_SIM2_SCLK"), + MTK_FUNCTION(3, "HFRP_JTAG0_TCK"), + MTK_FUNCTION(4, "UDI_TCK"), + MTK_FUNCTION(5, "CONN_BGF_DSP_L1_JCK"), + MTK_FUNCTION(6, 
"SCP_JTAG_LITTLE_TCK_VLP"), + MTK_FUNCTION(7, "JTCK2_SEL1") + ), + MTK_PIN( + 126, "GPIO126", + MTK_EINT_FUNCTION(0, 126), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO126"), + MTK_FUNCTION(1, "MSDC1_CMD"), + MTK_FUNCTION(3, "HFRP_JTAG0_TMS"), + MTK_FUNCTION(4, "UDI_TMS"), + MTK_FUNCTION(5, "CONN_BGF_DSP_L1_JMS"), + MTK_FUNCTION(6, "SCP_JTAG_LITTLE_TMS_VLP"), + MTK_FUNCTION(7, "JTMS2_SEL1") + ), + MTK_PIN( + 127, "GPIO127", + MTK_EINT_FUNCTION(0, 127), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO127"), + MTK_FUNCTION(1, "MSDC1_DAT0"), + MTK_FUNCTION(2, "MD1_SIM2_SRST"), + MTK_FUNCTION(3, "HFRP_JTAG0_TDI"), + MTK_FUNCTION(4, "UDI_TDI_0"), + MTK_FUNCTION(5, "CONN_BGF_DSP_L1_JDI"), + MTK_FUNCTION(6, "SCP_JTAG_LITTLE_TDI_VLP"), + MTK_FUNCTION(7, "JTDI2_SEL1") + ), + MTK_PIN( + 128, "GPIO128", + MTK_EINT_FUNCTION(0, 128), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO128"), + MTK_FUNCTION(1, "MSDC1_DAT1"), + MTK_FUNCTION(2, "MD1_SIM2_SIO"), + MTK_FUNCTION(3, "HFRP_JTAG0_TDO"), + MTK_FUNCTION(4, "UDI_TDO_0"), + MTK_FUNCTION(5, "CONN_BGF_DSP_L1_JDO"), + MTK_FUNCTION(6, "SCP_JTAG_LITTLE_TDO_VLP"), + MTK_FUNCTION(7, "JTDO2_SEL1") + ), + MTK_PIN( + 129, "GPIO129", + MTK_EINT_FUNCTION(0, 129), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO129"), + MTK_FUNCTION(1, "MSDC1_DAT2"), + MTK_FUNCTION(2, "DSI2_HSYNC"), + MTK_FUNCTION(3, "HFRP_JTAG0_TRSTN"), + MTK_FUNCTION(4, "UDI_NTRST"), + MTK_FUNCTION(6, "SCP_JTAG_LITTLE_TRSTN_VLP"), + MTK_FUNCTION(7, "JTRSTN2_SEL1") + ), + MTK_PIN( + 130, "GPIO130", + MTK_EINT_FUNCTION(0, 130), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO130"), + MTK_FUNCTION(1, "MSDC1_DAT3"), + MTK_FUNCTION(2, "DSI3_HSYNC"), + MTK_FUNCTION(5, "CONN_BGF_DSP_L1_JINTP") + ), + MTK_PIN( + 131, "GPIO131", + MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO131"), + MTK_FUNCTION(1, "MD1_SIM2_SCLK"), + MTK_FUNCTION(2, "MD1_SIM1_SCLK"), + MTK_FUNCTION(3, "MCUPM_JTAG_TDI"), + MTK_FUNCTION(4, "CLKM0_A"), + MTK_FUNCTION(5, "CONN_BGF_DSP_L5_JDI"), + MTK_FUNCTION(6, "TSFDC_SCK"), + MTK_FUNCTION(7, "SCP_JTAG0_TDI_VCORE") + ), + MTK_PIN( + 132, "GPIO132", + MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO132"), + MTK_FUNCTION(1, "MD1_SIM2_SRST"), + MTK_FUNCTION(2, "MD1_SIM1_SRST"), + MTK_FUNCTION(3, "MCUPM_JTAG_TMS"), + MTK_FUNCTION(4, "CLKM1_B"), + MTK_FUNCTION(5, "CONN_BGF_DSP_L5_JMS"), + MTK_FUNCTION(6, "TSFDC_SDI"), + MTK_FUNCTION(7, "SCP_JTAG0_TMS_VCORE") + ), + MTK_PIN( + 133, "GPIO133", + MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO133"), + MTK_FUNCTION(1, "MD1_SIM2_SIO"), + MTK_FUNCTION(2, "MD1_SIM1_SIO"), + MTK_FUNCTION(3, "MCUPM_JTAG_TDO"), + MTK_FUNCTION(5, "CONN_BGF_DSP_L5_JDO"), + MTK_FUNCTION(6, "TSFDC_SCF"), + MTK_FUNCTION(7, "SCP_JTAG0_TDO_VCORE") + ), + MTK_PIN( + 134, "GPIO134", + MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO134"), + MTK_FUNCTION(1, "MD1_SIM1_SCLK"), + MTK_FUNCTION(2, "MD1_SIM2_SCLK"), + MTK_FUNCTION(6, "TSFDC_26M") + ), + MTK_PIN( + 135, "GPIO135", + MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO135"), + MTK_FUNCTION(1, "MD1_SIM1_SRST"), + MTK_FUNCTION(2, "MD1_SIM2_SRST"), + MTK_FUNCTION(3, "MCUPM_JTAG_TCK"), + MTK_FUNCTION(5, "CONN_BGF_DSP_L5_JCK"), + MTK_FUNCTION(6, "TSFDC_SDO"), + MTK_FUNCTION(7, "SCP_JTAG0_TCK_VCORE") + ), + MTK_PIN( + 136, "GPIO136", + MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO136"), + MTK_FUNCTION(1, "MD1_SIM1_SIO"), + MTK_FUNCTION(2, 
"MD1_SIM2_SIO"), + MTK_FUNCTION(3, "MCUPM_JTAG_TRSTN"), + MTK_FUNCTION(5, "CONN_BGF_DSP_L5_JINTP"), + MTK_FUNCTION(6, "TSFDC_FOUT"), + MTK_FUNCTION(7, "SCP_JTAG0_TRSTN_VCORE") + ), + MTK_PIN( + 137, "GPIO137", + MTK_EINT_FUNCTION(0, 137), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO137"), + MTK_FUNCTION(1, "MIPI0_D_SCLK"), + MTK_FUNCTION(2, "BPI_BUS16"), + MTK_FUNCTION(4, "EXT_FRAME_SYNC"), + MTK_FUNCTION(6, "SPM_JTAG_TRSTN_VCORE"), + MTK_FUNCTION(7, "DBG_MON_A0") + ), + MTK_PIN( + 138, "GPIO138", + MTK_EINT_FUNCTION(0, 138), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO138"), + MTK_FUNCTION(1, "MIPI0_D_SDATA"), + MTK_FUNCTION(2, "BPI_BUS17"), + MTK_FUNCTION(4, "PCM0_LRCK"), + MTK_FUNCTION(6, "SPM_JTAG_TCK_VCORE"), + MTK_FUNCTION(7, "DBG_MON_A1") + ), + MTK_PIN( + 139, "GPIO139", + MTK_EINT_FUNCTION(0, 139), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO139"), + MTK_FUNCTION(1, "MIPI1_D_SCLK"), + MTK_FUNCTION(2, "BPI_BUS18"), + MTK_FUNCTION(4, "MD_GPS_BLANK"), + MTK_FUNCTION(6, "SPM_JTAG_TMS_VCORE"), + MTK_FUNCTION(7, "DBG_MON_A2") + ), + MTK_PIN( + 140, "GPIO140", + MTK_EINT_FUNCTION(0, 140), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO140"), + MTK_FUNCTION(1, "MIPI1_D_SDATA"), + MTK_FUNCTION(2, "BPI_BUS19"), + MTK_FUNCTION(4, "MD_URXD1_CONN"), + MTK_FUNCTION(6, "SPM_JTAG_TDO_VCORE"), + MTK_FUNCTION(7, "DBG_MON_A3") + ), + MTK_PIN( + 141, "GPIO141", + MTK_EINT_FUNCTION(0, 141), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO141"), + MTK_FUNCTION(1, "MIPI2_D_SCLK"), + MTK_FUNCTION(2, "BPI_BUS20"), + MTK_FUNCTION(4, "MD_UTXD1_CONN"), + MTK_FUNCTION(6, "SPM_JTAG_TDI_VCORE"), + MTK_FUNCTION(7, "DBG_MON_A4") + ), + MTK_PIN( + 142, "GPIO142", + MTK_EINT_FUNCTION(0, 142), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO142"), + MTK_FUNCTION(1, "MIPI2_D_SDATA"), + MTK_FUNCTION(2, "BPI_BUS21"), + MTK_FUNCTION(6, "SSPM_JTAG_TRSTN_VCORE"), + MTK_FUNCTION(7, "DBG_MON_A5") + ), + MTK_PIN( + 143, "GPIO143", + MTK_EINT_FUNCTION(0, 143), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO143"), + MTK_FUNCTION(1, "MIPI3_D_SCLK"), + MTK_FUNCTION(2, "BPI_BUS22"), + MTK_FUNCTION(4, "TP_UTXD_GNSS_VLP"), + MTK_FUNCTION(5, "MD_UTXD1_CONN"), + MTK_FUNCTION(6, "SSPM_JTAG_TCK_VCORE") + ), + MTK_PIN( + 144, "GPIO144", + MTK_EINT_FUNCTION(0, 144), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO144"), + MTK_FUNCTION(1, "MIPI3_D_SDATA"), + MTK_FUNCTION(2, "BPI_BUS23"), + MTK_FUNCTION(4, "TP_URXD_GNSS_VLP"), + MTK_FUNCTION(5, "MD_URXD1_CONN"), + MTK_FUNCTION(6, "SSPM_JTAG_TMS_VCORE") + ), + MTK_PIN( + 145, "GPIO145", + MTK_EINT_FUNCTION(0, 145), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO145"), + MTK_FUNCTION(1, "BPI_BUS0"), + MTK_FUNCTION(4, "PCIE_WAKEN_1P"), + MTK_FUNCTION(6, "SSPM_JTAG_TDO_VCORE"), + MTK_FUNCTION(7, "DBG_MON_A10") + ), + MTK_PIN( + 146, "GPIO146", + MTK_EINT_FUNCTION(0, 146), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO146"), + MTK_FUNCTION(1, "BPI_BUS1"), + MTK_FUNCTION(4, "PCIE_PERSTN_1P"), + MTK_FUNCTION(6, "SSPM_JTAG_TDI_VCORE"), + MTK_FUNCTION(7, "DBG_MON_A11") + ), + MTK_PIN( + 147, "GPIO147", + MTK_EINT_FUNCTION(0, 147), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO147"), + MTK_FUNCTION(1, "BPI_BUS2"), + MTK_FUNCTION(2, "AUD_DAC_26M_CLK"), + MTK_FUNCTION(4, "PCIE_CLKREQN_1P"), + MTK_FUNCTION(6, "SCP_JTAG_LITTLE_TRSTN_VCORE"), + MTK_FUNCTION(7, "DBG_MON_A12") + ), + MTK_PIN( + 148, "GPIO148", + MTK_EINT_FUNCTION(0, 148), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO148"), + MTK_FUNCTION(1, "BPI_BUS3"), + MTK_FUNCTION(2, "AUD_DAC_26M_CLK"), + MTK_FUNCTION(4, "TP_UTXD_MD_VLP"), + MTK_FUNCTION(5, "TP_GPIO0_AO"), + MTK_FUNCTION(6, "SCP_JTAG_LITTLE_TCK_VCORE"), + MTK_FUNCTION(7, "DBG_MON_A13") + ), + MTK_PIN( + 149, 
"GPIO149", + MTK_EINT_FUNCTION(0, 149), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO149"), + MTK_FUNCTION(1, "BPI_BUS4"), + MTK_FUNCTION(2, "EXT_FRAME_SYNC"), + MTK_FUNCTION(4, "TP_URXD_MD_VLP"), + MTK_FUNCTION(5, "TP_GPIO1_AO"), + MTK_FUNCTION(6, "SCP_JTAG_LITTLE_TMS_VCORE"), + MTK_FUNCTION(7, "DBG_MON_A14") + ), + MTK_PIN( + 150, "GPIO150", + MTK_EINT_FUNCTION(0, 150), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO150"), + MTK_FUNCTION(1, "BPI_BUS5"), + MTK_FUNCTION(2, "GPS_PPS0"), + MTK_FUNCTION(5, "TP_GPIO2_AO"), + MTK_FUNCTION(6, "SCP_JTAG_LITTLE_TDO_VCORE"), + MTK_FUNCTION(7, "DBG_MON_A15") + ), + MTK_PIN( + 151, "GPIO151", + MTK_EINT_FUNCTION(0, 151), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO151"), + MTK_FUNCTION(1, "BPI_BUS6"), + MTK_FUNCTION(2, "GPS_PPS1"), + MTK_FUNCTION(5, "TP_GPIO3_AO"), + MTK_FUNCTION(6, "SCP_JTAG_LITTLE_TDI_VCORE") + ), + MTK_PIN( + 152, "GPIO152", + MTK_EINT_FUNCTION(0, 152), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO152"), + MTK_FUNCTION(1, "BPI_BUS7"), + MTK_FUNCTION(2, "EDP_TX_HPD"), + MTK_FUNCTION(5, "AGPS_SYNC"), + MTK_FUNCTION(6, "SSPM_UTXD_AO_VCORE") + ), + MTK_PIN( + 153, "GPIO153", + MTK_EINT_FUNCTION(0, 153), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO153"), + MTK_FUNCTION(1, "MD_UCNT_A_TGL"), + MTK_FUNCTION(6, "TP_URTS1_VCORE"), + MTK_FUNCTION(7, "DBG_MON_A8") + ), + MTK_PIN( + 154, "GPIO154", + MTK_EINT_FUNCTION(0, 154), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO154"), + MTK_FUNCTION(1, "DIGRF_IRQ"), + MTK_FUNCTION(6, "TP_UCTS1_VCORE"), + MTK_FUNCTION(7, "DBG_MON_A9") + ), + MTK_PIN( + 155, "GPIO155", + MTK_EINT_FUNCTION(0, 155), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO155"), + MTK_FUNCTION(1, "MIPI_M_SCLK"), + MTK_FUNCTION(4, "UCTS2"), + MTK_FUNCTION(6, "TP_UTXD_CONSYS_VCORE"), + MTK_FUNCTION(7, "DBG_MON_A6") + ), + MTK_PIN( + 156, "GPIO156", + MTK_EINT_FUNCTION(0, 156), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO156"), + MTK_FUNCTION(1, "MIPI_M_SDATA"), + MTK_FUNCTION(4, "URTS2"), + MTK_FUNCTION(6, "TP_URXD_CONSYS_VCORE"), + MTK_FUNCTION(7, "DBG_MON_A7") + ), + MTK_PIN( + 157, "GPIO157", + MTK_EINT_FUNCTION(0, 157), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO157"), + MTK_FUNCTION(1, "BPI_BUS8"), + MTK_FUNCTION(4, "UTXD2"), + MTK_FUNCTION(5, "CLKM0_A"), + MTK_FUNCTION(6, "SSPM_URXD_AO_VCORE"), + MTK_FUNCTION(7, "DBG_MON_A16") + ), + MTK_PIN( + 158, "GPIO158", + MTK_EINT_FUNCTION(0, 158), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO158"), + MTK_FUNCTION(1, "BPI_BUS9"), + MTK_FUNCTION(4, "URXD2"), + MTK_FUNCTION(5, "CLKM1_A"), + MTK_FUNCTION(6, "TP_UTXD1_VCORE") + ), + MTK_PIN( + 159, "GPIO159", + MTK_EINT_FUNCTION(0, 159), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO159"), + MTK_FUNCTION(1, "BPI_BUS10"), + MTK_FUNCTION(2, "MD_INT0"), + MTK_FUNCTION(3, "SRCLKENAI1"), + MTK_FUNCTION(5, "CLKM2_A"), + MTK_FUNCTION(6, "TP_URXD1_VCORE") + ), + MTK_PIN( + 160, "GPIO160", + MTK_EINT_FUNCTION(0, 160), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO160"), + MTK_FUNCTION(1, "UTXD0"), + MTK_FUNCTION(2, "MD_UTXD1"), + MTK_FUNCTION(5, "MBISTREADEN_TRIGGER"), + MTK_FUNCTION(6, "CONN_BG_GPS_MCU_DBG_UART_TX") + ), + MTK_PIN( + 161, "GPIO161", + MTK_EINT_FUNCTION(0, 161), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO161"), + MTK_FUNCTION(1, "URXD0"), + MTK_FUNCTION(2, "MD_URXD1"), + MTK_FUNCTION(5, "MBISTWRITEEN_TRIGGER") + ), + MTK_PIN( + 162, "GPIO162", + MTK_EINT_FUNCTION(0, 162), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO162"), + MTK_FUNCTION(1, "UTXD1"), + MTK_FUNCTION(2, "MD_UTXD0"), + MTK_FUNCTION(3, "TP_UTXD1_VLP"), + MTK_FUNCTION(4, "ADSP_UTXD0"), + MTK_FUNCTION(5, "SSPM_UTXD_AO_VLP"), + MTK_FUNCTION(6, "HFRP_UTXD1") + ), + MTK_PIN( + 163, "GPIO163", + 
MTK_EINT_FUNCTION(0, 163), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO163"), + MTK_FUNCTION(1, "URXD1"), + MTK_FUNCTION(2, "MD_URXD0"), + MTK_FUNCTION(3, "TP_URXD1_VLP"), + MTK_FUNCTION(4, "ADSP_URXD0"), + MTK_FUNCTION(5, "SSPM_URXD_AO_VLP"), + MTK_FUNCTION(6, "HFRP_URXD1") + ), + MTK_PIN( + 164, "GPIO164", + MTK_EINT_FUNCTION(0, 164), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO164"), + MTK_FUNCTION(1, "SCP_SCL0"), + MTK_FUNCTION(6, "TP_GPIO0_AO"), + MTK_FUNCTION(7, "DBG_MON_A22") + ), + MTK_PIN( + 165, "GPIO165", + MTK_EINT_FUNCTION(0, 165), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO165"), + MTK_FUNCTION(1, "SCP_SDA0"), + MTK_FUNCTION(6, "TP_GPIO1_AO"), + MTK_FUNCTION(7, "DBG_MON_A23") + ), + MTK_PIN( + 166, "GPIO166", + MTK_EINT_FUNCTION(0, 166), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO166"), + MTK_FUNCTION(1, "SCP_SCL2"), + MTK_FUNCTION(6, "TP_GPIO2_AO"), + MTK_FUNCTION(7, "DBG_MON_A24") + ), + MTK_PIN( + 167, "GPIO167", + MTK_EINT_FUNCTION(0, 167), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO167"), + MTK_FUNCTION(1, "SCP_SDA2"), + MTK_FUNCTION(6, "TP_GPIO3_AO"), + MTK_FUNCTION(7, "DBG_MON_A25") + ), + MTK_PIN( + 168, "GPIO168", + MTK_EINT_FUNCTION(0, 168), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO168"), + MTK_FUNCTION(1, "SCP_SPI2_CK"), + MTK_FUNCTION(2, "SPI2_B_CLK"), + MTK_FUNCTION(3, "PWM_VLP"), + MTK_FUNCTION(4, "SCP_SCL2"), + MTK_FUNCTION(7, "DBG_MON_A26") + ), + MTK_PIN( + 169, "GPIO169", + MTK_EINT_FUNCTION(0, 169), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO169"), + MTK_FUNCTION(1, "SCP_SPI2_CS"), + MTK_FUNCTION(2, "SPI2_B_CSB"), + MTK_FUNCTION(7, "DBG_MON_A27") + ), + MTK_PIN( + 170, "GPIO170", + MTK_EINT_FUNCTION(0, 170), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO170"), + MTK_FUNCTION(1, "SCP_SPI2_MO"), + MTK_FUNCTION(2, "SPI2_B_MO"), + MTK_FUNCTION(4, "SCP_SDA2"), + MTK_FUNCTION(7, "DBG_MON_A28") + ), + MTK_PIN( + 171, "GPIO171", + MTK_EINT_FUNCTION(0, 171), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO171"), + MTK_FUNCTION(1, "SCP_SPI2_MI"), + MTK_FUNCTION(2, "SPI2_B_MI"), + MTK_FUNCTION(7, "DBG_MON_A29") + ), + MTK_PIN( + 172, "GPIO172", + MTK_EINT_FUNCTION(0, 172), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO172"), + MTK_FUNCTION(1, "CONN_TCXOENA_REQ") + ), + MTK_PIN( + 173, "GPIO173", + MTK_EINT_FUNCTION(0, 173), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO173"), + MTK_FUNCTION(1, "CMFLASH3"), + MTK_FUNCTION(2, "PWM_3"), + MTK_FUNCTION(3, "MD_GPS_L5_BLANK"), + MTK_FUNCTION(4, "CLKM1_A"), + MTK_FUNCTION(7, "DBG_MON_A31") + ), + MTK_PIN( + 174, "GPIO174", + MTK_EINT_FUNCTION(0, 174), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO174"), + MTK_FUNCTION(1, "CMFLASH0"), + MTK_FUNCTION(2, "PWM_0"), + MTK_FUNCTION(3, "VBUSVALID_1P"), + MTK_FUNCTION(4, "MD32_2_RXD"), + MTK_FUNCTION(5, "DISP_PWM3") + ), + MTK_PIN( + 175, "GPIO175", + MTK_EINT_FUNCTION(0, 175), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO175"), + MTK_FUNCTION(1, "CMFLASH1"), + MTK_FUNCTION(2, "PWM_1"), + MTK_FUNCTION(3, "EDP_TX_HPD"), + MTK_FUNCTION(4, "MD32_2_TXD"), + MTK_FUNCTION(5, "DISP_PWM4") + ), + MTK_PIN( + 176, "GPIO176", + MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO176"), + MTK_FUNCTION(1, "SCL5"), + MTK_FUNCTION(2, "LCM3_RST"), + MTK_FUNCTION(4, "MD_URXD1_CONN"), + MTK_FUNCTION(6, "TP_UTXD_GNSS_VCORE") + ), + MTK_PIN( + 177, "GPIO177", + MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO177"), + MTK_FUNCTION(1, "SDA5"), + MTK_FUNCTION(2, "DSI3_TE"), + MTK_FUNCTION(4, "MD_UTXD1_CONN"), + MTK_FUNCTION(6, "TP_URXD_GNSS_VCORE") + ), + MTK_PIN( + 178, "GPIO178", + MTK_EINT_FUNCTION(0, 178), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO178"), 
+ MTK_FUNCTION(1, "DMIC_CLK"), + MTK_FUNCTION(2, "SCP_DMIC_CLK"), + MTK_FUNCTION(3, "SRCLKENAI0"), + MTK_FUNCTION(4, "CLKM2_B"), + MTK_FUNCTION(5, "TP_GPIO7_AO"), + MTK_FUNCTION(6, "SPU1_UTX"), + MTK_FUNCTION(7, "DAP_SONIC_SWCK") + ), + MTK_PIN( + 179, "GPIO179", + MTK_EINT_FUNCTION(0, 179), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO179"), + MTK_FUNCTION(1, "DMIC_DAT"), + MTK_FUNCTION(2, "SCP_DMIC_DAT"), + MTK_FUNCTION(3, "SRCLKENAI1"), + MTK_FUNCTION(4, "CLKM3_B"), + MTK_FUNCTION(5, "TP_GPIO8_AO"), + MTK_FUNCTION(6, "SPU1_URX"), + MTK_FUNCTION(7, "DAP_SONIC_SWD") + ), + MTK_PIN( + 180, "GPIO180", + MTK_EINT_FUNCTION(0, 180), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO180"), + MTK_FUNCTION(1, "IDDIG_1P"), + MTK_FUNCTION(2, "CMVREF0"), + MTK_FUNCTION(3, "GPS_PPS1"), + MTK_FUNCTION(4, "GPS_L5_ELNA_EN"), + MTK_FUNCTION(5, "DISP_PWM1") + ), + MTK_PIN( + 181, "GPIO181", + MTK_EINT_FUNCTION(0, 181), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO181"), + MTK_FUNCTION(1, "USB_DRVVBUS_1P"), + MTK_FUNCTION(2, "CMVREF1"), + MTK_FUNCTION(3, "MFG_EB_JTAG_TRSTN"), + MTK_FUNCTION(4, "ADSP_JTAG1_TRSTN"), + MTK_FUNCTION(5, "HFRP_JTAG1_TRSTN"), + MTK_FUNCTION(6, "SPU1_NTRST"), + MTK_FUNCTION(7, "CONN_BG_GPS_MCU_TRST_B") + ), + MTK_PIN( + 182, "GPIO182", + MTK_EINT_FUNCTION(0, 182), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO182"), + MTK_FUNCTION(1, "SCL11"), + MTK_FUNCTION(2, "CMVREF2"), + MTK_FUNCTION(3, "MFG_EB_JTAG_TCK"), + MTK_FUNCTION(4, "ADSP_JTAG1_TCK"), + MTK_FUNCTION(5, "HFRP_JTAG1_TCK"), + MTK_FUNCTION(6, "SPU1_TCK"), + MTK_FUNCTION(7, "CONN_BG_GPS_MCU_TCK") + ), + MTK_PIN( + 183, "GPIO183", + MTK_EINT_FUNCTION(0, 183), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO183"), + MTK_FUNCTION(1, "SDA11"), + MTK_FUNCTION(2, "CMVREF3"), + MTK_FUNCTION(3, "MFG_EB_JTAG_TMS"), + MTK_FUNCTION(4, "ADSP_JTAG1_TMS"), + MTK_FUNCTION(5, "HFRP_JTAG1_TMS"), + MTK_FUNCTION(6, "SPU1_TMS"), + MTK_FUNCTION(7, "CONN_BG_GPS_MCU_TMS") + ), + MTK_PIN( + 184, "GPIO184", + MTK_EINT_FUNCTION(0, 184), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO184"), + MTK_FUNCTION(1, "SCL12"), + MTK_FUNCTION(2, "CMVREF4"), + MTK_FUNCTION(3, "MFG_EB_JTAG_TDO"), + MTK_FUNCTION(4, "ADSP_JTAG1_TDO"), + MTK_FUNCTION(5, "HFRP_JTAG1_TDO"), + MTK_FUNCTION(6, "SPU1_TDO"), + MTK_FUNCTION(7, "CONN_BG_GPS_MCU_TDO") + ), + MTK_PIN( + 185, "GPIO185", + MTK_EINT_FUNCTION(0, 185), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO185"), + MTK_FUNCTION(1, "SDA12"), + MTK_FUNCTION(2, "CMVREF5"), + MTK_FUNCTION(3, "MFG_EB_JTAG_TDI"), + MTK_FUNCTION(4, "ADSP_JTAG1_TDI"), + MTK_FUNCTION(5, "HFRP_JTAG1_TDI"), + MTK_FUNCTION(6, "SPU1_TDI"), + MTK_FUNCTION(7, "CONN_BG_GPS_MCU_TDI") + ), + MTK_PIN( + 186, "GPIO186", + MTK_EINT_FUNCTION(0, 186), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO186"), + MTK_FUNCTION(1, "MD_GPS_L1_BLANK"), + MTK_FUNCTION(2, "PMSR_SMAP"), + MTK_FUNCTION(3, "TP_GPIO2_AO") + ), + MTK_PIN( + 187, "GPIO187", + MTK_EINT_FUNCTION(0, 187), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO187"), + MTK_FUNCTION(1, "MD_GPS_L5_BLANK"), + MTK_FUNCTION(3, "TP_GPIO4_AO") + ), + MTK_PIN( + 188, "GPIO188", + MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO188"), + MTK_FUNCTION(1, "SCL2"), + MTK_FUNCTION(2, "SCP_SCL8") + ), + MTK_PIN( + 189, "GPIO189", + MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO189"), + MTK_FUNCTION(1, "SDA2"), + MTK_FUNCTION(2, "SCP_SDA8") + ), + MTK_PIN( + 190, "GPIO190", + MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO190"), + MTK_FUNCTION(1, "SCL4"), + MTK_FUNCTION(2, "SCP_SCL9"), + MTK_FUNCTION(6, 
"UDI_TDI_6") + ), + MTK_PIN( + 191, "GPIO191", + MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO191"), + MTK_FUNCTION(1, "SDA4"), + MTK_FUNCTION(2, "SCP_SDA9"), + MTK_FUNCTION(6, "UDI_TDI_7") + ), + MTK_PIN( + 192, "GPIO192", + MTK_EINT_FUNCTION(0, 192), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO192"), + MTK_FUNCTION(1, "CMMCLK2"), + MTK_FUNCTION(4, "MD32_3_RXD") + ), + MTK_PIN( + 193, "GPIO193", + MTK_EINT_FUNCTION(0, 193), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO193"), + MTK_FUNCTION(3, "CLKM0_B"), + MTK_FUNCTION(4, "MD32_3_TXD"), + MTK_FUNCTION(6, "UDI_TDO_7") + ), + MTK_PIN( + 194, "GPIO194", + MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO194"), + MTK_FUNCTION(1, "SCL7"), + MTK_FUNCTION(2, "MD32_3_GPIO0"), + MTK_FUNCTION(3, "CLKM2_B"), + MTK_FUNCTION(6, "UDI_TDI_2") + ), + MTK_PIN( + 195, "GPIO195", + MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO195"), + MTK_FUNCTION(1, "SDA7"), + MTK_FUNCTION(3, "CLKM3_B"), + MTK_FUNCTION(6, "UDI_TDI_3") + ), + MTK_PIN( + 196, "GPIO196", + MTK_EINT_FUNCTION(0, 196), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO196"), + MTK_FUNCTION(1, "CMMCLK3") + ), + MTK_PIN( + 197, "GPIO197", + MTK_EINT_FUNCTION(0, 197), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO197"), + MTK_FUNCTION(3, "CLKM1_B"), + MTK_FUNCTION(6, "UDI_TDI_1") + ), + MTK_PIN( + 198, "GPIO198", + MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO198"), + MTK_FUNCTION(1, "SCL8"), + MTK_FUNCTION(6, "UDI_TDI_4") + ), + MTK_PIN( + 199, "GPIO199", + MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO199"), + MTK_FUNCTION(1, "SDA8"), + MTK_FUNCTION(6, "UDI_TDI_5") + ), + MTK_PIN( + 200, "GPIO200", + MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO200"), + MTK_FUNCTION(1, "SCL1") + ), + MTK_PIN( + 201, "GPIO201", + MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO201"), + MTK_FUNCTION(1, "SDA1"), + MTK_FUNCTION(7, "TSFDC_BG_COMP") + ), + MTK_PIN( + 202, "GPIO202", + MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO202"), + MTK_FUNCTION(1, "SCL9"), + MTK_FUNCTION(2, "SCP_SCL7"), + MTK_FUNCTION(6, "TP_GPIO15_AO") + ), + MTK_PIN( + 203, "GPIO203", + MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO203"), + MTK_FUNCTION(1, "SDA9"), + MTK_FUNCTION(2, "SCP_SDA7"), + MTK_FUNCTION(6, "TP_GPIO9_AO") + ), + MTK_PIN( + 204, "GPIO204", + MTK_EINT_FUNCTION(0, 204), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO204"), + MTK_FUNCTION(1, "SCL13"), + MTK_FUNCTION(2, "CMVREF6"), + MTK_FUNCTION(3, "GPS_L1_ELNA_EN"), + MTK_FUNCTION(5, "CLKM2_B"), + MTK_FUNCTION(6, "TP_GPIO12_AO") + ), + MTK_PIN( + 205, "GPIO205", + MTK_EINT_FUNCTION(0, 205), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO205"), + MTK_FUNCTION(1, "SDA13"), + MTK_FUNCTION(2, "CMVREF7"), + MTK_FUNCTION(3, "GPS_L5_ELNA_EN"), + MTK_FUNCTION(5, "CLKM3_B"), + MTK_FUNCTION(6, "TP_GPIO13_AO") + ), + MTK_PIN( + 206, "GPIO206", + MTK_EINT_FUNCTION(0, 206), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO206"), + MTK_FUNCTION(2, "MD32_2_GPIO0"), + MTK_FUNCTION(5, "VBUSVALID"), + MTK_FUNCTION(6, "UDI_TDO_3") + ), + MTK_PIN( + 207, "GPIO207", + MTK_EINT_FUNCTION(0, 207), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO207"), + MTK_FUNCTION(1, "PCIE_WAKEN_2P"), + MTK_FUNCTION(2, "PMSR_SMAP_MAX"), + MTK_FUNCTION(4, "FMI2S_A_BCK"), + MTK_FUNCTION(6, "UDI_TDO_4") + ), + MTK_PIN( + 208, "GPIO208", + 
MTK_EINT_FUNCTION(0, 208), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO208"), + MTK_FUNCTION(1, "PCIE_CLKREQN_2P"), + MTK_FUNCTION(2, "PMSR_SMAP_MAX_W"), + MTK_FUNCTION(4, "FMI2S_A_LRCK"), + MTK_FUNCTION(5, "CLKM0_B"), + MTK_FUNCTION(6, "UDI_TDO_5") + ), + MTK_PIN( + 209, "GPIO209", + MTK_EINT_FUNCTION(0, 209), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO209"), + MTK_FUNCTION(1, "PCIE_PERSTN_2P"), + MTK_FUNCTION(2, "PMSR_SMAP"), + MTK_FUNCTION(4, "FMI2S_A_DI"), + MTK_FUNCTION(5, "CLKM1_B"), + MTK_FUNCTION(6, "UDI_TDO_6") + ), + MTK_PIN( + 210, "GPIO210", + MTK_EINT_FUNCTION(0, 210), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO210"), + MTK_FUNCTION(1, "CMMCLK4") + ), + MTK_PIN( + 211, "GPIO211", + MTK_EINT_FUNCTION(0, 211), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO211"), + MTK_FUNCTION(1, "CMMCLK5"), + MTK_FUNCTION(2, "CONN_TCXOENA_REQ") + ), + MTK_PIN( + 212, "GPIO212", + MTK_EINT_FUNCTION(0, 212), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO212"), + MTK_FUNCTION(1, "CMMCLK6"), + MTK_FUNCTION(2, "TP_GPIO10_AO"), + MTK_FUNCTION(5, "IDDIG"), + MTK_FUNCTION(6, "UDI_TDO_1") + ), + MTK_PIN( + 213, "GPIO213", + MTK_EINT_FUNCTION(0, 213), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO213"), + MTK_FUNCTION(1, "CMMCLK7"), + MTK_FUNCTION(2, "TP_GPIO11_AO"), + MTK_FUNCTION(5, "USB_DRVVBUS"), + MTK_FUNCTION(6, "UDI_TDO_2") + ), + MTK_PIN( + 214, "GPIO214", + MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO214"), + MTK_FUNCTION(1, "SCP_SCL3"), + MTK_FUNCTION(2, "SDA14_E1_SCL14_E2"), + MTK_FUNCTION(6, "GBE1_MDC"), + MTK_FUNCTION(7, "GBE0_MDC") + ), + MTK_PIN( + 215, "GPIO215", + MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO215"), + MTK_FUNCTION(1, "SCP_SDA3"), + MTK_FUNCTION(2, "SCL14_E1_SDA14_E2"), + MTK_FUNCTION(6, "GBE1_MDIO"), + MTK_FUNCTION(7, "GBE0_MDIO") + ), + MTK_PIN( + 216, "GPIO216", + MTK_EINT_FUNCTION(0, 216), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO216"), + MTK_FUNCTION(1, "GPS_PPS0") + ), + MTK_PIN( + 217, "GPIO217", + MTK_EINT_FUNCTION(0, 217), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO217"), + MTK_FUNCTION(1, "KPROW0"), + MTK_FUNCTION(6, "TP_GPIO12_AO") + ), + MTK_PIN( + 218, "GPIO218", + MTK_EINT_FUNCTION(0, 218), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO218"), + MTK_FUNCTION(1, "KPROW1"), + MTK_FUNCTION(2, "SPI0_WP"), + MTK_FUNCTION(3, "MBISTREADEN_TRIGGER"), + MTK_FUNCTION(5, "GPS_L5_ELNA_EN"), + MTK_FUNCTION(6, "TP_GPIO14_AO") + ), + MTK_PIN( + 219, "GPIO219", + MTK_EINT_FUNCTION(0, 219), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO219"), + MTK_FUNCTION(1, "KPCOL1"), + MTK_FUNCTION(2, "SPI0_HOLD"), + MTK_FUNCTION(3, "MBISTWRITEEN_TRIGGER"), + MTK_FUNCTION(4, "SPMI_M_TRIG_FLAG"), + MTK_FUNCTION(5, "GPS_L1_ELNA_EN"), + MTK_FUNCTION(6, "SPM_JTAG_TRSTN_VLP"), + MTK_FUNCTION(7, "JTRSTN_SEL1") + ), + MTK_PIN( + 220, "GPIO220", + MTK_EINT_FUNCTION(0, 220), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO220"), + MTK_FUNCTION(1, "SPI0_CLK"), + MTK_FUNCTION(6, "SPM_JTAG_TCK_VLP"), + MTK_FUNCTION(7, "JTCK_SEL1") + ), + MTK_PIN( + 221, "GPIO221", + MTK_EINT_FUNCTION(0, 221), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO221"), + MTK_FUNCTION(1, "SPI0_CSB"), + MTK_FUNCTION(6, "SPM_JTAG_TMS_VLP"), + MTK_FUNCTION(7, "JTMS_SEL1") + ), + MTK_PIN( + 222, "GPIO222", + MTK_EINT_FUNCTION(0, 222), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO222"), + MTK_FUNCTION(1, "SPI0_MO"), + MTK_FUNCTION(2, "SCP_SCL7"), + MTK_FUNCTION(6, "SPM_JTAG_TDO_VLP"), + MTK_FUNCTION(7, "JTDO_SEL1") + ), + MTK_PIN( + 223, "GPIO223", + MTK_EINT_FUNCTION(0, 223), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO223"), + MTK_FUNCTION(1, "SPI0_MI"), + MTK_FUNCTION(2, 
"SCP_SDA7"), + MTK_FUNCTION(6, "SPM_JTAG_TDI_VLP"), + MTK_FUNCTION(7, "JTDI_SEL1") + ), + MTK_PIN( + 224, "GPIO224", + MTK_EINT_FUNCTION(0, 224), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO224"), + MTK_FUNCTION(1, "MSDC2_CLK"), + MTK_FUNCTION(2, "DMIC2_CLK"), + MTK_FUNCTION(3, "GBE0_AUX_PPS0"), + MTK_FUNCTION(4, "GBE0_TXER"), + MTK_FUNCTION(5, "GBE1_TXER"), + MTK_FUNCTION(6, "GBE1_AUX_PPS0"), + MTK_FUNCTION(7, "MD32_1_TXD") + ), + MTK_PIN( + 225, "GPIO225", + MTK_EINT_FUNCTION(0, 225), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO225"), + MTK_FUNCTION(1, "MSDC2_CMD"), + MTK_FUNCTION(2, "DMIC2_DAT"), + MTK_FUNCTION(3, "GBE0_AUX_PPS1"), + MTK_FUNCTION(4, "GBE0_RXER"), + MTK_FUNCTION(5, "GBE1_RXER"), + MTK_FUNCTION(6, "GBE1_AUX_PPS1"), + MTK_FUNCTION(7, "MD32_1_RXD") + ), + MTK_PIN( + 226, "GPIO226", + MTK_EINT_FUNCTION(0, 226), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO226"), + MTK_FUNCTION(1, "MSDC2_DAT0"), + MTK_FUNCTION(2, "I2SIN3_BCK"), + MTK_FUNCTION(3, "GBE0_AUX_PPS2"), + MTK_FUNCTION(4, "GBE0_COL"), + MTK_FUNCTION(5, "GBE1_COL"), + MTK_FUNCTION(6, "GBE1_AUX_PPS2"), + MTK_FUNCTION(7, "GBE1_MDC") + ), + MTK_PIN( + 227, "GPIO227", + MTK_EINT_FUNCTION(0, 227), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO227"), + MTK_FUNCTION(1, "MSDC2_DAT1"), + MTK_FUNCTION(2, "I2SIN3_LRCK"), + MTK_FUNCTION(3, "GBE0_AUX_PPS3"), + MTK_FUNCTION(4, "GBE0_INTR"), + MTK_FUNCTION(5, "GBE1_INTR"), + MTK_FUNCTION(6, "GBE1_AUX_PPS3"), + MTK_FUNCTION(7, "GBE1_MDIO") + ), + MTK_PIN( + 228, "GPIO228", + MTK_EINT_FUNCTION(0, 228), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO228"), + MTK_FUNCTION(1, "MSDC2_DAT2"), + MTK_FUNCTION(2, "I2SIN3_DI"), + MTK_FUNCTION(3, "GBE0_MDC"), + MTK_FUNCTION(4, "GBE1_MDC"), + MTK_FUNCTION(5, "CONN_BG_GPS_MCU_AICE_TCKC") + ), + MTK_PIN( + 229, "GPIO229", + MTK_EINT_FUNCTION(0, 229), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO229"), + MTK_FUNCTION(1, "MSDC2_DAT3"), + MTK_FUNCTION(2, "I2SOUT3_DO"), + MTK_FUNCTION(3, "GBE0_MDIO"), + MTK_FUNCTION(4, "GBE1_MDIO"), + MTK_FUNCTION(5, "CONN_BG_GPS_MCU_AICE_TMSC"), + MTK_FUNCTION(7, "AVB_CLK2") + ), + MTK_PIN( + 230, "GPIO230", + MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO230"), + MTK_FUNCTION(1, "CONN_TOP_CLK") + ), + MTK_PIN( + 231, "GPIO231", + MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO231"), + MTK_FUNCTION(1, "CONN_TOP_DATA") + ), + MTK_PIN( + 232, "GPIO232", + MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO232"), + MTK_FUNCTION(1, "CONN_HRST_B") + ), + MTK_PIN( + 233, "GPIO233", + MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO233"), + MTK_FUNCTION(1, "I2SIN0_BCK") + ), + MTK_PIN( + 234, "GPIO234", + MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO234"), + MTK_FUNCTION(1, "I2SIN0_LRCK") + ), + MTK_PIN( + 235, "GPIO235", + MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO235"), + MTK_FUNCTION(1, "I2SIN0_DI") + ), + MTK_PIN( + 236, "GPIO236", + MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO236"), + MTK_FUNCTION(1, "I2SOUT0_DO") + ), + MTK_PIN( + 237, "GPIO237", + MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO237"), + MTK_FUNCTION(1, "CONN_UARTHUB_UART_TX"), + MTK_FUNCTION(3, "UTXD3") + ), + MTK_PIN( + 238, "GPIO238", + MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO238"), + MTK_FUNCTION(1, "CONN_UARTHUB_UART_RX"), + 
MTK_FUNCTION(3, "URXD3") + ), + MTK_PIN( + 239, "GPIO239", + MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO239"), + MTK_FUNCTION(1, "TP_UTXD_CONSYS_VLP"), + MTK_FUNCTION(2, "TP_URXD_CONSYS_VLP") + ), + MTK_PIN( + 240, "GPIO240", + MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO240"), + MTK_FUNCTION(1, "TP_URXD_CONSYS_VLP"), + MTK_FUNCTION(2, "TP_UTXD_CONSYS_VLP") + ), + MTK_PIN( + 241, "GPIO241", + MTK_EINT_FUNCTION(0, 241), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO241"), + MTK_FUNCTION(1, "PCIE_PERSTN") + ), + MTK_PIN( + 242, "GPIO242", + MTK_EINT_FUNCTION(0, 242), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO242"), + MTK_FUNCTION(1, "PCIE_WAKEN") + ), + MTK_PIN( + 243, "GPIO243", + MTK_EINT_FUNCTION(0, 243), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO243"), + MTK_FUNCTION(1, "PCIE_CLKREQN") + ), + MTK_PIN( + 244, "GPIO244", + MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO244"), + MTK_FUNCTION(1, "CONN_RST") + ), + MTK_PIN( + 245, "GPIO245", + MTK_EINT_FUNCTION(0, 245), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO245") + ), + MTK_PIN( + 246, "GPIO246", + MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO246"), + MTK_FUNCTION(1, "CONN_PTA_TXD0") + ), + MTK_PIN( + 247, "GPIO247", + MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO247"), + MTK_FUNCTION(1, "CONN_PTA_RXD0") + ), + MTK_PIN( + 248, "GPIO248", + MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO248"), + MTK_FUNCTION(3, "UCTS3") + ), + MTK_PIN( + 249, "GPIO249", + MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO249"), + MTK_FUNCTION(3, "URTS3") + ), + MTK_PIN( + 250, "GPIO250", + MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO250") + ), + MTK_PIN( + 251, "GPIO251", + MTK_EINT_FUNCTION(0, 251), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO251"), + MTK_FUNCTION(1, "IDDIG_1P") + ), + MTK_PIN( + 252, "GPIO252", + MTK_EINT_FUNCTION(0, 252), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO252"), + MTK_FUNCTION(1, "USB_DRVVBUS_1P") + ), + MTK_PIN( + 253, "GPIO253", + MTK_EINT_FUNCTION(0, 253), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO253"), + MTK_FUNCTION(1, "VBUSVALID_1P") + ), + MTK_PIN( + 254, "GPIO254", + MTK_EINT_FUNCTION(0, 254), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO254"), + MTK_FUNCTION(1, "IDDIG_2P") + ), + MTK_PIN( + 255, "GPIO255", + MTK_EINT_FUNCTION(0, 255), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO255"), + MTK_FUNCTION(1, "USB_DRVVBUS_2P") + ), + MTK_PIN( + 256, "GPIO256", + MTK_EINT_FUNCTION(0, 256), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO256"), + MTK_FUNCTION(1, "VBUSVALID_2P") + ), + MTK_PIN( + 257, "GPIO257", + MTK_EINT_FUNCTION(0, 257), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO257"), + MTK_FUNCTION(1, "VBUSVALID_3P") + ), + MTK_PIN( + 258, "GPIO258", + MTK_EINT_FUNCTION(0, 258), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO258"), + MTK_FUNCTION(7, "AVB_CLK1") + ), + MTK_PIN( + 259, "GPIO259", + MTK_EINT_FUNCTION(0, 259), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO259"), + MTK_FUNCTION(1, "GBE0_TXD0"), + MTK_FUNCTION(2, "GBE1_TXD0") + ), + MTK_PIN( + 260, "GPIO260", + MTK_EINT_FUNCTION(0, 260), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO260"), + MTK_FUNCTION(1, "GBE0_TXD1"), + MTK_FUNCTION(2, "GBE1_TXD1") + ), + MTK_PIN( + 261, "GPIO261", + MTK_EINT_FUNCTION(0, 261), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO261"), + MTK_FUNCTION(1, "GBE0_TXC"), + MTK_FUNCTION(2, "GBE1_TXC") + ), + MTK_PIN( + 262, "GPIO262", + 
MTK_EINT_FUNCTION(0, 262), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO262"), + MTK_FUNCTION(1, "GBE0_TXEN"), + MTK_FUNCTION(2, "GBE1_TXEN") + ), + MTK_PIN( + 263, "GPIO263", + MTK_EINT_FUNCTION(0, 263), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO263"), + MTK_FUNCTION(1, "GBE0_RXD0"), + MTK_FUNCTION(2, "GBE1_RXD0"), + MTK_FUNCTION(3, "GBE0_AUX_PPS0") + ), + MTK_PIN( + 264, "GPIO264", + MTK_EINT_FUNCTION(0, 264), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO264"), + MTK_FUNCTION(1, "GBE0_RXD1"), + MTK_FUNCTION(2, "GBE1_RXD1"), + MTK_FUNCTION(3, "GBE0_AUX_PPS1") + ), + MTK_PIN( + 265, "GPIO265", + MTK_EINT_FUNCTION(0, 265), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO265"), + MTK_FUNCTION(1, "GBE0_RXC"), + MTK_FUNCTION(2, "GBE1_RXC"), + MTK_FUNCTION(3, "GBE0_AUX_PPS2") + ), + MTK_PIN( + 266, "GPIO266", + MTK_EINT_FUNCTION(0, 266), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO266"), + MTK_FUNCTION(1, "GBE0_RXDV"), + MTK_FUNCTION(2, "GBE1_RXDV"), + MTK_FUNCTION(3, "GBE0_AUX_PPS3") + ), + MTK_PIN( + 267, "GPIO267", + MTK_EINT_FUNCTION(0, 267), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO267"), + MTK_FUNCTION(1, "GBE0_TXD2"), + MTK_FUNCTION(2, "GBE1_TXD2"), + MTK_FUNCTION(3, "GBE0_RXER"), + MTK_FUNCTION(4, "GBE1_RXER") + ), + MTK_PIN( + 268, "GPIO268", + MTK_EINT_FUNCTION(0, 268), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO268"), + MTK_FUNCTION(1, "GBE0_TXD3"), + MTK_FUNCTION(2, "GBE1_TXD3") + ), + MTK_PIN( + 269, "GPIO269", + MTK_EINT_FUNCTION(0, 269), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO269"), + MTK_FUNCTION(1, "GBE0_RXD2"), + MTK_FUNCTION(2, "GBE1_RXD2"), + MTK_FUNCTION(3, "GBE0_MDC") + ), + MTK_PIN( + 270, "GPIO270", + MTK_EINT_FUNCTION(0, 270), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO270"), + MTK_FUNCTION(1, "GBE0_RXD3"), + MTK_FUNCTION(2, "GBE1_RXD3"), + MTK_FUNCTION(3, "GBE0_MDIO") + ), + MTK_PIN( + 271, "veint271", + MTK_EINT_FUNCTION(0, 271), + DRV_GRP4, + MTK_FUNCTION(0, NULL) + ), + MTK_PIN( + 272, "veint272", + MTK_EINT_FUNCTION(0, 272), + DRV_GRP4, + MTK_FUNCTION(0, NULL) + ), + MTK_PIN( + 273, "veint273", + MTK_EINT_FUNCTION(0, 273), + DRV_GRP4, + MTK_FUNCTION(0, NULL) + ), + MTK_PIN( + 274, "veint274", + MTK_EINT_FUNCTION(0, 274), + DRV_GRP4, + MTK_FUNCTION(0, NULL) + ), + MTK_PIN( + 275, "veint275", + MTK_EINT_FUNCTION(0, 275), + DRV_GRP4, + MTK_FUNCTION(0, NULL) + ), + MTK_PIN( + 276, "veint276", + MTK_EINT_FUNCTION(0, 276), + DRV_GRP4, + MTK_FUNCTION(0, NULL) + ), + MTK_PIN( + 277, "veint277", + MTK_EINT_FUNCTION(0, 277), + DRV_GRP4, + MTK_FUNCTION(0, NULL) + ), + MTK_PIN( + 278, "veint278", + MTK_EINT_FUNCTION(0, 278), + DRV_GRP4, + MTK_FUNCTION(0, NULL) + ), + MTK_PIN( + 279, "veint279", + MTK_EINT_FUNCTION(0, 279), + DRV_GRP4, + MTK_FUNCTION(0, NULL) + ), + MTK_PIN( + 280, "veint280", + MTK_EINT_FUNCTION(0, 280), + DRV_GRP4, + MTK_FUNCTION(0, NULL) + ), + MTK_PIN( + 281, "veint281", + MTK_EINT_FUNCTION(0, 281), + DRV_GRP4, + MTK_FUNCTION(0, NULL) + ), + MTK_PIN( + 282, "veint282", + MTK_EINT_FUNCTION(0, 282), + DRV_GRP4, + MTK_FUNCTION(0, NULL) + ), + MTK_PIN( + 283, "veint283", + MTK_EINT_FUNCTION(0, 283), + DRV_GRP4, + MTK_FUNCTION(0, NULL) + ), + MTK_PIN( + 284, "veint284", + MTK_EINT_FUNCTION(0, 284), + DRV_GRP4, + MTK_FUNCTION(0, NULL) + ), + MTK_PIN( + 285, "veint285", + MTK_EINT_FUNCTION(0, 285), + DRV_GRP4, + MTK_FUNCTION(0, NULL) + ), + MTK_PIN( + 286, "veint286", + MTK_EINT_FUNCTION(0, 286), + DRV_GRP4, + MTK_FUNCTION(0, NULL) + ), + MTK_PIN( + 287, "veint287", + MTK_EINT_FUNCTION(0, 287), + DRV_GRP4, + MTK_FUNCTION(0, NULL) + ), + MTK_PIN( + 288, "veint288", + MTK_EINT_FUNCTION(0, 288), + DRV_GRP4, + MTK_FUNCTION(0, NULL) + ), + 
MTK_PIN( + 289, "veint289", + MTK_EINT_FUNCTION(0, 289), + DRV_GRP4, + MTK_FUNCTION(0, NULL) + ), + MTK_PIN( + 290, "veint290", + MTK_EINT_FUNCTION(0, 290), + DRV_GRP4, + MTK_FUNCTION(0, NULL) + ), + MTK_PIN( + 291, "veint291", + MTK_EINT_FUNCTION(0, 291), + DRV_GRP4, + MTK_FUNCTION(0, NULL) + ), + MTK_PIN( + 292, "veint292", + MTK_EINT_FUNCTION(0, 292), + DRV_GRP4, + MTK_FUNCTION(0, NULL) + ) +}; + +static struct mtk_eint_pin eint_pins_mt8196[] = { + MTK_EINT_PIN(0, 2, 0, 1), + MTK_EINT_PIN(1, 2, 1, 1), + MTK_EINT_PIN(2, 2, 16, 0), + MTK_EINT_PIN(3, 2, 17, 0), + MTK_EINT_PIN(4, 2, 2, 1), + MTK_EINT_PIN(5, 2, 3, 1), + MTK_EINT_PIN(6, 2, 4, 1), + MTK_EINT_PIN(7, 2, 5, 1), + MTK_EINT_PIN(8, 2, 6, 1), + MTK_EINT_PIN(9, 2, 18, 0), + MTK_EINT_PIN(10, 2, 7, 1), + MTK_EINT_PIN(11, 2, 8, 1), + MTK_EINT_PIN(12, 2, 9, 1), + MTK_EINT_PIN(13, 1, 4, 0), + MTK_EINT_PIN(14, 0, 0, 1), + MTK_EINT_PIN(15, 1, 5, 0), + MTK_EINT_PIN(16, 1, 6, 0), + MTK_EINT_PIN(17, 1, 7, 0), + MTK_EINT_PIN(18, 1, 8, 0), + MTK_EINT_PIN(19, 1, 9, 0), + MTK_EINT_PIN(20, 0, 1, 1), + MTK_EINT_PIN(21, 0, 10, 0), + MTK_EINT_PIN(22, 0, 11, 0), + MTK_EINT_PIN(23, 0, 12, 0), + MTK_EINT_PIN(24, 0, 13, 0), + MTK_EINT_PIN(25, 0, 14, 0), + MTK_EINT_PIN(26, 0, 15, 0), + MTK_EINT_PIN(27, 0, 2, 1), + MTK_EINT_PIN(28, 0, 16, 0), + MTK_EINT_PIN(29, 0, 17, 0), + MTK_EINT_PIN(30, 0, 18, 0), + MTK_EINT_PIN(31, 0, 3, 1), + MTK_EINT_PIN(32, 0, 19, 0), + MTK_EINT_PIN(33, 0, 20, 0), + MTK_EINT_PIN(34, 0, 21, 0), + MTK_EINT_PIN(35, 0, 22, 0), + MTK_EINT_PIN(36, 0, 23, 0), + MTK_EINT_PIN(37, 0, 24, 0), + MTK_EINT_PIN(38, 0, 25, 0), + MTK_EINT_PIN(39, 2, 10, 1), + MTK_EINT_PIN(40, 2, 11, 1), + MTK_EINT_PIN(41, 2, 12, 1), + MTK_EINT_PIN(42, 2, 13, 1), + MTK_EINT_PIN(43, 2, 14, 1), + MTK_EINT_PIN(44, 2, 19, 0), + MTK_EINT_PIN(45, 2, 20, 0), + MTK_EINT_PIN(46, 2, 21, 0), + MTK_EINT_PIN(47, 2, 22, 0), + MTK_EINT_PIN(48, 2, 23, 0), + MTK_EINT_PIN(49, 2, 24, 0), + MTK_EINT_PIN(50, 2, 25, 0), + MTK_EINT_PIN(51, 2, 26, 0), + MTK_EINT_PIN(52, EINT_INVALID_BASE, 0, 0), + MTK_EINT_PIN(53, EINT_INVALID_BASE, 0, 0), + MTK_EINT_PIN(54, EINT_INVALID_BASE, 0, 0), + MTK_EINT_PIN(55, EINT_INVALID_BASE, 0, 0), + MTK_EINT_PIN(56, EINT_INVALID_BASE, 0, 0), + MTK_EINT_PIN(57, EINT_INVALID_BASE, 0, 0), + MTK_EINT_PIN(58, EINT_INVALID_BASE, 0, 0), + MTK_EINT_PIN(59, EINT_INVALID_BASE, 0, 0), + MTK_EINT_PIN(60, 2, 27, 0), + MTK_EINT_PIN(61, 2, 28, 0), + MTK_EINT_PIN(62, 2, 29, 0), + MTK_EINT_PIN(63, 2, 30, 0), + MTK_EINT_PIN(64, 2, 31, 0), + MTK_EINT_PIN(65, 2, 32, 0), + MTK_EINT_PIN(66, EINT_INVALID_BASE, 0, 0), + MTK_EINT_PIN(67, EINT_INVALID_BASE, 0, 0), + MTK_EINT_PIN(68, EINT_INVALID_BASE, 0, 0), + MTK_EINT_PIN(69, EINT_INVALID_BASE, 0, 0), + MTK_EINT_PIN(70, 2, 33, 0), + MTK_EINT_PIN(71, 2, 34, 0), + MTK_EINT_PIN(72, 2, 35, 0), + MTK_EINT_PIN(73, 2, 36, 0), + MTK_EINT_PIN(74, 2, 37, 0), + MTK_EINT_PIN(75, EINT_INVALID_BASE, 0, 0), + MTK_EINT_PIN(76, EINT_INVALID_BASE, 0, 0), + MTK_EINT_PIN(77, EINT_INVALID_BASE, 0, 0), + MTK_EINT_PIN(78, EINT_INVALID_BASE, 0, 0), + MTK_EINT_PIN(79, 2, 38, 0), + MTK_EINT_PIN(80, 2, 39, 0), + MTK_EINT_PIN(81, 2, 40, 0), + MTK_EINT_PIN(82, 2, 41, 0), + MTK_EINT_PIN(83, 2, 42, 0), + MTK_EINT_PIN(84, 2, 43, 0), + MTK_EINT_PIN(85, 2, 44, 0), + MTK_EINT_PIN(86, 2, 45, 0), + MTK_EINT_PIN(87, 2, 46, 0), + MTK_EINT_PIN(88, 2, 47, 0), + MTK_EINT_PIN(89, 2, 48, 0), + MTK_EINT_PIN(90, 2, 49, 0), + MTK_EINT_PIN(91, 2, 50, 0), + MTK_EINT_PIN(92, 2, 15, 1), + MTK_EINT_PIN(93, 2, 51, 0), + MTK_EINT_PIN(94, 2, 52, 0), + MTK_EINT_PIN(95, 2, 53, 0), + 
MTK_EINT_PIN(96, 2, 54, 0), + MTK_EINT_PIN(97, 2, 55, 0), + MTK_EINT_PIN(98, 2, 56, 0), + MTK_EINT_PIN(99, EINT_INVALID_BASE, 0, 0), + MTK_EINT_PIN(100, EINT_INVALID_BASE, 0, 0), + MTK_EINT_PIN(101, EINT_INVALID_BASE, 0, 0), + MTK_EINT_PIN(102, EINT_INVALID_BASE, 0, 0), + MTK_EINT_PIN(103, 2, 57, 0), + MTK_EINT_PIN(104, EINT_INVALID_BASE, 0, 0), + MTK_EINT_PIN(105, EINT_INVALID_BASE, 0, 0), + MTK_EINT_PIN(106, 1, 10, 0), + MTK_EINT_PIN(107, 1, 11, 0), + MTK_EINT_PIN(108, 1, 12, 0), + MTK_EINT_PIN(109, 1, 13, 0), + MTK_EINT_PIN(110, 1, 0, 1), + MTK_EINT_PIN(111, 1, 1, 1), + MTK_EINT_PIN(112, 1, 2, 1), + MTK_EINT_PIN(113, 1, 3, 1), + MTK_EINT_PIN(114, 1, 14, 0), + MTK_EINT_PIN(115, 1, 15, 0), + MTK_EINT_PIN(116, 1, 16, 0), + MTK_EINT_PIN(117, 1, 17, 0), + MTK_EINT_PIN(118, 1, 18, 0), + MTK_EINT_PIN(119, 1, 19, 0), + MTK_EINT_PIN(120, 1, 20, 0), + MTK_EINT_PIN(121, 1, 21, 0), + MTK_EINT_PIN(122, 1, 22, 0), + MTK_EINT_PIN(123, EINT_INVALID_BASE, 0, 0), + MTK_EINT_PIN(124, EINT_INVALID_BASE, 0, 0), + MTK_EINT_PIN(125, 1, 23, 0), + MTK_EINT_PIN(126, 1, 24, 0), + MTK_EINT_PIN(127, 1, 25, 0), + MTK_EINT_PIN(128, 1, 26, 0), + MTK_EINT_PIN(129, 1, 27, 0), + MTK_EINT_PIN(130, 1, 28, 0), + MTK_EINT_PIN(131, EINT_INVALID_BASE, 0, 0), + MTK_EINT_PIN(132, EINT_INVALID_BASE, 0, 0), + MTK_EINT_PIN(133, EINT_INVALID_BASE, 0, 0), + MTK_EINT_PIN(134, EINT_INVALID_BASE, 0, 0), + MTK_EINT_PIN(135, EINT_INVALID_BASE, 0, 0), + MTK_EINT_PIN(136, EINT_INVALID_BASE, 0, 0), + MTK_EINT_PIN(137, 0, 26, 0), + MTK_EINT_PIN(138, 0, 27, 0), + MTK_EINT_PIN(139, 0, 28, 0), + MTK_EINT_PIN(140, 0, 29, 0), + MTK_EINT_PIN(141, 0, 30, 0), + MTK_EINT_PIN(142, 0, 31, 0), + MTK_EINT_PIN(143, 0, 32, 0), + MTK_EINT_PIN(144, 0, 33, 0), + MTK_EINT_PIN(145, 0, 34, 0), + MTK_EINT_PIN(146, 0, 35, 0), + MTK_EINT_PIN(147, 0, 36, 0), + MTK_EINT_PIN(148, 0, 4, 1), + MTK_EINT_PIN(149, 0, 37, 0), + MTK_EINT_PIN(150, 0, 5, 1), + MTK_EINT_PIN(151, 0, 38, 0), + MTK_EINT_PIN(152, 0, 39, 0), + MTK_EINT_PIN(153, 0, 40, 0), + MTK_EINT_PIN(154, 0, 41, 0), + MTK_EINT_PIN(155, 0, 42, 0), + MTK_EINT_PIN(156, 0, 43, 0), + MTK_EINT_PIN(157, 0, 44, 0), + MTK_EINT_PIN(158, 0, 45, 0), + MTK_EINT_PIN(159, 0, 46, 0), + MTK_EINT_PIN(160, 0, 47, 0), + MTK_EINT_PIN(161, 0, 48, 0), + MTK_EINT_PIN(162, 0, 49, 0), + MTK_EINT_PIN(163, 0, 50, 0), + MTK_EINT_PIN(164, 0, 51, 0), + MTK_EINT_PIN(165, 0, 52, 0), + MTK_EINT_PIN(166, 0, 53, 0), + MTK_EINT_PIN(167, 0, 54, 0), + MTK_EINT_PIN(168, 0, 55, 0), + MTK_EINT_PIN(169, 0, 56, 0), + MTK_EINT_PIN(170, 0, 57, 0), + MTK_EINT_PIN(171, 0, 58, 0), + MTK_EINT_PIN(172, 0, 6, 1), + MTK_EINT_PIN(173, 0, 7, 1), + MTK_EINT_PIN(174, 0, 8, 1), + MTK_EINT_PIN(175, 0, 9, 1), + MTK_EINT_PIN(176, EINT_INVALID_BASE, 0, 0), + MTK_EINT_PIN(177, EINT_INVALID_BASE, 0, 0), + MTK_EINT_PIN(178, 0, 59, 0), + MTK_EINT_PIN(179, 0, 60, 0), + MTK_EINT_PIN(180, 0, 61, 0), + MTK_EINT_PIN(181, 0, 62, 0), + MTK_EINT_PIN(182, 0, 63, 0), + MTK_EINT_PIN(183, 0, 64, 0), + MTK_EINT_PIN(184, 0, 65, 0), + MTK_EINT_PIN(185, 0, 66, 0), + MTK_EINT_PIN(186, 3, 6, 0), + MTK_EINT_PIN(187, 3, 7, 0), + MTK_EINT_PIN(188, EINT_INVALID_BASE, 0, 0), + MTK_EINT_PIN(189, EINT_INVALID_BASE, 0, 0), + MTK_EINT_PIN(190, EINT_INVALID_BASE, 0, 0), + MTK_EINT_PIN(191, EINT_INVALID_BASE, 0, 0), + MTK_EINT_PIN(192, 3, 8, 0), + MTK_EINT_PIN(193, 3, 9, 0), + MTK_EINT_PIN(194, EINT_INVALID_BASE, 0, 0), + MTK_EINT_PIN(195, EINT_INVALID_BASE, 0, 0), + MTK_EINT_PIN(196, 3, 10, 0), + MTK_EINT_PIN(197, 3, 11, 0), + MTK_EINT_PIN(198, EINT_INVALID_BASE, 0, 0), + MTK_EINT_PIN(199, 
EINT_INVALID_BASE, 0, 0), + MTK_EINT_PIN(200, EINT_INVALID_BASE, 0, 0), + MTK_EINT_PIN(201, EINT_INVALID_BASE, 0, 0), + MTK_EINT_PIN(202, EINT_INVALID_BASE, 0, 0), + MTK_EINT_PIN(203, EINT_INVALID_BASE, 0, 0), + MTK_EINT_PIN(204, 3, 12, 0), + MTK_EINT_PIN(205, 3, 13, 0), + MTK_EINT_PIN(206, 3, 14, 0), + MTK_EINT_PIN(207, 3, 0, 1), + MTK_EINT_PIN(208, 3, 1, 1), + MTK_EINT_PIN(209, 3, 2, 1), + MTK_EINT_PIN(210, 3, 15, 0), + MTK_EINT_PIN(211, 3, 3, 1), + MTK_EINT_PIN(212, 3, 4, 1), + MTK_EINT_PIN(213, 3, 5, 1), + MTK_EINT_PIN(214, EINT_INVALID_BASE, 0, 0), + MTK_EINT_PIN(215, EINT_INVALID_BASE, 0, 0), + MTK_EINT_PIN(216, 3, 16, 0), + MTK_EINT_PIN(217, 3, 17, 0), + MTK_EINT_PIN(218, 3, 18, 0), + MTK_EINT_PIN(219, 3, 19, 0), + MTK_EINT_PIN(220, 3, 20, 0), + MTK_EINT_PIN(221, 3, 21, 0), + MTK_EINT_PIN(222, 3, 22, 0), + MTK_EINT_PIN(223, 3, 23, 0), + MTK_EINT_PIN(224, 3, 24, 0), + MTK_EINT_PIN(225, 3, 25, 0), + MTK_EINT_PIN(226, 3, 26, 0), + MTK_EINT_PIN(227, 3, 27, 0), + MTK_EINT_PIN(228, 3, 28, 0), + MTK_EINT_PIN(229, 3, 29, 0), + MTK_EINT_PIN(230, EINT_INVALID_BASE, 0, 0), + MTK_EINT_PIN(231, EINT_INVALID_BASE, 0, 0), + MTK_EINT_PIN(232, EINT_INVALID_BASE, 0, 0), + MTK_EINT_PIN(233, EINT_INVALID_BASE, 0, 0), + MTK_EINT_PIN(234, EINT_INVALID_BASE, 0, 0), + MTK_EINT_PIN(235, EINT_INVALID_BASE, 0, 0), + MTK_EINT_PIN(236, EINT_INVALID_BASE, 0, 0), + MTK_EINT_PIN(237, EINT_INVALID_BASE, 0, 0), + MTK_EINT_PIN(238, EINT_INVALID_BASE, 0, 0), + MTK_EINT_PIN(239, EINT_INVALID_BASE, 0, 0), + MTK_EINT_PIN(240, EINT_INVALID_BASE, 0, 0), + MTK_EINT_PIN(241, 3, 30, 0), + MTK_EINT_PIN(242, 3, 31, 0), + MTK_EINT_PIN(243, 3, 32, 0), + MTK_EINT_PIN(244, EINT_INVALID_BASE, 0, 0), + MTK_EINT_PIN(245, 3, 45, 0), + MTK_EINT_PIN(246, EINT_INVALID_BASE, 0, 0), + MTK_EINT_PIN(247, EINT_INVALID_BASE, 0, 0), + MTK_EINT_PIN(248, EINT_INVALID_BASE, 0, 0), + MTK_EINT_PIN(249, EINT_INVALID_BASE, 0, 0), + MTK_EINT_PIN(250, EINT_INVALID_BASE, 0, 0), + MTK_EINT_PIN(251, 0, 67, 0), + MTK_EINT_PIN(252, 0, 68, 0), + MTK_EINT_PIN(253, 0, 69, 0), + MTK_EINT_PIN(254, 0, 70, 0), + MTK_EINT_PIN(255, 0, 71, 0), + MTK_EINT_PIN(256, 0, 72, 0), + MTK_EINT_PIN(257, 0, 73, 0), + MTK_EINT_PIN(258, 0, 74, 0), + MTK_EINT_PIN(259, 3, 33, 0), + MTK_EINT_PIN(260, 3, 34, 0), + MTK_EINT_PIN(261, 3, 35, 0), + MTK_EINT_PIN(262, 3, 36, 0), + MTK_EINT_PIN(263, 3, 37, 0), + MTK_EINT_PIN(264, 3, 38, 0), + MTK_EINT_PIN(265, 3, 39, 0), + MTK_EINT_PIN(266, 3, 40, 0), + MTK_EINT_PIN(267, 3, 41, 0), + MTK_EINT_PIN(268, 3, 42, 0), + MTK_EINT_PIN(269, 3, 43, 0), + MTK_EINT_PIN(270, 3, 44, 0), + MTK_EINT_PIN(271, 4, 0, 0), + MTK_EINT_PIN(272, 4, 1, 0), + MTK_EINT_PIN(273, 4, 2, 0), + MTK_EINT_PIN(274, 4, 3, 0), + MTK_EINT_PIN(275, 4, 4, 0), + MTK_EINT_PIN(276, 4, 5, 0), + MTK_EINT_PIN(277, 4, 6, 0), + MTK_EINT_PIN(278, 4, 7, 0), + MTK_EINT_PIN(279, 4, 8, 0), + MTK_EINT_PIN(280, 4, 9, 0), + MTK_EINT_PIN(281, 4, 10, 0), + MTK_EINT_PIN(282, 4, 11, 0), + MTK_EINT_PIN(283, 4, 12, 0), + MTK_EINT_PIN(284, 4, 13, 0), + MTK_EINT_PIN(285, 4, 14, 0), + MTK_EINT_PIN(286, 4, 15, 0), + MTK_EINT_PIN(287, 4, 16, 0), + MTK_EINT_PIN(288, 4, 17, 0), + MTK_EINT_PIN(289, 4, 18, 0), + MTK_EINT_PIN(290, 4, 19, 0), + MTK_EINT_PIN(291, 4, 20, 0), + MTK_EINT_PIN(292, 4, 21, 0), +}; +#endif /* __PINCTRL_MTK_MT8196_H */ diff --git a/drivers/pinctrl/mediatek/pinctrl-paris.c b/drivers/pinctrl/mediatek/pinctrl-paris.c index 87e958d827bf..89ef4e530fcc 100644 --- a/drivers/pinctrl/mediatek/pinctrl-paris.c +++ b/drivers/pinctrl/mediatek/pinctrl-paris.c @@ -840,9 +840,6 @@ static int 
mtk_gpio_get_direction(struct gpio_chip *chip, unsigned int gpio) const struct mtk_pin_desc *desc; int value, err; - if (gpio >= hw->soc->npins) - return -EINVAL; - /* * "Virtual" GPIOs are always and only used for interrupts * Since they are only used for interrupts, they are always inputs @@ -868,9 +865,6 @@ static int mtk_gpio_get(struct gpio_chip *chip, unsigned int gpio) const struct mtk_pin_desc *desc; int value, err; - if (gpio >= hw->soc->npins) - return -EINVAL; - desc = (const struct mtk_pin_desc *)&hw->soc->pins[gpio]; err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_DI, &value); @@ -880,38 +874,29 @@ static int mtk_gpio_get(struct gpio_chip *chip, unsigned int gpio) return !!value; } -static void mtk_gpio_set(struct gpio_chip *chip, unsigned int gpio, int value) +static int mtk_gpio_set(struct gpio_chip *chip, unsigned int gpio, int value) { struct mtk_pinctrl *hw = gpiochip_get_data(chip); const struct mtk_pin_desc *desc; - if (gpio >= hw->soc->npins) - return; - desc = (const struct mtk_pin_desc *)&hw->soc->pins[gpio]; - mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_DO, !!value); + return mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_DO, !!value); } static int mtk_gpio_direction_input(struct gpio_chip *chip, unsigned int gpio) { - struct mtk_pinctrl *hw = gpiochip_get_data(chip); - - if (gpio >= hw->soc->npins) - return -EINVAL; - return pinctrl_gpio_direction_input(chip, gpio); } static int mtk_gpio_direction_output(struct gpio_chip *chip, unsigned int gpio, int value) { - struct mtk_pinctrl *hw = gpiochip_get_data(chip); - - if (gpio >= hw->soc->npins) - return -EINVAL; + int ret; - mtk_gpio_set(chip, gpio, value); + ret = mtk_gpio_set(chip, gpio, value); + if (ret) + return ret; return pinctrl_gpio_direction_output(chip, gpio); } @@ -964,7 +949,7 @@ static int mtk_build_gpiochip(struct mtk_pinctrl *hw) chip->direction_input = mtk_gpio_direction_input; chip->direction_output = mtk_gpio_direction_output; chip->get = mtk_gpio_get; - chip->set = mtk_gpio_set; + chip->set_rv = mtk_gpio_set; chip->to_irq = mtk_gpio_to_irq; chip->set_config = mtk_gpio_set_config; chip->base = -1; diff --git a/drivers/pinctrl/meson/Kconfig b/drivers/pinctrl/meson/Kconfig index 90639bc171f6..0315e224bce6 100644 --- a/drivers/pinctrl/meson/Kconfig +++ b/drivers/pinctrl/meson/Kconfig @@ -3,7 +3,7 @@ menuconfig PINCTRL_MESON tristate "Amlogic SoC pinctrl drivers" depends on ARCH_MESON || COMPILE_TEST depends on OF - default y + default ARCH_MESON select PINMUX select PINCONF select GENERIC_PINCONF @@ -17,25 +17,25 @@ config PINCTRL_MESON8 bool "Meson 8 SoC pinctrl driver" depends on ARM select PINCTRL_MESON8_PMX - default y + default ARCH_MESON config PINCTRL_MESON8B bool "Meson 8b SoC pinctrl driver" depends on ARM select PINCTRL_MESON8_PMX - default y + default ARCH_MESON config PINCTRL_MESON_GXBB tristate "Meson gxbb SoC pinctrl driver" depends on ARM64 select PINCTRL_MESON8_PMX - default y + default ARCH_MESON config PINCTRL_MESON_GXL tristate "Meson gxl SoC pinctrl driver" depends on ARM64 select PINCTRL_MESON8_PMX - default y + default ARCH_MESON config PINCTRL_MESON8_PMX tristate @@ -44,7 +44,7 @@ config PINCTRL_MESON_AXG tristate "Meson axg Soc pinctrl driver" depends on ARM64 select PINCTRL_MESON_AXG_PMX - default y + default ARCH_MESON config PINCTRL_MESON_AXG_PMX tristate @@ -53,24 +53,24 @@ config PINCTRL_MESON_G12A tristate "Meson g12a Soc pinctrl driver" depends on ARM64 select PINCTRL_MESON_AXG_PMX - default y + default ARCH_MESON config PINCTRL_MESON_A1 tristate "Meson a1 Soc pinctrl driver" 
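The pinctrl-paris hunks above drop the per-callback pin range checks (gpiolib already validates offsets before calling into the driver) and convert mtk_gpio_set() from void to the int-returning .set_rv hook so hardware access failures propagate to the caller. A minimal sketch of that conversion pattern follows; it is illustrative only, with made-up "my_*" names and a hypothetical regmap-backed output register, not code from this series:

#include <linux/bits.h>
#include <linux/device.h>
#include <linux/gpio/driver.h>
#include <linux/pinctrl/consumer.h>
#include <linux/regmap.h>

/* Hypothetical driver state and register layout, for illustration only. */
#define MY_DOUT_REG	0x04

struct my_priv {
	struct device *dev;
	struct gpio_chip chip;
	struct regmap *regmap;
};

/* New-style setter: return the hardware access result instead of void. */
static int my_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
{
	struct my_priv *priv = gpiochip_get_data(chip);

	return regmap_update_bits(priv->regmap, MY_DOUT_REG,
				  BIT(offset), value ? BIT(offset) : 0);
}

/* direction_output can now bail out early if the value cannot be latched. */
static int my_gpio_direction_output(struct gpio_chip *chip,
				    unsigned int offset, int value)
{
	int ret;

	ret = my_gpio_set(chip, offset, value);
	if (ret)
		return ret;

	return pinctrl_gpio_direction_output(chip, offset);
}

static int my_gpiochip_register(struct my_priv *priv)
{
	priv->chip.label = "my-gpio";
	priv->chip.ngpio = 32;
	priv->chip.base = -1;
	priv->chip.direction_output = my_gpio_direction_output;
	priv->chip.set_rv = my_gpio_set;	/* was: priv->chip.set = ... */

	return devm_gpiochip_add_data(priv->dev, &priv->chip, priv);
}
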
depends on ARM64 select PINCTRL_MESON_AXG_PMX - default y + default ARCH_MESON config PINCTRL_MESON_S4 tristate "Meson s4 Soc pinctrl driver" depends on ARM64 select PINCTRL_MESON_AXG_PMX - default y + default ARCH_MESON config PINCTRL_AMLOGIC_A4 bool "AMLOGIC pincontrol" depends on ARM64 - default y + default ARCH_MESON help This is the driver for the pin controller found on Amlogic SoCs. @@ -82,12 +82,12 @@ config PINCTRL_AMLOGIC_C3 tristate "Amlogic C3 SoC pinctrl driver" depends on ARM64 select PINCTRL_MESON_AXG_PMX - default y + default ARCH_MESON config PINCTRL_AMLOGIC_T7 tristate "Amlogic T7 SoC pinctrl driver" depends on ARM64 select PINCTRL_MESON_AXG_PMX - default y + default ARCH_MESON endif diff --git a/drivers/pinctrl/meson/pinctrl-amlogic-a4.c b/drivers/pinctrl/meson/pinctrl-amlogic-a4.c index ee7bbc72f9b3..385cc619df13 100644 --- a/drivers/pinctrl/meson/pinctrl-amlogic-a4.c +++ b/drivers/pinctrl/meson/pinctrl-amlogic-a4.c @@ -596,20 +596,6 @@ static int aml_get_group_pins(struct pinctrl_dev *pctldev, return 0; } -static inline const struct aml_pctl_group * - aml_pctl_find_group_by_name(const struct aml_pinctrl *info, - const char *name) -{ - int i; - - for (i = 0; i < info->ngroups; i++) { - if (!strcmp(info->groups[i].name, name)) - return &info->groups[i]; - } - - return NULL; -} - static void aml_pin_dbg_show(struct pinctrl_dev *pcdev, struct seq_file *s, unsigned int offset) { @@ -806,15 +792,15 @@ static int aml_gpio_direction_output(struct gpio_chip *chip, unsigned int gpio, value ? BIT(bit) : 0); } -static void aml_gpio_set(struct gpio_chip *chip, unsigned int gpio, int value) +static int aml_gpio_set(struct gpio_chip *chip, unsigned int gpio, int value) { struct aml_gpio_bank *bank = gpiochip_get_data(chip); unsigned int bit, reg; aml_gpio_calc_reg_and_bit(bank, AML_REG_OUT, gpio, ®, &bit); - regmap_update_bits(bank->reg_gpio, reg, BIT(bit), - value ? BIT(bit) : 0); + return regmap_update_bits(bank->reg_gpio, reg, BIT(bit), + value ? 
BIT(bit) : 0); } static int aml_gpio_get(struct gpio_chip *chip, unsigned int gpio) @@ -832,7 +818,7 @@ static const struct gpio_chip aml_gpio_template = { .request = gpiochip_generic_request, .free = gpiochip_generic_free, .set_config = gpiochip_generic_config, - .set = aml_gpio_set, + .set_rv = aml_gpio_set, .get = aml_gpio_get, .direction_input = aml_gpio_direction_input, .direction_output = aml_gpio_direction_output, diff --git a/drivers/pinctrl/meson/pinctrl-meson.c b/drivers/pinctrl/meson/pinctrl-meson.c index e5a32a0532ee..f5be61f2ede4 100644 --- a/drivers/pinctrl/meson/pinctrl-meson.c +++ b/drivers/pinctrl/meson/pinctrl-meson.c @@ -580,9 +580,9 @@ static int meson_gpio_direction_output(struct gpio_chip *chip, unsigned gpio, gpio, value); } -static void meson_gpio_set(struct gpio_chip *chip, unsigned gpio, int value) +static int meson_gpio_set(struct gpio_chip *chip, unsigned int gpio, int value) { - meson_pinconf_set_drive(gpiochip_get_data(chip), gpio, value); + return meson_pinconf_set_drive(gpiochip_get_data(chip), gpio, value); } static int meson_gpio_get(struct gpio_chip *chip, unsigned gpio) @@ -616,7 +616,7 @@ static int meson_gpiolib_register(struct meson_pinctrl *pc) pc->chip.direction_input = meson_gpio_direction_input; pc->chip.direction_output = meson_gpio_direction_output; pc->chip.get = meson_gpio_get; - pc->chip.set = meson_gpio_set; + pc->chip.set_rv = meson_gpio_set; pc->chip.base = -1; pc->chip.ngpio = pc->data->num_pins; pc->chip.can_sleep = false; diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c index 335744ac8310..a6b106984e12 100644 --- a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c +++ b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c @@ -358,9 +358,7 @@ static int armada_37xx_pmx_set_by_name(struct pinctrl_dev *pctldev, val = grp->val[func]; - regmap_update_bits(info->regmap, reg, mask, val); - - return 0; + return regmap_update_bits(info->regmap, reg, mask, val); } static int armada_37xx_pmx_set(struct pinctrl_dev *pctldev, @@ -402,10 +400,13 @@ static int armada_37xx_gpio_get_direction(struct gpio_chip *chip, struct armada_37xx_pinctrl *info = gpiochip_get_data(chip); unsigned int reg = OUTPUT_EN; unsigned int val, mask; + int ret; armada_37xx_update_reg(®, &offset); mask = BIT(offset); - regmap_read(info->regmap, reg, &val); + ret = regmap_read(info->regmap, reg, &val); + if (ret) + return ret; if (val & mask) return GPIO_LINE_DIRECTION_OUT; @@ -417,22 +418,22 @@ static int armada_37xx_gpio_direction_output(struct gpio_chip *chip, unsigned int offset, int value) { struct armada_37xx_pinctrl *info = gpiochip_get_data(chip); - unsigned int reg = OUTPUT_EN; + unsigned int en_offset = offset; + unsigned int reg = OUTPUT_VAL; unsigned int mask, val, ret; armada_37xx_update_reg(®, &offset); mask = BIT(offset); + val = value ? mask : 0; - ret = regmap_update_bits(info->regmap, reg, mask, mask); - + ret = regmap_update_bits(info->regmap, reg, mask, val); if (ret) return ret; - reg = OUTPUT_VAL; - val = value ? 
mask : 0; - regmap_update_bits(info->regmap, reg, mask, val); + reg = OUTPUT_EN; + armada_37xx_update_reg(®, &en_offset); - return 0; + return regmap_update_bits(info->regmap, reg, mask, mask); } static int armada_37xx_gpio_get(struct gpio_chip *chip, unsigned int offset) @@ -440,17 +441,20 @@ static int armada_37xx_gpio_get(struct gpio_chip *chip, unsigned int offset) struct armada_37xx_pinctrl *info = gpiochip_get_data(chip); unsigned int reg = INPUT_VAL; unsigned int val, mask; + int ret; armada_37xx_update_reg(®, &offset); mask = BIT(offset); - regmap_read(info->regmap, reg, &val); + ret = regmap_read(info->regmap, reg, &val); + if (ret) + return ret; return (val & mask) != 0; } -static void armada_37xx_gpio_set(struct gpio_chip *chip, unsigned int offset, - int value) +static int armada_37xx_gpio_set(struct gpio_chip *chip, unsigned int offset, + int value) { struct armada_37xx_pinctrl *info = gpiochip_get_data(chip); unsigned int reg = OUTPUT_VAL; @@ -460,7 +464,7 @@ static void armada_37xx_gpio_set(struct gpio_chip *chip, unsigned int offset, mask = BIT(offset); val = value ? mask : 0; - regmap_update_bits(info->regmap, reg, mask, val); + return regmap_update_bits(info->regmap, reg, mask, val); } static int armada_37xx_pmx_gpio_set_direction(struct pinctrl_dev *pctldev, @@ -469,16 +473,17 @@ static int armada_37xx_pmx_gpio_set_direction(struct pinctrl_dev *pctldev, { struct armada_37xx_pinctrl *info = pinctrl_dev_get_drvdata(pctldev); struct gpio_chip *chip = range->gc; + int ret; dev_dbg(info->dev, "gpio_direction for pin %u as %s-%d to %s\n", offset, range->name, offset, input ? "input" : "output"); if (input) - armada_37xx_gpio_direction_input(chip, offset); + ret = armada_37xx_gpio_direction_input(chip, offset); else - armada_37xx_gpio_direction_output(chip, offset, 0); + ret = armada_37xx_gpio_direction_output(chip, offset, 0); - return 0; + return ret; } static int armada_37xx_gpio_request_enable(struct pinctrl_dev *pctldev, @@ -513,7 +518,7 @@ static const struct pinmux_ops armada_37xx_pmx_ops = { static const struct gpio_chip armada_37xx_gpiolib_chip = { .request = gpiochip_generic_request, .free = gpiochip_generic_free, - .set = armada_37xx_gpio_set, + .set_rv = armada_37xx_gpio_set, .get = armada_37xx_gpio_get, .get_direction = armada_37xx_gpio_get_direction, .direction_input = armada_37xx_gpio_direction_input, diff --git a/drivers/pinctrl/nomadik/Kconfig b/drivers/pinctrl/nomadik/Kconfig index aafecf348670..1b4fe2a4c302 100644 --- a/drivers/pinctrl/nomadik/Kconfig +++ b/drivers/pinctrl/nomadik/Kconfig @@ -1,5 +1,5 @@ # SPDX-License-Identifier: GPL-2.0-only -if ARCH_U8500 +if (ARCH_U8500 || COMPILE_TEST) config PINCTRL_ABX500 bool "ST-Ericsson ABx500 family Mixed Signal Circuit gpio functions" @@ -10,11 +10,11 @@ config PINCTRL_ABX500 config PINCTRL_AB8500 bool "AB8500 pin controller driver" - depends on PINCTRL_ABX500 && ARCH_U8500 + depends on PINCTRL_ABX500 && (ARCH_U8500 || COMPILE_TEST) config PINCTRL_AB8505 bool "AB8505 pin controller driver" - depends on PINCTRL_ABX500 && ARCH_U8500 + depends on PINCTRL_ABX500 && (ARCH_U8500 || COMPILE_TEST) endif diff --git a/drivers/pinctrl/nomadik/pinctrl-abx500.c b/drivers/pinctrl/nomadik/pinctrl-abx500.c index 8cd4ba5cf0bd..2f55f83127cf 100644 --- a/drivers/pinctrl/nomadik/pinctrl-abx500.c +++ b/drivers/pinctrl/nomadik/pinctrl-abx500.c @@ -167,14 +167,10 @@ out: return bit; } -static void abx500_gpio_set(struct gpio_chip *chip, unsigned offset, int val) +static int abx500_gpio_set(struct gpio_chip *chip, unsigned int offset, + 
int val) { - struct abx500_pinctrl *pct = gpiochip_get_data(chip); - int ret; - - ret = abx500_gpio_set_bits(chip, AB8500_GPIO_OUT1_REG, offset, val); - if (ret < 0) - dev_err(pct->dev, "%s write failed (%d)\n", __func__, ret); + return abx500_gpio_set_bits(chip, AB8500_GPIO_OUT1_REG, offset, val); } static int abx500_gpio_direction_output(struct gpio_chip *chip, @@ -540,7 +536,7 @@ static const struct gpio_chip abx500gpio_chip = { .direction_input = abx500_gpio_direction_input, .get = abx500_gpio_get, .direction_output = abx500_gpio_direction_output, - .set = abx500_gpio_set, + .set_rv = abx500_gpio_set, .to_irq = abx500_gpio_to_irq, .dbg_show = abx500_gpio_dbg_show, }; diff --git a/drivers/pinctrl/pinconf.h b/drivers/pinctrl/pinconf.h index a171195b3615..e1ae71610526 100644 --- a/drivers/pinctrl/pinconf.h +++ b/drivers/pinctrl/pinconf.h @@ -142,4 +142,21 @@ int pinconf_generic_parse_dt_config(struct device_node *np, int pinconf_generic_parse_dt_pinmux(struct device_node *np, struct device *dev, unsigned int **pid, unsigned int **pmux, unsigned int *npins); +#else +static inline int +pinconf_generic_parse_dt_config(struct device_node *np, + struct pinctrl_dev *pctldev, + unsigned long **configs, + unsigned int *nconfigs) +{ + return -ENOTSUPP; +} + +static inline int +pinconf_generic_parse_dt_pinmux(struct device_node *np, struct device *dev, + unsigned int **pid, unsigned int **pmux, + unsigned int *npins) +{ + return -ENOTSUPP; +} #endif diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c index 5da8c48695e6..5cf3db6d78b7 100644 --- a/drivers/pinctrl/pinctrl-amd.c +++ b/drivers/pinctrl/pinctrl-amd.c @@ -105,7 +105,8 @@ static int amd_gpio_get_value(struct gpio_chip *gc, unsigned offset) return !!(pin_reg & BIT(PIN_STS_OFF)); } -static void amd_gpio_set_value(struct gpio_chip *gc, unsigned offset, int value) +static int amd_gpio_set_value(struct gpio_chip *gc, unsigned int offset, + int value) { u32 pin_reg; unsigned long flags; @@ -119,6 +120,8 @@ static void amd_gpio_set_value(struct gpio_chip *gc, unsigned offset, int value) pin_reg &= ~BIT(OUTPUT_VALUE_OFF); writel(pin_reg, gpio_dev->base + offset * 4); raw_spin_unlock_irqrestore(&gpio_dev->lock, flags); + + return 0; } static int amd_gpio_set_debounce(struct amd_gpio *gpio_dev, unsigned int offset, @@ -1173,7 +1176,7 @@ static int amd_gpio_probe(struct platform_device *pdev) gpio_dev->gc.direction_input = amd_gpio_direction_input; gpio_dev->gc.direction_output = amd_gpio_direction_output; gpio_dev->gc.get = amd_gpio_get_value; - gpio_dev->gc.set = amd_gpio_set_value; + gpio_dev->gc.set_rv = amd_gpio_set_value; gpio_dev->gc.set_config = amd_gpio_set_config; gpio_dev->gc.dbg_show = amd_gpio_dbg_show; diff --git a/drivers/pinctrl/pinctrl-apple-gpio.c b/drivers/pinctrl/pinctrl-apple-gpio.c index f861e63f4115..0f551d67d482 100644 --- a/drivers/pinctrl/pinctrl-apple-gpio.c +++ b/drivers/pinctrl/pinctrl-apple-gpio.c @@ -66,7 +66,7 @@ struct apple_gpio_pinctrl { #define REG_GPIOx_DRIVE_STRENGTH1 GENMASK(23, 22) #define REG_IRQ(g, x) (0x800 + 0x40 * (g) + 4 * ((x) >> 5)) -struct regmap_config regmap_config = { +static const struct regmap_config regmap_config = { .reg_bits = 32, .val_bits = 32, .reg_stride = 4, @@ -79,13 +79,13 @@ struct regmap_config regmap_config = { /* No locking needed to mask/unmask IRQs as the interrupt mode is per pin-register. 
*/ static void apple_gpio_set_reg(struct apple_gpio_pinctrl *pctl, - unsigned int pin, u32 mask, u32 value) + unsigned int pin, u32 mask, u32 value) { regmap_update_bits(pctl->map, REG_GPIO(pin), mask, value); } static u32 apple_gpio_get_reg(struct apple_gpio_pinctrl *pctl, - unsigned int pin) + unsigned int pin) { int ret; u32 val; @@ -100,9 +100,9 @@ static u32 apple_gpio_get_reg(struct apple_gpio_pinctrl *pctl, /* Pin controller functions */ static int apple_gpio_dt_node_to_map(struct pinctrl_dev *pctldev, - struct device_node *node, - struct pinctrl_map **map, - unsigned *num_maps) + struct device_node *node, + struct pinctrl_map **map, + unsigned *num_maps) { unsigned reserved_maps; struct apple_gpio_pinctrl *pctl; @@ -147,8 +147,8 @@ static int apple_gpio_dt_node_to_map(struct pinctrl_dev *pctldev, group_name = pinctrl_generic_get_group_name(pctldev, pin); function_name = pinmux_generic_get_function_name(pctl->pctldev, func); ret = pinctrl_utils_add_map_mux(pctl->pctldev, map, - &reserved_maps, num_maps, - group_name, function_name); + &reserved_maps, num_maps, + group_name, function_name); if (ret) goto free_map; } @@ -171,7 +171,7 @@ static const struct pinctrl_ops apple_gpio_pinctrl_ops = { /* Pin multiplexer functions */ static int apple_gpio_pinmux_set(struct pinctrl_dev *pctldev, unsigned func, - unsigned group) + unsigned group) { struct apple_gpio_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev); @@ -237,7 +237,7 @@ static int apple_gpio_direction_input(struct gpio_chip *chip, unsigned int offse } static int apple_gpio_direction_output(struct gpio_chip *chip, - unsigned int offset, int value) + unsigned int offset, int value) { struct apple_gpio_pinctrl *pctl = gpiochip_get_data(chip); @@ -282,7 +282,7 @@ static void apple_gpio_irq_mask(struct irq_data *data) struct apple_gpio_pinctrl *pctl = gpiochip_get_data(gc); apple_gpio_set_reg(pctl, data->hwirq, REG_GPIOx_MODE, - FIELD_PREP(REG_GPIOx_MODE, REG_GPIOx_IN_IRQ_OFF)); + FIELD_PREP(REG_GPIOx_MODE, REG_GPIOx_IN_IRQ_OFF)); gpiochip_disable_irq(gc, data->hwirq); } @@ -294,7 +294,7 @@ static void apple_gpio_irq_unmask(struct irq_data *data) gpiochip_enable_irq(gc, data->hwirq); apple_gpio_set_reg(pctl, data->hwirq, REG_GPIOx_MODE, - FIELD_PREP(REG_GPIOx_MODE, irqtype)); + FIELD_PREP(REG_GPIOx_MODE, irqtype)); } static unsigned int apple_gpio_irq_startup(struct irq_data *data) @@ -303,7 +303,7 @@ static unsigned int apple_gpio_irq_startup(struct irq_data *data) struct apple_gpio_pinctrl *pctl = gpiochip_get_data(chip); apple_gpio_set_reg(pctl, data->hwirq, REG_GPIOx_GRP, - FIELD_PREP(REG_GPIOx_GRP, 0)); + FIELD_PREP(REG_GPIOx_GRP, 0)); apple_gpio_direction_input(chip, data->hwirq); apple_gpio_irq_unmask(data); @@ -320,7 +320,7 @@ static int apple_gpio_irq_set_type(struct irq_data *data, unsigned int type) return -EINVAL; apple_gpio_set_reg(pctl, data->hwirq, REG_GPIOx_MODE, - FIELD_PREP(REG_GPIOx_MODE, irqtype)); + FIELD_PREP(REG_GPIOx_MODE, irqtype)); if (type & IRQ_TYPE_LEVEL_MASK) irq_set_handler_locked(data, handle_level_irq); @@ -429,7 +429,7 @@ static int apple_gpio_pinctrl_probe(struct platform_device *pdev) unsigned int npins; const char **pin_names; unsigned int *pin_nums; - static const char* pinmux_functions[] = { + static const char *pinmux_functions[] = { "gpio", "periph1", "periph2", "periph3" }; unsigned int i, nirqs = 0; diff --git a/drivers/pinctrl/pinctrl-at91-pio4.c b/drivers/pinctrl/pinctrl-at91-pio4.c index e57ac4ea91dd..ca8a54a43ff5 100644 --- a/drivers/pinctrl/pinctrl-at91-pio4.c +++ 
b/drivers/pinctrl/pinctrl-at91-pio4.c @@ -390,7 +390,7 @@ static int atmel_gpio_direction_output(struct gpio_chip *chip, return 0; } -static void atmel_gpio_set(struct gpio_chip *chip, unsigned int offset, int val) +static int atmel_gpio_set(struct gpio_chip *chip, unsigned int offset, int val) { struct atmel_pioctrl *atmel_pioctrl = gpiochip_get_data(chip); struct atmel_pin *pin = atmel_pioctrl->pins[offset]; @@ -398,10 +398,12 @@ static void atmel_gpio_set(struct gpio_chip *chip, unsigned int offset, int val) atmel_gpio_write(atmel_pioctrl, pin->bank, val ? ATMEL_PIO_SODR : ATMEL_PIO_CODR, BIT(pin->line)); + + return 0; } -static void atmel_gpio_set_multiple(struct gpio_chip *chip, unsigned long *mask, - unsigned long *bits) +static int atmel_gpio_set_multiple(struct gpio_chip *chip, unsigned long *mask, + unsigned long *bits) { struct atmel_pioctrl *atmel_pioctrl = gpiochip_get_data(chip); unsigned int bank; @@ -431,6 +433,8 @@ static void atmel_gpio_set_multiple(struct gpio_chip *chip, unsigned long *mask, bits[word] >>= ATMEL_PIO_NPINS_PER_BANK; #endif } + + return 0; } static struct gpio_chip atmel_gpio_chip = { @@ -438,8 +442,8 @@ static struct gpio_chip atmel_gpio_chip = { .get = atmel_gpio_get, .get_multiple = atmel_gpio_get_multiple, .direction_output = atmel_gpio_direction_output, - .set = atmel_gpio_set, - .set_multiple = atmel_gpio_set_multiple, + .set_rv = atmel_gpio_set, + .set_multiple_rv = atmel_gpio_set_multiple, .to_irq = atmel_gpio_to_irq, .base = 0, }; @@ -609,8 +613,10 @@ static int atmel_pctl_dt_subnode_to_map(struct pinctrl_dev *pctldev, if (ret) goto exit; - pinctrl_utils_add_map_mux(pctldev, map, reserved_maps, num_maps, + ret = pinctrl_utils_add_map_mux(pctldev, map, reserved_maps, num_maps, group, func); + if (ret) + goto exit; if (num_configs) { ret = pinctrl_utils_add_map_configs(pctldev, map, diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c index 93ab277d9943..6c2727bd55bc 100644 --- a/drivers/pinctrl/pinctrl-at91.c +++ b/drivers/pinctrl/pinctrl-at91.c @@ -1449,18 +1449,19 @@ static int at91_gpio_get(struct gpio_chip *chip, unsigned offset) return (pdsr & mask) != 0; } -static void at91_gpio_set(struct gpio_chip *chip, unsigned offset, - int val) +static int at91_gpio_set(struct gpio_chip *chip, unsigned int offset, int val) { struct at91_gpio_chip *at91_gpio = gpiochip_get_data(chip); void __iomem *pio = at91_gpio->regbase; unsigned mask = 1 << offset; writel_relaxed(mask, pio + (val ? 
PIO_SODR : PIO_CODR)); + + return 0; } -static void at91_gpio_set_multiple(struct gpio_chip *chip, - unsigned long *mask, unsigned long *bits) +static int at91_gpio_set_multiple(struct gpio_chip *chip, + unsigned long *mask, unsigned long *bits) { struct at91_gpio_chip *at91_gpio = gpiochip_get_data(chip); void __iomem *pio = at91_gpio->regbase; @@ -1472,6 +1473,8 @@ static void at91_gpio_set_multiple(struct gpio_chip *chip, writel_relaxed(set_mask, pio + PIO_SODR); writel_relaxed(clear_mask, pio + PIO_CODR); + + return 0; } static int at91_gpio_direction_output(struct gpio_chip *chip, unsigned offset, @@ -1798,8 +1801,8 @@ static const struct gpio_chip at91_gpio_template = { .direction_input = at91_gpio_direction_input, .get = at91_gpio_get, .direction_output = at91_gpio_direction_output, - .set = at91_gpio_set, - .set_multiple = at91_gpio_set_multiple, + .set_rv = at91_gpio_set, + .set_multiple_rv = at91_gpio_set_multiple, .dbg_show = at91_gpio_dbg_show, .can_sleep = false, .ngpio = MAX_NB_GPIO_PER_BANK, @@ -1819,12 +1822,16 @@ static int at91_gpio_probe(struct platform_device *pdev) struct at91_gpio_chip *at91_chip = NULL; struct gpio_chip *chip; struct pinctrl_gpio_range *range; + int alias_idx; int ret = 0; int irq, i; - int alias_idx = of_alias_get_id(np, "gpio"); uint32_t ngpio; char **names; + alias_idx = of_alias_get_id(np, "gpio"); + if (alias_idx < 0) + return alias_idx; + BUG_ON(alias_idx >= ARRAY_SIZE(gpio_chips)); if (gpio_chips[alias_idx]) return dev_err_probe(dev, -EBUSY, "%d slot is occupied.\n", alias_idx); diff --git a/drivers/pinctrl/pinctrl-axp209.c b/drivers/pinctrl/pinctrl-axp209.c index 2b4805e74eed..fff408b60c4a 100644 --- a/drivers/pinctrl/pinctrl-axp209.c +++ b/drivers/pinctrl/pinctrl-axp209.c @@ -192,34 +192,30 @@ static int axp20x_gpio_get_direction(struct gpio_chip *chip, static int axp20x_gpio_output(struct gpio_chip *chip, unsigned int offset, int value) { - chip->set(chip, offset, value); - - return 0; + return chip->set_rv(chip, offset, value); } -static void axp20x_gpio_set(struct gpio_chip *chip, unsigned int offset, - int value) +static int axp20x_gpio_set(struct gpio_chip *chip, unsigned int offset, + int value) { struct axp20x_pctl *pctl = gpiochip_get_data(chip); int reg; /* AXP209 has GPIO3 status sharing the settings register */ - if (offset == 3) { - regmap_update_bits(pctl->regmap, AXP20X_GPIO3_CTRL, - AXP20X_GPIO3_FUNCTIONS, - value ? AXP20X_GPIO3_FUNCTION_OUT_HIGH : - AXP20X_GPIO3_FUNCTION_OUT_LOW); - return; - } + if (offset == 3) + return regmap_update_bits(pctl->regmap, AXP20X_GPIO3_CTRL, + AXP20X_GPIO3_FUNCTIONS, + value ? + AXP20X_GPIO3_FUNCTION_OUT_HIGH : + AXP20X_GPIO3_FUNCTION_OUT_LOW); reg = axp20x_gpio_get_reg(offset); if (reg < 0) - return; + return reg; - regmap_update_bits(pctl->regmap, reg, - AXP20X_GPIO_FUNCTIONS, - value ? AXP20X_GPIO_FUNCTION_OUT_HIGH : - AXP20X_GPIO_FUNCTION_OUT_LOW); + return regmap_update_bits(pctl->regmap, reg, AXP20X_GPIO_FUNCTIONS, + value ? AXP20X_GPIO_FUNCTION_OUT_HIGH : + AXP20X_GPIO_FUNCTION_OUT_LOW); } static int axp20x_pmx_set(struct pinctrl_dev *pctldev, unsigned int offset, @@ -229,12 +225,11 @@ static int axp20x_pmx_set(struct pinctrl_dev *pctldev, unsigned int offset, int reg; /* AXP209 GPIO3 settings have a different layout */ - if (offset == 3) { + if (offset == 3) return regmap_update_bits(pctl->regmap, AXP20X_GPIO3_CTRL, AXP20X_GPIO3_FUNCTIONS, config == AXP20X_MUX_GPIO_OUT ? 
AXP20X_GPIO3_FUNCTION_OUT_LOW : AXP20X_GPIO3_FUNCTION_INPUT); - } reg = axp20x_gpio_get_reg(offset); if (reg < 0) @@ -468,7 +463,7 @@ static int axp20x_pctl_probe(struct platform_device *pdev) pctl->chip.owner = THIS_MODULE; pctl->chip.get = axp20x_gpio_get; pctl->chip.get_direction = axp20x_gpio_get_direction; - pctl->chip.set = axp20x_gpio_set; + pctl->chip.set_rv = axp20x_gpio_set; pctl->chip.direction_input = pinctrl_gpio_direction_input; pctl->chip.direction_output = axp20x_gpio_output; diff --git a/drivers/pinctrl/pinctrl-cy8c95x0.c b/drivers/pinctrl/pinctrl-cy8c95x0.c index 3cfbcaee9e65..8a2fd632bdd4 100644 --- a/drivers/pinctrl/pinctrl-cy8c95x0.c +++ b/drivers/pinctrl/pinctrl-cy8c95x0.c @@ -742,14 +742,15 @@ static int cy8c95x0_gpio_get_value(struct gpio_chip *gc, unsigned int off) return reg_val ? 1 : 0; } -static void cy8c95x0_gpio_set_value(struct gpio_chip *gc, unsigned int off, - int val) +static int cy8c95x0_gpio_set_value(struct gpio_chip *gc, unsigned int off, + int val) { struct cy8c95x0_pinctrl *chip = gpiochip_get_data(gc); u8 port = cypress_get_port(chip, off); u8 bit = cypress_get_pin_mask(chip, off); - cy8c95x0_regmap_write_bits(chip, CY8C95X0_OUTPUT, port, bit, val ? bit : 0); + return cy8c95x0_regmap_write_bits(chip, CY8C95X0_OUTPUT, port, bit, + val ? bit : 0); } static int cy8c95x0_gpio_get_direction(struct gpio_chip *gc, unsigned int off) @@ -908,12 +909,12 @@ static int cy8c95x0_gpio_get_multiple(struct gpio_chip *gc, return cy8c95x0_read_regs_mask(chip, CY8C95X0_INPUT, bits, mask); } -static void cy8c95x0_gpio_set_multiple(struct gpio_chip *gc, - unsigned long *mask, unsigned long *bits) +static int cy8c95x0_gpio_set_multiple(struct gpio_chip *gc, + unsigned long *mask, unsigned long *bits) { struct cy8c95x0_pinctrl *chip = gpiochip_get_data(gc); - cy8c95x0_write_regs_mask(chip, CY8C95X0_OUTPUT, bits, mask); + return cy8c95x0_write_regs_mask(chip, CY8C95X0_OUTPUT, bits, mask); } static int cy8c95x0_add_pin_ranges(struct gpio_chip *gc) @@ -938,10 +939,10 @@ static int cy8c95x0_setup_gpiochip(struct cy8c95x0_pinctrl *chip) gc->direction_input = cy8c95x0_gpio_direction_input; gc->direction_output = cy8c95x0_gpio_direction_output; gc->get = cy8c95x0_gpio_get_value; - gc->set = cy8c95x0_gpio_set_value; + gc->set_rv = cy8c95x0_gpio_set_value; gc->get_direction = cy8c95x0_gpio_get_direction; gc->get_multiple = cy8c95x0_gpio_get_multiple; - gc->set_multiple = cy8c95x0_gpio_set_multiple; + gc->set_multiple_rv = cy8c95x0_gpio_set_multiple; gc->set_config = gpiochip_generic_config; gc->can_sleep = true; gc->add_pin_ranges = cy8c95x0_add_pin_ranges; diff --git a/drivers/pinctrl/pinctrl-ingenic.c b/drivers/pinctrl/pinctrl-ingenic.c index a9e48eac15f6..3c660471ec69 100644 --- a/drivers/pinctrl/pinctrl-ingenic.c +++ b/drivers/pinctrl/pinctrl-ingenic.c @@ -3800,12 +3800,14 @@ static void ingenic_gpio_irq_handler(struct irq_desc *desc) chained_irq_exit(irq_chip, desc); } -static void ingenic_gpio_set(struct gpio_chip *gc, - unsigned int offset, int value) +static int ingenic_gpio_set(struct gpio_chip *gc, unsigned int offset, + int value) { struct ingenic_gpio_chip *jzgc = gpiochip_get_data(gc); ingenic_gpio_set_value(jzgc, offset, value); + + return 0; } static int ingenic_gpio_get(struct gpio_chip *gc, unsigned int offset) @@ -4449,7 +4451,7 @@ static int __init ingenic_gpio_probe(struct ingenic_pinctrl *jzpc, jzgc->gc.fwnode = fwnode; jzgc->gc.owner = THIS_MODULE; - jzgc->gc.set = ingenic_gpio_set; + jzgc->gc.set_rv = ingenic_gpio_set; jzgc->gc.get = ingenic_gpio_get; 
jzgc->gc.direction_input = pinctrl_gpio_direction_input; jzgc->gc.direction_output = ingenic_gpio_direction_output; diff --git a/drivers/pinctrl/pinctrl-mcp23s08.c b/drivers/pinctrl/pinctrl-mcp23s08.c index 4d1f41488017..c2f4b16f42d2 100644 --- a/drivers/pinctrl/pinctrl-mcp23s08.c +++ b/drivers/pinctrl/pinctrl-mcp23s08.c @@ -636,6 +636,14 @@ int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev, mcp->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW); + /* + * Reset the chip - we don't really know what state it's in, so reset + * all pins to input first to prevent surprises. + */ + ret = mcp_write(mcp, MCP_IODIR, mcp->chip.ngpio == 16 ? 0xFFFF : 0xFF); + if (ret < 0) + return ret; + /* verify MCP_IOCON.SEQOP = 0, so sequential reads work, * and MCP_IOCON.HAEN = 1, so we work with all chips. */ diff --git a/drivers/pinctrl/pinctrl-microchip-sgpio.c b/drivers/pinctrl/pinctrl-microchip-sgpio.c index a60db93b61b1..88c2f14cfc6b 100644 --- a/drivers/pinctrl/pinctrl-microchip-sgpio.c +++ b/drivers/pinctrl/pinctrl-microchip-sgpio.c @@ -555,10 +555,10 @@ static int microchip_sgpio_get_direction(struct gpio_chip *gc, unsigned int gpio return bank->is_input ? GPIO_LINE_DIRECTION_IN : GPIO_LINE_DIRECTION_OUT; } -static void microchip_sgpio_set_value(struct gpio_chip *gc, - unsigned int gpio, int value) +static int microchip_sgpio_set_value(struct gpio_chip *gc, unsigned int gpio, + int value) { - microchip_sgpio_direction_output(gc, gpio, value); + return microchip_sgpio_direction_output(gc, gpio, value); } static int microchip_sgpio_get_value(struct gpio_chip *gc, unsigned int gpio) @@ -858,7 +858,7 @@ static int microchip_sgpio_register_bank(struct device *dev, gc->direction_input = microchip_sgpio_direction_input; gc->direction_output = microchip_sgpio_direction_output; gc->get = microchip_sgpio_get_value; - gc->set = microchip_sgpio_set_value; + gc->set_rv = microchip_sgpio_set_value; gc->request = gpiochip_generic_request; gc->free = gpiochip_generic_free; gc->of_xlate = microchip_sgpio_of_xlate; diff --git a/drivers/pinctrl/pinctrl-ocelot.c b/drivers/pinctrl/pinctrl-ocelot.c index 329d54b11529..fbb3d43746bb 100644 --- a/drivers/pinctrl/pinctrl-ocelot.c +++ b/drivers/pinctrl/pinctrl-ocelot.c @@ -1950,17 +1950,18 @@ static int ocelot_gpio_get(struct gpio_chip *chip, unsigned int offset) return !!(val & BIT(offset % 32)); } -static void ocelot_gpio_set(struct gpio_chip *chip, unsigned int offset, - int value) +static int ocelot_gpio_set(struct gpio_chip *chip, unsigned int offset, + int value) { struct ocelot_pinctrl *info = gpiochip_get_data(chip); if (value) - regmap_write(info->map, REG(OCELOT_GPIO_OUT_SET, info, offset), - BIT(offset % 32)); - else - regmap_write(info->map, REG(OCELOT_GPIO_OUT_CLR, info, offset), - BIT(offset % 32)); + return regmap_write(info->map, + REG(OCELOT_GPIO_OUT_SET, info, offset), + BIT(offset % 32)); + + return regmap_write(info->map, REG(OCELOT_GPIO_OUT_CLR, info, offset), + BIT(offset % 32)); } static int ocelot_gpio_get_direction(struct gpio_chip *chip, @@ -1996,7 +1997,7 @@ static int ocelot_gpio_direction_output(struct gpio_chip *chip, static const struct gpio_chip ocelot_gpiolib_chip = { .request = gpiochip_generic_request, .free = gpiochip_generic_free, - .set = ocelot_gpio_set, + .set_rv = ocelot_gpio_set, .get = ocelot_gpio_get, .get_direction = ocelot_gpio_get_direction, .direction_input = pinctrl_gpio_direction_input, diff --git a/drivers/pinctrl/pinctrl-pistachio.c b/drivers/pinctrl/pinctrl-pistachio.c index 
8c50e0091b32..e7bf60960961 100644 --- a/drivers/pinctrl/pinctrl-pistachio.c +++ b/drivers/pinctrl/pinctrl-pistachio.c @@ -1186,12 +1186,14 @@ static int pistachio_gpio_get(struct gpio_chip *chip, unsigned offset) return !!(gpio_readl(bank, reg) & BIT(offset)); } -static void pistachio_gpio_set(struct gpio_chip *chip, unsigned offset, - int value) +static int pistachio_gpio_set(struct gpio_chip *chip, unsigned int offset, + int value) { struct pistachio_gpio_bank *bank = gpiochip_get_data(chip); gpio_mask_writel(bank, GPIO_OUTPUT, offset, !!value); + + return 0; } static int pistachio_gpio_direction_input(struct gpio_chip *chip, @@ -1326,7 +1328,7 @@ static void pistachio_gpio_irq_handler(struct irq_desc *desc) .direction_input = pistachio_gpio_direction_input, \ .direction_output = pistachio_gpio_direction_output, \ .get = pistachio_gpio_get, \ - .set = pistachio_gpio_set, \ + .set_rv = pistachio_gpio_set, \ .base = _pin_base, \ .ngpio = _npins, \ }, \ diff --git a/drivers/pinctrl/pinctrl-rk805.c b/drivers/pinctrl/pinctrl-rk805.c index c42f1bf93404..fc0e330b1d11 100644 --- a/drivers/pinctrl/pinctrl-rk805.c +++ b/drivers/pinctrl/pinctrl-rk805.c @@ -325,26 +325,26 @@ static int rk805_gpio_get(struct gpio_chip *chip, unsigned int offset) return !!(val & pci->pin_cfg[offset].val_msk); } -static void rk805_gpio_set(struct gpio_chip *chip, - unsigned int offset, - int value) +static int rk805_gpio_set(struct gpio_chip *chip, unsigned int offset, + int value) { struct rk805_pctrl_info *pci = gpiochip_get_data(chip); - int ret; - ret = regmap_update_bits(pci->rk808->regmap, - pci->pin_cfg[offset].reg, - pci->pin_cfg[offset].val_msk, - value ? pci->pin_cfg[offset].val_msk : 0); - if (ret) - dev_err(pci->dev, "set gpio%d value %d failed\n", - offset, value); + return regmap_update_bits(pci->rk808->regmap, + pci->pin_cfg[offset].reg, + pci->pin_cfg[offset].val_msk, + value ? pci->pin_cfg[offset].val_msk : 0); } static int rk805_gpio_direction_output(struct gpio_chip *chip, unsigned int offset, int value) { - rk805_gpio_set(chip, offset, value); + int ret; + + ret = rk805_gpio_set(chip, offset, value); + if (ret) + return ret; + return pinctrl_gpio_direction_output(chip, offset); } @@ -378,7 +378,7 @@ static const struct gpio_chip rk805_gpio_chip = { .free = gpiochip_generic_free, .get_direction = rk805_gpio_get_direction, .get = rk805_gpio_get, - .set = rk805_gpio_set, + .set_rv = rk805_gpio_set, .direction_input = pinctrl_gpio_direction_input, .direction_output = rk805_gpio_direction_output, .can_sleep = true, diff --git a/drivers/pinctrl/pinctrl-scmi.c b/drivers/pinctrl/pinctrl-scmi.c index df4bbcd7d1d5..383681041e4c 100644 --- a/drivers/pinctrl/pinctrl-scmi.c +++ b/drivers/pinctrl/pinctrl-scmi.c @@ -507,6 +507,7 @@ static int pinctrl_scmi_get_pins(struct scmi_pinctrl *pmx, static const char * const scmi_pinctrl_blocklist[] = { "fsl,imx95", + "fsl,imx94", NULL }; diff --git a/drivers/pinctrl/pinctrl-stmfx.c b/drivers/pinctrl/pinctrl-stmfx.c index aae01120dc52..f4fdcaa043e6 100644 --- a/drivers/pinctrl/pinctrl-stmfx.c +++ b/drivers/pinctrl/pinctrl-stmfx.c @@ -115,14 +115,14 @@ static int stmfx_gpio_get(struct gpio_chip *gc, unsigned int offset) return ret ? ret : !!(value & mask); } -static void stmfx_gpio_set(struct gpio_chip *gc, unsigned int offset, int value) +static int stmfx_gpio_set(struct gpio_chip *gc, unsigned int offset, int value) { struct stmfx_pinctrl *pctl = gpiochip_get_data(gc); u32 reg = value ? 
STMFX_REG_GPO_SET : STMFX_REG_GPO_CLR; u32 mask = get_mask(offset); - regmap_write_bits(pctl->stmfx->map, reg + get_reg(offset), - mask, mask); + return regmap_write_bits(pctl->stmfx->map, reg + get_reg(offset), + mask, mask); } static int stmfx_gpio_get_direction(struct gpio_chip *gc, unsigned int offset) @@ -161,8 +161,11 @@ static int stmfx_gpio_direction_output(struct gpio_chip *gc, struct stmfx_pinctrl *pctl = gpiochip_get_data(gc); u32 reg = STMFX_REG_GPIO_DIR + get_reg(offset); u32 mask = get_mask(offset); + int ret; - stmfx_gpio_set(gc, offset, value); + ret = stmfx_gpio_set(gc, offset, value); + if (ret) + return ret; return regmap_write_bits(pctl->stmfx->map, reg, mask, mask); } @@ -694,7 +697,7 @@ static int stmfx_pinctrl_probe(struct platform_device *pdev) pctl->gpio_chip.direction_input = stmfx_gpio_direction_input; pctl->gpio_chip.direction_output = stmfx_gpio_direction_output; pctl->gpio_chip.get = stmfx_gpio_get; - pctl->gpio_chip.set = stmfx_gpio_set; + pctl->gpio_chip.set_rv = stmfx_gpio_set; pctl->gpio_chip.set_config = gpiochip_generic_config; pctl->gpio_chip.base = -1; pctl->gpio_chip.ngpio = pctl->pctl_desc.npins; diff --git a/drivers/pinctrl/pinctrl-sx150x.c b/drivers/pinctrl/pinctrl-sx150x.c index 98262b8ce43a..d3a12c1c0de2 100644 --- a/drivers/pinctrl/pinctrl-sx150x.c +++ b/drivers/pinctrl/pinctrl-sx150x.c @@ -432,24 +432,25 @@ static int sx150x_gpio_oscio_set(struct sx150x_pinctrl *pctl, (value ? 0x1f : 0x10)); } -static void sx150x_gpio_set(struct gpio_chip *chip, unsigned int offset, - int value) +static int sx150x_gpio_set(struct gpio_chip *chip, unsigned int offset, + int value) { struct sx150x_pinctrl *pctl = gpiochip_get_data(chip); if (sx150x_pin_is_oscio(pctl, offset)) - sx150x_gpio_oscio_set(pctl, value); - else - __sx150x_gpio_set(pctl, offset, value); + return sx150x_gpio_oscio_set(pctl, value); + + return __sx150x_gpio_set(pctl, offset, value); } -static void sx150x_gpio_set_multiple(struct gpio_chip *chip, - unsigned long *mask, - unsigned long *bits) +static int sx150x_gpio_set_multiple(struct gpio_chip *chip, + unsigned long *mask, + unsigned long *bits) { struct sx150x_pinctrl *pctl = gpiochip_get_data(chip); - regmap_write_bits(pctl->regmap, pctl->data->reg_data, *mask, *bits); + return regmap_write_bits(pctl->regmap, pctl->data->reg_data, *mask, + *bits); } static int sx150x_gpio_direction_input(struct gpio_chip *chip, @@ -1175,7 +1176,7 @@ static int sx150x_probe(struct i2c_client *client) pctl->gpio.direction_input = sx150x_gpio_direction_input; pctl->gpio.direction_output = sx150x_gpio_direction_output; pctl->gpio.get = sx150x_gpio_get; - pctl->gpio.set = sx150x_gpio_set; + pctl->gpio.set_rv = sx150x_gpio_set; pctl->gpio.set_config = gpiochip_generic_config; pctl->gpio.parent = dev; pctl->gpio.can_sleep = true; @@ -1190,7 +1191,7 @@ static int sx150x_probe(struct i2c_client *client) * would require locking that is not in place at this time. 
*/ if (pctl->data->model != SX150X_789) - pctl->gpio.set_multiple = sx150x_gpio_set_multiple; + pctl->gpio.set_multiple_rv = sx150x_gpio_set_multiple; /* Add Interrupt support if an irq is specified */ if (client->irq > 0) { diff --git a/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c b/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c index 7366aba5a199..57fefeb603f0 100644 --- a/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c +++ b/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c @@ -327,14 +327,14 @@ static int lpi_gpio_get(struct gpio_chip *chip, unsigned int pin) LPI_GPIO_VALUE_IN_MASK; } -static void lpi_gpio_set(struct gpio_chip *chip, unsigned int pin, int value) +static int lpi_gpio_set(struct gpio_chip *chip, unsigned int pin, int value) { struct lpi_pinctrl *state = gpiochip_get_data(chip); unsigned long config; config = pinconf_to_config_packed(PIN_CONFIG_OUTPUT, value); - lpi_config_set(state->ctrl, pin, &config, 1); + return lpi_config_set(state->ctrl, pin, &config, 1); } #ifdef CONFIG_DEBUG_FS @@ -398,7 +398,7 @@ static const struct gpio_chip lpi_gpio_template = { .direction_input = lpi_gpio_direction_input, .direction_output = lpi_gpio_direction_output, .get = lpi_gpio_get, - .set = lpi_gpio_set, + .set_rv = lpi_gpio_set, .request = gpiochip_generic_request, .free = gpiochip_generic_free, .dbg_show = lpi_gpio_dbg_show, diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c index 0eb816395dc6..f012ea88aa22 100644 --- a/drivers/pinctrl/qcom/pinctrl-msm.c +++ b/drivers/pinctrl/qcom/pinctrl-msm.c @@ -635,7 +635,7 @@ static int msm_gpio_get(struct gpio_chip *chip, unsigned offset) return !!(val & BIT(g->in_bit)); } -static void msm_gpio_set(struct gpio_chip *chip, unsigned offset, int value) +static int msm_gpio_set(struct gpio_chip *chip, unsigned int offset, int value) { const struct msm_pingroup *g; struct msm_pinctrl *pctrl = gpiochip_get_data(chip); @@ -654,6 +654,8 @@ static void msm_gpio_set(struct gpio_chip *chip, unsigned offset, int value) msm_writel_io(val, pctrl, g); raw_spin_unlock_irqrestore(&pctrl->lock, flags); + + return 0; } #ifdef CONFIG_DEBUG_FS @@ -790,7 +792,7 @@ static const struct gpio_chip msm_gpio_template = { .direction_output = msm_gpio_direction_output, .get_direction = msm_gpio_get_direction, .get = msm_gpio_get, - .set = msm_gpio_set, + .set_rv = msm_gpio_set, .request = gpiochip_generic_request, .free = gpiochip_generic_free, .dbg_show = msm_gpio_dbg_show, diff --git a/drivers/pinctrl/qcom/pinctrl-qcm2290.c b/drivers/pinctrl/qcom/pinctrl-qcm2290.c index ba699eac9ee8..f885af571ec9 100644 --- a/drivers/pinctrl/qcom/pinctrl-qcm2290.c +++ b/drivers/pinctrl/qcom/pinctrl-qcm2290.c @@ -37,6 +37,8 @@ .mux_bit = 2, \ .pull_bit = 0, \ .drv_bit = 6, \ + .egpio_enable = 12, \ + .egpio_present = 11, \ .oe_bit = 9, \ .in_bit = 0, \ .out_bit = 1, \ @@ -387,6 +389,7 @@ enum qcm2290_functions { msm_mux_ddr_pxi1, msm_mux_ddr_pxi2, msm_mux_ddr_pxi3, + msm_mux_egpio, msm_mux_gcc_gp1, msm_mux_gcc_gp2, msm_mux_gcc_gp3, @@ -816,6 +819,13 @@ static const char * const sd_write_groups[] = { static const char * const jitter_bist_groups[] = { "gpio96", "gpio97", }; +static const char * const egpio_groups[] = { + "gpio98", "gpio99", "gpio100", "gpio101", "gpio102", "gpio103", + "gpio104", "gpio105", "gpio106", "gpio107", "gpio108", "gpio109", + "gpio110", "gpio111", "gpio112", "gpio113", "gpio114", "gpio115", + "gpio116", "gpio117", "gpio118", "gpio119", "gpio120", "gpio121", + "gpio122", "gpio123", "gpio124", "gpio125", "gpio126", +}; static const char * const 
ddr_pxi2_groups[] = { "gpio102", "gpio103", }; @@ -851,6 +861,7 @@ static const struct pinfunction qcm2290_functions[] = { MSM_PIN_FUNCTION(ddr_pxi1), MSM_PIN_FUNCTION(ddr_pxi2), MSM_PIN_FUNCTION(ddr_pxi3), + MSM_PIN_FUNCTION(egpio), MSM_PIN_FUNCTION(gcc_gp1), MSM_PIN_FUNCTION(gcc_gp2), MSM_PIN_FUNCTION(gcc_gp3), @@ -1037,35 +1048,35 @@ static const struct msm_pingroup qcm2290_groups[] = { [95] = PINGROUP(95, nav_gpio, gp_pdm0, qdss_gpio, wlan1_adc1, _, _, _, _, _), [96] = PINGROUP(96, qup4, nav_gpio, mdp_vsync, gp_pdm1, sd_write, jitter_bist, qdss_cti, qdss_cti, _), [97] = PINGROUP(97, qup4, nav_gpio, mdp_vsync, gp_pdm2, jitter_bist, qdss_cti, qdss_cti, _, _), - [98] = PINGROUP(98, _, _, _, _, _, _, _, _, _), - [99] = PINGROUP(99, _, _, _, _, _, _, _, _, _), - [100] = PINGROUP(100, atest, _, _, _, _, _, _, _, _), - [101] = PINGROUP(101, atest, _, _, _, _, _, _, _, _), - [102] = PINGROUP(102, _, phase_flag, dac_calib, ddr_pxi2, _, _, _, _, _), - [103] = PINGROUP(103, _, phase_flag, dac_calib, ddr_pxi2, _, _, _, _, _), - [104] = PINGROUP(104, _, phase_flag, qdss_gpio, dac_calib, ddr_pxi3, _, pwm_8, _, _), - [105] = PINGROUP(105, _, phase_flag, qdss_gpio, dac_calib, ddr_pxi3, _, _, _, _), - [106] = PINGROUP(106, nav_gpio, gcc_gp3, qdss_gpio, _, _, _, _, _, _), - [107] = PINGROUP(107, nav_gpio, gcc_gp2, qdss_gpio, _, _, _, _, _, _), - [108] = PINGROUP(108, nav_gpio, _, _, _, _, _, _, _, _), - [109] = PINGROUP(109, _, qdss_gpio, _, _, _, _, _, _, _), - [110] = PINGROUP(110, _, qdss_gpio, _, _, _, _, _, _, _), - [111] = PINGROUP(111, _, _, _, _, _, _, _, _, _), - [112] = PINGROUP(112, _, _, _, _, _, _, _, _, _), - [113] = PINGROUP(113, _, _, _, _, _, _, _, _, _), - [114] = PINGROUP(114, _, _, _, _, _, _, _, _, _), - [115] = PINGROUP(115, _, pwm_9, _, _, _, _, _, _, _), - [116] = PINGROUP(116, _, _, _, _, _, _, _, _, _), - [117] = PINGROUP(117, _, _, _, _, _, _, _, _, _), - [118] = PINGROUP(118, _, _, _, _, _, _, _, _, _), - [119] = PINGROUP(119, _, _, _, _, _, _, _, _, _), - [120] = PINGROUP(120, _, _, _, _, _, _, _, _, _), - [121] = PINGROUP(121, _, _, _, _, _, _, _, _, _), - [122] = PINGROUP(122, _, _, _, _, _, _, _, _, _), - [123] = PINGROUP(123, _, _, _, _, _, _, _, _, _), - [124] = PINGROUP(124, _, _, _, _, _, _, _, _, _), - [125] = PINGROUP(125, _, _, _, _, _, _, _, _, _), - [126] = PINGROUP(126, _, _, _, _, _, _, _, _, _), + [98] = PINGROUP(98, _, _, _, _, _, _, _, _, egpio), + [99] = PINGROUP(99, _, _, _, _, _, _, _, _, egpio), + [100] = PINGROUP(100, atest, _, _, _, _, _, _, _, egpio), + [101] = PINGROUP(101, atest, _, _, _, _, _, _, _, egpio), + [102] = PINGROUP(102, _, phase_flag, dac_calib, ddr_pxi2, _, _, _, _, egpio), + [103] = PINGROUP(103, _, phase_flag, dac_calib, ddr_pxi2, _, _, _, _, egpio), + [104] = PINGROUP(104, _, phase_flag, qdss_gpio, dac_calib, ddr_pxi3, _, pwm_8, _, egpio), + [105] = PINGROUP(105, _, phase_flag, qdss_gpio, dac_calib, ddr_pxi3, _, _, _, egpio), + [106] = PINGROUP(106, nav_gpio, gcc_gp3, qdss_gpio, _, _, _, _, _, egpio), + [107] = PINGROUP(107, nav_gpio, gcc_gp2, qdss_gpio, _, _, _, _, _, egpio), + [108] = PINGROUP(108, nav_gpio, _, _, _, _, _, _, _, egpio), + [109] = PINGROUP(109, _, qdss_gpio, _, _, _, _, _, _, egpio), + [110] = PINGROUP(110, _, qdss_gpio, _, _, _, _, _, _, egpio), + [111] = PINGROUP(111, _, _, _, _, _, _, _, _, egpio), + [112] = PINGROUP(112, _, _, _, _, _, _, _, _, egpio), + [113] = PINGROUP(113, _, _, _, _, _, _, _, _, egpio), + [114] = PINGROUP(114, _, _, _, _, _, _, _, _, egpio), + [115] = PINGROUP(115, _, pwm_9, _, _, _, _, _, 
_, egpio), + [116] = PINGROUP(116, _, _, _, _, _, _, _, _, egpio), + [117] = PINGROUP(117, _, _, _, _, _, _, _, _, egpio), + [118] = PINGROUP(118, _, _, _, _, _, _, _, _, egpio), + [119] = PINGROUP(119, _, _, _, _, _, _, _, _, egpio), + [120] = PINGROUP(120, _, _, _, _, _, _, _, _, egpio), + [121] = PINGROUP(121, _, _, _, _, _, _, _, _, egpio), + [122] = PINGROUP(122, _, _, _, _, _, _, _, _, egpio), + [123] = PINGROUP(123, _, _, _, _, _, _, _, _, egpio), + [124] = PINGROUP(124, _, _, _, _, _, _, _, _, egpio), + [125] = PINGROUP(125, _, _, _, _, _, _, _, _, egpio), + [126] = PINGROUP(126, _, _, _, _, _, _, _, _, egpio), [127] = SDC_QDSD_PINGROUP(sdc1_rclk, 0x84004, 0, 0), [128] = SDC_QDSD_PINGROUP(sdc1_clk, 0x84000, 13, 6), [129] = SDC_QDSD_PINGROUP(sdc1_cmd, 0x84000, 11, 3), @@ -1095,6 +1106,7 @@ static const struct msm_pinctrl_soc_data qcm2290_pinctrl = { .ngpios = 127, .wakeirq_map = qcm2290_mpm_map, .nwakeirq_map = ARRAY_SIZE(qcm2290_mpm_map), + .egpio_func = 9, }; static int qcm2290_pinctrl_probe(struct platform_device *pdev) diff --git a/drivers/pinctrl/qcom/pinctrl-qcs615.c b/drivers/pinctrl/qcom/pinctrl-qcs615.c index 23015b055f6a..17ca743c2210 100644 --- a/drivers/pinctrl/qcom/pinctrl-qcs615.c +++ b/drivers/pinctrl/qcom/pinctrl-qcs615.c @@ -1062,7 +1062,7 @@ static const struct msm_pinctrl_soc_data qcs615_tlmm = { .nfunctions = ARRAY_SIZE(qcs615_functions), .groups = qcs615_groups, .ngroups = ARRAY_SIZE(qcs615_groups), - .ngpios = 123, + .ngpios = 124, .tiles = qcs615_tiles, .ntiles = ARRAY_SIZE(qcs615_tiles), .wakeirq_map = qcs615_pdc_map, diff --git a/drivers/pinctrl/qcom/pinctrl-qcs8300.c b/drivers/pinctrl/qcom/pinctrl-qcs8300.c index ba6de944a859..5f5f7c4ac644 100644 --- a/drivers/pinctrl/qcom/pinctrl-qcs8300.c +++ b/drivers/pinctrl/qcom/pinctrl-qcs8300.c @@ -1204,7 +1204,7 @@ static const struct msm_pinctrl_soc_data qcs8300_pinctrl = { .nfunctions = ARRAY_SIZE(qcs8300_functions), .groups = qcs8300_groups, .ngroups = ARRAY_SIZE(qcs8300_groups), - .ngpios = 133, + .ngpios = 134, .wakeirq_map = qcs8300_pdc_map, .nwakeirq_map = ARRAY_SIZE(qcs8300_pdc_map), .egpio_func = 11, diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c index c8ce61066070..bc082bfb52ef 100644 --- a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c +++ b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c @@ -764,14 +764,14 @@ static int pmic_gpio_get(struct gpio_chip *chip, unsigned pin) return !!pad->out_value; } -static void pmic_gpio_set(struct gpio_chip *chip, unsigned pin, int value) +static int pmic_gpio_set(struct gpio_chip *chip, unsigned int pin, int value) { struct pmic_gpio_state *state = gpiochip_get_data(chip); unsigned long config; config = pinconf_to_config_packed(PIN_CONFIG_OUTPUT, value); - pmic_gpio_config_set(state->ctrl, pin, &config, 1); + return pmic_gpio_config_set(state->ctrl, pin, &config, 1); } static int pmic_gpio_of_xlate(struct gpio_chip *chip, @@ -802,7 +802,7 @@ static const struct gpio_chip pmic_gpio_gpio_template = { .direction_input = pmic_gpio_direction_input, .direction_output = pmic_gpio_direction_output, .get = pmic_gpio_get, - .set = pmic_gpio_set, + .set_rv = pmic_gpio_set, .request = gpiochip_generic_request, .free = gpiochip_generic_free, .of_xlate = pmic_gpio_of_xlate, diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c b/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c index 7b28c5fb2402..ba9084978f90 100644 --- a/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c +++ b/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c @@ -600,14 +600,14 @@ static int 
pmic_mpp_get(struct gpio_chip *chip, unsigned pin) return !!pad->out_value; } -static void pmic_mpp_set(struct gpio_chip *chip, unsigned pin, int value) +static int pmic_mpp_set(struct gpio_chip *chip, unsigned int pin, int value) { struct pmic_mpp_state *state = gpiochip_get_data(chip); unsigned long config; config = pinconf_to_config_packed(PIN_CONFIG_OUTPUT, value); - pmic_mpp_config_set(state->ctrl, pin, &config, 1); + return pmic_mpp_config_set(state->ctrl, pin, &config, 1); } static int pmic_mpp_of_xlate(struct gpio_chip *chip, @@ -638,7 +638,7 @@ static const struct gpio_chip pmic_mpp_gpio_template = { .direction_input = pmic_mpp_direction_input, .direction_output = pmic_mpp_direction_output, .get = pmic_mpp_get, - .set = pmic_mpp_set, + .set_rv = pmic_mpp_set, .request = gpiochip_generic_request, .free = gpiochip_generic_free, .of_xlate = pmic_mpp_of_xlate, diff --git a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c index 82679417e25f..3a8014ebf064 100644 --- a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c +++ b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c @@ -507,7 +507,8 @@ static int pm8xxx_gpio_get(struct gpio_chip *chip, unsigned offset) return ret; } -static void pm8xxx_gpio_set(struct gpio_chip *chip, unsigned offset, int value) +static int pm8xxx_gpio_set(struct gpio_chip *chip, unsigned int offset, + int value) { struct pm8xxx_gpio *pctrl = gpiochip_get_data(chip); struct pm8xxx_pin_data *pin = pctrl->desc.pins[offset].drv_data; @@ -519,7 +520,7 @@ static void pm8xxx_gpio_set(struct gpio_chip *chip, unsigned offset, int value) val |= pin->open_drain << 1; val |= pin->output_value; - pm8xxx_write_bank(pctrl, pin, 1, val); + return pm8xxx_write_bank(pctrl, pin, 1, val); } static int pm8xxx_gpio_of_xlate(struct gpio_chip *chip, @@ -596,7 +597,7 @@ static const struct gpio_chip pm8xxx_gpio_template = { .direction_input = pm8xxx_gpio_direction_input, .direction_output = pm8xxx_gpio_direction_output, .get = pm8xxx_gpio_get, - .set = pm8xxx_gpio_set, + .set_rv = pm8xxx_gpio_set, .of_xlate = pm8xxx_gpio_of_xlate, .dbg_show = pm8xxx_gpio_dbg_show, .owner = THIS_MODULE, diff --git a/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c b/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c index 4841bbfe4864..087c37d304fc 100644 --- a/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c +++ b/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c @@ -511,14 +511,15 @@ static int pm8xxx_mpp_get(struct gpio_chip *chip, unsigned offset) return ret; } -static void pm8xxx_mpp_set(struct gpio_chip *chip, unsigned offset, int value) +static int pm8xxx_mpp_set(struct gpio_chip *chip, unsigned int offset, + int value) { struct pm8xxx_mpp *pctrl = gpiochip_get_data(chip); struct pm8xxx_pin_data *pin = pctrl->desc.pins[offset].drv_data; pin->output_value = !!value; - pm8xxx_mpp_update(pctrl, pin); + return pm8xxx_mpp_update(pctrl, pin); } static int pm8xxx_mpp_of_xlate(struct gpio_chip *chip, @@ -633,7 +634,7 @@ static const struct gpio_chip pm8xxx_mpp_template = { .direction_input = pm8xxx_mpp_direction_input, .direction_output = pm8xxx_mpp_direction_output, .get = pm8xxx_mpp_get, - .set = pm8xxx_mpp_set, + .set_rv = pm8xxx_mpp_set, .of_xlate = pm8xxx_mpp_of_xlate, .dbg_show = pm8xxx_mpp_dbg_show, .owner = THIS_MODULE, diff --git a/drivers/pinctrl/qcom/tlmm-test.c b/drivers/pinctrl/qcom/tlmm-test.c index fd02bf3a76cb..7b99e89e0f67 100644 --- a/drivers/pinctrl/qcom/tlmm-test.c +++ b/drivers/pinctrl/qcom/tlmm-test.c @@ -547,6 +547,7 @@ static int tlmm_test_init(struct kunit *test) struct tlmm_test_priv *priv; priv = 
kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv); atomic_set(&priv->intr_count, 0); atomic_set(&priv->thread_count, 0); diff --git a/drivers/pinctrl/renesas/Kconfig b/drivers/pinctrl/renesas/Kconfig index 3c18d908b21e..e16034fc1bbf 100644 --- a/drivers/pinctrl/renesas/Kconfig +++ b/drivers/pinctrl/renesas/Kconfig @@ -42,6 +42,7 @@ config PINCTRL_RENESAS select PINCTRL_RZG2L if ARCH_RZG2L select PINCTRL_RZV2M if ARCH_R9A09G011 select PINCTRL_RZG2L if ARCH_R9A09G047 + select PINCTRL_RZG2L if ARCH_R9A09G056 select PINCTRL_RZG2L if ARCH_R9A09G057 select PINCTRL_PFC_SH7203 if CPU_SUBTYPE_SH7203 select PINCTRL_PFC_SH7264 if CPU_SUBTYPE_SH7264 diff --git a/drivers/pinctrl/renesas/pinctrl-rzg2l.c b/drivers/pinctrl/renesas/pinctrl-rzg2l.c index c72e250f4a15..78fa08ff0faa 100644 --- a/drivers/pinctrl/renesas/pinctrl-rzg2l.c +++ b/drivers/pinctrl/renesas/pinctrl-rzg2l.c @@ -2230,135 +2230,146 @@ static const struct rzg2l_dedicated_configs rzg3s_dedicated_pins[] = { PIN_CFG_IO_VMC_SD1)) }, }; -static struct rzg2l_dedicated_configs rzv2h_dedicated_pins[] = { - { "NMI", RZG2L_SINGLE_PIN_PACK(0x1, 0, PIN_CFG_NF) }, - { "TMS_SWDIO", RZG2L_SINGLE_PIN_PACK(0x3, 0, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | - PIN_CFG_IEN)) }, - { "TDO", RZG2L_SINGLE_PIN_PACK(0x3, 2, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR)) }, - { "WDTUDFCA", RZG2L_SINGLE_PIN_PACK(0x5, 0, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | - PIN_CFG_PUPD | PIN_CFG_NOD)) }, - { "WDTUDFCM", RZG2L_SINGLE_PIN_PACK(0x5, 1, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | - PIN_CFG_PUPD | PIN_CFG_NOD)) }, - { "SCIF_RXD", RZG2L_SINGLE_PIN_PACK(0x6, 0, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | - PIN_CFG_PUPD)) }, - { "SCIF_TXD", RZG2L_SINGLE_PIN_PACK(0x6, 1, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | - PIN_CFG_PUPD)) }, - { "XSPI0_CKP", RZG2L_SINGLE_PIN_PACK(0x7, 0, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | - PIN_CFG_PUPD | PIN_CFG_OEN)) }, - { "XSPI0_CKN", RZG2L_SINGLE_PIN_PACK(0x7, 1, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | - PIN_CFG_PUPD | PIN_CFG_OEN)) }, - { "XSPI0_CS0N", RZG2L_SINGLE_PIN_PACK(0x7, 2, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | - PIN_CFG_PUPD | PIN_CFG_OEN)) }, - { "XSPI0_DS", RZG2L_SINGLE_PIN_PACK(0x7, 3, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | - PIN_CFG_PUPD)) }, - { "XSPI0_RESET0N", RZG2L_SINGLE_PIN_PACK(0x7, 4, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | - PIN_CFG_PUPD | PIN_CFG_OEN)) }, - { "XSPI0_RSTO0N", RZG2L_SINGLE_PIN_PACK(0x7, 5, (PIN_CFG_PUPD)) }, - { "XSPI0_INT0N", RZG2L_SINGLE_PIN_PACK(0x7, 6, (PIN_CFG_PUPD)) }, - { "XSPI0_ECS0N", RZG2L_SINGLE_PIN_PACK(0x7, 7, (PIN_CFG_PUPD)) }, - { "XSPI0_IO0", RZG2L_SINGLE_PIN_PACK(0x8, 0, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | - PIN_CFG_PUPD)) }, - { "XSPI0_IO1", RZG2L_SINGLE_PIN_PACK(0x8, 1, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | - PIN_CFG_PUPD)) }, - { "XSPI0_IO2", RZG2L_SINGLE_PIN_PACK(0x8, 2, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | - PIN_CFG_PUPD)) }, - { "XSPI0_IO3", RZG2L_SINGLE_PIN_PACK(0x8, 3, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | - PIN_CFG_PUPD)) }, - { "XSPI0_IO4", RZG2L_SINGLE_PIN_PACK(0x8, 4, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | - PIN_CFG_PUPD)) }, - { "XSPI0_IO5", RZG2L_SINGLE_PIN_PACK(0x8, 5, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | - PIN_CFG_PUPD)) }, - { "XSPI0_IO6", RZG2L_SINGLE_PIN_PACK(0x8, 6, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | - PIN_CFG_PUPD)) }, - { "XSPI0_IO7", RZG2L_SINGLE_PIN_PACK(0x8, 7, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | - PIN_CFG_PUPD)) }, - { "SD0CLK", RZG2L_SINGLE_PIN_PACK(0x9, 0, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR)) }, - { "SD0CMD", RZG2L_SINGLE_PIN_PACK(0x9, 1, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | - PIN_CFG_IEN | PIN_CFG_PUPD)) 
}, - { "SD0RSTN", RZG2L_SINGLE_PIN_PACK(0x9, 2, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR)) }, - { "SD0DAT0", RZG2L_SINGLE_PIN_PACK(0xa, 0, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | - PIN_CFG_IEN | PIN_CFG_PUPD)) }, - { "SD0DAT1", RZG2L_SINGLE_PIN_PACK(0xa, 1, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | - PIN_CFG_IEN | PIN_CFG_PUPD)) }, - { "SD0DAT2", RZG2L_SINGLE_PIN_PACK(0xa, 2, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | - PIN_CFG_IEN | PIN_CFG_PUPD)) }, - { "SD0DAT3", RZG2L_SINGLE_PIN_PACK(0xa, 3, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | - PIN_CFG_IEN | PIN_CFG_PUPD)) }, - { "SD0DAT4", RZG2L_SINGLE_PIN_PACK(0xa, 4, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | - PIN_CFG_IEN | PIN_CFG_PUPD)) }, - { "SD0DAT5", RZG2L_SINGLE_PIN_PACK(0xa, 5, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | - PIN_CFG_IEN | PIN_CFG_PUPD)) }, - { "SD0DAT6", RZG2L_SINGLE_PIN_PACK(0xa, 6, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | - PIN_CFG_IEN | PIN_CFG_PUPD)) }, - { "SD0DAT7", RZG2L_SINGLE_PIN_PACK(0xa, 7, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | - PIN_CFG_IEN | PIN_CFG_PUPD)) }, - { "SD1CLK", RZG2L_SINGLE_PIN_PACK(0xb, 0, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR)) }, - { "SD1CMD", RZG2L_SINGLE_PIN_PACK(0xb, 1, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | - PIN_CFG_IEN | PIN_CFG_PUPD)) }, - { "SD1DAT0", RZG2L_SINGLE_PIN_PACK(0xc, 0, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | - PIN_CFG_IEN | PIN_CFG_PUPD)) }, - { "SD1DAT1", RZG2L_SINGLE_PIN_PACK(0xc, 1, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | - PIN_CFG_IEN | PIN_CFG_PUPD)) }, - { "SD1DAT2", RZG2L_SINGLE_PIN_PACK(0xc, 2, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | - PIN_CFG_IEN | PIN_CFG_PUPD)) }, - { "SD1DAT3", RZG2L_SINGLE_PIN_PACK(0xc, 3, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | - PIN_CFG_IEN | PIN_CFG_PUPD)) }, - { "PCIE0_RSTOUTB", RZG2L_SINGLE_PIN_PACK(0xe, 0, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR)) }, - { "PCIE1_RSTOUTB", RZG2L_SINGLE_PIN_PACK(0xe, 1, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR)) }, - { "ET0_MDIO", RZG2L_SINGLE_PIN_PACK(0xf, 0, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | - PIN_CFG_IEN | PIN_CFG_PUPD)) }, - { "ET0_MDC", RZG2L_SINGLE_PIN_PACK(0xf, 1, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | - PIN_CFG_PUPD)) }, - { "ET0_RXCTL_RXDV", RZG2L_SINGLE_PIN_PACK(0x10, 0, (PIN_CFG_PUPD)) }, - { "ET0_TXCTL_TXEN", RZG2L_SINGLE_PIN_PACK(0x10, 1, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | - PIN_CFG_PUPD)) }, - { "ET0_TXER", RZG2L_SINGLE_PIN_PACK(0x10, 2, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | - PIN_CFG_PUPD)) }, - { "ET0_RXER", RZG2L_SINGLE_PIN_PACK(0x10, 3, (PIN_CFG_PUPD)) }, - { "ET0_RXC_RXCLK", RZG2L_SINGLE_PIN_PACK(0x10, 4, (PIN_CFG_PUPD)) }, - { "ET0_TXC_TXCLK", RZG2L_SINGLE_PIN_PACK(0x10, 5, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | - PIN_CFG_PUPD | PIN_CFG_OEN)) }, - { "ET0_CRS", RZG2L_SINGLE_PIN_PACK(0x10, 6, (PIN_CFG_PUPD)) }, - { "ET0_COL", RZG2L_SINGLE_PIN_PACK(0x10, 7, (PIN_CFG_PUPD)) }, - { "ET0_TXD0", RZG2L_SINGLE_PIN_PACK(0x11, 0, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | - PIN_CFG_PUPD)) }, - { "ET0_TXD1", RZG2L_SINGLE_PIN_PACK(0x11, 1, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | - PIN_CFG_PUPD)) }, - { "ET0_TXD2", RZG2L_SINGLE_PIN_PACK(0x11, 2, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | - PIN_CFG_PUPD)) }, - { "ET0_TXD3", RZG2L_SINGLE_PIN_PACK(0x11, 3, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | - PIN_CFG_PUPD)) }, - { "ET0_RXD0", RZG2L_SINGLE_PIN_PACK(0x11, 4, (PIN_CFG_PUPD)) }, - { "ET0_RXD1", RZG2L_SINGLE_PIN_PACK(0x11, 5, (PIN_CFG_PUPD)) }, - { "ET0_RXD2", RZG2L_SINGLE_PIN_PACK(0x11, 6, (PIN_CFG_PUPD)) }, - { "ET0_RXD3", RZG2L_SINGLE_PIN_PACK(0x11, 7, (PIN_CFG_PUPD)) }, - { "ET1_MDIO", RZG2L_SINGLE_PIN_PACK(0x12, 0, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | - PIN_CFG_IEN | PIN_CFG_PUPD)) }, - { "ET1_MDC", RZG2L_SINGLE_PIN_PACK(0x12, 1, 
(PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | - PIN_CFG_PUPD)) }, - { "ET1_RXCTL_RXDV", RZG2L_SINGLE_PIN_PACK(0x13, 0, (PIN_CFG_PUPD)) }, - { "ET1_TXCTL_TXEN", RZG2L_SINGLE_PIN_PACK(0x13, 1, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | +static const struct { + struct rzg2l_dedicated_configs common[77]; + struct rzg2l_dedicated_configs pcie1[1]; +} rzv2h_dedicated_pins = { + .common = { + { "NMI", RZG2L_SINGLE_PIN_PACK(0x1, 0, PIN_CFG_NF) }, + { "TMS_SWDIO", RZG2L_SINGLE_PIN_PACK(0x3, 0, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | + PIN_CFG_IEN)) }, + { "TDO", RZG2L_SINGLE_PIN_PACK(0x3, 2, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR)) }, + { "WDTUDFCA", RZG2L_SINGLE_PIN_PACK(0x5, 0, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | + PIN_CFG_PUPD | PIN_CFG_NOD)) }, + { "WDTUDFCM", RZG2L_SINGLE_PIN_PACK(0x5, 1, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | + PIN_CFG_PUPD | PIN_CFG_NOD)) }, + { "SCIF_RXD", RZG2L_SINGLE_PIN_PACK(0x6, 0, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | + PIN_CFG_PUPD)) }, + { "SCIF_TXD", RZG2L_SINGLE_PIN_PACK(0x6, 1, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | + PIN_CFG_PUPD)) }, + { "XSPI0_CKP", RZG2L_SINGLE_PIN_PACK(0x7, 0, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | + PIN_CFG_PUPD | PIN_CFG_OEN)) }, + { "XSPI0_CKN", RZG2L_SINGLE_PIN_PACK(0x7, 1, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | + PIN_CFG_PUPD | PIN_CFG_OEN)) }, + { "XSPI0_CS0N", RZG2L_SINGLE_PIN_PACK(0x7, 2, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | + PIN_CFG_PUPD | PIN_CFG_OEN)) }, + { "XSPI0_DS", RZG2L_SINGLE_PIN_PACK(0x7, 3, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | + PIN_CFG_PUPD)) }, + { "XSPI0_RESET0N", RZG2L_SINGLE_PIN_PACK(0x7, 4, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | + PIN_CFG_PUPD | PIN_CFG_OEN)) }, + { "XSPI0_RSTO0N", RZG2L_SINGLE_PIN_PACK(0x7, 5, (PIN_CFG_PUPD)) }, + { "XSPI0_INT0N", RZG2L_SINGLE_PIN_PACK(0x7, 6, (PIN_CFG_PUPD)) }, + { "XSPI0_ECS0N", RZG2L_SINGLE_PIN_PACK(0x7, 7, (PIN_CFG_PUPD)) }, + { "XSPI0_IO0", RZG2L_SINGLE_PIN_PACK(0x8, 0, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | + PIN_CFG_PUPD)) }, + { "XSPI0_IO1", RZG2L_SINGLE_PIN_PACK(0x8, 1, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | + PIN_CFG_PUPD)) }, + { "XSPI0_IO2", RZG2L_SINGLE_PIN_PACK(0x8, 2, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | + PIN_CFG_PUPD)) }, + { "XSPI0_IO3", RZG2L_SINGLE_PIN_PACK(0x8, 3, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | + PIN_CFG_PUPD)) }, + { "XSPI0_IO4", RZG2L_SINGLE_PIN_PACK(0x8, 4, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | + PIN_CFG_PUPD)) }, + { "XSPI0_IO5", RZG2L_SINGLE_PIN_PACK(0x8, 5, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | + PIN_CFG_PUPD)) }, + { "XSPI0_IO6", RZG2L_SINGLE_PIN_PACK(0x8, 6, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | + PIN_CFG_PUPD)) }, + { "XSPI0_IO7", RZG2L_SINGLE_PIN_PACK(0x8, 7, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | + PIN_CFG_PUPD)) }, + { "SD0CLK", RZG2L_SINGLE_PIN_PACK(0x9, 0, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR)) }, + { "SD0CMD", RZG2L_SINGLE_PIN_PACK(0x9, 1, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | + PIN_CFG_IEN | PIN_CFG_PUPD)) }, + { "SD0RSTN", RZG2L_SINGLE_PIN_PACK(0x9, 2, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR)) }, + { "SD0DAT0", RZG2L_SINGLE_PIN_PACK(0xa, 0, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | + PIN_CFG_IEN | PIN_CFG_PUPD)) }, + { "SD0DAT1", RZG2L_SINGLE_PIN_PACK(0xa, 1, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | + PIN_CFG_IEN | PIN_CFG_PUPD)) }, + { "SD0DAT2", RZG2L_SINGLE_PIN_PACK(0xa, 2, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | + PIN_CFG_IEN | PIN_CFG_PUPD)) }, + { "SD0DAT3", RZG2L_SINGLE_PIN_PACK(0xa, 3, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | + PIN_CFG_IEN | PIN_CFG_PUPD)) }, + { "SD0DAT4", RZG2L_SINGLE_PIN_PACK(0xa, 4, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | + PIN_CFG_IEN | PIN_CFG_PUPD)) }, + { "SD0DAT5", RZG2L_SINGLE_PIN_PACK(0xa, 5, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | 
+ PIN_CFG_IEN | PIN_CFG_PUPD)) }, + { "SD0DAT6", RZG2L_SINGLE_PIN_PACK(0xa, 6, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | + PIN_CFG_IEN | PIN_CFG_PUPD)) }, + { "SD0DAT7", RZG2L_SINGLE_PIN_PACK(0xa, 7, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | + PIN_CFG_IEN | PIN_CFG_PUPD)) }, + { "SD1CLK", RZG2L_SINGLE_PIN_PACK(0xb, 0, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR)) }, + { "SD1CMD", RZG2L_SINGLE_PIN_PACK(0xb, 1, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | + PIN_CFG_IEN | PIN_CFG_PUPD)) }, + { "SD1DAT0", RZG2L_SINGLE_PIN_PACK(0xc, 0, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | + PIN_CFG_IEN | PIN_CFG_PUPD)) }, + { "SD1DAT1", RZG2L_SINGLE_PIN_PACK(0xc, 1, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | + PIN_CFG_IEN | PIN_CFG_PUPD)) }, + { "SD1DAT2", RZG2L_SINGLE_PIN_PACK(0xc, 2, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | + PIN_CFG_IEN | PIN_CFG_PUPD)) }, + { "SD1DAT3", RZG2L_SINGLE_PIN_PACK(0xc, 3, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | + PIN_CFG_IEN | PIN_CFG_PUPD)) }, + { "PCIE0_RSTOUTB", RZG2L_SINGLE_PIN_PACK(0xe, 0, (PIN_CFG_IOLH_RZV2H | + PIN_CFG_SR)) }, + { "ET0_MDIO", RZG2L_SINGLE_PIN_PACK(0xf, 0, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | + PIN_CFG_IEN | PIN_CFG_PUPD)) }, + { "ET0_MDC", RZG2L_SINGLE_PIN_PACK(0xf, 1, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | PIN_CFG_PUPD)) }, - { "ET1_TXER", RZG2L_SINGLE_PIN_PACK(0x13, 2, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | - PIN_CFG_PUPD)) }, - { "ET1_RXER", RZG2L_SINGLE_PIN_PACK(0x13, 3, (PIN_CFG_PUPD)) }, - { "ET1_RXC_RXCLK", RZG2L_SINGLE_PIN_PACK(0x13, 4, (PIN_CFG_PUPD)) }, - { "ET1_TXC_TXCLK", RZG2L_SINGLE_PIN_PACK(0x13, 5, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | - PIN_CFG_PUPD | PIN_CFG_OEN)) }, - { "ET1_CRS", RZG2L_SINGLE_PIN_PACK(0x13, 6, (PIN_CFG_PUPD)) }, - { "ET1_COL", RZG2L_SINGLE_PIN_PACK(0x13, 7, (PIN_CFG_PUPD)) }, - { "ET1_TXD0", RZG2L_SINGLE_PIN_PACK(0x14, 0, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | - PIN_CFG_PUPD)) }, - { "ET1_TXD1", RZG2L_SINGLE_PIN_PACK(0x14, 1, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | - PIN_CFG_PUPD)) }, - { "ET1_TXD2", RZG2L_SINGLE_PIN_PACK(0x14, 2, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | - PIN_CFG_PUPD)) }, - { "ET1_TXD3", RZG2L_SINGLE_PIN_PACK(0x14, 3, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | - PIN_CFG_PUPD)) }, - { "ET1_RXD0", RZG2L_SINGLE_PIN_PACK(0x14, 4, (PIN_CFG_PUPD)) }, - { "ET1_RXD1", RZG2L_SINGLE_PIN_PACK(0x14, 5, (PIN_CFG_PUPD)) }, - { "ET1_RXD2", RZG2L_SINGLE_PIN_PACK(0x14, 6, (PIN_CFG_PUPD)) }, - { "ET1_RXD3", RZG2L_SINGLE_PIN_PACK(0x14, 7, (PIN_CFG_PUPD)) }, + { "ET0_RXCTL_RXDV", RZG2L_SINGLE_PIN_PACK(0x10, 0, (PIN_CFG_PUPD)) }, + { "ET0_TXCTL_TXEN", RZG2L_SINGLE_PIN_PACK(0x10, 1, (PIN_CFG_IOLH_RZV2H | + PIN_CFG_SR | + PIN_CFG_PUPD)) }, + { "ET0_TXER", RZG2L_SINGLE_PIN_PACK(0x10, 2, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | + PIN_CFG_PUPD)) }, + { "ET0_RXER", RZG2L_SINGLE_PIN_PACK(0x10, 3, (PIN_CFG_PUPD)) }, + { "ET0_RXC_RXCLK", RZG2L_SINGLE_PIN_PACK(0x10, 4, (PIN_CFG_PUPD)) }, + { "ET0_TXC_TXCLK", RZG2L_SINGLE_PIN_PACK(0x10, 5, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | + PIN_CFG_PUPD | PIN_CFG_OEN)) }, + { "ET0_CRS", RZG2L_SINGLE_PIN_PACK(0x10, 6, (PIN_CFG_PUPD)) }, + { "ET0_COL", RZG2L_SINGLE_PIN_PACK(0x10, 7, (PIN_CFG_PUPD)) }, + { "ET0_TXD0", RZG2L_SINGLE_PIN_PACK(0x11, 0, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | + PIN_CFG_PUPD)) }, + { "ET0_TXD1", RZG2L_SINGLE_PIN_PACK(0x11, 1, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | + PIN_CFG_PUPD)) }, + { "ET0_TXD2", RZG2L_SINGLE_PIN_PACK(0x11, 2, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | + PIN_CFG_PUPD)) }, + { "ET0_TXD3", RZG2L_SINGLE_PIN_PACK(0x11, 3, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | + PIN_CFG_PUPD)) }, + { "ET0_RXD0", RZG2L_SINGLE_PIN_PACK(0x11, 4, (PIN_CFG_PUPD)) }, + { "ET0_RXD1", 
RZG2L_SINGLE_PIN_PACK(0x11, 5, (PIN_CFG_PUPD)) }, + { "ET0_RXD2", RZG2L_SINGLE_PIN_PACK(0x11, 6, (PIN_CFG_PUPD)) }, + { "ET0_RXD3", RZG2L_SINGLE_PIN_PACK(0x11, 7, (PIN_CFG_PUPD)) }, + { "ET1_MDIO", RZG2L_SINGLE_PIN_PACK(0x12, 0, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | + PIN_CFG_IEN | PIN_CFG_PUPD)) }, + { "ET1_MDC", RZG2L_SINGLE_PIN_PACK(0x12, 1, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | + PIN_CFG_PUPD)) }, + { "ET1_RXCTL_RXDV", RZG2L_SINGLE_PIN_PACK(0x13, 0, (PIN_CFG_PUPD)) }, + { "ET1_TXCTL_TXEN", RZG2L_SINGLE_PIN_PACK(0x13, 1, (PIN_CFG_IOLH_RZV2H | + PIN_CFG_SR | + PIN_CFG_PUPD)) }, + { "ET1_TXER", RZG2L_SINGLE_PIN_PACK(0x13, 2, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | + PIN_CFG_PUPD)) }, + { "ET1_RXER", RZG2L_SINGLE_PIN_PACK(0x13, 3, (PIN_CFG_PUPD)) }, + { "ET1_RXC_RXCLK", RZG2L_SINGLE_PIN_PACK(0x13, 4, (PIN_CFG_PUPD)) }, + { "ET1_TXC_TXCLK", RZG2L_SINGLE_PIN_PACK(0x13, 5, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | + PIN_CFG_PUPD | PIN_CFG_OEN)) }, + { "ET1_CRS", RZG2L_SINGLE_PIN_PACK(0x13, 6, (PIN_CFG_PUPD)) }, + { "ET1_COL", RZG2L_SINGLE_PIN_PACK(0x13, 7, (PIN_CFG_PUPD)) }, + { "ET1_TXD0", RZG2L_SINGLE_PIN_PACK(0x14, 0, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | + PIN_CFG_PUPD)) }, + { "ET1_TXD1", RZG2L_SINGLE_PIN_PACK(0x14, 1, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | + PIN_CFG_PUPD)) }, + { "ET1_TXD2", RZG2L_SINGLE_PIN_PACK(0x14, 2, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | + PIN_CFG_PUPD)) }, + { "ET1_TXD3", RZG2L_SINGLE_PIN_PACK(0x14, 3, (PIN_CFG_IOLH_RZV2H | PIN_CFG_SR | + PIN_CFG_PUPD)) }, + { "ET1_RXD0", RZG2L_SINGLE_PIN_PACK(0x14, 4, (PIN_CFG_PUPD)) }, + { "ET1_RXD1", RZG2L_SINGLE_PIN_PACK(0x14, 5, (PIN_CFG_PUPD)) }, + { "ET1_RXD2", RZG2L_SINGLE_PIN_PACK(0x14, 6, (PIN_CFG_PUPD)) }, + { "ET1_RXD3", RZG2L_SINGLE_PIN_PACK(0x14, 7, (PIN_CFG_PUPD)) }, + }, + .pcie1 = { + { "PCIE1_RSTOUTB", RZG2L_SINGLE_PIN_PACK(0xe, 1, (PIN_CFG_IOLH_RZV2H | + PIN_CFG_SR)) }, + }, }; static struct rzg2l_dedicated_configs rzg3e_dedicated_pins[] = { @@ -3349,13 +3360,37 @@ static struct rzg2l_pinctrl_data r9a09g047_data = { .bias_param_to_hw = &rzv2h_bias_param_to_hw, }; +static struct rzg2l_pinctrl_data r9a09g056_data = { + .port_pins = rzv2h_gpio_names, + .port_pin_configs = r9a09g057_gpio_configs, + .n_ports = ARRAY_SIZE(r9a09g057_gpio_configs), + .dedicated_pins = rzv2h_dedicated_pins.common, + .n_port_pins = ARRAY_SIZE(r9a09g057_gpio_configs) * RZG2L_PINS_PER_PORT, + .n_dedicated_pins = ARRAY_SIZE(rzv2h_dedicated_pins.common), + .hwcfg = &rzv2h_hwcfg, + .variable_pin_cfg = r9a09g057_variable_pin_cfg, + .n_variable_pin_cfg = ARRAY_SIZE(r9a09g057_variable_pin_cfg), + .num_custom_params = ARRAY_SIZE(renesas_rzv2h_custom_bindings), + .custom_params = renesas_rzv2h_custom_bindings, +#ifdef CONFIG_DEBUG_FS + .custom_conf_items = renesas_rzv2h_conf_items, +#endif + .pwpr_pfc_lock_unlock = &rzv2h_pwpr_pfc_lock_unlock, + .pmc_writeb = &rzv2h_pmc_writeb, + .oen_read = &rzv2h_oen_read, + .oen_write = &rzv2h_oen_write, + .hw_to_bias_param = &rzv2h_hw_to_bias_param, + .bias_param_to_hw = &rzv2h_bias_param_to_hw, +}; + static struct rzg2l_pinctrl_data r9a09g057_data = { .port_pins = rzv2h_gpio_names, .port_pin_configs = r9a09g057_gpio_configs, .n_ports = ARRAY_SIZE(r9a09g057_gpio_configs), - .dedicated_pins = rzv2h_dedicated_pins, + .dedicated_pins = rzv2h_dedicated_pins.common, .n_port_pins = ARRAY_SIZE(r9a09g057_gpio_configs) * RZG2L_PINS_PER_PORT, - .n_dedicated_pins = ARRAY_SIZE(rzv2h_dedicated_pins), + .n_dedicated_pins = ARRAY_SIZE(rzv2h_dedicated_pins.common) + + ARRAY_SIZE(rzv2h_dedicated_pins.pcie1), .hwcfg = &rzv2h_hwcfg, .variable_pin_cfg = 
r9a09g057_variable_pin_cfg, .n_variable_pin_cfg = ARRAY_SIZE(r9a09g057_variable_pin_cfg), @@ -3390,6 +3425,10 @@ static const struct of_device_id rzg2l_pinctrl_of_table[] = { .data = &r9a09g047_data, }, { + .compatible = "renesas,r9a09g056-pinctrl", + .data = &r9a09g056_data, + }, + { .compatible = "renesas,r9a09g057-pinctrl", .data = &r9a09g057_data, }, diff --git a/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c b/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c index dd07720e32cc..9fd894729a7b 100644 --- a/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c +++ b/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c @@ -1419,8 +1419,8 @@ static const struct samsung_pin_ctrl exynosautov920_pin_ctrl[] = { .pin_banks = exynosautov920_pin_banks0, .nr_banks = ARRAY_SIZE(exynosautov920_pin_banks0), .eint_wkup_init = exynos_eint_wkup_init, - .suspend = exynos_pinctrl_suspend, - .resume = exynos_pinctrl_resume, + .suspend = exynosautov920_pinctrl_suspend, + .resume = exynosautov920_pinctrl_resume, .retention_data = &exynosautov920_retention_data, }, { /* pin-controller instance 1 AUD data */ @@ -1431,43 +1431,43 @@ static const struct samsung_pin_ctrl exynosautov920_pin_ctrl[] = { .pin_banks = exynosautov920_pin_banks2, .nr_banks = ARRAY_SIZE(exynosautov920_pin_banks2), .eint_gpio_init = exynos_eint_gpio_init, - .suspend = exynos_pinctrl_suspend, - .resume = exynos_pinctrl_resume, + .suspend = exynosautov920_pinctrl_suspend, + .resume = exynosautov920_pinctrl_resume, }, { /* pin-controller instance 3 HSI1 data */ .pin_banks = exynosautov920_pin_banks3, .nr_banks = ARRAY_SIZE(exynosautov920_pin_banks3), .eint_gpio_init = exynos_eint_gpio_init, - .suspend = exynos_pinctrl_suspend, - .resume = exynos_pinctrl_resume, + .suspend = exynosautov920_pinctrl_suspend, + .resume = exynosautov920_pinctrl_resume, }, { /* pin-controller instance 4 HSI2 data */ .pin_banks = exynosautov920_pin_banks4, .nr_banks = ARRAY_SIZE(exynosautov920_pin_banks4), .eint_gpio_init = exynos_eint_gpio_init, - .suspend = exynos_pinctrl_suspend, - .resume = exynos_pinctrl_resume, + .suspend = exynosautov920_pinctrl_suspend, + .resume = exynosautov920_pinctrl_resume, }, { /* pin-controller instance 5 HSI2UFS data */ .pin_banks = exynosautov920_pin_banks5, .nr_banks = ARRAY_SIZE(exynosautov920_pin_banks5), .eint_gpio_init = exynos_eint_gpio_init, - .suspend = exynos_pinctrl_suspend, - .resume = exynos_pinctrl_resume, + .suspend = exynosautov920_pinctrl_suspend, + .resume = exynosautov920_pinctrl_resume, }, { /* pin-controller instance 6 PERIC0 data */ .pin_banks = exynosautov920_pin_banks6, .nr_banks = ARRAY_SIZE(exynosautov920_pin_banks6), .eint_gpio_init = exynos_eint_gpio_init, - .suspend = exynos_pinctrl_suspend, - .resume = exynos_pinctrl_resume, + .suspend = exynosautov920_pinctrl_suspend, + .resume = exynosautov920_pinctrl_resume, }, { /* pin-controller instance 7 PERIC1 data */ .pin_banks = exynosautov920_pin_banks7, .nr_banks = ARRAY_SIZE(exynosautov920_pin_banks7), .eint_gpio_init = exynos_eint_gpio_init, - .suspend = exynos_pinctrl_suspend, - .resume = exynos_pinctrl_resume, + .suspend = exynosautov920_pinctrl_suspend, + .resume = exynosautov920_pinctrl_resume, }, }; @@ -1762,15 +1762,15 @@ static const struct samsung_pin_ctrl gs101_pin_ctrl[] __initconst = { .pin_banks = gs101_pin_alive, .nr_banks = ARRAY_SIZE(gs101_pin_alive), .eint_wkup_init = exynos_eint_wkup_init, - .suspend = exynos_pinctrl_suspend, - .resume = exynos_pinctrl_resume, + .suspend = gs101_pinctrl_suspend, + .resume = gs101_pinctrl_resume, }, { /* pin banks of gs101 
pin-controller (FAR_ALIVE) */ .pin_banks = gs101_pin_far_alive, .nr_banks = ARRAY_SIZE(gs101_pin_far_alive), .eint_wkup_init = exynos_eint_wkup_init, - .suspend = exynos_pinctrl_suspend, - .resume = exynos_pinctrl_resume, + .suspend = gs101_pinctrl_suspend, + .resume = gs101_pinctrl_resume, }, { /* pin banks of gs101 pin-controller (GSACORE) */ .pin_banks = gs101_pin_gsacore, @@ -1784,29 +1784,29 @@ static const struct samsung_pin_ctrl gs101_pin_ctrl[] __initconst = { .pin_banks = gs101_pin_peric0, .nr_banks = ARRAY_SIZE(gs101_pin_peric0), .eint_gpio_init = exynos_eint_gpio_init, - .suspend = exynos_pinctrl_suspend, - .resume = exynos_pinctrl_resume, + .suspend = gs101_pinctrl_suspend, + .resume = gs101_pinctrl_resume, }, { /* pin banks of gs101 pin-controller (PERIC1) */ .pin_banks = gs101_pin_peric1, .nr_banks = ARRAY_SIZE(gs101_pin_peric1), .eint_gpio_init = exynos_eint_gpio_init, - .suspend = exynos_pinctrl_suspend, - .resume = exynos_pinctrl_resume, + .suspend = gs101_pinctrl_suspend, + .resume = gs101_pinctrl_resume, }, { /* pin banks of gs101 pin-controller (HSI1) */ .pin_banks = gs101_pin_hsi1, .nr_banks = ARRAY_SIZE(gs101_pin_hsi1), .eint_gpio_init = exynos_eint_gpio_init, - .suspend = exynos_pinctrl_suspend, - .resume = exynos_pinctrl_resume, + .suspend = gs101_pinctrl_suspend, + .resume = gs101_pinctrl_resume, }, { /* pin banks of gs101 pin-controller (HSI2) */ .pin_banks = gs101_pin_hsi2, .nr_banks = ARRAY_SIZE(gs101_pin_hsi2), .eint_gpio_init = exynos_eint_gpio_init, - .suspend = exynos_pinctrl_suspend, - .resume = exynos_pinctrl_resume, + .suspend = gs101_pinctrl_suspend, + .resume = gs101_pinctrl_resume, }, }; diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.c b/drivers/pinctrl/samsung/pinctrl-exynos.c index 42093bae8bb7..f3e1c11abe55 100644 --- a/drivers/pinctrl/samsung/pinctrl-exynos.c +++ b/drivers/pinctrl/samsung/pinctrl-exynos.c @@ -370,6 +370,37 @@ struct exynos_eint_gpio_save { u32 eint_mask; }; +static void exynos_eint_update_flt_reg(void __iomem *reg, int cnt, int con) +{ + unsigned int val, shift; + int i; + + val = readl(reg); + for (i = 0; i < cnt; i++) { + shift = i * EXYNOS_FLTCON_LEN; + val &= ~(EXYNOS_FLTCON_DIGITAL << shift); + val |= con << shift; + } + writel(val, reg); +} + +/* + * Set the desired filter (digital or analog delay) and enable it to + * every pin in the bank. Note the filter selection bitfield is only + * found on alive banks. The filter determines to what extent signal + * fluctuations received through the pad are considered glitches. + */ +static void exynos_eint_set_filter(struct samsung_pin_bank *bank, int filter) +{ + unsigned int off = EXYNOS_GPIO_EFLTCON_OFFSET + bank->eint_fltcon_offset; + void __iomem *reg = bank->drvdata->virt_base + off; + unsigned int con = EXYNOS_FLTCON_EN | filter; + + for (int n = 0; n < bank->nr_pins; n += 4) + exynos_eint_update_flt_reg(reg + n, + min(bank->nr_pins - n, 4), con); +} + /* * exynos_eint_gpio_init() - setup handling of external gpio interrupts. * @d: driver data of samsung pinctrl driver. 
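As a quick sanity check of the packing the new filter helpers perform (a standalone sketch, not part of the patch, using the EXYNOS_FLTCON_* field layout added to pinctrl-exynos.h later in this diff): enabling the digital filter for all four pins covered by one FLTCON word yields 0xc0c0c0c0, the analog filter 0x80808080, with the 6-bit filter-width fields left untouched.

/* Standalone sketch: reproduce the per-pin packing done by
 * exynos_eint_update_flt_reg(), field layout per pinctrl-exynos.h
 * in this same series. */
#include <stdint.h>
#include <stdio.h>

#define EXYNOS_FLTCON_EN	(1u << 7)	/* FLT_EN per pin */
#define EXYNOS_FLTCON_DIGITAL	(1u << 6)	/* FLT_SEL: 1 = digital, 0 = analog */
#define EXYNOS_FLTCON_ANALOG	0u
#define EXYNOS_FLTCON_LEN	8		/* bits per pin in the register */

static uint32_t pack_fltcon(uint32_t val, int cnt, uint32_t con)
{
	for (int i = 0; i < cnt; i++) {
		unsigned int shift = i * EXYNOS_FLTCON_LEN;

		val &= ~(EXYNOS_FLTCON_DIGITAL << shift);	/* clear filter select */
		val |= con << shift;				/* set enable + filter type */
	}
	return val;
}

int main(void)
{
	/* one FLTCON word covers at most four pins */
	printf("digital: %#010x\n", pack_fltcon(0, 4, EXYNOS_FLTCON_EN | EXYNOS_FLTCON_DIGITAL));
	printf("analog:  %#010x\n", pack_fltcon(0, 4, EXYNOS_FLTCON_EN | EXYNOS_FLTCON_ANALOG));
	return 0;
}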
@@ -762,153 +793,190 @@ __init int exynos_eint_wkup_init(struct samsung_pinctrl_drv_data *d) return 0; } -static void exynos_pinctrl_suspend_bank( - struct samsung_pinctrl_drv_data *drvdata, - struct samsung_pin_bank *bank) +static void exynos_set_wakeup(struct samsung_pin_bank *bank) { - struct exynos_eint_gpio_save *save = bank->soc_priv; - const void __iomem *regs = bank->eint_base; + struct exynos_irq_chip *irq_chip; - if (clk_enable(bank->drvdata->pclk)) { - dev_err(bank->gpio_chip.parent, - "unable to enable clock for saving state\n"); - return; + if (bank->irq_chip) { + irq_chip = bank->irq_chip; + irq_chip->set_eint_wakeup_mask(bank->drvdata, irq_chip); } - - save->eint_con = readl(regs + EXYNOS_GPIO_ECON_OFFSET - + bank->eint_offset); - save->eint_fltcon0 = readl(regs + EXYNOS_GPIO_EFLTCON_OFFSET - + 2 * bank->eint_offset); - save->eint_fltcon1 = readl(regs + EXYNOS_GPIO_EFLTCON_OFFSET - + 2 * bank->eint_offset + 4); - save->eint_mask = readl(regs + bank->irq_chip->eint_mask - + bank->eint_offset); - - clk_disable(bank->drvdata->pclk); - - pr_debug("%s: save con %#010x\n", bank->name, save->eint_con); - pr_debug("%s: save fltcon0 %#010x\n", bank->name, save->eint_fltcon0); - pr_debug("%s: save fltcon1 %#010x\n", bank->name, save->eint_fltcon1); - pr_debug("%s: save mask %#010x\n", bank->name, save->eint_mask); } -static void exynosauto_pinctrl_suspend_bank(struct samsung_pinctrl_drv_data *drvdata, - struct samsung_pin_bank *bank) +void exynos_pinctrl_suspend(struct samsung_pin_bank *bank) { struct exynos_eint_gpio_save *save = bank->soc_priv; const void __iomem *regs = bank->eint_base; - if (clk_enable(bank->drvdata->pclk)) { - dev_err(bank->gpio_chip.parent, - "unable to enable clock for saving state\n"); - return; + if (bank->eint_type == EINT_TYPE_GPIO) { + save->eint_con = readl(regs + EXYNOS_GPIO_ECON_OFFSET + + bank->eint_offset); + save->eint_fltcon0 = readl(regs + EXYNOS_GPIO_EFLTCON_OFFSET + + 2 * bank->eint_offset); + save->eint_fltcon1 = readl(regs + EXYNOS_GPIO_EFLTCON_OFFSET + + 2 * bank->eint_offset + 4); + save->eint_mask = readl(regs + bank->irq_chip->eint_mask + + bank->eint_offset); + + pr_debug("%s: save con %#010x\n", + bank->name, save->eint_con); + pr_debug("%s: save fltcon0 %#010x\n", + bank->name, save->eint_fltcon0); + pr_debug("%s: save fltcon1 %#010x\n", + bank->name, save->eint_fltcon1); + pr_debug("%s: save mask %#010x\n", + bank->name, save->eint_mask); + } else if (bank->eint_type == EINT_TYPE_WKUP) { + exynos_set_wakeup(bank); } - - save->eint_con = readl(regs + bank->pctl_offset + bank->eint_con_offset); - save->eint_mask = readl(regs + bank->pctl_offset + bank->eint_mask_offset); - - clk_disable(bank->drvdata->pclk); - - pr_debug("%s: save con %#010x\n", bank->name, save->eint_con); - pr_debug("%s: save mask %#010x\n", bank->name, save->eint_mask); } -void exynos_pinctrl_suspend(struct samsung_pinctrl_drv_data *drvdata) +void gs101_pinctrl_suspend(struct samsung_pin_bank *bank) { - struct samsung_pin_bank *bank = drvdata->pin_banks; - struct exynos_irq_chip *irq_chip = NULL; - int i; + struct exynos_eint_gpio_save *save = bank->soc_priv; + const void __iomem *regs = bank->eint_base; - for (i = 0; i < drvdata->nr_banks; ++i, ++bank) { - if (bank->eint_type == EINT_TYPE_GPIO) { - if (bank->eint_con_offset) - exynosauto_pinctrl_suspend_bank(drvdata, bank); - else - exynos_pinctrl_suspend_bank(drvdata, bank); - } - else if (bank->eint_type == EINT_TYPE_WKUP) { - if (!irq_chip) { - irq_chip = bank->irq_chip; - irq_chip->set_eint_wakeup_mask(drvdata, - 
irq_chip); - } - } + if (bank->eint_type == EINT_TYPE_GPIO) { + save->eint_con = readl(regs + EXYNOS_GPIO_ECON_OFFSET + + bank->eint_offset); + + save->eint_fltcon0 = readl(regs + EXYNOS_GPIO_EFLTCON_OFFSET + + bank->eint_fltcon_offset); + + /* fltcon1 register only exists for pins 4-7 */ + if (bank->nr_pins > 4) + save->eint_fltcon1 = readl(regs + + EXYNOS_GPIO_EFLTCON_OFFSET + + bank->eint_fltcon_offset + 4); + + save->eint_mask = readl(regs + bank->irq_chip->eint_mask + + bank->eint_offset); + + pr_debug("%s: save con %#010x\n", + bank->name, save->eint_con); + pr_debug("%s: save fltcon0 %#010x\n", + bank->name, save->eint_fltcon0); + if (bank->nr_pins > 4) + pr_debug("%s: save fltcon1 %#010x\n", + bank->name, save->eint_fltcon1); + pr_debug("%s: save mask %#010x\n", + bank->name, save->eint_mask); + } else if (bank->eint_type == EINT_TYPE_WKUP) { + exynos_set_wakeup(bank); + exynos_eint_set_filter(bank, EXYNOS_FLTCON_ANALOG); } } -static void exynos_pinctrl_resume_bank( - struct samsung_pinctrl_drv_data *drvdata, - struct samsung_pin_bank *bank) +void exynosautov920_pinctrl_suspend(struct samsung_pin_bank *bank) { struct exynos_eint_gpio_save *save = bank->soc_priv; - void __iomem *regs = bank->eint_base; + const void __iomem *regs = bank->eint_base; - if (clk_enable(bank->drvdata->pclk)) { - dev_err(bank->gpio_chip.parent, - "unable to enable clock for restoring state\n"); - return; + if (bank->eint_type == EINT_TYPE_GPIO) { + save->eint_con = readl(regs + bank->pctl_offset + + bank->eint_con_offset); + save->eint_mask = readl(regs + bank->pctl_offset + + bank->eint_mask_offset); + pr_debug("%s: save con %#010x\n", + bank->name, save->eint_con); + pr_debug("%s: save mask %#010x\n", + bank->name, save->eint_mask); + } else if (bank->eint_type == EINT_TYPE_WKUP) { + exynos_set_wakeup(bank); } +} - pr_debug("%s: con %#010x => %#010x\n", bank->name, - readl(regs + EXYNOS_GPIO_ECON_OFFSET - + bank->eint_offset), save->eint_con); - pr_debug("%s: fltcon0 %#010x => %#010x\n", bank->name, - readl(regs + EXYNOS_GPIO_EFLTCON_OFFSET - + 2 * bank->eint_offset), save->eint_fltcon0); - pr_debug("%s: fltcon1 %#010x => %#010x\n", bank->name, - readl(regs + EXYNOS_GPIO_EFLTCON_OFFSET - + 2 * bank->eint_offset + 4), save->eint_fltcon1); - pr_debug("%s: mask %#010x => %#010x\n", bank->name, - readl(regs + bank->irq_chip->eint_mask - + bank->eint_offset), save->eint_mask); - - writel(save->eint_con, regs + EXYNOS_GPIO_ECON_OFFSET - + bank->eint_offset); - writel(save->eint_fltcon0, regs + EXYNOS_GPIO_EFLTCON_OFFSET - + 2 * bank->eint_offset); - writel(save->eint_fltcon1, regs + EXYNOS_GPIO_EFLTCON_OFFSET - + 2 * bank->eint_offset + 4); - writel(save->eint_mask, regs + bank->irq_chip->eint_mask - + bank->eint_offset); +void gs101_pinctrl_resume(struct samsung_pin_bank *bank) +{ + struct exynos_eint_gpio_save *save = bank->soc_priv; - clk_disable(bank->drvdata->pclk); + void __iomem *regs = bank->eint_base; + void __iomem *eint_fltcfg0 = regs + EXYNOS_GPIO_EFLTCON_OFFSET + + bank->eint_fltcon_offset; + + if (bank->eint_type == EINT_TYPE_GPIO) { + pr_debug("%s: con %#010x => %#010x\n", bank->name, + readl(regs + EXYNOS_GPIO_ECON_OFFSET + + bank->eint_offset), save->eint_con); + + pr_debug("%s: fltcon0 %#010x => %#010x\n", bank->name, + readl(eint_fltcfg0), save->eint_fltcon0); + + /* fltcon1 register only exists for pins 4-7 */ + if (bank->nr_pins > 4) + pr_debug("%s: fltcon1 %#010x => %#010x\n", bank->name, + readl(eint_fltcfg0 + 4), save->eint_fltcon1); + + pr_debug("%s: mask %#010x => %#010x\n", 
bank->name, + readl(regs + bank->irq_chip->eint_mask + + bank->eint_offset), save->eint_mask); + + writel(save->eint_con, regs + EXYNOS_GPIO_ECON_OFFSET + + bank->eint_offset); + writel(save->eint_fltcon0, eint_fltcfg0); + + if (bank->nr_pins > 4) + writel(save->eint_fltcon1, eint_fltcfg0 + 4); + writel(save->eint_mask, regs + bank->irq_chip->eint_mask + + bank->eint_offset); + } else if (bank->eint_type == EINT_TYPE_WKUP) { + exynos_eint_set_filter(bank, EXYNOS_FLTCON_DIGITAL); + } } -static void exynosauto_pinctrl_resume_bank(struct samsung_pinctrl_drv_data *drvdata, - struct samsung_pin_bank *bank) +void exynos_pinctrl_resume(struct samsung_pin_bank *bank) { struct exynos_eint_gpio_save *save = bank->soc_priv; void __iomem *regs = bank->eint_base; - if (clk_enable(bank->drvdata->pclk)) { - dev_err(bank->gpio_chip.parent, - "unable to enable clock for restoring state\n"); - return; + if (bank->eint_type == EINT_TYPE_GPIO) { + pr_debug("%s: con %#010x => %#010x\n", bank->name, + readl(regs + EXYNOS_GPIO_ECON_OFFSET + + bank->eint_offset), save->eint_con); + pr_debug("%s: fltcon0 %#010x => %#010x\n", bank->name, + readl(regs + EXYNOS_GPIO_EFLTCON_OFFSET + + 2 * bank->eint_offset), save->eint_fltcon0); + pr_debug("%s: fltcon1 %#010x => %#010x\n", bank->name, + readl(regs + EXYNOS_GPIO_EFLTCON_OFFSET + + 2 * bank->eint_offset + 4), + save->eint_fltcon1); + pr_debug("%s: mask %#010x => %#010x\n", bank->name, + readl(regs + bank->irq_chip->eint_mask + + bank->eint_offset), save->eint_mask); + + writel(save->eint_con, regs + EXYNOS_GPIO_ECON_OFFSET + + bank->eint_offset); + writel(save->eint_fltcon0, regs + EXYNOS_GPIO_EFLTCON_OFFSET + + 2 * bank->eint_offset); + writel(save->eint_fltcon1, regs + EXYNOS_GPIO_EFLTCON_OFFSET + + 2 * bank->eint_offset + 4); + writel(save->eint_mask, regs + bank->irq_chip->eint_mask + + bank->eint_offset); } - - pr_debug("%s: con %#010x => %#010x\n", bank->name, - readl(regs + bank->pctl_offset + bank->eint_con_offset), save->eint_con); - pr_debug("%s: mask %#010x => %#010x\n", bank->name, - readl(regs + bank->pctl_offset + bank->eint_mask_offset), save->eint_mask); - - writel(save->eint_con, regs + bank->pctl_offset + bank->eint_con_offset); - writel(save->eint_mask, regs + bank->pctl_offset + bank->eint_mask_offset); - - clk_disable(bank->drvdata->pclk); } -void exynos_pinctrl_resume(struct samsung_pinctrl_drv_data *drvdata) +void exynosautov920_pinctrl_resume(struct samsung_pin_bank *bank) { - struct samsung_pin_bank *bank = drvdata->pin_banks; - int i; + struct exynos_eint_gpio_save *save = bank->soc_priv; + void __iomem *regs = bank->eint_base; - for (i = 0; i < drvdata->nr_banks; ++i, ++bank) - if (bank->eint_type == EINT_TYPE_GPIO) { - if (bank->eint_con_offset) - exynosauto_pinctrl_resume_bank(drvdata, bank); - else - exynos_pinctrl_resume_bank(drvdata, bank); - } + if (bank->eint_type == EINT_TYPE_GPIO) { + /* exynosautov920 has eint_con_offset for all but one bank */ + if (!bank->eint_con_offset) + exynos_pinctrl_resume(bank); + + pr_debug("%s: con %#010x => %#010x\n", bank->name, + readl(regs + bank->pctl_offset + bank->eint_con_offset), + save->eint_con); + pr_debug("%s: mask %#010x => %#010x\n", bank->name, + readl(regs + bank->pctl_offset + + bank->eint_mask_offset), save->eint_mask); + + writel(save->eint_con, + regs + bank->pctl_offset + bank->eint_con_offset); + writel(save->eint_mask, + regs + bank->pctl_offset + bank->eint_mask_offset); + } } static void exynos_retention_enable(struct samsung_pinctrl_drv_data *drvdata) diff --git 
a/drivers/pinctrl/samsung/pinctrl-exynos.h b/drivers/pinctrl/samsung/pinctrl-exynos.h index b483270ddc53..362dc533186f 100644 --- a/drivers/pinctrl/samsung/pinctrl-exynos.h +++ b/drivers/pinctrl/samsung/pinctrl-exynos.h @@ -52,6 +52,26 @@ #define EXYNOS_EINT_MAX_PER_BANK 8 #define EXYNOS_EINT_NR_WKUP_EINT +/* + * EINT filter configuration register (on alive banks) has + * the following layout. + * + * BitfieldName[PinNum][Bit:Bit] + * FLT_EN[3][31] FLT_SEL[3][30] FLT_WIDTH[3][29:24] + * FLT_EN[2][23] FLT_SEL[2][22] FLT_WIDTH[2][21:16] + * FLT_EN[1][15] FLT_SEL[1][14] FLT_WIDTH[1][13:8] + * FLT_EN[0][7] FLT_SEL[0][6] FLT_WIDTH[0][5:0] + * + * FLT_EN 0x0 = Disable, 0x1=Enable + * FLT_SEL 0x0 = Analog delay filter, 0x1 Digital filter (clock count) + * FLT_WIDTH Filtering width. Valid when FLT_SEL is 0x1 + */ + +#define EXYNOS_FLTCON_EN BIT(7) +#define EXYNOS_FLTCON_DIGITAL BIT(6) +#define EXYNOS_FLTCON_ANALOG (0 << 6) +#define EXYNOS_FLTCON_LEN 8 + #define EXYNOS_PIN_BANK_EINTN(pins, reg, id) \ { \ .type = &bank_type_off, \ @@ -240,8 +260,12 @@ struct exynos_muxed_weint_data { int exynos_eint_gpio_init(struct samsung_pinctrl_drv_data *d); int exynos_eint_wkup_init(struct samsung_pinctrl_drv_data *d); -void exynos_pinctrl_suspend(struct samsung_pinctrl_drv_data *drvdata); -void exynos_pinctrl_resume(struct samsung_pinctrl_drv_data *drvdata); +void exynosautov920_pinctrl_resume(struct samsung_pin_bank *bank); +void exynosautov920_pinctrl_suspend(struct samsung_pin_bank *bank); +void exynos_pinctrl_suspend(struct samsung_pin_bank *bank); +void exynos_pinctrl_resume(struct samsung_pin_bank *bank); +void gs101_pinctrl_suspend(struct samsung_pin_bank *bank); +void gs101_pinctrl_resume(struct samsung_pin_bank *bank); struct samsung_retention_ctrl * exynos_retention_init(struct samsung_pinctrl_drv_data *drvdata, const struct samsung_retention_data *data); diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c index 2896eb2de2c0..fe1ac82b9d79 100644 --- a/drivers/pinctrl/samsung/pinctrl-samsung.c +++ b/drivers/pinctrl/samsung/pinctrl-samsung.c @@ -570,15 +570,18 @@ static void samsung_gpio_set_value(struct gpio_chip *gc, } /* gpiolib gpio_set callback function */ -static void samsung_gpio_set(struct gpio_chip *gc, unsigned offset, int value) +static int samsung_gpio_set(struct gpio_chip *gc, unsigned int offset, + int value) { struct samsung_pin_bank *bank = gpiochip_get_data(gc); struct samsung_pinctrl_drv_data *drvdata = bank->drvdata; unsigned long flags; + int ret; - if (clk_enable(drvdata->pclk)) { + ret = clk_enable(drvdata->pclk); + if (ret) { dev_err(drvdata->dev, "failed to enable clock\n"); - return; + return ret; } raw_spin_lock_irqsave(&bank->slock, flags); @@ -586,6 +589,8 @@ static void samsung_gpio_set(struct gpio_chip *gc, unsigned offset, int value) raw_spin_unlock_irqrestore(&bank->slock, flags); clk_disable(drvdata->pclk); + + return 0; } /* gpiolib gpio_get callback function */ @@ -1062,7 +1067,7 @@ static int samsung_gpio_set_config(struct gpio_chip *gc, unsigned int offset, static const struct gpio_chip samsung_gpiolib_chip = { .request = gpiochip_generic_request, .free = gpiochip_generic_free, - .set = samsung_gpio_set, + .set_rv = samsung_gpio_set, .get = samsung_gpio_get, .direction_input = samsung_gpio_direction_input, .direction_output = samsung_gpio_direction_output, @@ -1333,6 +1338,7 @@ err_put_banks: static int __maybe_unused samsung_pinctrl_suspend(struct device *dev) { struct samsung_pinctrl_drv_data *drvdata = 
dev_get_drvdata(dev); + struct samsung_pin_bank *bank; int i; i = clk_enable(drvdata->pclk); @@ -1343,7 +1349,7 @@ static int __maybe_unused samsung_pinctrl_suspend(struct device *dev) } for (i = 0; i < drvdata->nr_banks; i++) { - struct samsung_pin_bank *bank = &drvdata->pin_banks[i]; + bank = &drvdata->pin_banks[i]; const void __iomem *reg = bank->pctl_base + bank->pctl_offset; const u8 *offs = bank->type->reg_offset; const u8 *widths = bank->type->fld_width; @@ -1371,10 +1377,14 @@ static int __maybe_unused samsung_pinctrl_suspend(struct device *dev) } } + for (i = 0; i < drvdata->nr_banks; i++) { + bank = &drvdata->pin_banks[i]; + if (drvdata->suspend) + drvdata->suspend(bank); + } + clk_disable(drvdata->pclk); - if (drvdata->suspend) - drvdata->suspend(drvdata); if (drvdata->retention_ctrl && drvdata->retention_ctrl->enable) drvdata->retention_ctrl->enable(drvdata); @@ -1392,6 +1402,7 @@ static int __maybe_unused samsung_pinctrl_suspend(struct device *dev) static int __maybe_unused samsung_pinctrl_resume(struct device *dev) { struct samsung_pinctrl_drv_data *drvdata = dev_get_drvdata(dev); + struct samsung_pin_bank *bank; int ret; int i; @@ -1406,11 +1417,14 @@ static int __maybe_unused samsung_pinctrl_resume(struct device *dev) return ret; } - if (drvdata->resume) - drvdata->resume(drvdata); + for (i = 0; i < drvdata->nr_banks; i++) { + bank = &drvdata->pin_banks[i]; + if (drvdata->resume) + drvdata->resume(bank); + } for (i = 0; i < drvdata->nr_banks; i++) { - struct samsung_pin_bank *bank = &drvdata->pin_banks[i]; + bank = &drvdata->pin_banks[i]; void __iomem *reg = bank->pctl_base + bank->pctl_offset; const u8 *offs = bank->type->reg_offset; const u8 *widths = bank->type->fld_width; diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.h b/drivers/pinctrl/samsung/pinctrl-samsung.h index 3cf758df7d69..fcc57c244d16 100644 --- a/drivers/pinctrl/samsung/pinctrl-samsung.h +++ b/drivers/pinctrl/samsung/pinctrl-samsung.h @@ -285,8 +285,8 @@ struct samsung_pin_ctrl { int (*eint_gpio_init)(struct samsung_pinctrl_drv_data *); int (*eint_wkup_init)(struct samsung_pinctrl_drv_data *); void (*pud_value_init)(struct samsung_pinctrl_drv_data *drvdata); - void (*suspend)(struct samsung_pinctrl_drv_data *); - void (*resume)(struct samsung_pinctrl_drv_data *); + void (*suspend)(struct samsung_pin_bank *bank); + void (*resume)(struct samsung_pin_bank *bank); }; /** @@ -335,8 +335,8 @@ struct samsung_pinctrl_drv_data { struct samsung_retention_ctrl *retention_ctrl; - void (*suspend)(struct samsung_pinctrl_drv_data *); - void (*resume)(struct samsung_pinctrl_drv_data *); + void (*suspend)(struct samsung_pin_bank *bank); + void (*resume)(struct samsung_pin_bank *bank); }; /** diff --git a/drivers/pinctrl/spacemit/pinctrl-k1.c b/drivers/pinctrl/spacemit/pinctrl-k1.c index 67e867b04a02..9996b1c4a07e 100644 --- a/drivers/pinctrl/spacemit/pinctrl-k1.c +++ b/drivers/pinctrl/spacemit/pinctrl-k1.c @@ -2,6 +2,7 @@ /* Copyright (c) 2024 Yixun Lan <dlan@gentoo.org> */ #include <linux/bits.h> +#include <linux/clk.h> #include <linux/cleanup.h> #include <linux/io.h> #include <linux/of.h> @@ -721,6 +722,7 @@ static int spacemit_pinctrl_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct spacemit_pinctrl *pctrl; + struct clk *func_clk, *bus_clk; const struct spacemit_pinctrl_data *pctrl_data; int ret; @@ -739,6 +741,14 @@ static int spacemit_pinctrl_probe(struct platform_device *pdev) if (IS_ERR(pctrl->regs)) return PTR_ERR(pctrl->regs); + func_clk = devm_clk_get_enabled(dev, "func"); + if 
(IS_ERR(func_clk)) + return dev_err_probe(dev, PTR_ERR(func_clk), "failed to get func clock\n"); + + bus_clk = devm_clk_get_enabled(dev, "bus"); + if (IS_ERR(bus_clk)) + return dev_err_probe(dev, PTR_ERR(bus_clk), "failed to get bus clock\n"); + pctrl->pdesc.name = dev_name(dev); pctrl->pdesc.pins = pctrl_data->pins; pctrl->pdesc.npins = pctrl_data->npins; diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.c b/drivers/pinctrl/stm32/pinctrl-stm32.c index cc0b4d1d7cff..ba49d48c3a1d 100644 --- a/drivers/pinctrl/stm32/pinctrl-stm32.c +++ b/drivers/pinctrl/stm32/pinctrl-stm32.c @@ -228,11 +228,14 @@ static int stm32_gpio_get(struct gpio_chip *chip, unsigned offset) return !!(readl_relaxed(bank->base + STM32_GPIO_IDR) & BIT(offset)); } -static void stm32_gpio_set(struct gpio_chip *chip, unsigned offset, int value) +static int stm32_gpio_set(struct gpio_chip *chip, unsigned int offset, + int value) { struct stm32_gpio_bank *bank = gpiochip_get_data(chip); __stm32_gpio_set(bank, offset, value); + + return 0; } static int stm32_gpio_direction_output(struct gpio_chip *chip, @@ -308,7 +311,7 @@ static const struct gpio_chip stm32_gpio_template = { .request = stm32_gpio_request, .free = pinctrl_gpio_free, .get = stm32_gpio_get, - .set = stm32_gpio_set, + .set_rv = stm32_gpio_set, .direction_input = pinctrl_gpio_direction_input, .direction_output = stm32_gpio_direction_output, .to_irq = stm32_gpio_to_irq, diff --git a/drivers/pinctrl/uniphier/Kconfig b/drivers/pinctrl/uniphier/Kconfig index b71c07d84662..5e3de0df756b 100644 --- a/drivers/pinctrl/uniphier/Kconfig +++ b/drivers/pinctrl/uniphier/Kconfig @@ -3,7 +3,7 @@ menuconfig PINCTRL_UNIPHIER bool "UniPhier SoC pinctrl drivers" depends on ARCH_UNIPHIER || COMPILE_TEST depends on OF && MFD_SYSCON - default y + default ARCH_UNIPHIER select PINMUX select GENERIC_PINCONF diff --git a/drivers/platform/cznic/Kconfig b/drivers/platform/cznic/Kconfig index 13e37b49d9d0..61cff5f7e02e 100644 --- a/drivers/platform/cznic/Kconfig +++ b/drivers/platform/cznic/Kconfig @@ -76,6 +76,23 @@ config TURRIS_OMNIA_MCU_TRNG Say Y here to add support for the true random number generator provided by CZ.NIC's Turris Omnia MCU. +config TURRIS_OMNIA_MCU_KEYCTL + bool "Turris Omnia MCU ECDSA message signing" + default y + depends on KEYS + depends on ASYMMETRIC_KEY_TYPE + depends on TURRIS_OMNIA_MCU_GPIO + select TURRIS_SIGNING_KEY + help + Say Y here to add support for ECDSA message signing with board private + key (if available on the MCU). This is exposed via the keyctl() + syscall. 
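To illustrate the "exposed via the keyctl() syscall" part of the help text above, a rough userspace sketch, assuming libkeyutils' keyctl_pkey_sign() wrapper and a key serial looked up by the caller (for example from /proc/keys, since the .turris-signing-keys keyring is kernel-internal). Per the key subtype added later in this diff, the input is a 32-byte SHA-256 digest and the result a 64-byte raw ECDSA signature; this program and its argument handling are illustrative only.

/* Hypothetical userspace sketch: sign a SHA-256 digest with the board key. */
#include <keyutils.h>
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
	key_serial_t key;
	unsigned char digest[32] = { 0 };	/* SHA-256 of the message to sign */
	unsigned char sig[64];
	long len;

	if (argc < 2)
		return 1;
	key = (key_serial_t)strtoul(argv[1], NULL, 0);	/* serial taken from /proc/keys */

	len = keyctl_pkey_sign(key, "enc=raw hash=sha256",
			       digest, sizeof(digest), sig, sizeof(sig));
	if (len < 0) {
		perror("keyctl_pkey_sign");
		return 1;
	}
	printf("got %ld signature bytes\n", len);
	return 0;
}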
+ endif # TURRIS_OMNIA_MCU +config TURRIS_SIGNING_KEY + tristate + depends on KEYS + depends on ASYMMETRIC_KEY_TYPE + endif # CZNIC_PLATFORMS diff --git a/drivers/platform/cznic/Makefile b/drivers/platform/cznic/Makefile index ce6d997f34d6..ccad7bec82e1 100644 --- a/drivers/platform/cznic/Makefile +++ b/drivers/platform/cznic/Makefile @@ -3,6 +3,9 @@ obj-$(CONFIG_TURRIS_OMNIA_MCU) += turris-omnia-mcu.o turris-omnia-mcu-y := turris-omnia-mcu-base.o turris-omnia-mcu-$(CONFIG_TURRIS_OMNIA_MCU_GPIO) += turris-omnia-mcu-gpio.o +turris-omnia-mcu-$(CONFIG_TURRIS_OMNIA_MCU_KEYCTL) += turris-omnia-mcu-keyctl.o turris-omnia-mcu-$(CONFIG_TURRIS_OMNIA_MCU_SYSOFF_WAKEUP) += turris-omnia-mcu-sys-off-wakeup.o turris-omnia-mcu-$(CONFIG_TURRIS_OMNIA_MCU_TRNG) += turris-omnia-mcu-trng.o turris-omnia-mcu-$(CONFIG_TURRIS_OMNIA_MCU_WATCHDOG) += turris-omnia-mcu-watchdog.o + +obj-$(CONFIG_TURRIS_SIGNING_KEY) += turris-signing-key.o diff --git a/drivers/platform/cznic/turris-omnia-mcu-base.c b/drivers/platform/cznic/turris-omnia-mcu-base.c index 770e680b96f9..e8fc0d7b3343 100644 --- a/drivers/platform/cznic/turris-omnia-mcu-base.c +++ b/drivers/platform/cznic/turris-omnia-mcu-base.c @@ -392,6 +392,10 @@ static int omnia_mcu_probe(struct i2c_client *client) if (err) return err; + err = omnia_mcu_register_keyctl(mcu); + if (err) + return err; + return omnia_mcu_register_trng(mcu); } diff --git a/drivers/platform/cznic/turris-omnia-mcu-gpio.c b/drivers/platform/cznic/turris-omnia-mcu-gpio.c index 5f35f7c5d5d7..c2df24ea8686 100644 --- a/drivers/platform/cznic/turris-omnia-mcu-gpio.c +++ b/drivers/platform/cznic/turris-omnia-mcu-gpio.c @@ -13,6 +13,7 @@ #include <linux/device.h> #include <linux/devm-helpers.h> #include <linux/errno.h> +#include <linux/gpio/consumer.h> #include <linux/gpio/driver.h> #include <linux/i2c.h> #include <linux/interrupt.h> @@ -195,7 +196,7 @@ static const struct omnia_gpio omnia_gpios[64] = { }; /* mapping from interrupts to indexes of GPIOs in the omnia_gpios array */ -const u8 omnia_int_to_gpio_idx[32] = { +static const u8 omnia_int_to_gpio_idx[32] = { [__bf_shf(OMNIA_INT_CARD_DET)] = 4, [__bf_shf(OMNIA_INT_MSATA_IND)] = 5, [__bf_shf(OMNIA_INT_USB30_OVC)] = 6, @@ -1093,3 +1094,21 @@ int omnia_mcu_register_gpiochip(struct omnia_mcu *mcu) return 0; } + +int omnia_mcu_request_irq(struct omnia_mcu *mcu, u32 spec, + irq_handler_t thread_fn, const char *devname) +{ + u8 irq_idx; + int irq; + + if (!spec) + return -EINVAL; + + irq_idx = omnia_int_to_gpio_idx[ffs(spec) - 1]; + irq = gpiod_to_irq(gpio_device_get_desc(mcu->gc.gpiodev, irq_idx)); + if (irq < 0) + return irq; + + return devm_request_threaded_irq(&mcu->client->dev, irq, NULL, + thread_fn, IRQF_ONESHOT, devname, mcu); +} diff --git a/drivers/platform/cznic/turris-omnia-mcu-keyctl.c b/drivers/platform/cznic/turris-omnia-mcu-keyctl.c new file mode 100644 index 000000000000..dc40f942f082 --- /dev/null +++ b/drivers/platform/cznic/turris-omnia-mcu-keyctl.c @@ -0,0 +1,162 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * CZ.NIC's Turris Omnia MCU ECDSA message signing via keyctl + * + * 2025 by Marek Behún <kabel@kernel.org> + */ + +#include <crypto/sha2.h> +#include <linux/cleanup.h> +#include <linux/completion.h> +#include <linux/device.h> +#include <linux/err.h> +#include <linux/i2c.h> +#include <linux/interrupt.h> +#include <linux/key.h> +#include <linux/mutex.h> +#include <linux/string.h> +#include <linux/types.h> + +#include <linux/turris-omnia-mcu-interface.h> +#include <linux/turris-signing-key.h> +#include "turris-omnia-mcu.h" + 
+static irqreturn_t omnia_msg_signed_irq_handler(int irq, void *dev_id) +{ + u8 reply[1 + OMNIA_MCU_CRYPTO_SIGNATURE_LEN]; + struct omnia_mcu *mcu = dev_id; + int err; + + err = omnia_cmd_read(mcu->client, OMNIA_CMD_CRYPTO_COLLECT_SIGNATURE, + reply, sizeof(reply)); + if (!err && reply[0] != OMNIA_MCU_CRYPTO_SIGNATURE_LEN) + err = -EIO; + + guard(mutex)(&mcu->sign_lock); + + if (mcu->sign_requested) { + mcu->sign_err = err; + if (!err) + memcpy(mcu->signature, &reply[1], + OMNIA_MCU_CRYPTO_SIGNATURE_LEN); + mcu->sign_requested = false; + complete(&mcu->msg_signed); + } + + return IRQ_HANDLED; +} + +static int omnia_mcu_sign(const struct key *key, const void *msg, + void *signature) +{ + struct omnia_mcu *mcu = dev_get_drvdata(turris_signing_key_get_dev(key)); + u8 cmd[1 + SHA256_DIGEST_SIZE], reply; + int err; + + scoped_guard(mutex, &mcu->sign_lock) { + if (mcu->sign_requested) + return -EBUSY; + + cmd[0] = OMNIA_CMD_CRYPTO_SIGN_MESSAGE; + memcpy(&cmd[1], msg, SHA256_DIGEST_SIZE); + + err = omnia_cmd_write_read(mcu->client, cmd, sizeof(cmd), + &reply, 1); + if (err) + return err; + + if (!reply) + return -EBUSY; + + mcu->sign_requested = true; + } + + if (wait_for_completion_interruptible(&mcu->msg_signed)) + return -EINTR; + + guard(mutex)(&mcu->sign_lock); + + if (mcu->sign_err) + return mcu->sign_err; + + memcpy(signature, mcu->signature, OMNIA_MCU_CRYPTO_SIGNATURE_LEN); + + /* forget the signature, for security */ + memzero_explicit(mcu->signature, sizeof(mcu->signature)); + + return OMNIA_MCU_CRYPTO_SIGNATURE_LEN; +} + +static const void *omnia_mcu_get_public_key(const struct key *key) +{ + struct omnia_mcu *mcu = dev_get_drvdata(turris_signing_key_get_dev(key)); + + return mcu->board_public_key; +} + +static const struct turris_signing_key_subtype omnia_signing_key_subtype = { + .key_size = 256, + .data_size = SHA256_DIGEST_SIZE, + .sig_size = OMNIA_MCU_CRYPTO_SIGNATURE_LEN, + .public_key_size = OMNIA_MCU_CRYPTO_PUBLIC_KEY_LEN, + .hash_algo = "sha256", + .get_public_key = omnia_mcu_get_public_key, + .sign = omnia_mcu_sign, +}; + +static int omnia_mcu_read_public_key(struct omnia_mcu *mcu) +{ + u8 reply[1 + OMNIA_MCU_CRYPTO_PUBLIC_KEY_LEN]; + int err; + + err = omnia_cmd_read(mcu->client, OMNIA_CMD_CRYPTO_GET_PUBLIC_KEY, + reply, sizeof(reply)); + if (err) + return err; + + if (reply[0] != OMNIA_MCU_CRYPTO_PUBLIC_KEY_LEN) + return -EIO; + + memcpy(mcu->board_public_key, &reply[1], + OMNIA_MCU_CRYPTO_PUBLIC_KEY_LEN); + + return 0; +} + +int omnia_mcu_register_keyctl(struct omnia_mcu *mcu) +{ + struct device *dev = &mcu->client->dev; + char desc[48]; + int err; + + if (!(mcu->features & OMNIA_FEAT_CRYPTO)) + return 0; + + err = omnia_mcu_read_public_key(mcu); + if (err) + return dev_err_probe(dev, err, + "Cannot read board public key\n"); + + err = devm_mutex_init(dev, &mcu->sign_lock); + if (err) + return err; + + init_completion(&mcu->msg_signed); + + err = omnia_mcu_request_irq(mcu, OMNIA_INT_MESSAGE_SIGNED, + omnia_msg_signed_irq_handler, + "turris-omnia-mcu-keyctl"); + if (err) + return dev_err_probe(dev, err, + "Cannot request MESSAGE_SIGNED IRQ\n"); + + sprintf(desc, "Turris Omnia SN %016llX MCU ECDSA key", + mcu->board_serial_number); + + err = devm_turris_signing_key_create(dev, &omnia_signing_key_subtype, + desc); + if (err) + return dev_err_probe(dev, err, "Cannot create signing key\n"); + + return 0; +} diff --git a/drivers/platform/cznic/turris-omnia-mcu-trng.c b/drivers/platform/cznic/turris-omnia-mcu-trng.c index 9a1d9292dc9a..e3826959e6de 100644 --- 
a/drivers/platform/cznic/turris-omnia-mcu-trng.c +++ b/drivers/platform/cznic/turris-omnia-mcu-trng.c @@ -5,12 +5,9 @@ * 2024 by Marek Behún <kabel@kernel.org> */ -#include <linux/bitfield.h> #include <linux/completion.h> #include <linux/container_of.h> #include <linux/errno.h> -#include <linux/gpio/consumer.h> -#include <linux/gpio/driver.h> #include <linux/hw_random.h> #include <linux/i2c.h> #include <linux/interrupt.h> @@ -62,17 +59,12 @@ static int omnia_trng_read(struct hwrng *rng, void *data, size_t max, bool wait) int omnia_mcu_register_trng(struct omnia_mcu *mcu) { struct device *dev = &mcu->client->dev; - u8 irq_idx, dummy; - int irq, err; + u8 dummy; + int err; if (!(mcu->features & OMNIA_FEAT_TRNG)) return 0; - irq_idx = omnia_int_to_gpio_idx[__bf_shf(OMNIA_INT_TRNG)]; - irq = gpiod_to_irq(gpio_device_get_desc(mcu->gc.gpiodev, irq_idx)); - if (irq < 0) - return dev_err_probe(dev, irq, "Cannot get TRNG IRQ\n"); - /* * If someone else cleared the TRNG interrupt but did not read the * entropy, a new interrupt won't be generated, and entropy collection @@ -86,9 +78,8 @@ int omnia_mcu_register_trng(struct omnia_mcu *mcu) init_completion(&mcu->trng_entropy_ready); - err = devm_request_threaded_irq(dev, irq, NULL, omnia_trng_irq_handler, - IRQF_ONESHOT, "turris-omnia-mcu-trng", - mcu); + err = omnia_mcu_request_irq(mcu, OMNIA_INT_TRNG, omnia_trng_irq_handler, + "turris-omnia-mcu-trng"); if (err) return dev_err_probe(dev, err, "Cannot request TRNG IRQ\n"); diff --git a/drivers/platform/cznic/turris-omnia-mcu.h b/drivers/platform/cznic/turris-omnia-mcu.h index 088541be3f4c..8473a3031917 100644 --- a/drivers/platform/cznic/turris-omnia-mcu.h +++ b/drivers/platform/cznic/turris-omnia-mcu.h @@ -12,11 +12,17 @@ #include <linux/gpio/driver.h> #include <linux/hw_random.h> #include <linux/if_ether.h> +#include <linux/interrupt.h> #include <linux/mutex.h> #include <linux/types.h> #include <linux/watchdog.h> #include <linux/workqueue.h> +enum { + OMNIA_MCU_CRYPTO_PUBLIC_KEY_LEN = 1 + 32, + OMNIA_MCU_CRYPTO_SIGNATURE_LEN = 64, +}; + struct i2c_client; struct rtc_device; @@ -55,6 +61,12 @@ struct rtc_device; * @wdt: watchdog driver structure * @trng: RNG driver structure * @trng_entropy_ready: RNG entropy ready completion + * @msg_signed: message signed completion + * @sign_lock: mutex to protect message signing state + * @sign_requested: flag indicating that message signing was requested but not completed + * @sign_err: message signing error number, filled in interrupt handler + * @signature: message signing signature, filled in interrupt handler + * @board_public_key: board public key, if stored in MCU */ struct omnia_mcu { struct i2c_client *client; @@ -88,12 +100,22 @@ struct omnia_mcu { struct hwrng trng; struct completion trng_entropy_ready; #endif + +#ifdef CONFIG_TURRIS_OMNIA_MCU_KEYCTL + struct completion msg_signed; + struct mutex sign_lock; + bool sign_requested; + int sign_err; + u8 signature[OMNIA_MCU_CRYPTO_SIGNATURE_LEN]; + u8 board_public_key[OMNIA_MCU_CRYPTO_PUBLIC_KEY_LEN]; +#endif }; #ifdef CONFIG_TURRIS_OMNIA_MCU_GPIO -extern const u8 omnia_int_to_gpio_idx[32]; extern const struct attribute_group omnia_mcu_gpio_group; int omnia_mcu_register_gpiochip(struct omnia_mcu *mcu); +int omnia_mcu_request_irq(struct omnia_mcu *mcu, u32 spec, + irq_handler_t thread_fn, const char *devname); #else static inline int omnia_mcu_register_gpiochip(struct omnia_mcu *mcu) { @@ -101,6 +123,15 @@ static inline int omnia_mcu_register_gpiochip(struct omnia_mcu *mcu) } #endif +#ifdef 
CONFIG_TURRIS_OMNIA_MCU_KEYCTL +int omnia_mcu_register_keyctl(struct omnia_mcu *mcu); +#else +static inline int omnia_mcu_register_keyctl(struct omnia_mcu *mcu) +{ + return 0; +} +#endif + #ifdef CONFIG_TURRIS_OMNIA_MCU_SYSOFF_WAKEUP extern const struct attribute_group omnia_mcu_poweroff_group; int omnia_mcu_register_sys_off_and_wakeup(struct omnia_mcu *mcu); diff --git a/drivers/platform/cznic/turris-signing-key.c b/drivers/platform/cznic/turris-signing-key.c new file mode 100644 index 000000000000..3827178565e2 --- /dev/null +++ b/drivers/platform/cznic/turris-signing-key.c @@ -0,0 +1,193 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Some of CZ.NIC's Turris devices support signing messages with a per-device unique asymmetric + * cryptographic key that was burned into the device at manufacture. + * + * This helper module exposes this message signing ability via the keyctl() syscall. Upon load, it + * creates the `.turris-signing-keys` keyring. A device-specific driver then has to create a signing + * key by calling devm_turris_signing_key_create(). + * + * 2025 by Marek Behún <kabel@kernel.org> + */ + +#include <linux/device.h> +#include <linux/err.h> +#include <linux/key-type.h> +#include <linux/key.h> +#include <linux/keyctl.h> +#include <linux/module.h> +#include <linux/seq_file.h> +#include <linux/string.h> +#include <linux/types.h> + +#include <linux/turris-signing-key.h> + +static int turris_signing_key_instantiate(struct key *key, + struct key_preparsed_payload *payload) +{ + return 0; +} + +static void turris_signing_key_describe(const struct key *key, struct seq_file *m) +{ + const struct turris_signing_key_subtype *subtype = dereference_key_rcu(key); + + if (!subtype) + return; + + seq_printf(m, "%s: %*phN", key->description, subtype->public_key_size, + subtype->get_public_key(key)); +} + +static long turris_signing_key_read(const struct key *key, char *buffer, size_t buflen) +{ + const struct turris_signing_key_subtype *subtype = dereference_key_rcu(key); + + if (!subtype) + return -EIO; + + if (buffer) { + if (buflen > subtype->public_key_size) + buflen = subtype->public_key_size; + + memcpy(buffer, subtype->get_public_key(key), subtype->public_key_size); + } + + return subtype->public_key_size; +} + +static bool turris_signing_key_asym_valid_params(const struct turris_signing_key_subtype *subtype, + const struct kernel_pkey_params *params) +{ + if (params->encoding && strcmp(params->encoding, "raw")) + return false; + + if (params->hash_algo && strcmp(params->hash_algo, subtype->hash_algo)) + return false; + + return true; +} + +static int turris_signing_key_asym_query(const struct kernel_pkey_params *params, + struct kernel_pkey_query *info) +{ + const struct turris_signing_key_subtype *subtype = dereference_key_rcu(params->key); + + if (!subtype) + return -EIO; + + if (!turris_signing_key_asym_valid_params(subtype, params)) + return -EINVAL; + + info->supported_ops = KEYCTL_SUPPORTS_SIGN; + info->key_size = subtype->key_size; + info->max_data_size = subtype->data_size; + info->max_sig_size = subtype->sig_size; + info->max_enc_size = 0; + info->max_dec_size = 0; + + return 0; +} + +static int turris_signing_key_asym_eds_op(struct kernel_pkey_params *params, + const void *in, void *out) +{ + const struct turris_signing_key_subtype *subtype = dereference_key_rcu(params->key); + int err; + + if (!subtype) + return -EIO; + + if (!turris_signing_key_asym_valid_params(subtype, params)) + return -EINVAL; + + if (params->op != kernel_pkey_sign) + return -EOPNOTSUPP; + + if 
(params->in_len != subtype->data_size || params->out_len != subtype->sig_size) + return -EINVAL; + + err = subtype->sign(params->key, in, out); + if (err) + return err; + + return subtype->sig_size; +} + +static struct key_type turris_signing_key_type = { + .name = "turris-signing-key", + .instantiate = turris_signing_key_instantiate, + .describe = turris_signing_key_describe, + .read = turris_signing_key_read, + .asym_query = turris_signing_key_asym_query, + .asym_eds_op = turris_signing_key_asym_eds_op, +}; + +static struct key *turris_signing_keyring; + +static void turris_signing_key_release(void *key) +{ + key_unlink(turris_signing_keyring, key); + key_put(key); +} + +int +devm_turris_signing_key_create(struct device *dev, const struct turris_signing_key_subtype *subtype, + const char *desc) +{ + struct key *key; + key_ref_t kref; + + kref = key_create(make_key_ref(turris_signing_keyring, true), + turris_signing_key_type.name, desc, NULL, 0, + (KEY_POS_ALL & ~KEY_POS_SETATTR) | KEY_USR_VIEW | KEY_USR_READ | + KEY_USR_SEARCH, + KEY_ALLOC_BUILT_IN | KEY_ALLOC_SET_KEEP | KEY_ALLOC_NOT_IN_QUOTA); + if (IS_ERR(kref)) + return PTR_ERR(kref); + + key = key_ref_to_ptr(kref); + key->payload.data[1] = dev; + rcu_assign_keypointer(key, subtype); + + return devm_add_action_or_reset(dev, turris_signing_key_release, key); +} +EXPORT_SYMBOL_GPL(devm_turris_signing_key_create); + +static int turris_signing_key_init(void) +{ + int err; + + err = register_key_type(&turris_signing_key_type); + if (err) + return err; + + turris_signing_keyring = keyring_alloc(".turris-signing-keys", + GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, current_cred(), + (KEY_POS_ALL & ~KEY_POS_SETATTR) | KEY_USR_VIEW | + KEY_USR_READ | KEY_USR_SEARCH, + KEY_ALLOC_BUILT_IN | KEY_ALLOC_SET_KEEP | + KEY_ALLOC_NOT_IN_QUOTA, + NULL, NULL); + if (IS_ERR(turris_signing_keyring)) { + pr_err("Cannot allocate Turris keyring\n"); + + unregister_key_type(&turris_signing_key_type); + + return PTR_ERR(turris_signing_keyring); + } + + return 0; +} +module_init(turris_signing_key_init); + +static void turris_signing_key_exit(void) +{ + key_put(turris_signing_keyring); + unregister_key_type(&turris_signing_key_type); +} +module_exit(turris_signing_key_exit); + +MODULE_AUTHOR("Marek Behun <kabel@kernel.org>"); +MODULE_DESCRIPTION("CZ.NIC's Turris signing key helper"); +MODULE_LICENSE("GPL"); diff --git a/drivers/power/supply/qcom_pmi8998_charger.c b/drivers/power/supply/qcom_pmi8998_charger.c index 74a8d8ed8d9f..c2f8f2e24398 100644 --- a/drivers/power/supply/qcom_pmi8998_charger.c +++ b/drivers/power/supply/qcom_pmi8998_charger.c @@ -2,7 +2,7 @@ /* * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved. * Copyright (c) 2023, Linaro Ltd. - * Author: Caleb Connolly <caleb.connolly@linaro.org> + * Author: Casey Connolly <casey.connolly@linaro.org> * * This driver is for the switch-mode battery charger and boost * hardware found in pmi8998 and related PMICs. 
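Stepping back to the turris-signing-key helper completed above: the intended usage is that a board driver fills a struct turris_signing_key_subtype and calls devm_turris_signing_key_create(), exactly as the Omnia keyctl driver earlier in this diff does. A distilled, hypothetical sketch of that pattern follows; the demo_* names and callback bodies are placeholders, and the field set mirrors the Omnia usage shown above.

/* Hypothetical sketch of a driver registering its own Turris signing key. */
#include <linux/device.h>
#include <linux/key.h>
#include <linux/turris-signing-key.h>

static const void *demo_get_public_key(const struct key *key)
{
	struct device *dev = turris_signing_key_get_dev(key);

	/* placeholder: return the 33-byte compressed public key cached as drvdata */
	return dev_get_drvdata(dev);
}

static int demo_sign(const struct key *key, const void *msg, void *signature)
{
	/* placeholder: hand the 32-byte digest to the hardware and copy back
	 * 64 signature bytes; return 0 on success, the core then reports
	 * subtype->sig_size to the caller */
	return 0;
}

static const struct turris_signing_key_subtype demo_subtype = {
	.key_size		= 256,
	.data_size		= 32,	/* SHA-256 digest */
	.sig_size		= 64,	/* raw r||s ECDSA signature */
	.public_key_size	= 33,
	.hash_algo		= "sha256",
	.get_public_key		= demo_get_public_key,
	.sign			= demo_sign,
};

/* in probe(): err = devm_turris_signing_key_create(dev, &demo_subtype, "Demo board key"); */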
@@ -1045,6 +1045,6 @@ static struct platform_driver qcom_spmi_smb2 = { module_platform_driver(qcom_spmi_smb2); -MODULE_AUTHOR("Caleb Connolly <caleb.connolly@linaro.org>"); +MODULE_AUTHOR("Casey Connolly <casey.connolly@linaro.org>"); MODULE_DESCRIPTION("Qualcomm SMB2 Charger Driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c index cbf531d0ba68..995cfeca972b 100644 --- a/drivers/rapidio/devices/rio_mport_cdev.c +++ b/drivers/rapidio/devices/rio_mport_cdev.c @@ -98,18 +98,6 @@ MODULE_PARM_DESC(dbg_level, "Debugging output level (default 0 = none)"); #endif /* - * An internal DMA coherent buffer - */ -struct mport_dma_buf { - void *ib_base; - dma_addr_t ib_phys; - u32 ib_size; - u64 ib_rio_base; - bool ib_map; - struct file *filp; -}; - -/* * Internal memory mapping structure */ enum rio_mport_map_dir { @@ -131,14 +119,6 @@ struct rio_mport_mapping { struct file *filp; }; -struct rio_mport_dma_map { - int valid; - u64 length; - void *vaddr; - dma_addr_t paddr; -}; - -#define MPORT_MAX_DMA_BUFS 16 #define MPORT_EVENT_DEPTH 10 /* diff --git a/drivers/rapidio/rio.c b/drivers/rapidio/rio.c index 9544b8ee0c96..46daf32ea13b 100644 --- a/drivers/rapidio/rio.c +++ b/drivers/rapidio/rio.c @@ -1775,19 +1775,6 @@ struct dma_chan *rio_request_mport_dma(struct rio_mport *mport) EXPORT_SYMBOL_GPL(rio_request_mport_dma); /** - * rio_request_dma - request RapidIO capable DMA channel that supports - * specified target RapidIO device. - * @rdev: RIO device associated with DMA transfer - * - * Returns pointer to allocated DMA channel or NULL if failed. - */ -struct dma_chan *rio_request_dma(struct rio_dev *rdev) -{ - return rio_request_mport_dma(rdev->net->hport); -} -EXPORT_SYMBOL_GPL(rio_request_dma); - -/** * rio_release_dma - release specified DMA channel * @dchan: DMA channel to release */ @@ -1834,57 +1821,9 @@ struct dma_async_tx_descriptor *rio_dma_prep_xfer(struct dma_chan *dchan, } EXPORT_SYMBOL_GPL(rio_dma_prep_xfer); -/** - * rio_dma_prep_slave_sg - RapidIO specific wrapper - * for device_prep_slave_sg callback defined by DMAENGINE. - * @rdev: RIO device control structure - * @dchan: DMA channel to configure - * @data: RIO specific data descriptor - * @direction: DMA data transfer direction (TO or FROM the device) - * @flags: dmaengine defined flags - * - * Initializes RapidIO capable DMA channel for the specified data transfer. - * Uses DMA channel private extension to pass information related to remote - * target RIO device. - * - * Returns: pointer to DMA transaction descriptor if successful, - * error-valued pointer or NULL if failed. - */ -struct dma_async_tx_descriptor *rio_dma_prep_slave_sg(struct rio_dev *rdev, - struct dma_chan *dchan, struct rio_dma_data *data, - enum dma_transfer_direction direction, unsigned long flags) -{ - return rio_dma_prep_xfer(dchan, rdev->destid, data, direction, flags); -} -EXPORT_SYMBOL_GPL(rio_dma_prep_slave_sg); - #endif /* CONFIG_RAPIDIO_DMA_ENGINE */ /** - * rio_find_mport - find RIO mport by its ID - * @mport_id: number (ID) of mport device - * - * Given a RIO mport number, the desired mport is located - * in the global list of mports. If the mport is found, a pointer to its - * data structure is returned. If no mport is found, %NULL is returned. 
- */ -struct rio_mport *rio_find_mport(int mport_id) -{ - struct rio_mport *port; - - mutex_lock(&rio_mport_list_lock); - list_for_each_entry(port, &rio_mports, node) { - if (port->id == mport_id) - goto found; - } - port = NULL; -found: - mutex_unlock(&rio_mport_list_lock); - - return port; -} - -/** * rio_register_scan - enumeration/discovery method registration interface * @mport_id: mport device ID for which fabric scan routine has to be set * (RIO_MPORT_ANY = set for all available mports) @@ -1962,48 +1901,6 @@ err_out: EXPORT_SYMBOL_GPL(rio_register_scan); /** - * rio_unregister_scan - removes enumeration/discovery method from mport - * @mport_id: mport device ID for which fabric scan routine has to be - * unregistered (RIO_MPORT_ANY = apply to all mports that use - * the specified scan_ops) - * @scan_ops: enumeration/discovery operations structure - * - * Removes enumeration or discovery method assigned to the specified mport - * device. If RIO_MPORT_ANY is specified, removes the specified operations from - * all mports that have them attached. - */ -int rio_unregister_scan(int mport_id, struct rio_scan *scan_ops) -{ - struct rio_mport *port; - struct rio_scan_node *scan; - - pr_debug("RIO: %s for mport_id=%d\n", __func__, mport_id); - - if (mport_id != RIO_MPORT_ANY && mport_id >= RIO_MAX_MPORTS) - return -EINVAL; - - mutex_lock(&rio_mport_list_lock); - - list_for_each_entry(port, &rio_mports, node) - if (port->id == mport_id || - (mport_id == RIO_MPORT_ANY && port->nscan == scan_ops)) - port->nscan = NULL; - - list_for_each_entry(scan, &rio_scans, node) { - if (scan->mport_id == mport_id) { - list_del(&scan->node); - kfree(scan); - break; - } - } - - mutex_unlock(&rio_mport_list_lock); - - return 0; -} -EXPORT_SYMBOL_GPL(rio_unregister_scan); - -/** * rio_mport_scan - execute enumeration/discovery on the specified mport * @mport_id: number (ID) of mport device */ diff --git a/drivers/rapidio/rio.h b/drivers/rapidio/rio.h index f482de0d0370..a0e2a09ddb8e 100644 --- a/drivers/rapidio/rio.h +++ b/drivers/rapidio/rio.h @@ -41,9 +41,7 @@ extern void rio_del_device(struct rio_dev *rdev, enum rio_device_state state); extern int rio_enable_rx_tx_port(struct rio_mport *port, int local, u16 destid, u8 hopcount, u8 port_num); extern int rio_register_scan(int mport_id, struct rio_scan *scan_ops); -extern int rio_unregister_scan(int mport_id, struct rio_scan *scan_ops); extern void rio_attach_device(struct rio_dev *rdev); -extern struct rio_mport *rio_find_mport(int mport_id); extern int rio_mport_scan(int mport_id); /* Structures internal to the RIO core code */ diff --git a/drivers/rapidio/rio_cm.c b/drivers/rapidio/rio_cm.c index 9135227301c8..97287e838ce1 100644 --- a/drivers/rapidio/rio_cm.c +++ b/drivers/rapidio/rio_cm.c @@ -198,12 +198,6 @@ struct cm_peer { struct rio_dev *rdev; }; -struct rio_cm_work { - struct work_struct work; - struct cm_dev *cm; - void *data; -}; - struct conn_req { struct list_head node; u32 destid; /* requester destID */ diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig index 99f6f9784e68..d85be5899da6 100644 --- a/drivers/reset/Kconfig +++ b/drivers/reset/Kconfig @@ -225,6 +225,13 @@ config RESET_RZG2L_USBPHY_CTRL Support for USBPHY Control found on RZ/G2L family. It mainly controls reset and power down of the USB/PHY. +config RESET_RZV2H_USB2PHY + tristate "Renesas RZ/V2H(P) (and similar SoCs) USB2PHY Reset driver" + depends on ARCH_RENESAS || COMPILE_TEST + help + Support for USB2PHY Port reset Control found on the RZ/V2H(P) SoC + (and similar SoCs). 
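For orientation, the RZ/V2H(P) USB2PHY reset controller added below exposes a single reset line with no specifier cells, so a consumer simply references the node and drives it through the generic reset API. A minimal, hypothetical consumer sketch (demo_phy_probe and its glue driver are illustrative, not part of this series):

/* Hypothetical consumer sketch for the single USB2PHY port reset line. */
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/reset.h>

static int demo_phy_probe(struct platform_device *pdev)
{
	struct reset_control *rst;
	int ret;

	/* of_xlate takes no cells, so no index or name is needed here */
	rst = devm_reset_control_get_exclusive(&pdev->dev, NULL);
	if (IS_ERR(rst))
		return dev_err_probe(&pdev->dev, PTR_ERR(rst),
				     "failed to get USB2PHY reset\n");

	/* runs the deassert sequence implemented by reset-rzv2h-usb2phy.c */
	ret = reset_control_deassert(rst);
	if (ret)
		return ret;

	return 0;
}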
+ config RESET_SCMI tristate "Reset driver controlled via ARM SCMI interface" depends on ARM_SCMI_PROTOCOL || COMPILE_TEST @@ -279,6 +286,16 @@ config RESET_SUNXI help This enables the reset driver for Allwinner SoCs. +config RESET_TH1520 + tristate "T-HEAD 1520 reset controller" + depends on ARCH_THEAD || COMPILE_TEST + select REGMAP_MMIO + help + This driver provides support for the T-HEAD TH1520 SoC reset controller, + which manages hardware reset lines for SoC components such as the GPU. + Enable this option if you need to control hardware resets on TH1520-based + systems. + config RESET_TI_SCI tristate "TI System Control Interface (TI-SCI) reset driver" depends on TI_SCI_PROTOCOL || (COMPILE_TEST && TI_SCI_PROTOCOL=n) diff --git a/drivers/reset/Makefile b/drivers/reset/Makefile index 31f9904d13f9..91e6348e3351 100644 --- a/drivers/reset/Makefile +++ b/drivers/reset/Makefile @@ -31,11 +31,13 @@ obj-$(CONFIG_RESET_QCOM_AOSS) += reset-qcom-aoss.o obj-$(CONFIG_RESET_QCOM_PDC) += reset-qcom-pdc.o obj-$(CONFIG_RESET_RASPBERRYPI) += reset-raspberrypi.o obj-$(CONFIG_RESET_RZG2L_USBPHY_CTRL) += reset-rzg2l-usbphy-ctrl.o +obj-$(CONFIG_RESET_RZV2H_USB2PHY) += reset-rzv2h-usb2phy.o obj-$(CONFIG_RESET_SCMI) += reset-scmi.o obj-$(CONFIG_RESET_SIMPLE) += reset-simple.o obj-$(CONFIG_RESET_SOCFPGA) += reset-socfpga.o obj-$(CONFIG_RESET_SUNPLUS) += reset-sunplus.o obj-$(CONFIG_RESET_SUNXI) += reset-sunxi.o +obj-$(CONFIG_RESET_TH1520) += reset-th1520.o obj-$(CONFIG_RESET_TI_SCI) += reset-ti-sci.o obj-$(CONFIG_RESET_TI_SYSCON) += reset-ti-syscon.o obj-$(CONFIG_RESET_TI_TPS380X) += reset-tps380x.o diff --git a/drivers/reset/reset-rzv2h-usb2phy.c b/drivers/reset/reset-rzv2h-usb2phy.c new file mode 100644 index 000000000000..ae643575b067 --- /dev/null +++ b/drivers/reset/reset-rzv2h-usb2phy.c @@ -0,0 +1,236 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Renesas RZ/V2H(P) USB2PHY Port reset control driver + * + * Copyright (C) 2025 Renesas Electronics Corporation + */ + +#include <linux/cleanup.h> +#include <linux/delay.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> +#include <linux/reset.h> +#include <linux/reset-controller.h> + +struct rzv2h_usb2phy_regval { + u16 reg; + u16 val; +}; + +struct rzv2h_usb2phy_reset_of_data { + const struct rzv2h_usb2phy_regval *init_vals; + unsigned int init_val_count; + + u16 reset_reg; + u16 reset_assert_val; + u16 reset_deassert_val; + u16 reset_status_bits; + u16 reset_release_val; + + u16 reset2_reg; + u16 reset2_acquire_val; + u16 reset2_release_val; +}; + +struct rzv2h_usb2phy_reset_priv { + const struct rzv2h_usb2phy_reset_of_data *data; + void __iomem *base; + struct device *dev; + struct reset_controller_dev rcdev; + spinlock_t lock; /* protects register accesses */ +}; + +static inline struct rzv2h_usb2phy_reset_priv +*rzv2h_usbphy_rcdev_to_priv(struct reset_controller_dev *rcdev) +{ + return container_of(rcdev, struct rzv2h_usb2phy_reset_priv, rcdev); +} + +/* This function must be called only after pm_runtime_resume_and_get() has been called */ +static void rzv2h_usbphy_assert_helper(struct rzv2h_usb2phy_reset_priv *priv) +{ + const struct rzv2h_usb2phy_reset_of_data *data = priv->data; + + scoped_guard(spinlock, &priv->lock) { + writel(data->reset2_acquire_val, priv->base + data->reset2_reg); + writel(data->reset_assert_val, priv->base + data->reset_reg); + } + + usleep_range(11, 20); +} + +static int rzv2h_usbphy_reset_assert(struct reset_controller_dev *rcdev, + 
unsigned long id) +{ + struct rzv2h_usb2phy_reset_priv *priv = rzv2h_usbphy_rcdev_to_priv(rcdev); + struct device *dev = priv->dev; + int ret; + + ret = pm_runtime_resume_and_get(dev); + if (ret) { + dev_err(dev, "pm_runtime_resume_and_get failed\n"); + return ret; + } + + rzv2h_usbphy_assert_helper(priv); + + pm_runtime_put(dev); + + return 0; +} + +static int rzv2h_usbphy_reset_deassert(struct reset_controller_dev *rcdev, + unsigned long id) +{ + struct rzv2h_usb2phy_reset_priv *priv = rzv2h_usbphy_rcdev_to_priv(rcdev); + const struct rzv2h_usb2phy_reset_of_data *data = priv->data; + struct device *dev = priv->dev; + int ret; + + ret = pm_runtime_resume_and_get(dev); + if (ret) { + dev_err(dev, "pm_runtime_resume_and_get failed\n"); + return ret; + } + + scoped_guard(spinlock, &priv->lock) { + writel(data->reset_deassert_val, priv->base + data->reset_reg); + writel(data->reset2_release_val, priv->base + data->reset2_reg); + writel(data->reset_release_val, priv->base + data->reset_reg); + } + + pm_runtime_put(dev); + + return 0; +} + +static int rzv2h_usbphy_reset_status(struct reset_controller_dev *rcdev, + unsigned long id) +{ + struct rzv2h_usb2phy_reset_priv *priv = rzv2h_usbphy_rcdev_to_priv(rcdev); + struct device *dev = priv->dev; + int ret; + u32 reg; + + ret = pm_runtime_resume_and_get(dev); + if (ret) { + dev_err(dev, "pm_runtime_resume_and_get failed\n"); + return ret; + } + + reg = readl(priv->base + priv->data->reset_reg); + + pm_runtime_put(dev); + + return (reg & priv->data->reset_status_bits) == priv->data->reset_status_bits; +} + +static const struct reset_control_ops rzv2h_usbphy_reset_ops = { + .assert = rzv2h_usbphy_reset_assert, + .deassert = rzv2h_usbphy_reset_deassert, + .status = rzv2h_usbphy_reset_status, +}; + +static int rzv2h_usb2phy_reset_of_xlate(struct reset_controller_dev *rcdev, + const struct of_phandle_args *reset_spec) +{ + /* No special handling needed, we have only one reset line per device */ + return 0; +} + +static int rzv2h_usb2phy_reset_probe(struct platform_device *pdev) +{ + const struct rzv2h_usb2phy_reset_of_data *data; + struct rzv2h_usb2phy_reset_priv *priv; + struct device *dev = &pdev->dev; + struct reset_control *rstc; + int error; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + data = of_device_get_match_data(dev); + priv->data = data; + priv->dev = dev; + priv->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(priv->base)) + return PTR_ERR(priv->base); + + rstc = devm_reset_control_get_shared_deasserted(dev, NULL); + if (IS_ERR(rstc)) + return dev_err_probe(dev, PTR_ERR(rstc), + "failed to get deasserted reset\n"); + + spin_lock_init(&priv->lock); + + error = devm_pm_runtime_enable(dev); + if (error) + return dev_err_probe(dev, error, "Failed to enable pm_runtime\n"); + + error = pm_runtime_resume_and_get(dev); + if (error) + return dev_err_probe(dev, error, "pm_runtime_resume_and_get failed\n"); + + for (unsigned int i = 0; i < data->init_val_count; i++) + writel(data->init_vals[i].val, priv->base + data->init_vals[i].reg); + + /* keep usb2phy in asserted state */ + rzv2h_usbphy_assert_helper(priv); + + pm_runtime_put(dev); + + priv->rcdev.ops = &rzv2h_usbphy_reset_ops; + priv->rcdev.of_reset_n_cells = 0; + priv->rcdev.nr_resets = 1; + priv->rcdev.of_xlate = rzv2h_usb2phy_reset_of_xlate; + priv->rcdev.of_node = dev->of_node; + priv->rcdev.dev = dev; + + return devm_reset_controller_register(dev, &priv->rcdev); +} + +/* + * initialization values required to prepare the PHY to 
receive + * assert and deassert requests. + */ +static const struct rzv2h_usb2phy_regval rzv2h_init_vals[] = { + { .reg = 0xc10, .val = 0x67c }, + { .reg = 0xc14, .val = 0x1f }, + { .reg = 0x600, .val = 0x909 }, +}; + +static const struct rzv2h_usb2phy_reset_of_data rzv2h_reset_of_data = { + .init_vals = rzv2h_init_vals, + .init_val_count = ARRAY_SIZE(rzv2h_init_vals), + .reset_reg = 0, + .reset_assert_val = 0x206, + .reset_status_bits = BIT(2), + .reset_deassert_val = 0x200, + .reset_release_val = 0x0, + .reset2_reg = 0xb04, + .reset2_acquire_val = 0x303, + .reset2_release_val = 0x3, +}; + +static const struct of_device_id rzv2h_usb2phy_reset_of_match[] = { + { .compatible = "renesas,r9a09g057-usb2phy-reset", .data = &rzv2h_reset_of_data }, + { /* Sentinel */ } +}; +MODULE_DEVICE_TABLE(of, rzv2h_usb2phy_reset_of_match); + +static struct platform_driver rzv2h_usb2phy_reset_driver = { + .driver = { + .name = "rzv2h_usb2phy_reset", + .of_match_table = rzv2h_usb2phy_reset_of_match, + }, + .probe = rzv2h_usb2phy_reset_probe, +}; +module_platform_driver(rzv2h_usb2phy_reset_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>"); +MODULE_DESCRIPTION("Renesas RZ/V2H(P) USB2PHY Control"); diff --git a/drivers/reset/reset-th1520.c b/drivers/reset/reset-th1520.c new file mode 100644 index 000000000000..7874f0693e1b --- /dev/null +++ b/drivers/reset/reset-th1520.c @@ -0,0 +1,135 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2024 Samsung Electronics Co., Ltd. + * Author: Michal Wilczynski <m.wilczynski@samsung.com> + */ + +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/reset-controller.h> +#include <linux/regmap.h> + +#include <dt-bindings/reset/thead,th1520-reset.h> + + /* register offset in VOSYS_REGMAP */ +#define TH1520_GPU_RST_CFG 0x0 +#define TH1520_GPU_RST_CFG_MASK GENMASK(1, 0) + +/* register values */ +#define TH1520_GPU_SW_GPU_RST BIT(0) +#define TH1520_GPU_SW_CLKGEN_RST BIT(1) + +struct th1520_reset_priv { + struct reset_controller_dev rcdev; + struct regmap *map; +}; + +struct th1520_reset_map { + u32 bit; + u32 reg; +}; + +static const struct th1520_reset_map th1520_resets[] = { + [TH1520_RESET_ID_GPU] = { + .bit = TH1520_GPU_SW_GPU_RST, + .reg = TH1520_GPU_RST_CFG, + }, + [TH1520_RESET_ID_GPU_CLKGEN] = { + .bit = TH1520_GPU_SW_CLKGEN_RST, + .reg = TH1520_GPU_RST_CFG, + } +}; + +static inline struct th1520_reset_priv * +to_th1520_reset(struct reset_controller_dev *rcdev) +{ + return container_of(rcdev, struct th1520_reset_priv, rcdev); +} + +static int th1520_reset_assert(struct reset_controller_dev *rcdev, + unsigned long id) +{ + struct th1520_reset_priv *priv = to_th1520_reset(rcdev); + const struct th1520_reset_map *reset; + + reset = &th1520_resets[id]; + + return regmap_update_bits(priv->map, reset->reg, reset->bit, 0); +} + +static int th1520_reset_deassert(struct reset_controller_dev *rcdev, + unsigned long id) +{ + struct th1520_reset_priv *priv = to_th1520_reset(rcdev); + const struct th1520_reset_map *reset; + + reset = &th1520_resets[id]; + + return regmap_update_bits(priv->map, reset->reg, reset->bit, + reset->bit); +} + +static const struct reset_control_ops th1520_reset_ops = { + .assert = th1520_reset_assert, + .deassert = th1520_reset_deassert, +}; + +static const struct regmap_config th1520_reset_regmap_config = { + .reg_bits = 32, + .val_bits = 32, + .reg_stride = 4, + .fast_io = true, +}; + +static int th1520_reset_probe(struct platform_device *pdev) +{ + struct device *dev 
= &pdev->dev; + struct th1520_reset_priv *priv; + void __iomem *base; + int ret; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(base)) + return PTR_ERR(base); + + priv->map = devm_regmap_init_mmio(dev, base, + &th1520_reset_regmap_config); + if (IS_ERR(priv->map)) + return PTR_ERR(priv->map); + + /* Initialize GPU resets to asserted state */ + ret = regmap_update_bits(priv->map, TH1520_GPU_RST_CFG, + TH1520_GPU_RST_CFG_MASK, 0); + if (ret) + return ret; + + priv->rcdev.owner = THIS_MODULE; + priv->rcdev.nr_resets = ARRAY_SIZE(th1520_resets); + priv->rcdev.ops = &th1520_reset_ops; + priv->rcdev.of_node = dev->of_node; + + return devm_reset_controller_register(dev, &priv->rcdev); +} + +static const struct of_device_id th1520_reset_match[] = { + { .compatible = "thead,th1520-reset" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, th1520_reset_match); + +static struct platform_driver th1520_reset_driver = { + .driver = { + .name = "th1520-reset", + .of_match_table = th1520_reset_match, + }, + .probe = th1520_reset_probe, +}; +module_platform_driver(th1520_reset_driver); + +MODULE_AUTHOR("Michal Wilczynski <m.wilczynski@samsung.com>"); +MODULE_DESCRIPTION("T-HEAD TH1520 SoC reset controller"); +MODULE_LICENSE("GPL"); diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c index dac85294d2f5..e284eea331d7 100644 --- a/drivers/s390/char/vmlogrdr.c +++ b/drivers/s390/char/vmlogrdr.c @@ -255,7 +255,7 @@ static int vmlogrdr_recording(struct vmlogrdr_priv_t * logptr, /* * The recording commands needs to be called with option QID - * for guests that have previlege classes A or B. + * for guests that have privilege classes A or B. * Purging has to be done as separate step, because recording * can't be switched on as long as records are on the queue. * Doing both at the same time doesn't work. @@ -557,7 +557,7 @@ static ssize_t vmlogrdr_purge_store(struct device * dev, /* * The recording command needs to be called with option QID - * for guests that have previlege classes A or B. + * for guests that have privilege classes A or B. * Other guests will not recognize the command and we have to * issue the same command without the QID parameter. 
*/ diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c index 22074e81bd38..dc2265ebb11b 100644 --- a/drivers/s390/scsi/zfcp_aux.c +++ b/drivers/s390/scsi/zfcp_aux.c @@ -312,15 +312,13 @@ static void zfcp_print_sl(struct seq_file *m, struct service_level *sl) static int zfcp_setup_adapter_work_queue(struct zfcp_adapter *adapter) { - char name[TASK_COMM_LEN]; - - snprintf(name, sizeof(name), "zfcp_q_%s", - dev_name(&adapter->ccw_device->dev)); - adapter->work_queue = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM); + adapter->work_queue = + alloc_ordered_workqueue("zfcp_q_%s", WQ_MEM_RECLAIM, + dev_name(&adapter->ccw_device->dev)); + if (!adapter->work_queue) + return -ENOMEM; - if (adapter->work_queue) - return 0; - return -ENOMEM; + return 0; } static void zfcp_destroy_adapter_work_queue(struct zfcp_adapter *adapter) diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c index 8dc6be9a00c1..96b335c92603 100644 --- a/drivers/scsi/dc395x.c +++ b/drivers/scsi/dc395x.c @@ -83,65 +83,6 @@ /*#define DC395x_NO_SYNC*/ /*#define DC395x_NO_WIDE*/ -/*--------------------------------------------------------------------------- - Debugging - ---------------------------------------------------------------------------*/ -/* - * Types of debugging that can be enabled and disabled - */ -#define DBG_KG 0x0001 -#define DBG_0 0x0002 -#define DBG_1 0x0004 -#define DBG_SG 0x0020 -#define DBG_FIFO 0x0040 -#define DBG_PIO 0x0080 - - -/* - * Set set of things to output debugging for. - * Undefine to remove all debugging - */ -/*#define DEBUG_MASK (DBG_0|DBG_1|DBG_SG|DBG_FIFO|DBG_PIO)*/ -/*#define DEBUG_MASK DBG_0*/ - - -/* - * Output a kernel mesage at the specified level and append the - * driver name and a ": " to the start of the message - */ -#define dprintkl(level, format, arg...) \ - printk(level DC395X_NAME ": " format , ## arg) - - -#ifdef DEBUG_MASK -/* - * print a debug message - this is formated with KERN_DEBUG, then the - * driver name followed by a ": " and then the message is output. - * This also checks that the specified debug level is enabled before - * outputing the message - */ -#define dprintkdbg(type, format, arg...) \ - do { \ - if ((type) & (DEBUG_MASK)) \ - dprintkl(KERN_DEBUG , format , ## arg); \ - } while (0) - -/* - * Check if the specified type of debugging is enabled - */ -#define debug_enabled(type) ((DEBUG_MASK) & (type)) - -#else -/* - * No debugging. Do nothing - */ -#define dprintkdbg(type, format, arg...) 
\ - do {} while (0) -#define debug_enabled(type) (0) - -#endif - - #ifndef PCI_VENDOR_ID_TEKRAM #define PCI_VENDOR_ID_TEKRAM 0x1DE1 /* Vendor ID */ #endif @@ -432,7 +373,6 @@ static void *dc395x_scsi_phase1[] = { /* real period:48ns,76ns,100ns,124ns,148ns,176ns,200ns,248ns */ static u8 clock_period[] = { 12, 18, 25, 31, 37, 43, 50, 62 }; -static u16 clock_speed[] = { 200, 133, 100, 80, 67, 58, 50, 40 }; /*--------------------------------------------------------------------------- @@ -564,7 +504,6 @@ static void set_safe_settings(void) { int i; - dprintkl(KERN_INFO, "Using safe settings.\n"); for (i = 0; i < CFG_NUM; i++) { cfg_data[i].value = cfg_data[i].safe; @@ -581,15 +520,6 @@ static void fix_settings(void) { int i; - dprintkdbg(DBG_1, - "setup: AdapterId=%08x MaxSpeed=%08x DevMode=%08x " - "AdapterMode=%08x Tags=%08x ResetDelay=%08x\n", - cfg_data[CFG_ADAPTER_ID].value, - cfg_data[CFG_MAX_SPEED].value, - cfg_data[CFG_DEV_MODE].value, - cfg_data[CFG_ADAPTER_MODE].value, - cfg_data[CFG_TAGS].value, - cfg_data[CFG_RESET_DELAY].value); for (i = 0; i < CFG_NUM; i++) { if (cfg_data[i].value < cfg_data[i].min @@ -822,8 +752,6 @@ static void waiting_timeout(struct timer_list *t) { unsigned long flags; struct AdapterCtlBlk *acb = from_timer(acb, t, waiting_timer); - dprintkdbg(DBG_1, - "waiting_timeout: Queue woken up by timer. acb=%p\n", acb); DC395x_LOCK_IO(acb->scsi_host, flags); waiting_process_next(acb); DC395x_UNLOCK_IO(acb->scsi_host, flags); @@ -864,8 +792,6 @@ static void build_srb(struct scsi_cmnd *cmd, struct DeviceCtlBlk *dcb, { int nseg; enum dma_data_direction dir = cmd->sc_data_direction; - dprintkdbg(DBG_0, "build_srb: (0x%p) <%02i-%i>\n", - cmd, dcb->target_id, dcb->target_lun); srb->dcb = dcb; srb->cmd = cmd; @@ -887,12 +813,7 @@ static void build_srb(struct scsi_cmnd *cmd, struct DeviceCtlBlk *dcb, nseg = scsi_dma_map(cmd); BUG_ON(nseg < 0); - if (dir == DMA_NONE || !nseg) { - dprintkdbg(DBG_0, - "build_srb: [0] len=%d buf=%p use_sg=%d !MAP=%08x\n", - cmd->bufflen, scsi_sglist(cmd), scsi_sg_count(cmd), - srb->segment_x[0].address); - } else { + if (!(dir == DMA_NONE || !nseg)) { int i; u32 reqlen = scsi_bufflen(cmd); struct scatterlist *sg; @@ -900,11 +821,6 @@ static void build_srb(struct scsi_cmnd *cmd, struct DeviceCtlBlk *dcb, srb->sg_count = nseg; - dprintkdbg(DBG_0, - "build_srb: [n] len=%d buf=%p use_sg=%d segs=%d\n", - reqlen, scsi_sglist(cmd), scsi_sg_count(cmd), - srb->sg_count); - scsi_for_each_sg(cmd, sg, srb->sg_count, i) { u32 busaddr = (u32)sg_dma_address(sg); u32 seglen = (u32)sg->length; @@ -933,8 +849,6 @@ static void build_srb(struct scsi_cmnd *cmd, struct DeviceCtlBlk *dcb, srb->sg_bus_addr = dma_map_single(&dcb->acb->dev->dev, srb->segment_x, SEGMENTX_LEN, DMA_TO_DEVICE); - dprintkdbg(DBG_SG, "build_srb: [n] map sg %p->%08x(%05x)\n", - srb->segment_x, srb->sg_bus_addr, SEGMENTX_LEN); } srb->request_length = srb->total_xfer_length; @@ -966,8 +880,6 @@ static int dc395x_queue_command_lck(struct scsi_cmnd *cmd) struct ScsiReqBlk *srb; struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)cmd->device->host->hostdata; - dprintkdbg(DBG_0, "queue_command: (0x%p) <%02i-%i> cmnd=0x%02x\n", - cmd, cmd->device->id, (u8)cmd->device->lun, cmd->cmnd[0]); /* Assume BAD_TARGET; will be cleared later */ set_host_byte(cmd, DID_BAD_TARGET); @@ -975,37 +887,26 @@ static int dc395x_queue_command_lck(struct scsi_cmnd *cmd) /* ignore invalid targets */ if (cmd->device->id >= acb->scsi_host->max_id || cmd->device->lun >= acb->scsi_host->max_lun || - cmd->device->lun >31) { + 
cmd->device->lun > 31) goto complete; - } /* does the specified lun on the specified device exist */ - if (!(acb->dcb_map[cmd->device->id] & (1 << cmd->device->lun))) { - dprintkl(KERN_INFO, "queue_command: Ignore target <%02i-%i>\n", - cmd->device->id, (u8)cmd->device->lun); + if (!(acb->dcb_map[cmd->device->id] & (1 << cmd->device->lun))) goto complete; - } /* do we have a DCB for the device */ dcb = find_dcb(acb, cmd->device->id, cmd->device->lun); - if (!dcb) { - /* should never happen */ - dprintkl(KERN_ERR, "queue_command: No such device <%02i-%i>", - cmd->device->id, (u8)cmd->device->lun); + if (!dcb) goto complete; - } set_host_byte(cmd, DID_OK); set_status_byte(cmd, SAM_STAT_GOOD); srb = list_first_entry_or_null(&acb->srb_free_list, - struct ScsiReqBlk, list); + struct ScsiReqBlk, list); + if (!srb) { - /* - * Return 1 since we are unable to queue this command at this - * point in time. - */ - dprintkdbg(DBG_0, "queue_command: No free srb's\n"); + /* should never happen */ return 1; } list_del(&srb->list); @@ -1020,7 +921,6 @@ static int dc395x_queue_command_lck(struct scsi_cmnd *cmd) /* process immediately */ send_srb(acb, srb); } - dprintkdbg(DBG_1, "queue_command: (0x%p) done\n", cmd); return 0; complete: @@ -1036,82 +936,8 @@ complete: static DEF_SCSI_QCMD(dc395x_queue_command) -static void dump_register_info(struct AdapterCtlBlk *acb, - struct DeviceCtlBlk *dcb, struct ScsiReqBlk *srb) -{ - u16 pstat; - struct pci_dev *dev = acb->dev; - pci_read_config_word(dev, PCI_STATUS, &pstat); - if (!dcb) - dcb = acb->active_dcb; - if (!srb && dcb) - srb = dcb->active_srb; - if (srb) { - if (!srb->cmd) - dprintkl(KERN_INFO, "dump: srb=%p cmd=%p OOOPS!\n", - srb, srb->cmd); - else - dprintkl(KERN_INFO, "dump: srb=%p cmd=%p " - "cmnd=0x%02x <%02i-%i>\n", - srb, srb->cmd, - srb->cmd->cmnd[0], srb->cmd->device->id, - (u8)srb->cmd->device->lun); - printk(" sglist=%p cnt=%i idx=%i len=%zu\n", - srb->segment_x, srb->sg_count, srb->sg_index, - srb->total_xfer_length); - printk(" state=0x%04x status=0x%02x phase=0x%02x (%sconn.)\n", - srb->state, srb->status, srb->scsi_phase, - (acb->active_dcb) ? 
"" : "not"); - } - dprintkl(KERN_INFO, "dump: SCSI{status=0x%04x fifocnt=0x%02x " - "signals=0x%02x irqstat=0x%02x sync=0x%02x target=0x%02x " - "rselid=0x%02x ctr=0x%08x irqen=0x%02x config=0x%04x " - "config2=0x%02x cmd=0x%02x selto=0x%02x}\n", - DC395x_read16(acb, TRM_S1040_SCSI_STATUS), - DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT), - DC395x_read8(acb, TRM_S1040_SCSI_SIGNAL), - DC395x_read8(acb, TRM_S1040_SCSI_INTSTATUS), - DC395x_read8(acb, TRM_S1040_SCSI_SYNC), - DC395x_read8(acb, TRM_S1040_SCSI_TARGETID), - DC395x_read8(acb, TRM_S1040_SCSI_IDMSG), - DC395x_read32(acb, TRM_S1040_SCSI_COUNTER), - DC395x_read8(acb, TRM_S1040_SCSI_INTEN), - DC395x_read16(acb, TRM_S1040_SCSI_CONFIG0), - DC395x_read8(acb, TRM_S1040_SCSI_CONFIG2), - DC395x_read8(acb, TRM_S1040_SCSI_COMMAND), - DC395x_read8(acb, TRM_S1040_SCSI_TIMEOUT)); - dprintkl(KERN_INFO, "dump: DMA{cmd=0x%04x fifocnt=0x%02x fstat=0x%02x " - "irqstat=0x%02x irqen=0x%02x cfg=0x%04x tctr=0x%08x " - "ctctr=0x%08x addr=0x%08x:0x%08x}\n", - DC395x_read16(acb, TRM_S1040_DMA_COMMAND), - DC395x_read8(acb, TRM_S1040_DMA_FIFOCNT), - DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT), - DC395x_read8(acb, TRM_S1040_DMA_STATUS), - DC395x_read8(acb, TRM_S1040_DMA_INTEN), - DC395x_read16(acb, TRM_S1040_DMA_CONFIG), - DC395x_read32(acb, TRM_S1040_DMA_XCNT), - DC395x_read32(acb, TRM_S1040_DMA_CXCNT), - DC395x_read32(acb, TRM_S1040_DMA_XHIGHADDR), - DC395x_read32(acb, TRM_S1040_DMA_XLOWADDR)); - dprintkl(KERN_INFO, "dump: gen{gctrl=0x%02x gstat=0x%02x gtmr=0x%02x} " - "pci{status=0x%04x}\n", - DC395x_read8(acb, TRM_S1040_GEN_CONTROL), - DC395x_read8(acb, TRM_S1040_GEN_STATUS), - DC395x_read8(acb, TRM_S1040_GEN_TIMER), - pstat); -} - - static inline void clear_fifo(struct AdapterCtlBlk *acb, char *txt) { -#if debug_enabled(DBG_FIFO) - u8 lines = DC395x_read8(acb, TRM_S1040_SCSI_SIGNAL); - u8 fifocnt = DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT); - if (!(fifocnt & 0x40)) - dprintkdbg(DBG_FIFO, - "clear_fifo: (%i bytes) on phase %02x in %s\n", - fifocnt & 0x3f, lines, txt); -#endif DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_CLRFIFO); } @@ -1120,7 +946,6 @@ static void reset_dev_param(struct AdapterCtlBlk *acb) { struct DeviceCtlBlk *dcb; struct NvRamType *eeprom = &acb->eeprom; - dprintkdbg(DBG_0, "reset_dev_param: acb=%p\n", acb); list_for_each_entry(dcb, &acb->dcb_list, list) { u8 period_index; @@ -1148,9 +973,6 @@ static int __dc395x_eh_bus_reset(struct scsi_cmnd *cmd) { struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)cmd->device->host->hostdata; - dprintkl(KERN_INFO, - "eh_bus_reset: (0%p) target=<%02i-%i> cmd=%p\n", - cmd, cmd->device->id, (u8)cmd->device->lun, cmd); if (timer_pending(&acb->waiting_timer)) timer_delete(&acb->waiting_timer); @@ -1216,14 +1038,10 @@ static int dc395x_eh_abort(struct scsi_cmnd *cmd) (struct AdapterCtlBlk *)cmd->device->host->hostdata; struct DeviceCtlBlk *dcb; struct ScsiReqBlk *srb; - dprintkl(KERN_INFO, "eh_abort: (0x%p) target=<%02i-%i> cmd=%p\n", - cmd, cmd->device->id, (u8)cmd->device->lun, cmd); dcb = find_dcb(acb, cmd->device->id, cmd->device->lun); - if (!dcb) { - dprintkl(KERN_DEBUG, "eh_abort: No such device\n"); + if (!dcb) return FAILED; - } srb = find_cmd(cmd, &dcb->srb_waiting_list); if (srb) { @@ -1232,16 +1050,12 @@ static int dc395x_eh_abort(struct scsi_cmnd *cmd) pci_unmap_srb(acb, srb); free_tag(dcb, srb); list_add_tail(&srb->list, &acb->srb_free_list); - dprintkl(KERN_DEBUG, "eh_abort: Command was waiting\n"); set_host_byte(cmd, DID_ABORT); return SUCCESS; } srb = find_cmd(cmd, &dcb->srb_going_list); if 
(srb) { - dprintkl(KERN_DEBUG, "eh_abort: Command in progress\n"); /* XXX: Should abort the command here */ - } else { - dprintkl(KERN_DEBUG, "eh_abort: Command not found\n"); } return FAILED; } @@ -1253,10 +1067,6 @@ static void build_sdtr(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb, { u8 *ptr = srb->msgout_buf + srb->msg_count; if (srb->msg_count > 1) { - dprintkl(KERN_INFO, - "build_sdtr: msgout_buf BUSY (%i: %02x %02x)\n", - srb->msg_count, srb->msgout_buf[0], - srb->msgout_buf[1]); return; } if (!(dcb->dev_mode & NTC_DO_SYNC_NEGO)) { @@ -1278,13 +1088,9 @@ static void build_wdtr(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb, u8 wide = ((dcb->dev_mode & NTC_DO_WIDE_NEGO) & (acb->config & HCC_WIDE_CARD)) ? 1 : 0; u8 *ptr = srb->msgout_buf + srb->msg_count; - if (srb->msg_count > 1) { - dprintkl(KERN_INFO, - "build_wdtr: msgout_buf BUSY (%i: %02x %02x)\n", - srb->msg_count, srb->msgout_buf[0], - srb->msgout_buf[1]); + if (srb->msg_count > 1) return; - } + srb->msg_count += spi_populate_width_msg(ptr, wide); srb->state |= SRB_DO_WIDE_NEGO; } @@ -1316,11 +1122,9 @@ void selection_timeout_missed(unsigned long ptr) unsigned long flags; struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)ptr; struct ScsiReqBlk *srb; - dprintkl(KERN_DEBUG, "Chip forgot to produce SelTO IRQ!\n"); - if (!acb->active_dcb || !acb->active_dcb->active_srb) { - dprintkl(KERN_DEBUG, "... but no cmd pending? Oops!\n"); + if (!acb->active_dcb || !acb->active_dcb->active_srb) return; - } + DC395x_LOCK_IO(acb->scsi_host, flags); srb = acb->active_dcb->active_srb; disconnect(acb); @@ -1335,8 +1139,6 @@ static u8 start_scsi(struct AdapterCtlBlk* acb, struct DeviceCtlBlk* dcb, u16 __maybe_unused s_stat2, return_code; u8 s_stat, scsicommand, i, identify_message; u8 *ptr; - dprintkdbg(DBG_0, "start_scsi: (0x%p) <%02i-%i> srb=%p\n", - dcb->target_id, dcb->target_lun, srb); srb->tag_number = TAG_NONE; /* acb->tag_max_num: had error read in eeprom */ @@ -1345,8 +1147,6 @@ static u8 start_scsi(struct AdapterCtlBlk* acb, struct DeviceCtlBlk* dcb, s_stat2 = DC395x_read16(acb, TRM_S1040_SCSI_STATUS); #if 1 if (s_stat & 0x20 /* s_stat2 & 0x02000 */ ) { - dprintkdbg(DBG_KG, "start_scsi: (0x%p) BUSY %02x %04x\n", - s_stat, s_stat2); /* * Try anyway? * @@ -1361,24 +1161,16 @@ static u8 start_scsi(struct AdapterCtlBlk* acb, struct DeviceCtlBlk* dcb, return 1; } #endif - if (acb->active_dcb) { - dprintkl(KERN_DEBUG, "start_scsi: (0x%p) Attempt to start a" - "command while another command (0x%p) is active.", - srb->cmd, - acb->active_dcb->active_srb ? 
- acb->active_dcb->active_srb->cmd : NULL); + if (acb->active_dcb) return 1; - } - if (DC395x_read16(acb, TRM_S1040_SCSI_STATUS) & SCSIINTERRUPT) { - dprintkdbg(DBG_KG, "start_scsi: (0x%p) Failed (busy)\n", srb->cmd); + + if (DC395x_read16(acb, TRM_S1040_SCSI_STATUS) & SCSIINTERRUPT) return 1; - } + /* Allow starting of SCSI commands half a second before we allow the mid-level * to queue them again after a reset */ - if (time_before(jiffies, acb->last_reset - HZ / 2)) { - dprintkdbg(DBG_KG, "start_scsi: Refuse cmds (reset wait)\n"); + if (time_before(jiffies, acb->last_reset - HZ / 2)) return 1; - } /* Flush FIFO */ clear_fifo(acb, "start_scsi"); @@ -1442,10 +1234,6 @@ static u8 start_scsi(struct AdapterCtlBlk* acb, struct DeviceCtlBlk* dcb, tag_number++; } if (tag_number >= dcb->max_command) { - dprintkl(KERN_WARNING, "start_scsi: (0x%p) " - "Out of tags target=<%02i-%i>)\n", - srb->cmd, srb->cmd->device->id, - (u8)srb->cmd->device->lun); srb->state = SRB_READY; DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_HWRESELECT); @@ -1462,9 +1250,6 @@ static u8 start_scsi(struct AdapterCtlBlk* acb, struct DeviceCtlBlk* dcb, #endif /*polling:*/ /* Send CDB ..command block ......... */ - dprintkdbg(DBG_KG, "start_scsi: (0x%p) <%02i-%i> cmnd=0x%02x tag=%i\n", - srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun, - srb->cmd->cmnd[0], srb->tag_number); if (srb->flag & AUTO_REQSENSE) { DC395x_write8(acb, TRM_S1040_SCSI_FIFO, REQUEST_SENSE); DC395x_write8(acb, TRM_S1040_SCSI_FIFO, (dcb->target_lun << 5)); @@ -1486,8 +1271,6 @@ static u8 start_scsi(struct AdapterCtlBlk* acb, struct DeviceCtlBlk* dcb, * we caught an interrupt (must be reset or reselection ... ) * : Let's process it first! */ - dprintkdbg(DBG_0, "start_scsi: (0x%p) <%02i-%i> Failed - busy\n", - srb->cmd, dcb->target_id, dcb->target_lun); srb->state = SRB_READY; free_tag(dcb, srb); srb->msg_count = 0; @@ -1551,14 +1334,6 @@ static void dc395x_handle_interrupt(struct AdapterCtlBlk *acb, /* This acknowledges the IRQ */ scsi_intstatus = DC395x_read8(acb, TRM_S1040_SCSI_INTSTATUS); - if ((scsi_status & 0x2007) == 0x2002) - dprintkl(KERN_DEBUG, - "COP after COP completed? 
%04x\n", scsi_status); - if (debug_enabled(DBG_KG)) { - if (scsi_intstatus & INT_SELTIMEOUT) - dprintkdbg(DBG_KG, "handle_interrupt: Selection timeout\n"); - } - /*dprintkl(KERN_DEBUG, "handle_interrupt: intstatus = 0x%02x ", scsi_intstatus); */ if (timer_pending(&acb->selto_timer)) timer_delete(&acb->selto_timer); @@ -1571,27 +1346,21 @@ static void dc395x_handle_interrupt(struct AdapterCtlBlk *acb, reselect(acb); goto out_unlock; } - if (scsi_intstatus & INT_SELECT) { - dprintkl(KERN_INFO, "Host does not support target mode!\n"); + if (scsi_intstatus & INT_SELECT) goto out_unlock; - } + if (scsi_intstatus & INT_SCSIRESET) { scsi_reset_detect(acb); goto out_unlock; } if (scsi_intstatus & (INT_BUSSERVICE | INT_CMDDONE)) { dcb = acb->active_dcb; - if (!dcb) { - dprintkl(KERN_DEBUG, - "Oops: BusService (%04x %02x) w/o ActiveDCB!\n", - scsi_status, scsi_intstatus); + if (!dcb) goto out_unlock; - } + srb = dcb->active_srb; - if (dcb->flag & ABORT_DEV_) { - dprintkdbg(DBG_0, "MsgOut Abort Device.....\n"); + if (dcb->flag & ABORT_DEV_) enable_msgout_abort(acb, srb); - } /* software sequential machine */ phase = (u16)srb->scsi_phase; @@ -1659,9 +1428,7 @@ static irqreturn_t dc395x_interrupt(int irq, void *dev_id) } else if (dma_status & 0x20) { /* Error from the DMA engine */ - dprintkl(KERN_INFO, "Interrupt from DMA engine: 0x%02x!\n", dma_status); #if 0 - dprintkl(KERN_INFO, "This means DMA error! Try to handle ...\n"); if (acb->active_dcb) { acb->active_dcb-> flag |= ABORT_DEV_; if (acb->active_dcb->active_srb) @@ -1669,7 +1436,6 @@ static irqreturn_t dc395x_interrupt(int irq, void *dev_id) } DC395x_write8(acb, TRM_S1040_DMA_CONTROL, ABORTXFER | CLRXFIFO); #else - dprintkl(KERN_INFO, "Ignoring DMA error (probably a bad thing) ...\n"); acb = NULL; #endif handled = IRQ_HANDLED; @@ -1682,7 +1448,6 @@ static irqreturn_t dc395x_interrupt(int irq, void *dev_id) static void msgout_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, u16 *pscsi_status) { - dprintkdbg(DBG_0, "msgout_phase0: (0x%p)\n", srb->cmd); if (srb->state & (SRB_UNEXPECT_RESEL + SRB_ABORT_SENT)) *pscsi_status = PH_BUS_FREE; /*.. initial phase */ @@ -1696,18 +1461,12 @@ static void msgout_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, { u16 i; u8 *ptr; - dprintkdbg(DBG_0, "msgout_phase1: (0x%p)\n", srb->cmd); clear_fifo(acb, "msgout_phase1"); - if (!(srb->state & SRB_MSGOUT)) { + if (!(srb->state & SRB_MSGOUT)) srb->state |= SRB_MSGOUT; - dprintkl(KERN_DEBUG, - "msgout_phase1: (0x%p) Phase unexpected\n", - srb->cmd); /* So what ? 
*/ - } + if (!srb->msg_count) { - dprintkdbg(DBG_0, "msgout_phase1: (0x%p) NOP msg\n", - srb->cmd); DC395x_write8(acb, TRM_S1040_SCSI_FIFO, NOP); DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH); /* it's important for atn stop */ @@ -1728,7 +1487,6 @@ static void msgout_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, static void command_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, u16 *pscsi_status) { - dprintkdbg(DBG_0, "command_phase0: (0x%p)\n", srb->cmd); DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH); } @@ -1739,7 +1497,6 @@ static void command_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, struct DeviceCtlBlk *dcb; u8 *ptr; u16 i; - dprintkdbg(DBG_0, "command_phase1: (0x%p)\n", srb->cmd); clear_fifo(acb, "command_phase1"); DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_CLRATN); @@ -1768,26 +1525,6 @@ static void command_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, /* - * Verify that the remaining space in the hw sg lists is the same as - * the count of remaining bytes in srb->total_xfer_length - */ -static void sg_verify_length(struct ScsiReqBlk *srb) -{ - if (debug_enabled(DBG_SG)) { - unsigned len = 0; - unsigned idx = srb->sg_index; - struct SGentry *psge = srb->segment_x + idx; - for (; idx < srb->sg_count; psge++, idx++) - len += psge->length; - if (len != srb->total_xfer_length) - dprintkdbg(DBG_SG, - "Inconsistent SRB S/G lengths (Tot=%i, Count=%i) !!\n", - srb->total_xfer_length, len); - } -} - - -/* * Compute the next Scatter Gather list index and adjust its length * and address if necessary */ @@ -1797,15 +1534,11 @@ static void sg_update_list(struct ScsiReqBlk *srb, u32 left) u32 xferred = srb->total_xfer_length - left; /* bytes transferred */ struct SGentry *psge = srb->segment_x + srb->sg_index; - dprintkdbg(DBG_0, - "sg_update_list: Transferred %i of %i bytes, %i remain\n", - xferred, srb->total_xfer_length, left); if (xferred == 0) { /* nothing to update since we did not transfer any data */ return; } - sg_verify_length(srb); srb->total_xfer_length = left; /* update remaining count */ for (idx = srb->sg_index; idx < srb->sg_count; idx++) { if (xferred >= psge->length) { @@ -1826,7 +1559,6 @@ static void sg_update_list(struct ScsiReqBlk *srb, u32 left) } psge++; } - sg_verify_length(srb); } @@ -1882,8 +1614,6 @@ static void data_out_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, struct DeviceCtlBlk *dcb = srb->dcb; u16 scsi_status = *pscsi_status; u32 d_left_counter = 0; - dprintkdbg(DBG_0, "data_out_phase0: (0x%p) <%02i-%i>\n", - srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun); /* * KG: We need to drain the buffers before we draw any conclusions! 
@@ -1897,14 +1627,6 @@ static void data_out_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, * KG: Stop DMA engine pushing more data into the SCSI FIFO * If we need more data, the DMA SG list will be freshly set up, anyway */ - dprintkdbg(DBG_PIO, "data_out_phase0: " - "DMA{fifocnt=0x%02x fifostat=0x%02x} " - "SCSI{fifocnt=0x%02x cnt=0x%06x status=0x%04x} total=0x%06x\n", - DC395x_read8(acb, TRM_S1040_DMA_FIFOCNT), - DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT), - DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT), - DC395x_read32(acb, TRM_S1040_SCSI_COUNTER), scsi_status, - srb->total_xfer_length); DC395x_write8(acb, TRM_S1040_DMA_CONTROL, STOPDMAXFER | CLRXFIFO); if (!(srb->state & SRB_XFERPAD)) { @@ -1928,16 +1650,6 @@ static void data_out_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, if (dcb->sync_period & WIDE_SYNC) d_left_counter <<= 1; - dprintkdbg(DBG_KG, "data_out_phase0: FIFO contains %i %s\n" - "SCSI{fifocnt=0x%02x cnt=0x%08x} " - "DMA{fifocnt=0x%04x cnt=0x%02x ctr=0x%08x}\n", - DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT), - (dcb->sync_period & WIDE_SYNC) ? "words" : "bytes", - DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT), - DC395x_read32(acb, TRM_S1040_SCSI_COUNTER), - DC395x_read8(acb, TRM_S1040_DMA_FIFOCNT), - DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT), - DC395x_read32(acb, TRM_S1040_DMA_CXCNT)); } /* * calculate all the residue data that not yet tranfered @@ -1958,9 +1670,6 @@ static void data_out_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, if (d_left_counter == 1 && dcb->sync_period & WIDE_SYNC && scsi_bufflen(srb->cmd) % 2) { d_left_counter = 0; - dprintkl(KERN_INFO, - "data_out_phase0: Discard 1 byte (0x%02x)\n", - scsi_status); } /* * KG: Oops again. Same thinko as above: The SCSI might have been @@ -1991,8 +1700,6 @@ static void data_out_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, || ((oldxferred & ~PAGE_MASK) == (PAGE_SIZE - diff)) ) { - dprintkl(KERN_INFO, "data_out_phase0: " - "Work around chip bug (%i)?\n", diff); d_left_counter = srb->total_xfer_length - diff; sg_update_list(srb, d_left_counter); @@ -2003,17 +1710,14 @@ static void data_out_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, } } } - if ((*pscsi_status & PHASEMASK) != PH_DATA_OUT) { + if ((*pscsi_status & PHASEMASK) != PH_DATA_OUT) cleanup_after_transfer(acb, srb); - } } static void data_out_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, u16 *pscsi_status) { - dprintkdbg(DBG_0, "data_out_phase1: (0x%p) <%02i-%i>\n", - srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun); clear_fifo(acb, "data_out_phase1"); /* do prepare before transfer when data out phase */ data_io_transfer(acb, srb, XFERDATAOUT); @@ -2024,8 +1728,6 @@ static void data_in_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, { u16 scsi_status = *pscsi_status; - dprintkdbg(DBG_0, "data_in_phase0: (0x%p) <%02i-%i>\n", - srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun); /* * KG: DataIn is much more tricky than DataOut. 
When the device is finished @@ -2045,8 +1747,6 @@ static void data_in_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, unsigned int sc, fc; if (scsi_status & PARITYERROR) { - dprintkl(KERN_INFO, "data_in_phase0: (0x%p) " - "Parity Error\n", srb->cmd); srb->status |= PARITY_ERROR; } /* @@ -2058,26 +1758,14 @@ static void data_in_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, if (!(DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT) & 0x80)) { #if 0 int ctr = 6000000; - dprintkl(KERN_DEBUG, - "DIP0: Wait for DMA FIFO to flush ...\n"); /*DC395x_write8 (TRM_S1040_DMA_CONTROL, STOPDMAXFER); */ /*DC395x_write32 (TRM_S1040_SCSI_COUNTER, 7); */ /*DC395x_write8 (TRM_S1040_SCSI_COMMAND, SCMD_DMA_IN); */ while (! (DC395x_read16(acb, TRM_S1040_DMA_FIFOSTAT) & 0x80) && --ctr); - if (ctr < 6000000 - 1) - dprintkl(KERN_DEBUG - "DIP0: Had to wait for DMA ...\n"); - if (!ctr) - dprintkl(KERN_ERR, - "Deadlock in DIP0 waiting for DMA FIFO empty!!\n"); /*DC395x_write32 (TRM_S1040_SCSI_COUNTER, 0); */ #endif - dprintkdbg(DBG_KG, "data_in_phase0: " - "DMA{fifocnt=0x%02x fifostat=0x%02x}\n", - DC395x_read8(acb, TRM_S1040_DMA_FIFOCNT), - DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT)); } /* Now: Check remainig data: The SCSI counters should tell us ... */ sc = DC395x_read32(acb, TRM_S1040_SCSI_COUNTER); @@ -2085,17 +1773,6 @@ static void data_in_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, d_left_counter = sc + ((fc & 0x1f) << ((srb->dcb->sync_period & WIDE_SYNC) ? 1 : 0)); - dprintkdbg(DBG_KG, "data_in_phase0: " - "SCSI{fifocnt=0x%02x%s ctr=0x%08x} " - "DMA{fifocnt=0x%02x fifostat=0x%02x ctr=0x%08x} " - "Remain{totxfer=%i scsi_fifo+ctr=%i}\n", - fc, - (srb->dcb->sync_period & WIDE_SYNC) ? "words" : "bytes", - sc, - fc, - DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT), - DC395x_read32(acb, TRM_S1040_DMA_CXCNT), - srb->total_xfer_length, d_left_counter); #if DC395x_LASTPIO /* KG: Less than or equal to 4 bytes can not be transferred via DMA, it seems. */ if (d_left_counter @@ -2104,12 +1781,6 @@ static void data_in_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, /*u32 addr = (srb->segment_x[srb->sg_index].address); */ /*sg_update_list (srb, d_left_counter); */ - dprintkdbg(DBG_PIO, "data_in_phase0: PIO (%i %s) " - "for remaining %i bytes:", - fc & 0x1f, - (srb->dcb->sync_period & WIDE_SYNC) ? 
- "words" : "bytes", - srb->total_xfer_length); if (srb->dcb->sync_period & WIDE_SYNC) DC395x_write8(acb, TRM_S1040_SCSI_CONFIG2, CFG2_WIDEFIFO); @@ -2133,9 +1804,6 @@ static void data_in_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, byte = DC395x_read8(acb, TRM_S1040_SCSI_FIFO); *virt++ = byte; - if (debug_enabled(DBG_PIO)) - printk(" %02x", byte); - d_left_counter--; sg_subtract_one(srb); @@ -2158,8 +1826,6 @@ static void data_in_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, *virt++ = byte; srb->total_xfer_length--; - if (debug_enabled(DBG_PIO)) - printk(" %02x", byte); } DC395x_write8(acb, TRM_S1040_SCSI_CONFIG2, 0); @@ -2168,10 +1834,7 @@ static void data_in_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, scsi_kunmap_atomic_sg(base); local_irq_restore(flags); } - /*printk(" %08x", *(u32*)(bus_to_virt (addr))); */ /*srb->total_xfer_length = 0; */ - if (debug_enabled(DBG_PIO)) - printk("\n"); } #endif /* DC395x_LASTPIO */ @@ -2207,9 +1870,6 @@ static void data_in_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, TempDMAstatus = DC395x_read8(acb, TRM_S1040_DMA_STATUS); } while (!(TempDMAstatus & DMAXFERCOMP) && --ctr); - if (!ctr) - dprintkl(KERN_ERR, - "Deadlock in DataInPhase0 waiting for DMA!!\n"); srb->total_xfer_length = 0; #endif srb->total_xfer_length = d_left_counter; @@ -2226,17 +1886,14 @@ static void data_in_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, } } /* KG: The target may decide to disconnect: Empty FIFO before! */ - if ((*pscsi_status & PHASEMASK) != PH_DATA_IN) { + if ((*pscsi_status & PHASEMASK) != PH_DATA_IN) cleanup_after_transfer(acb, srb); - } } static void data_in_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, u16 *pscsi_status) { - dprintkdbg(DBG_0, "data_in_phase1: (0x%p) <%02i-%i>\n", - srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun); data_io_transfer(acb, srb, XFERDATAIN); } @@ -2246,13 +1903,7 @@ static void data_io_transfer(struct AdapterCtlBlk *acb, { struct DeviceCtlBlk *dcb = srb->dcb; u8 bval; - dprintkdbg(DBG_0, - "data_io_transfer: (0x%p) <%02i-%i> %c len=%i, sg=(%i/%i)\n", - srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun, - ((io_dir & DMACMD_DIR) ? 'r' : 'w'), - srb->total_xfer_length, srb->sg_index, srb->sg_count); - if (srb == acb->tmp_srb) - dprintkl(KERN_ERR, "data_io_transfer: Using tmp_srb!\n"); + if (srb->sg_index >= srb->sg_count) { /* can't happen? out of bounds error */ return; @@ -2265,9 +1916,6 @@ static void data_io_transfer(struct AdapterCtlBlk *acb, * Maybe, even ABORTXFER would be appropriate */ if (dma_status & XFERPENDING) { - dprintkl(KERN_DEBUG, "data_io_transfer: Xfer pending! 
" - "Expect trouble!\n"); - dump_register_info(acb, dcb, srb); DC395x_write8(acb, TRM_S1040_DMA_CONTROL, CLRXFIFO); } /* clear_fifo(acb, "IO"); */ @@ -2346,9 +1994,6 @@ static void data_io_transfer(struct AdapterCtlBlk *acb, left_io -= len; while (len--) { - if (debug_enabled(DBG_PIO)) - printk(" %02x", *virt); - DC395x_write8(acb, TRM_S1040_SCSI_FIFO, *virt++); sg_subtract_one(srb); @@ -2360,14 +2005,10 @@ static void data_io_transfer(struct AdapterCtlBlk *acb, if (srb->dcb->sync_period & WIDE_SYNC) { if (ln % 2) { DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0); - if (debug_enabled(DBG_PIO)) - printk(" |00"); } DC395x_write8(acb, TRM_S1040_SCSI_CONFIG2, 0); } /*DC395x_write32(acb, TRM_S1040_SCSI_COUNTER, ln); */ - if (debug_enabled(DBG_PIO)) - printk("\n"); DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_FIFO_OUT); } @@ -2419,8 +2060,6 @@ static void data_io_transfer(struct AdapterCtlBlk *acb, static void status_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, u16 *pscsi_status) { - dprintkdbg(DBG_0, "status_phase0: (0x%p) <%02i-%i>\n", - srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun); srb->target_status = DC395x_read8(acb, TRM_S1040_SCSI_FIFO); srb->end_message = DC395x_read8(acb, TRM_S1040_SCSI_FIFO); /* get message */ srb->state = SRB_COMPLETED; @@ -2433,8 +2072,6 @@ static void status_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, static void status_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, u16 *pscsi_status) { - dprintkdbg(DBG_0, "status_phase1: (0x%p) <%02i-%i>\n", - srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun); srb->state = SRB_STATUS; DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH); /* it's important for atn stop */ DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_COMP); @@ -2464,9 +2101,6 @@ static inline void msgin_reject(struct AdapterCtlBlk *acb, DC395x_ENABLE_MSGOUT; srb->state &= ~SRB_MSGIN; srb->state |= SRB_MSGOUT; - dprintkl(KERN_INFO, "msgin_reject: 0x%02x <%02i-%i>\n", - srb->msgin_buf[0], - srb->dcb->target_id, srb->dcb->target_lun); } @@ -2475,13 +2109,6 @@ static struct ScsiReqBlk *msgin_qtag(struct AdapterCtlBlk *acb, { struct ScsiReqBlk *srb = NULL; struct ScsiReqBlk *i; - dprintkdbg(DBG_0, "msgin_qtag: (0x%p) tag=%i srb=%p\n", - srb->cmd, tag, srb); - - if (!(dcb->tag_mask & (1 << tag))) - dprintkl(KERN_DEBUG, - "msgin_qtag: tag_mask=0x%08x does not reserve tag %i!\n", - dcb->tag_mask, tag); if (list_empty(&dcb->srb_going_list)) goto mingx0; @@ -2494,8 +2121,6 @@ static struct ScsiReqBlk *msgin_qtag(struct AdapterCtlBlk *acb, if (!srb) goto mingx0; - dprintkdbg(DBG_0, "msgin_qtag: (0x%p) <%02i-%i>\n", - srb->cmd, srb->dcb->target_id, srb->dcb->target_lun); if (dcb->flag & ABORT_DEV_) { /*srb->state = SRB_ABORT_SENT; */ enable_msgout_abort(acb, srb); @@ -2518,7 +2143,6 @@ static struct ScsiReqBlk *msgin_qtag(struct AdapterCtlBlk *acb, srb->msgout_buf[0] = ABORT_TASK; srb->msg_count = 1; DC395x_ENABLE_MSGOUT; - dprintkl(KERN_DEBUG, "msgin_qtag: Unknown tag %i - abort\n", tag); return srb; } @@ -2537,8 +2161,6 @@ static inline void reprogram_regs(struct AdapterCtlBlk *acb, static void msgin_set_async(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb) { struct DeviceCtlBlk *dcb = srb->dcb; - dprintkl(KERN_DEBUG, "msgin_set_async: No sync transfers <%02i-%i>\n", - dcb->target_id, dcb->target_lun); dcb->sync_mode &= ~(SYNC_NEGO_ENABLE); dcb->sync_mode |= SYNC_NEGO_DONE; @@ -2551,7 +2173,6 @@ static void msgin_set_async(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb) && !(dcb->sync_mode & 
WIDE_NEGO_DONE)) { build_wdtr(acb, dcb, srb); DC395x_ENABLE_MSGOUT; - dprintkdbg(DBG_0, "msgin_set_async(rej): Try WDTR anyway\n"); } } @@ -2562,12 +2183,6 @@ static void msgin_set_sync(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb) struct DeviceCtlBlk *dcb = srb->dcb; u8 bval; int fact; - dprintkdbg(DBG_1, "msgin_set_sync: <%02i> Sync: %ins " - "(%02i.%01i MHz) Offset %i\n", - dcb->target_id, srb->msgin_buf[3] << 2, - (250 / srb->msgin_buf[3]), - ((250 % srb->msgin_buf[3]) * 10) / srb->msgin_buf[3], - srb->msgin_buf[4]); if (srb->msgin_buf[4] > 15) srb->msgin_buf[4] = 15; @@ -2584,10 +2199,7 @@ static void msgin_set_sync(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb) || dcb->min_nego_period > clock_period[bval])) bval++; - if (srb->msgin_buf[3] < clock_period[bval]) - dprintkl(KERN_INFO, - "msgin_set_sync: Increase sync nego period to %ins\n", - clock_period[bval] << 2); + srb->msgin_buf[3] = clock_period[bval]; dcb->sync_period &= 0xf0; dcb->sync_period |= ALT_SYNC | bval; @@ -2598,18 +2210,8 @@ static void msgin_set_sync(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb) else fact = 250; - dprintkl(KERN_INFO, - "Target %02i: %s Sync: %ins Offset %i (%02i.%01i MB/s)\n", - dcb->target_id, (fact == 500) ? "Wide16" : "", - dcb->min_nego_period << 2, dcb->sync_offset, - (fact / dcb->min_nego_period), - ((fact % dcb->min_nego_period) * 10 + - dcb->min_nego_period / 2) / dcb->min_nego_period); - if (!(srb->state & SRB_DO_SYNC_NEGO)) { /* Reply with corrected SDTR Message */ - dprintkl(KERN_DEBUG, "msgin_set_sync: answer w/%ins %i\n", - srb->msgin_buf[3] << 2, srb->msgin_buf[4]); memcpy(srb->msgout_buf, srb->msgin_buf, 5); srb->msg_count = 5; @@ -2620,7 +2222,6 @@ static void msgin_set_sync(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb) && !(dcb->sync_mode & WIDE_NEGO_DONE)) { build_wdtr(acb, dcb, srb); DC395x_ENABLE_MSGOUT; - dprintkdbg(DBG_0, "msgin_set_sync: Also try WDTR\n"); } } srb->state &= ~SRB_DO_SYNC_NEGO; @@ -2634,7 +2235,6 @@ static inline void msgin_set_nowide(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb) { struct DeviceCtlBlk *dcb = srb->dcb; - dprintkdbg(DBG_1, "msgin_set_nowide: <%02i>\n", dcb->target_id); dcb->sync_period &= ~WIDE_SYNC; dcb->sync_mode &= ~(WIDE_NEGO_ENABLE); @@ -2645,7 +2245,6 @@ static inline void msgin_set_nowide(struct AdapterCtlBlk *acb, && !(dcb->sync_mode & SYNC_NEGO_DONE)) { build_sdtr(acb, dcb, srb); DC395x_ENABLE_MSGOUT; - dprintkdbg(DBG_0, "msgin_set_nowide: Rejected. Try SDTR anyway\n"); } } @@ -2654,15 +2253,11 @@ static void msgin_set_wide(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb) struct DeviceCtlBlk *dcb = srb->dcb; u8 wide = (dcb->dev_mode & NTC_DO_WIDE_NEGO && acb->config & HCC_WIDE_CARD) ? 
1 : 0; - dprintkdbg(DBG_1, "msgin_set_wide: <%02i>\n", dcb->target_id); if (srb->msgin_buf[3] > wide) srb->msgin_buf[3] = wide; /* Completed */ if (!(srb->state & SRB_DO_WIDE_NEGO)) { - dprintkl(KERN_DEBUG, - "msgin_set_wide: Wide nego initiated <%02i>\n", - dcb->target_id); memcpy(srb->msgout_buf, srb->msgin_buf, 4); srb->msg_count = 4; srb->state |= SRB_DO_WIDE_NEGO; @@ -2676,15 +2271,11 @@ static void msgin_set_wide(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb) dcb->sync_period &= ~WIDE_SYNC; srb->state &= ~SRB_DO_WIDE_NEGO; /*dcb->sync_mode &= ~(WIDE_NEGO_ENABLE+WIDE_NEGO_DONE); */ - dprintkdbg(DBG_1, - "msgin_set_wide: Wide (%i bit) negotiated <%02i>\n", - (8 << srb->msgin_buf[3]), dcb->target_id); reprogram_regs(acb, dcb); if ((dcb->sync_mode & SYNC_NEGO_ENABLE) && !(dcb->sync_mode & SYNC_NEGO_DONE)) { build_sdtr(acb, dcb, srb); DC395x_ENABLE_MSGOUT; - dprintkdbg(DBG_0, "msgin_set_wide: Also try SDTR.\n"); } } @@ -2705,7 +2296,6 @@ static void msgin_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, u16 *pscsi_status) { struct DeviceCtlBlk *dcb = acb->active_dcb; - dprintkdbg(DBG_0, "msgin_phase0: (0x%p)\n", srb->cmd); srb->msgin_buf[acb->msg_len++] = DC395x_read8(acb, TRM_S1040_SCSI_FIFO); if (msgin_completed(srb->msgin_buf, acb->msg_len)) { @@ -2759,7 +2349,6 @@ static void msgin_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, case IGNORE_WIDE_RESIDUE: /* Discard wide residual */ - dprintkdbg(DBG_0, "msgin_phase0: Ignore Wide Residual!\n"); break; case COMMAND_COMPLETE: @@ -2771,20 +2360,12 @@ static void msgin_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, * SAVE POINTER may be ignored as we have the struct * ScsiReqBlk* associated with the scsi command. */ - dprintkdbg(DBG_0, "msgin_phase0: (0x%p) " - "SAVE POINTER rem=%i Ignore\n", - srb->cmd, srb->total_xfer_length); break; case RESTORE_POINTERS: - dprintkdbg(DBG_0, "msgin_phase0: RESTORE POINTER. 
Ignore\n"); break; case ABORT: - dprintkdbg(DBG_0, "msgin_phase0: (0x%p) " - "<%02i-%i> ABORT msg\n", - srb->cmd, dcb->target_id, - dcb->target_lun); dcb->flag |= ABORT_DEV_; enable_msgout_abort(acb, srb); break; @@ -2792,7 +2373,6 @@ static void msgin_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, default: /* reject unknown messages */ if (srb->msgin_buf[0] & IDENTIFY_BASE) { - dprintkdbg(DBG_0, "msgin_phase0: Identify msg\n"); srb->msg_count = 1; srb->msgout_buf[0] = dcb->identify_msg; DC395x_ENABLE_MSGOUT; @@ -2815,7 +2395,6 @@ static void msgin_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, static void msgin_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb, u16 *pscsi_status) { - dprintkdbg(DBG_0, "msgin_phase1: (0x%p)\n", srb->cmd); clear_fifo(acb, "msgin_phase1"); DC395x_write32(acb, TRM_S1040_SCSI_COUNTER, 1); if (!(srb->state & SRB_MSGIN)) { @@ -2869,7 +2448,6 @@ static void disconnect(struct AdapterCtlBlk *acb) struct ScsiReqBlk *srb; if (!dcb) { - dprintkl(KERN_ERR, "disconnect: No such device\n"); udelay(500); /* Suspend queue for a while */ acb->last_reset = @@ -2881,21 +2459,16 @@ static void disconnect(struct AdapterCtlBlk *acb) } srb = dcb->active_srb; acb->active_dcb = NULL; - dprintkdbg(DBG_0, "disconnect: (0x%p)\n", srb->cmd); srb->scsi_phase = PH_BUS_FREE; /* initial phase */ clear_fifo(acb, "disconnect"); DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_HWRESELECT); if (srb->state & SRB_UNEXPECT_RESEL) { - dprintkl(KERN_ERR, - "disconnect: Unexpected reselection <%02i-%i>\n", - dcb->target_id, dcb->target_lun); srb->state = 0; waiting_process_next(acb); } else if (srb->state & SRB_ABORT_SENT) { dcb->flag &= ~ABORT_DEV_; acb->last_reset = jiffies + HZ / 2 + 1; - dprintkl(KERN_ERR, "disconnect: SRB_ABORT_SENT\n"); doing_srb_done(acb, DID_ABORT, srb->cmd, 1); waiting_process_next(acb); } else { @@ -2910,16 +2483,10 @@ static void disconnect(struct AdapterCtlBlk *acb) if (srb->state != SRB_START_ && srb->state != SRB_MSGOUT) { srb->state = SRB_READY; - dprintkl(KERN_DEBUG, - "disconnect: (0x%p) Unexpected\n", - srb->cmd); srb->target_status = SCSI_STAT_SEL_TIMEOUT; goto disc1; } else { /* Normal selection timeout */ - dprintkdbg(DBG_KG, "disconnect: (0x%p) " - "<%02i-%i> SelTO\n", srb->cmd, - dcb->target_id, dcb->target_lun); if (srb->retry_count++ > DC395x_MAX_RETRIES || acb->scan_devices) { srb->target_status = @@ -2928,9 +2495,6 @@ static void disconnect(struct AdapterCtlBlk *acb) } free_tag(dcb, srb); list_move(&srb->list, &dcb->srb_waiting_list); - dprintkdbg(DBG_KG, - "disconnect: (0x%p) Retry\n", - srb->cmd); waiting_set_timer(acb, HZ / 20); } } else if (srb->state & SRB_DISCONNECT) { @@ -2939,9 +2503,6 @@ static void disconnect(struct AdapterCtlBlk *acb) * SRB_DISCONNECT (This is what we expect!) */ if (bval & 0x40) { - dprintkdbg(DBG_0, "disconnect: SCSI bus stat " - " 0x%02x: ACK set! Other controllers?\n", - bval); /* It could come from another initiator, therefore don't do much ! 
*/ } else waiting_process_next(acb); @@ -2965,7 +2526,6 @@ static void reselect(struct AdapterCtlBlk *acb) struct ScsiReqBlk *srb = NULL; u16 rsel_tar_lun_id; u8 id, lun; - dprintkdbg(DBG_0, "reselect: acb=%p\n", acb); clear_fifo(acb, "reselect"); /*DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_HWRESELECT | DO_DATALATCH); */ @@ -2974,18 +2534,11 @@ static void reselect(struct AdapterCtlBlk *acb) if (dcb) { /* Arbitration lost but Reselection win */ srb = dcb->active_srb; if (!srb) { - dprintkl(KERN_DEBUG, "reselect: Arb lost Resel won, " - "but active_srb == NULL\n"); DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH); /* it's important for atn stop */ return; } /* Why the if ? */ if (!acb->scan_devices) { - dprintkdbg(DBG_KG, "reselect: (0x%p) <%02i-%i> " - "Arb lost but Resel win rsel=%i stat=0x%04x\n", - srb->cmd, dcb->target_id, - dcb->target_lun, rsel_tar_lun_id, - DC395x_read16(acb, TRM_S1040_SCSI_STATUS)); /*srb->state |= SRB_DISCONNECT; */ srb->state = SRB_READY; @@ -2997,25 +2550,15 @@ static void reselect(struct AdapterCtlBlk *acb) } } /* Read Reselected Target Id and LUN */ - if (!(rsel_tar_lun_id & (IDENTIFY_BASE << 8))) - dprintkl(KERN_DEBUG, "reselect: Expects identify msg. " - "Got %i!\n", rsel_tar_lun_id); id = rsel_tar_lun_id & 0xff; lun = (rsel_tar_lun_id >> 8) & 7; dcb = find_dcb(acb, id, lun); if (!dcb) { - dprintkl(KERN_ERR, "reselect: From non existent device " - "<%02i-%i>\n", id, lun); DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH); /* it's important for atn stop */ return; } acb->active_dcb = dcb; - if (!(dcb->dev_mode & NTC_DO_DISCONNECT)) - dprintkl(KERN_DEBUG, "reselect: in spite of forbidden " - "disconnection? <%02i-%i>\n", - dcb->target_id, dcb->target_lun); - if (dcb->sync_mode & EN_TAG_QUEUEING) { srb = acb->tmp_srb; dcb->active_srb = srb; @@ -3026,9 +2569,6 @@ static void reselect(struct AdapterCtlBlk *acb) /* * abort command */ - dprintkl(KERN_DEBUG, - "reselect: w/o disconnected cmds <%02i-%i>\n", - dcb->target_id, dcb->target_lun); srb = acb->tmp_srb; srb->state = SRB_UNEXPECT_RESEL; dcb->active_srb = srb; @@ -3045,7 +2585,6 @@ static void reselect(struct AdapterCtlBlk *acb) srb->scsi_phase = PH_BUS_FREE; /* initial phase */ /* Program HA ID, target ID, period and offset */ - dprintkdbg(DBG_0, "reselect: select <%i>\n", dcb->target_id); DC395x_write8(acb, TRM_S1040_SCSI_HOSTID, acb->scsi_host->this_id); /* host ID */ DC395x_write8(acb, TRM_S1040_SCSI_TARGETID, dcb->target_id); /* target ID */ DC395x_write8(acb, TRM_S1040_SCSI_OFFSET, dcb->sync_offset); /* offset */ @@ -3111,12 +2650,8 @@ static void pci_unmap_srb(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb) if (scsi_sg_count(cmd) && dir != DMA_NONE) { /* unmap DC395x SG list */ - dprintkdbg(DBG_SG, "pci_unmap_srb: list=%08x(%05x)\n", - srb->sg_bus_addr, SEGMENTX_LEN); dma_unmap_single(&acb->dev->dev, srb->sg_bus_addr, SEGMENTX_LEN, DMA_TO_DEVICE); - dprintkdbg(DBG_SG, "pci_unmap_srb: segs=%i buffer=%p\n", - scsi_sg_count(cmd), scsi_bufflen(cmd)); /* unmap the sg segments */ scsi_dma_unmap(cmd); } @@ -3130,8 +2665,6 @@ static void pci_unmap_srb_sense(struct AdapterCtlBlk *acb, if (!(srb->flag & AUTO_REQSENSE)) return; /* Unmap sense buffer */ - dprintkdbg(DBG_SG, "pci_unmap_srb_sense: buffer=%08x\n", - srb->segment_x[0].address); dma_unmap_single(&acb->dev->dev, srb->segment_x[0].address, srb->segment_x[0].length, DMA_FROM_DEVICE); /* Restore SG stuff */ @@ -3155,16 +2688,10 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb, enum dma_data_direction dir = 
cmd->sc_data_direction; int ckc_only = 1; - dprintkdbg(DBG_1, "srb_done: (0x%p) <%02i-%i>\n", srb->cmd, - srb->cmd->device->id, (u8)srb->cmd->device->lun); - dprintkdbg(DBG_SG, "srb_done: srb=%p sg=%i(%i/%i) buf=%p\n", - srb, scsi_sg_count(cmd), srb->sg_index, srb->sg_count, - scsi_sgtalbe(cmd)); status = srb->target_status; set_host_byte(cmd, DID_OK); set_status_byte(cmd, SAM_STAT_GOOD); if (srb->flag & AUTO_REQSENSE) { - dprintkdbg(DBG_0, "srb_done: AUTO_REQSENSE1\n"); pci_unmap_srb_sense(acb, srb); /* ** target status.......................... @@ -3172,57 +2699,11 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb, srb->flag &= ~AUTO_REQSENSE; srb->adapter_status = 0; srb->target_status = SAM_STAT_CHECK_CONDITION; - if (debug_enabled(DBG_1)) { - switch (cmd->sense_buffer[2] & 0x0f) { - case NOT_READY: - dprintkl(KERN_DEBUG, - "ReqSense: NOT_READY cmnd=0x%02x <%02i-%i> stat=%i scan=%i ", - cmd->cmnd[0], dcb->target_id, - dcb->target_lun, status, acb->scan_devices); - break; - case UNIT_ATTENTION: - dprintkl(KERN_DEBUG, - "ReqSense: UNIT_ATTENTION cmnd=0x%02x <%02i-%i> stat=%i scan=%i ", - cmd->cmnd[0], dcb->target_id, - dcb->target_lun, status, acb->scan_devices); - break; - case ILLEGAL_REQUEST: - dprintkl(KERN_DEBUG, - "ReqSense: ILLEGAL_REQUEST cmnd=0x%02x <%02i-%i> stat=%i scan=%i ", - cmd->cmnd[0], dcb->target_id, - dcb->target_lun, status, acb->scan_devices); - break; - case MEDIUM_ERROR: - dprintkl(KERN_DEBUG, - "ReqSense: MEDIUM_ERROR cmnd=0x%02x <%02i-%i> stat=%i scan=%i ", - cmd->cmnd[0], dcb->target_id, - dcb->target_lun, status, acb->scan_devices); - break; - case HARDWARE_ERROR: - dprintkl(KERN_DEBUG, - "ReqSense: HARDWARE_ERROR cmnd=0x%02x <%02i-%i> stat=%i scan=%i ", - cmd->cmnd[0], dcb->target_id, - dcb->target_lun, status, acb->scan_devices); - break; - } - if (cmd->sense_buffer[7] >= 6) - printk("sense=0x%02x ASC=0x%02x ASCQ=0x%02x " - "(0x%08x 0x%08x)\n", - cmd->sense_buffer[2], cmd->sense_buffer[12], - cmd->sense_buffer[13], - *((unsigned int *)(cmd->sense_buffer + 3)), - *((unsigned int *)(cmd->sense_buffer + 8))); - else - printk("sense=0x%02x No ASC/ASCQ (0x%08x)\n", - cmd->sense_buffer[2], - *((unsigned int *)(cmd->sense_buffer + 3))); - } if (status == SAM_STAT_CHECK_CONDITION) { set_host_byte(cmd, DID_BAD_TARGET); goto ckc_e; } - dprintkdbg(DBG_0, "srb_done: AUTO_REQSENSE2\n"); set_status_byte(cmd, SAM_STAT_CHECK_CONDITION); @@ -3239,8 +2720,6 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb, return; } else if (status == SAM_STAT_TASK_SET_FULL) { tempcnt = (u8)list_size(&dcb->srb_going_list); - dprintkl(KERN_INFO, "QUEUE_FULL for dev <%02i-%i> with %i cmnds\n", - dcb->target_id, dcb->target_lun, tempcnt); if (tempcnt > 1) tempcnt--; dcb->max_command = tempcnt; @@ -3314,21 +2793,10 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb, /* Here is the info for Doug Gilbert's sg3 ... */ scsi_set_resid(cmd, srb->total_xfer_length); - if (debug_enabled(DBG_KG)) { - if (srb->total_xfer_length) - dprintkdbg(DBG_KG, "srb_done: (0x%p) <%02i-%i> " - "cmnd=0x%02x Missed %i bytes\n", - cmd, cmd->device->id, (u8)cmd->device->lun, - cmd->cmnd[0], srb->total_xfer_length); - } if (srb != acb->tmp_srb) { /* Add to free list */ - dprintkdbg(DBG_0, "srb_done: (0x%p) done result=0x%08x\n", - cmd, cmd->result); list_move_tail(&srb->list, &acb->srb_free_list); - } else { - dprintkl(KERN_ERR, "srb_done: ERROR! 
Completed cmd with tmp_srb\n"); } scsi_done(cmd); @@ -3341,7 +2809,6 @@ static void doing_srb_done(struct AdapterCtlBlk *acb, u8 did_flag, struct scsi_cmnd *cmd, u8 force) { struct DeviceCtlBlk *dcb; - dprintkl(KERN_INFO, "doing_srb_done: pids "); list_for_each_entry(dcb, &acb->dcb_list, list) { struct ScsiReqBlk *srb; @@ -3365,15 +2832,6 @@ static void doing_srb_done(struct AdapterCtlBlk *acb, u8 did_flag, scsi_done(p); } } - if (!list_empty(&dcb->srb_going_list)) - dprintkl(KERN_DEBUG, - "How could the ML send cmnds to the Going queue? <%02i-%i>\n", - dcb->target_id, dcb->target_lun); - if (dcb->tag_mask) - dprintkl(KERN_DEBUG, - "tag_mask for <%02i-%i> should be empty, is %08x!\n", - dcb->target_id, dcb->target_lun, - dcb->tag_mask); /* Waiting queue */ list_for_each_entry_safe(srb, tmp, &dcb->srb_waiting_list, list) { @@ -3392,19 +2850,13 @@ static void doing_srb_done(struct AdapterCtlBlk *acb, u8 did_flag, scsi_done(cmd); } } - if (!list_empty(&dcb->srb_waiting_list)) - dprintkl(KERN_DEBUG, "ML queued %i cmnds again to <%02i-%i>\n", - list_size(&dcb->srb_waiting_list), dcb->target_id, - dcb->target_lun); dcb->flag &= ~ABORT_DEV_; } - printk("\n"); } static void reset_scsi_bus(struct AdapterCtlBlk *acb) { - dprintkdbg(DBG_0, "reset_scsi_bus: acb=%p\n", acb); acb->acb_flag |= RESET_DEV; /* RESET_DETECT, RESET_DONE, RESET_DEV */ DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_RSTSCSI); @@ -3451,7 +2903,6 @@ static void set_basic_config(struct AdapterCtlBlk *acb) static void scsi_reset_detect(struct AdapterCtlBlk *acb) { - dprintkl(KERN_INFO, "scsi_reset_detect: acb=%p\n", acb); /* delay half a second */ if (timer_pending(&acb->waiting_timer)) timer_delete(&acb->waiting_timer); @@ -3488,8 +2939,6 @@ static void request_sense(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb, struct ScsiReqBlk *srb) { struct scsi_cmnd *cmd = srb->cmd; - dprintkdbg(DBG_1, "request_sense: (0x%p) <%02i-%i>\n", - cmd, cmd->device->id, (u8)cmd->device->lun); srb->flag |= AUTO_REQSENSE; srb->adapter_status = 0; @@ -3511,16 +2960,10 @@ static void request_sense(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb, srb->segment_x[0].address = dma_map_single(&acb->dev->dev, cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE); - dprintkdbg(DBG_SG, "request_sense: map buffer %p->%08x(%05x)\n", - cmd->sense_buffer, srb->segment_x[0].address, - SCSI_SENSE_BUFFERSIZE); srb->sg_count = 1; srb->sg_index = 0; if (start_scsi(acb, dcb, srb)) { /* Should only happen, if sb. 
else grabs the bus */ - dprintkl(KERN_DEBUG, - "request_sense: (0x%p) failed <%02i-%i>\n", - srb->cmd, dcb->target_id, dcb->target_lun); list_move(&srb->list, &dcb->srb_waiting_list); waiting_set_timer(acb, HZ / 100); } @@ -3548,7 +2991,6 @@ static struct DeviceCtlBlk *device_alloc(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb; dcb = kmalloc(sizeof(struct DeviceCtlBlk), GFP_ATOMIC); - dprintkdbg(DBG_0, "device_alloc: <%02i-%i>\n", target, lun); if (!dcb) return NULL; dcb->acb = NULL; @@ -3598,10 +3040,6 @@ static struct DeviceCtlBlk *device_alloc(struct AdapterCtlBlk *acb, return NULL; } - dprintkdbg(DBG_1, - "device_alloc: <%02i-%i> copy from <%02i-%i>\n", - dcb->target_id, dcb->target_lun, - p->target_id, p->target_lun); dcb->sync_mode = p->sync_mode; dcb->sync_period = p->sync_period; dcb->min_nego_period = p->min_nego_period; @@ -3651,8 +3089,6 @@ static void adapter_remove_device(struct AdapterCtlBlk *acb, { struct DeviceCtlBlk *i; struct DeviceCtlBlk *tmp; - dprintkdbg(DBG_0, "adapter_remove_device: <%02i-%i>\n", - dcb->target_id, dcb->target_lun); /* fix up any pointers to this device that we have in the adapter */ if (acb->active_dcb == dcb) @@ -3685,10 +3121,6 @@ static void adapter_remove_and_free_device(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb) { if (list_size(&dcb->srb_going_list) > 1) { - dprintkdbg(DBG_1, "adapter_remove_and_free_device: <%02i-%i> " - "Won't remove because of %i active requests.\n", - dcb->target_id, dcb->target_lun, - list_size(&dcb->srb_going_list)); return; } adapter_remove_device(acb, dcb); @@ -3706,8 +3138,6 @@ static void adapter_remove_and_free_all_devices(struct AdapterCtlBlk* acb) { struct DeviceCtlBlk *dcb; struct DeviceCtlBlk *tmp; - dprintkdbg(DBG_1, "adapter_remove_and_free_all_devices: num=%i\n", - list_size(&acb->dcb_list)); list_for_each_entry_safe(dcb, tmp, &acb->dcb_list, list) adapter_remove_and_free_device(acb, dcb); @@ -4002,8 +3432,6 @@ static void check_eeprom(struct NvRamType *eeprom, unsigned long io_port) * Checksum is wrong. 
* Load a set of defaults into the eeprom buffer */ - dprintkl(KERN_WARNING, - "EEProm checksum error: using default values and options.\n"); eeprom->sub_vendor_id[0] = (u8)PCI_VENDOR_ID_TEKRAM; eeprom->sub_vendor_id[1] = (u8)(PCI_VENDOR_ID_TEKRAM >> 8); eeprom->sub_sys_id[0] = (u8)PCI_DEVICE_ID_TEKRAM_TRMS1040; @@ -4055,15 +3483,6 @@ static void check_eeprom(struct NvRamType *eeprom, unsigned long io_port) **/ static void print_eeprom_settings(struct NvRamType *eeprom) { - dprintkl(KERN_INFO, "Used settings: AdapterID=%02i, Speed=%i(%02i.%01iMHz), dev_mode=0x%02x\n", - eeprom->scsi_id, - eeprom->target[0].period, - clock_speed[eeprom->target[0].period] / 10, - clock_speed[eeprom->target[0].period] % 10, - eeprom->target[0].cfg0); - dprintkl(KERN_INFO, " AdaptMode=0x%02x, Tags=%i(%02i), DelayReset=%is\n", - eeprom->channel_cfg, eeprom->max_tag, - 1 << eeprom->max_tag, eeprom->delay_time); } @@ -4094,15 +3513,12 @@ static int adapter_sg_tables_alloc(struct AdapterCtlBlk *acb) for (i = 0; i < DC395x_MAX_SRB_CNT; i++) acb->srb_array[i].segment_x = NULL; - dprintkdbg(DBG_1, "Allocate %i pages for SG tables\n", pages); while (pages--) { ptr = kmalloc(PAGE_SIZE, GFP_KERNEL); if (!ptr) { adapter_sg_tables_free(acb); return 1; } - dprintkdbg(DBG_1, "Allocate %li bytes at %p for SG segments %i\n", - PAGE_SIZE, ptr, srb_idx); i = 0; while (i < srbs_per_page && srb_idx < DC395x_MAX_SRB_CNT) acb->srb_array[srb_idx++].segment_x = @@ -4111,8 +3527,6 @@ static int adapter_sg_tables_alloc(struct AdapterCtlBlk *acb) if (i < srbs_per_page) acb->srb.segment_x = ptr + (i * DC395x_MAX_SG_LISTENTRY); - else - dprintkl(KERN_DEBUG, "No space for tmsrb SG table reserved?!\n"); return 0; } @@ -4132,8 +3546,6 @@ static void adapter_print_config(struct AdapterCtlBlk *acb) u8 bval; bval = DC395x_read8(acb, TRM_S1040_GEN_STATUS); - dprintkl(KERN_INFO, "%sConnectors: ", - ((bval & WIDESCSI) ? "(Wide) " : "")); if (!(bval & CON5068)) printk("ext%s ", !(bval & EXT68HIGH) ? 
"68" : "50"); if (!(bval & CON68)) @@ -4293,7 +3705,6 @@ static void adapter_init_chip(struct AdapterCtlBlk *acb) acb->config |= HCC_SCSI_RESET; if (acb->config & HCC_SCSI_RESET) { - dprintkl(KERN_INFO, "Performing initial SCSI bus reset\n"); DC395x_write8(acb, TRM_S1040_SCSI_CONTROL, DO_RSTSCSI); /*while (!( DC395x_read8(acb, TRM_S1040_SCSI_INTSTATUS) & INT_SCSIRESET )); */ @@ -4327,7 +3738,6 @@ static int adapter_init(struct AdapterCtlBlk *acb, unsigned long io_port, u32 io_port_len, unsigned int irq) { if (!request_region(io_port, io_port_len, DC395X_NAME)) { - dprintkl(KERN_ERR, "Failed to reserve IO region 0x%lx\n", io_port); goto failed; } /* store port base to indicate we have registered it */ @@ -4336,7 +3746,6 @@ static int adapter_init(struct AdapterCtlBlk *acb, unsigned long io_port, if (request_irq(irq, dc395x_interrupt, IRQF_SHARED, DC395X_NAME, acb)) { /* release the region we just claimed */ - dprintkl(KERN_INFO, "Failed to register IRQ\n"); goto failed; } /* store irq to indicate we have registered it */ @@ -4353,18 +3762,12 @@ static int adapter_init(struct AdapterCtlBlk *acb, unsigned long io_port, adapter_print_config(acb); if (adapter_sg_tables_alloc(acb)) { - dprintkl(KERN_DEBUG, "Memory allocation for SG tables failed\n"); goto failed; } adapter_init_scsi_host(acb->scsi_host); adapter_init_chip(acb); set_basic_config(acb); - dprintkdbg(DBG_0, - "adapter_init: acb=%p, pdcb_map=%p psrb_array=%p " - "size{acb=0x%04x dcb=0x%04x srb=0x%04x}\n", - acb, acb->dcb_map, acb->srb_array, sizeof(struct AdapterCtlBlk), - sizeof(struct DeviceCtlBlk), sizeof(struct ScsiReqBlk)); return 0; failed: @@ -4528,14 +3931,6 @@ static int dc395x_show_info(struct seq_file *m, struct Scsi_Host *host) seq_putc(m, '\n'); } - if (debug_enabled(DBG_1)) { - seq_printf(m, "DCB list for ACB %p:\n", acb); - list_for_each_entry(dcb, &acb->dcb_list, list) { - seq_printf(m, "%p -> ", dcb); - } - seq_puts(m, "END\n"); - } - DC395x_UNLOCK_IO(acb->scsi_host, flags); return 0; } @@ -4560,21 +3955,6 @@ static const struct scsi_host_template dc395x_driver_template = { /** - * banner_display - Display banner on first instance of driver - * initialized. - **/ -static void banner_display(void) -{ - static int banner_done = 0; - if (!banner_done) - { - dprintkl(KERN_INFO, "%s %s\n", DC395X_BANNER, DC395X_VERSION); - banner_done = 1; - } -} - - -/** * dc395x_init_one - Initialise a single instance of the adapter. 
* * The PCI layer will call this once for each instance of the adapter @@ -4595,33 +3975,25 @@ static int dc395x_init_one(struct pci_dev *dev, const struct pci_device_id *id) unsigned int io_port_len; unsigned int irq; - dprintkdbg(DBG_0, "Init one instance (%s)\n", pci_name(dev)); - banner_display(); - if (pci_enable_device(dev)) - { - dprintkl(KERN_INFO, "PCI Enable device failed.\n"); return -ENODEV; - } + io_port_base = pci_resource_start(dev, 0) & PCI_BASE_ADDRESS_IO_MASK; io_port_len = pci_resource_len(dev, 0); irq = dev->irq; - dprintkdbg(DBG_0, "IO_PORT=0x%04lx, IRQ=0x%x\n", io_port_base, dev->irq); /* allocate scsi host information (includes out adapter) */ scsi_host = scsi_host_alloc(&dc395x_driver_template, sizeof(struct AdapterCtlBlk)); - if (!scsi_host) { - dprintkl(KERN_INFO, "scsi_host_alloc failed\n"); + if (!scsi_host) goto fail; - } + acb = (struct AdapterCtlBlk*)scsi_host->hostdata; acb->scsi_host = scsi_host; acb->dev = dev; /* initialise the adapter and everything we need */ if (adapter_init(acb, io_port_base, io_port_len, irq)) { - dprintkl(KERN_INFO, "adapter init failed\n"); acb = NULL; goto fail; } @@ -4629,10 +4001,9 @@ static int dc395x_init_one(struct pci_dev *dev, const struct pci_device_id *id) pci_set_master(dev); /* get the scsi mid level to scan for new devices on the bus */ - if (scsi_add_host(scsi_host, &dev->dev)) { - dprintkl(KERN_ERR, "scsi_add_host failed\n"); + if (scsi_add_host(scsi_host, &dev->dev)) goto fail; - } + pci_set_drvdata(dev, scsi_host); scsi_scan_host(scsi_host); @@ -4659,8 +4030,6 @@ static void dc395x_remove_one(struct pci_dev *dev) struct Scsi_Host *scsi_host = pci_get_drvdata(dev); struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)(scsi_host->hostdata); - dprintkdbg(DBG_0, "dc395x_remove_one: acb=%p\n", acb); - scsi_remove_host(scsi_host); adapter_uninit(acb); pci_disable_device(dev); diff --git a/drivers/scsi/elx/libefc_sli/sli4.c b/drivers/scsi/elx/libefc_sli/sli4.c index 5e7fb110bc3f..d9a231fc0e0d 100644 --- a/drivers/scsi/elx/libefc_sli/sli4.c +++ b/drivers/scsi/elx/libefc_sli/sli4.c @@ -3804,7 +3804,7 @@ sli_cmd_common_write_object(struct sli4 *sli4, void *buf, u16 noc, wr_obj->desired_write_len_dword = cpu_to_le32(dwflags); wr_obj->write_offset = cpu_to_le32(offset); - strncpy(wr_obj->object_name, obj_name, sizeof(wr_obj->object_name) - 1); + strscpy(wr_obj->object_name, obj_name); wr_obj->host_buffer_descriptor_count = cpu_to_le32(1); bde = (struct sli4_bde *)wr_obj->host_buffer_descriptor; @@ -3833,7 +3833,7 @@ sli_cmd_common_delete_object(struct sli4 *sli4, void *buf, char *obj_name) SLI4_SUBSYSTEM_COMMON, CMD_V0, SLI4_RQST_PYLD_LEN(cmn_delete_object)); - strncpy(req->object_name, obj_name, sizeof(req->object_name) - 1); + strscpy(req->object_name, obj_name); return 0; } @@ -3856,7 +3856,7 @@ sli_cmd_common_read_object(struct sli4 *sli4, void *buf, u32 desired_read_len, cpu_to_le32(desired_read_len & SLI4_REQ_DESIRE_READLEN); rd_obj->read_offset = cpu_to_le32(offset); - strncpy(rd_obj->object_name, obj_name, sizeof(rd_obj->object_name) - 1); + strscpy(rd_obj->object_name, obj_name); rd_obj->host_buffer_descriptor_count = cpu_to_le32(1); bde = (struct sli4_bde *)rd_obj->host_buffer_descriptor; diff --git a/drivers/scsi/fnic/fip.c b/drivers/scsi/fnic/fip.c index 6e7c0b00eb41..19395e2aee44 100644 --- a/drivers/scsi/fnic/fip.c +++ b/drivers/scsi/fnic/fip.c @@ -200,7 +200,7 @@ void fnic_fcoe_start_fcf_discovery(struct fnic *fnic) return; } - memset(iport->selected_fcf.fcf_mac, 0, ETH_ALEN); + 
eth_zero_addr(iport->selected_fcf.fcf_mac); pdisc_sol = (struct fip_discovery *) frame; *pdisc_sol = (struct fip_discovery) { @@ -588,12 +588,12 @@ void fnic_common_fip_cleanup(struct fnic *fnic) if (!is_zero_ether_addr(iport->fpma)) vnic_dev_del_addr(fnic->vdev, iport->fpma); - memset(iport->fpma, 0, ETH_ALEN); + eth_zero_addr(iport->fpma); iport->fcid = 0; iport->r_a_tov = 0; iport->e_d_tov = 0; - memset(fnic->iport.fcfmac, 0, ETH_ALEN); - memset(iport->selected_fcf.fcf_mac, 0, ETH_ALEN); + eth_zero_addr(fnic->iport.fcfmac); + eth_zero_addr(iport->selected_fcf.fcf_mac); iport->selected_fcf.fcf_priority = 0; iport->selected_fcf.fka_adv_period = 0; iport->selected_fcf.ka_disabled = 0; diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h index e17f5d8226bf..1323ed8aa717 100644 --- a/drivers/scsi/hisi_sas/hisi_sas.h +++ b/drivers/scsi/hisi_sas/hisi_sas.h @@ -46,6 +46,13 @@ #define HISI_SAS_IOST_ITCT_CACHE_DW_SZ 10 #define HISI_SAS_FIFO_DATA_DW_SIZE 32 +#define HISI_SAS_REG_MEM_SIZE 4 +#define HISI_SAS_MAX_CDB_LEN 16 +#define HISI_SAS_BLK_QUEUE_DEPTH 64 + +#define BYTE_TO_DW 4 +#define BYTE_TO_DDW 8 + #define HISI_SAS_STATUS_BUF_SZ (sizeof(struct hisi_sas_status_buffer)) #define HISI_SAS_COMMAND_TABLE_SZ (sizeof(union hisi_sas_command_table)) @@ -92,6 +99,8 @@ #define HISI_SAS_WAIT_PHYUP_TIMEOUT (30 * HZ) #define HISI_SAS_CLEAR_ITCT_TIMEOUT (20 * HZ) +#define HISI_SAS_DELAY_FOR_PHY_DISABLE 100 +#define NAME_BUF_SIZE 256 struct hisi_hba; @@ -167,6 +176,8 @@ struct hisi_sas_debugfs_fifo { u32 rd_data[HISI_SAS_FIFO_DATA_DW_SIZE]; }; +#define FRAME_RCVD_BUF 32 +#define SAS_PHY_RESV_SIZE 2 struct hisi_sas_phy { struct work_struct works[HISI_PHYES_NUM]; struct hisi_hba *hisi_hba; @@ -178,10 +189,10 @@ struct hisi_sas_phy { spinlock_t lock; u64 port_id; /* from hw */ u64 frame_rcvd_size; - u8 frame_rcvd[32]; + u8 frame_rcvd[FRAME_RCVD_BUF]; u8 phy_attached; u8 in_reset; - u8 reserved[2]; + u8 reserved[SAS_PHY_RESV_SIZE]; u32 phy_type; u32 code_violation_err_count; enum sas_linkrate minimum_linkrate; @@ -348,7 +359,8 @@ struct hisi_sas_hw { const struct scsi_host_template *sht; }; -#define HISI_SAS_MAX_DEBUGFS_DUMP (50) +#define HISI_SAS_MAX_DEBUGFS_DUMP 50 +#define HISI_SAS_DEFAULT_DEBUGFS_DUMP 1 struct hisi_sas_debugfs_cq { struct hisi_sas_cq *cq; @@ -448,12 +460,12 @@ struct hisi_hba { dma_addr_t sata_breakpoint_dma; struct hisi_sas_slot *slot_info; unsigned long flags; - const struct hisi_sas_hw *hw; /* Low level hw interface */ + const struct hisi_sas_hw *hw; /* Low level hw interface */ unsigned long sata_dev_bitmap[BITS_TO_LONGS(HISI_SAS_MAX_DEVICES)]; struct work_struct rst_work; u32 phy_state; - u32 intr_coal_ticks; /* Time of interrupt coalesce in us */ - u32 intr_coal_count; /* Interrupt count to coalesce */ + u32 intr_coal_ticks; /* Time of interrupt coalesce in us */ + u32 intr_coal_count; /* Interrupt count to coalesce */ int cq_nvecs; @@ -528,12 +540,13 @@ struct hisi_sas_cmd_hdr { __le64 dif_prd_table_addr; }; +#define ITCT_RESV_DDW 12 struct hisi_sas_itct { __le64 qw0; __le64 sas_addr; __le64 qw2; __le64 qw3; - __le64 qw4_15[12]; + __le64 qw4_15[ITCT_RESV_DDW]; }; struct hisi_sas_iost { @@ -543,22 +556,26 @@ struct hisi_sas_iost { __le64 qw3; }; +#define ERROR_RECORD_BUF_DW 4 struct hisi_sas_err_record { - u32 data[4]; + u32 data[ERROR_RECORD_BUF_DW]; }; +#define FIS_RESV_DW 3 struct hisi_sas_initial_fis { struct hisi_sas_err_record err_record; struct dev_to_host_fis fis; - u32 rsvd[3]; + u32 rsvd[FIS_RESV_DW]; }; +#define BREAKPOINT_DATA_SIZE 128 struct 
hisi_sas_breakpoint { - u8 data[128]; + u8 data[BREAKPOINT_DATA_SIZE]; }; +#define BREAKPOINT_TAG_NUM 32 struct hisi_sas_sata_breakpoint { - struct hisi_sas_breakpoint tag[32]; + struct hisi_sas_breakpoint tag[BREAKPOINT_TAG_NUM]; }; struct hisi_sas_sge { @@ -569,13 +586,15 @@ struct hisi_sas_sge { __le32 data_off; }; +#define SMP_CMD_TABLE_SIZE 44 struct hisi_sas_command_table_smp { - u8 bytes[44]; + u8 bytes[SMP_CMD_TABLE_SIZE]; }; +#define DUMMY_BUF_SIZE 12 struct hisi_sas_command_table_stp { struct host_to_dev_fis command_fis; - u8 dummy[12]; + u8 dummy[DUMMY_BUF_SIZE]; u8 atapi_cdb[ATAPI_CDB_LEN]; }; @@ -589,12 +608,13 @@ struct hisi_sas_sge_dif_page { struct hisi_sas_sge sge[HISI_SAS_SGE_DIF_PAGE_CNT]; } __aligned(16); +#define PROT_BUF_SIZE 7 struct hisi_sas_command_table_ssp { struct ssp_frame_hdr hdr; union { struct { struct ssp_command_iu task; - u32 prot[7]; + u32 prot[PROT_BUF_SIZE]; }; struct ssp_tmf_iu ssp_task; struct xfer_rdy_iu xfer_rdy; @@ -608,9 +628,10 @@ union hisi_sas_command_table { struct hisi_sas_command_table_stp stp; } __aligned(16); +#define IU_BUF_SIZE 1024 struct hisi_sas_status_buffer { struct hisi_sas_err_record err; - u8 iu[1024]; + u8 iu[IU_BUF_SIZE]; } __aligned(16); struct hisi_sas_slot_buf_table { diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c index 944cf2fb0561..4864e957be0b 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_main.c +++ b/drivers/scsi/hisi_sas/hisi_sas_main.c @@ -7,6 +7,16 @@ #include "hisi_sas.h" #define DRV_NAME "hisi_sas" +#define LINK_RATE_BIT_MASK 2 +#define FIS_BUF_SIZE 20 +#define WAIT_CMD_COMPLETE_DELAY 100 +#define WAIT_CMD_COMPLETE_TMROUT 5000 +#define DELAY_FOR_LINK_READY 2000 +#define BLK_CNT_OPTIMIZE_MARK 64 +#define HZ_TO_MHZ 1000000 +#define DELAY_FOR_SOFTRESET_MAX 1000 +#define DELAY_FOR_SOFTRESET_MIN 900 + #define DEV_IS_GONE(dev) \ ((!dev) || (dev->dev_type == SAS_PHY_UNUSED)) @@ -114,12 +124,10 @@ u8 hisi_sas_get_ata_protocol(struct sas_task *task) } default: - { if (direction == DMA_NONE) return HISI_SAS_SATA_PROTOCOL_NONDATA; return hisi_sas_get_ata_protocol_from_tf(qc); } - } } EXPORT_SYMBOL_GPL(hisi_sas_get_ata_protocol); @@ -131,7 +139,7 @@ void hisi_sas_sata_done(struct sas_task *task, struct hisi_sas_status_buffer *status_buf = hisi_sas_status_buf_addr_mem(slot); u8 *iu = &status_buf->iu[0]; - struct dev_to_host_fis *d2h = (struct dev_to_host_fis *)iu; + struct dev_to_host_fis *d2h = (struct dev_to_host_fis *)iu; resp->frame_len = sizeof(struct dev_to_host_fis); memcpy(&resp->ending_fis[0], d2h, sizeof(struct dev_to_host_fis)); @@ -151,7 +159,7 @@ u8 hisi_sas_get_prog_phy_linkrate_mask(enum sas_linkrate max) max -= SAS_LINK_RATE_1_5_GBPS; for (i = 0; i <= max; i++) - rate |= 1 << (i * 2); + rate |= 1 << (i * LINK_RATE_BIT_MASK); return rate; } EXPORT_SYMBOL_GPL(hisi_sas_get_prog_phy_linkrate_mask); @@ -900,7 +908,7 @@ int hisi_sas_sdev_configure(struct scsi_device *sdev, struct queue_limits *lim) if (ret) return ret; if (!dev_is_sata(dev)) - sas_change_queue_depth(sdev, 64); + sas_change_queue_depth(sdev, HISI_SAS_BLK_QUEUE_DEPTH); return 0; } @@ -1262,7 +1270,7 @@ static int hisi_sas_phy_set_linkrate(struct hisi_hba *hisi_hba, int phy_no, sas_phy->phy->minimum_linkrate = min; hisi_sas_phy_enable(hisi_hba, phy_no, 0); - msleep(100); + msleep(HISI_SAS_DELAY_FOR_PHY_DISABLE); hisi_hba->hw->phy_set_linkrate(hisi_hba, phy_no, &_r); hisi_sas_phy_enable(hisi_hba, phy_no, 1); @@ -1292,7 +1300,7 @@ static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func, case 
PHY_FUNC_LINK_RESET: hisi_sas_phy_enable(hisi_hba, phy_no, 0); - msleep(100); + msleep(HISI_SAS_DELAY_FOR_PHY_DISABLE); hisi_sas_phy_enable(hisi_hba, phy_no, 1); break; @@ -1347,7 +1355,7 @@ static void hisi_sas_fill_ata_reset_cmd(struct ata_device *dev, static int hisi_sas_softreset_ata_disk(struct domain_device *device) { - u8 fis[20] = {0}; + u8 fis[FIS_BUF_SIZE] = {0}; struct ata_port *ap = device->sata_dev.ap; struct ata_link *link; int rc = TMF_RESP_FUNC_FAILED; @@ -1364,7 +1372,7 @@ static int hisi_sas_softreset_ata_disk(struct domain_device *device) } if (rc == TMF_RESP_FUNC_COMPLETE) { - usleep_range(900, 1000); + usleep_range(DELAY_FOR_SOFTRESET_MIN, DELAY_FOR_SOFTRESET_MAX); ata_for_each_link(link, ap, EDGE) { int pmp = sata_srst_pmp(link); @@ -1494,7 +1502,7 @@ static void hisi_sas_send_ata_reset_each_phy(struct hisi_hba *hisi_hba, struct device *dev = hisi_hba->dev; int rc = TMF_RESP_FUNC_FAILED; struct ata_link *link; - u8 fis[20] = {0}; + u8 fis[FIS_BUF_SIZE] = {0}; int i; for (i = 0; i < hisi_hba->n_phy; i++) { @@ -1561,7 +1569,9 @@ void hisi_sas_controller_reset_prepare(struct hisi_hba *hisi_hba) hisi_hba->phy_state = hisi_hba->hw->get_phys_state(hisi_hba); scsi_block_requests(shost); - hisi_hba->hw->wait_cmds_complete_timeout(hisi_hba, 100, 5000); + hisi_hba->hw->wait_cmds_complete_timeout(hisi_hba, + WAIT_CMD_COMPLETE_DELAY, + WAIT_CMD_COMPLETE_TMROUT); /* * hisi_hba->timer is only used for v1/v2 hw, and check hw->sht @@ -1862,7 +1872,7 @@ static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device) rc = ata_wait_after_reset(link, jiffies + HISI_SAS_WAIT_PHYUP_TIMEOUT, smp_ata_check_ready_type); } else { - msleep(2000); + msleep(DELAY_FOR_LINK_READY); } return rc; @@ -1885,33 +1895,14 @@ static int hisi_sas_I_T_nexus_reset(struct domain_device *device) } hisi_sas_dereg_device(hisi_hba, device); - rc = hisi_sas_debug_I_T_nexus_reset(device); - if (rc == TMF_RESP_FUNC_COMPLETE && dev_is_sata(device)) { - struct sas_phy *local_phy; - + if (dev_is_sata(device)) { rc = hisi_sas_softreset_ata_disk(device); - switch (rc) { - case -ECOMM: - rc = -ENODEV; - break; - case TMF_RESP_FUNC_FAILED: - case -EMSGSIZE: - case -EIO: - local_phy = sas_get_local_phy(device); - rc = sas_phy_enable(local_phy, 0); - if (!rc) { - local_phy->enabled = 0; - dev_err(dev, "Disabled local phy of ATA disk %016llx due to softreset fail (%d)\n", - SAS_ADDR(device->sas_addr), rc); - rc = -ENODEV; - } - sas_put_local_phy(local_phy); - break; - default: - break; - } + if (rc == TMF_RESP_FUNC_FAILED) + dev_err(dev, "ata disk %016llx reset (%d)\n", + SAS_ADDR(device->sas_addr), rc); } + rc = hisi_sas_debug_I_T_nexus_reset(device); if ((rc == TMF_RESP_FUNC_COMPLETE) || (rc == -ENODEV)) hisi_sas_release_task(hisi_hba, device); @@ -1934,12 +1925,9 @@ static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun) hisi_sas_dereg_device(hisi_hba, device); if (dev_is_sata(device)) { - struct sas_phy *phy; - - phy = sas_get_local_phy(device); + struct sas_phy *phy = sas_get_local_phy(device); rc = sas_phy_reset(phy, true); - if (rc == 0) hisi_sas_release_task(hisi_hba, device); sas_put_local_phy(phy); @@ -2123,7 +2111,7 @@ void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy, hisi_sas_bytes_dmaed(hisi_hba, phy_no, gfp_flags); hisi_sas_port_notify_formed(sas_phy); } else { - struct hisi_sas_port *port = phy->port; + struct hisi_sas_port *port = phy->port; if (test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags) || phy->in_reset) { @@ -2296,12 +2284,14 @@ int hisi_sas_alloc(struct 
hisi_hba *hisi_hba) goto err_out; /* roundup to avoid overly large block size */ - max_command_entries_ru = roundup(max_command_entries, 64); + max_command_entries_ru = roundup(max_command_entries, + BLK_CNT_OPTIMIZE_MARK); if (hisi_hba->prot_mask & HISI_SAS_DIX_PROT_MASK) sz_slot_buf_ru = sizeof(struct hisi_sas_slot_dif_buf_table); else sz_slot_buf_ru = sizeof(struct hisi_sas_slot_buf_table); - sz_slot_buf_ru = roundup(sz_slot_buf_ru, 64); + + sz_slot_buf_ru = roundup(sz_slot_buf_ru, BLK_CNT_OPTIMIZE_MARK); s = max(lcm(max_command_entries_ru, sz_slot_buf_ru), PAGE_SIZE); blk_cnt = (max_command_entries_ru * sz_slot_buf_ru) / s; slots_per_blk = s / sz_slot_buf_ru; @@ -2466,7 +2456,8 @@ int hisi_sas_get_fw_info(struct hisi_hba *hisi_hba) if (IS_ERR(refclk)) dev_dbg(dev, "no ref clk property\n"); else - hisi_hba->refclk_frequency_mhz = clk_get_rate(refclk) / 1000000; + hisi_hba->refclk_frequency_mhz = clk_get_rate(refclk) / + HZ_TO_MHZ; if (device_property_read_u32(dev, "phy-count", &hisi_hba->n_phy)) { dev_err(dev, "could not get property phy-count\n"); @@ -2588,7 +2579,7 @@ int hisi_sas_probe(struct platform_device *pdev, shost->max_id = HISI_SAS_MAX_DEVICES; shost->max_lun = ~0; shost->max_channel = 1; - shost->max_cmd_len = 16; + shost->max_cmd_len = HISI_SAS_MAX_CDB_LEN; if (hisi_hba->hw->slot_index_alloc) { shost->can_queue = HISI_SAS_MAX_COMMANDS; shost->cmd_per_lun = HISI_SAS_MAX_COMMANDS; diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c index 6621d633b2cc..6d97339371fb 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c @@ -1759,7 +1759,7 @@ static const struct scsi_host_template sht_v1_hw = { .sg_tablesize = HISI_SAS_SGE_PAGE_CNT, .sdev_init = hisi_sas_sdev_init, .shost_groups = host_v1_hw_groups, - .host_reset = hisi_sas_host_reset, + .host_reset = hisi_sas_host_reset, }; static const struct hisi_sas_hw hisi_sas_v1_hw = { diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c index 1e9830940f84..2adfedb8484c 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c @@ -2771,7 +2771,7 @@ static irqreturn_t int_phy_updown_v2_hw(int irq_no, void *p) irq_msk = (hisi_sas_read32(hisi_hba, HGC_INVLD_DQE_INFO) >> HGC_INVLD_DQE_INFO_FB_CH0_OFF) & 0x1ff; while (irq_msk) { - if (irq_msk & 1) { + if (irq_msk & 1) { u32 reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT0); @@ -3111,7 +3111,7 @@ static irqreturn_t fatal_axi_int_v2_hw(int irq_no, void *p) return IRQ_HANDLED; } -static irqreturn_t cq_thread_v2_hw(int irq_no, void *p) +static irqreturn_t cq_thread_v2_hw(int irq_no, void *p) { struct hisi_sas_cq *cq = p; struct hisi_hba *hisi_hba = cq->hisi_hba; @@ -3499,7 +3499,7 @@ static int write_gpio_v2_hw(struct hisi_hba *hisi_hba, u8 reg_type, * numbered drive in the fourth byte. * See SFF-8485 Rev. 0.7 Table 24. 
*/ - void __iomem *reg_addr = hisi_hba->sgpio_regs + + void __iomem *reg_addr = hisi_hba->sgpio_regs + reg_index * 4 + phy_no; int data_idx = phy_no + 3 - (phy_no % 4) * 2; diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c index 08dac9ae2f10..bc5d5356dd00 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c @@ -466,6 +466,12 @@ #define ITCT_HDR_RTOLT_OFF 48 #define ITCT_HDR_RTOLT_MSK (0xffffULL << ITCT_HDR_RTOLT_OFF) +/*debugfs*/ +#define TWO_PARA_PER_LINE 2 +#define FOUR_PARA_PER_LINE 4 +#define DUMP_BUF_SIZE 8 +#define BIST_BUF_SIZE 16 + struct hisi_sas_protect_iu_v3_hw { u32 dw0; u32 lbrtcv; @@ -536,6 +542,43 @@ struct hisi_sas_err_record_v3 { #define BASE_VECTORS_V3_HW 16 #define MIN_AFFINE_VECTORS_V3_HW (BASE_VECTORS_V3_HW + 1) +#define IRQ_PHY_UP_DOWN_INDEX 1 +#define IRQ_CHL_INDEX 2 +#define IRQ_AXI_INDEX 11 + +#define DELAY_FOR_RESET_HW 100 +#define HDR_SG_MOD 0x2 +#define LUN_SIZE 8 +#define ATTR_PRIO_REGION 9 +#define CDB_REGION 12 +#define PRIO_OFF 3 +#define TMF_REGION 10 +#define TAG_MSB 12 +#define TAG_LSB 13 +#define SMP_FRAME_TYPE 2 +#define SMP_CRC_SIZE 4 +#define HDR_TAG_OFF 3 +#define HOST_NO_OFF 6 +#define PHY_NO_OFF 7 +#define IDENTIFY_REG_READ 6 +#define LINK_RESET_TIMEOUT_OFF 4 +#define DECIMALISM_FLAG 10 +#define WAIT_RETRY 100 +#define WAIT_TMROUT 5000 + +#define ID_DWORD0_INDEX 0 +#define ID_DWORD1_INDEX 1 +#define ID_DWORD2_INDEX 2 +#define ID_DWORD3_INDEX 3 +#define ID_DWORD4_INDEX 4 +#define ID_DWORD5_INDEX 5 +#define TICKS_BIT_INDEX 24 +#define COUNT_BIT_INDEX 8 + +#define PORT_REG_LENGTH 0x100 +#define GLOBAL_REG_LENGTH 0x800 +#define AXI_REG_LENGTH 0x61 +#define RAS_REG_LENGTH 0x10 #define CHNL_INT_STS_MSK 0xeeeeeeee #define CHNL_INT_STS_PHY_MSK 0xe @@ -811,17 +854,17 @@ static void config_id_frame_v3_hw(struct hisi_hba *hisi_hba, int phy_no) identify_buffer = (u32 *)(&identify_frame); hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD0, - __swab32(identify_buffer[0])); + __swab32(identify_buffer[ID_DWORD0_INDEX])); hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD1, - __swab32(identify_buffer[1])); + __swab32(identify_buffer[ID_DWORD1_INDEX])); hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD2, - __swab32(identify_buffer[2])); + __swab32(identify_buffer[ID_DWORD2_INDEX])); hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD3, - __swab32(identify_buffer[3])); + __swab32(identify_buffer[ID_DWORD3_INDEX])); hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD4, - __swab32(identify_buffer[4])); + __swab32(identify_buffer[ID_DWORD4_INDEX])); hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD5, - __swab32(identify_buffer[5])); + __swab32(identify_buffer[ID_DWORD5_INDEX])); } static void setup_itct_v3_hw(struct hisi_hba *hisi_hba, @@ -941,7 +984,7 @@ static int reset_hw_v3_hw(struct hisi_hba *hisi_hba) /* Disable all of the PHYs */ hisi_sas_stop_phys(hisi_hba); - udelay(50); + udelay(HISI_SAS_DELAY_FOR_PHY_DISABLE); /* Ensure axi bus idle */ ret = hisi_sas_read32_poll_timeout(AXI_CFG, val, !val, @@ -981,7 +1024,7 @@ static int hw_init_v3_hw(struct hisi_hba *hisi_hba) return rc; } - msleep(100); + msleep(DELAY_FOR_RESET_HW); init_reg_v3_hw(hisi_hba); if (guid_parse("D5918B4B-37AE-4E10-A99F-E5E8A6EF4C1F", &guid)) { @@ -1030,7 +1073,7 @@ static void disable_phy_v3_hw(struct hisi_hba *hisi_hba, int phy_no) cfg &= ~PHY_CFG_ENA_MSK; hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg); - mdelay(50); + mdelay(HISI_SAS_DELAY_FOR_PHY_DISABLE); state = hisi_sas_read32(hisi_hba, 
PHY_STATE); if (state & BIT(phy_no)) { @@ -1066,7 +1109,7 @@ static void phy_hard_reset_v3_hw(struct hisi_hba *hisi_hba, int phy_no) hisi_sas_phy_write32(hisi_hba, phy_no, TXID_AUTO, txid_auto | TX_HARDRST_MSK); } - msleep(100); + msleep(HISI_SAS_DELAY_FOR_PHY_DISABLE); hisi_sas_phy_enable(hisi_hba, phy_no, 1); } @@ -1111,7 +1154,8 @@ static int get_wideport_bitmap_v3_hw(struct hisi_hba *hisi_hba, int port_id) for (i = 0; i < hisi_hba->n_phy; i++) if (phy_state & BIT(i)) - if (((phy_port_num_ma >> (i * 4)) & 0xf) == port_id) + if (((phy_port_num_ma >> (i * HISI_SAS_REG_MEM_SIZE)) & 0xf) == + port_id) bitmap |= BIT(i); return bitmap; @@ -1308,10 +1352,10 @@ static void prep_ssp_v3_hw(struct hisi_hba *hisi_hba, /* map itct entry */ dw1 |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF; - dw2 = (((sizeof(struct ssp_command_iu) + sizeof(struct ssp_frame_hdr) - + 3) / 4) << CMD_HDR_CFL_OFF) | - ((HISI_SAS_MAX_SSP_RESP_SZ / 4) << CMD_HDR_MRFL_OFF) | - (2 << CMD_HDR_SG_MOD_OFF); + dw2 = (((sizeof(struct ssp_command_iu) + sizeof(struct ssp_frame_hdr) + + 3) / BYTE_TO_DW) << CMD_HDR_CFL_OFF) | + ((HISI_SAS_MAX_SSP_RESP_SZ / BYTE_TO_DW) << CMD_HDR_MRFL_OFF) | + (HDR_SG_MOD << CMD_HDR_SG_MOD_OFF); hdr->dw2 = cpu_to_le32(dw2); hdr->transfer_tags = cpu_to_le32(slot->idx); @@ -1331,18 +1375,19 @@ static void prep_ssp_v3_hw(struct hisi_hba *hisi_hba, buf_cmd = hisi_sas_cmd_hdr_addr_mem(slot) + sizeof(struct ssp_frame_hdr); - memcpy(buf_cmd, &task->ssp_task.LUN, 8); + memcpy(buf_cmd, &task->ssp_task.LUN, LUN_SIZE); if (!tmf) { - buf_cmd[9] = ssp_task->task_attr; - memcpy(buf_cmd + 12, scsi_cmnd->cmnd, scsi_cmnd->cmd_len); + buf_cmd[ATTR_PRIO_REGION] = ssp_task->task_attr; + memcpy(buf_cmd + CDB_REGION, scsi_cmnd->cmnd, + scsi_cmnd->cmd_len); } else { - buf_cmd[10] = tmf->tmf; + buf_cmd[TMF_REGION] = tmf->tmf; switch (tmf->tmf) { case TMF_ABORT_TASK: case TMF_QUERY_TASK: - buf_cmd[12] = + buf_cmd[TAG_MSB] = (tmf->tag_of_task_to_be_managed >> 8) & 0xff; - buf_cmd[13] = + buf_cmd[TAG_LSB] = tmf->tag_of_task_to_be_managed & 0xff; break; default: @@ -1375,7 +1420,8 @@ static void prep_ssp_v3_hw(struct hisi_hba *hisi_hba, unsigned int interval = scsi_prot_interval(scsi_cmnd); unsigned int ilog2_interval = ilog2(interval); - len = (task->total_xfer_len >> ilog2_interval) * 8; + len = (task->total_xfer_len >> ilog2_interval) * + BYTE_TO_DDW; } } @@ -1395,6 +1441,7 @@ static void prep_smp_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_device *sas_dev = device->lldd_dev; dma_addr_t req_dma_addr; unsigned int req_len; + u32 cfl; /* req */ sg_req = &task->smp_task.smp_req; @@ -1405,7 +1452,7 @@ static void prep_smp_v3_hw(struct hisi_hba *hisi_hba, /* dw0 */ hdr->dw0 = cpu_to_le32((port->id << CMD_HDR_PORT_OFF) | (1 << CMD_HDR_PRIORITY_OFF) | /* high pri */ - (2 << CMD_HDR_CMD_OFF)); /* smp */ + (SMP_FRAME_TYPE << CMD_HDR_CMD_OFF)); /* smp */ /* map itct entry */ hdr->dw1 = cpu_to_le32((sas_dev->device_id << CMD_HDR_DEV_ID_OFF) | @@ -1413,8 +1460,9 @@ static void prep_smp_v3_hw(struct hisi_hba *hisi_hba, (DIR_NO_DATA << CMD_HDR_DIR_OFF)); /* dw2 */ - hdr->dw2 = cpu_to_le32((((req_len - 4) / 4) << CMD_HDR_CFL_OFF) | - (HISI_SAS_MAX_SMP_RESP_SZ / 4 << + cfl = (req_len - SMP_CRC_SIZE) / BYTE_TO_DW; + hdr->dw2 = cpu_to_le32((cfl << CMD_HDR_CFL_OFF) | + (HISI_SAS_MAX_SMP_RESP_SZ / BYTE_TO_DW << CMD_HDR_MRFL_OFF)); hdr->transfer_tags = cpu_to_le32(slot->idx << CMD_HDR_IPTT_OFF); @@ -1479,12 +1527,13 @@ static void prep_ata_v3_hw(struct hisi_hba *hisi_hba, struct ata_queued_cmd *qc = task->uldd_task; hdr_tag = qc->tag; - 
task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3); + task->ata_task.fis.sector_count |= + (u8)(hdr_tag << HDR_TAG_OFF); dw2 |= hdr_tag << CMD_HDR_NCQ_TAG_OFF; } - dw2 |= (HISI_SAS_MAX_STP_RESP_SZ / 4) << CMD_HDR_CFL_OFF | - 2 << CMD_HDR_SG_MOD_OFF; + dw2 |= (HISI_SAS_MAX_STP_RESP_SZ / BYTE_TO_DW) << CMD_HDR_CFL_OFF | + HDR_SG_MOD << CMD_HDR_SG_MOD_OFF; hdr->dw2 = cpu_to_le32(dw2); /* dw3 */ @@ -1544,9 +1593,9 @@ static irqreturn_t phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba) hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_PHY_ENA_MSK, 1); port_id = hisi_sas_read32(hisi_hba, PHY_PORT_NUM_MA); - port_id = (port_id >> (4 * phy_no)) & 0xf; + port_id = (port_id >> (HISI_SAS_REG_MEM_SIZE * phy_no)) & 0xf; link_rate = hisi_sas_read32(hisi_hba, PHY_CONN_RATE); - link_rate = (link_rate >> (phy_no * 4)) & 0xf; + link_rate = (link_rate >> (phy_no * HISI_SAS_REG_MEM_SIZE)) & 0xf; if (port_id == 0xf) { dev_err(dev, "phyup: phy%d invalid portid\n", phy_no); @@ -1579,8 +1628,8 @@ static irqreturn_t phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba) sas_phy->oob_mode = SATA_OOB_MODE; attached_sas_addr[0] = 0x50; - attached_sas_addr[6] = shost->host_no; - attached_sas_addr[7] = phy_no; + attached_sas_addr[HOST_NO_OFF] = shost->host_no; + attached_sas_addr[PHY_NO_OFF] = phy_no; memcpy(sas_phy->attached_sas_addr, attached_sas_addr, SAS_ADDR_SIZE); @@ -1596,7 +1645,7 @@ static irqreturn_t phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba) (struct sas_identify_frame *)frame_rcvd; dev_info(dev, "phyup: phy%d link_rate=%d\n", phy_no, link_rate); - for (i = 0; i < 6; i++) { + for (i = 0; i < IDENTIFY_REG_READ; i++) { u32 idaf = hisi_sas_phy_read32(hisi_hba, phy_no, RX_IDAF_DWORD0 + (i * 4)); frame_rcvd[i] = __swab32(idaf); @@ -1701,7 +1750,7 @@ static irqreturn_t int_phy_up_down_bcast_v3_hw(int irq_no, void *p) irq_msk = hisi_sas_read32(hisi_hba, CHNL_INT_STATUS) & 0x11111111; while (irq_msk) { - if (irq_msk & 1) { + if (irq_msk & 1) { u32 irq_value = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT0); u32 phy_state = hisi_sas_read32(hisi_hba, PHY_STATE); @@ -1866,7 +1915,7 @@ static void handle_chl_int2_v3_hw(struct hisi_hba *hisi_hba, int phy_no) dev_warn(dev, "phy%d stp link timeout (0x%x)\n", phy_no, reg_value); - if (reg_value & BIT(4)) + if (reg_value & BIT(LINK_RESET_TIMEOUT_OFF)) hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET); } @@ -1924,8 +1973,7 @@ static irqreturn_t int_chnl_int_v3_hw(int irq_no, void *p) u32 irq_msk; int phy_no = 0; - irq_msk = hisi_sas_read32(hisi_hba, CHNL_INT_STATUS) - & CHNL_INT_STS_MSK; + irq_msk = hisi_sas_read32(hisi_hba, CHNL_INT_STATUS) & CHNL_INT_STS_MSK; while (irq_msk) { if (irq_msk & (CHNL_INT_STS_INT0_MSK << (phy_no * CHNL_WIDTH))) @@ -2570,7 +2618,6 @@ static int interrupt_preinit_v3_hw(struct hisi_hba *hisi_hba) if (vectors < 0) return -ENOENT; - hisi_hba->cq_nvecs = vectors - BASE_VECTORS_V3_HW - hisi_hba->iopoll_q_cnt; shost->nr_hw_queues = hisi_hba->cq_nvecs + hisi_hba->iopoll_q_cnt; @@ -2583,7 +2630,7 @@ static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba) struct pci_dev *pdev = hisi_hba->pci_dev; int rc, i; - rc = devm_request_irq(dev, pci_irq_vector(pdev, 1), + rc = devm_request_irq(dev, pci_irq_vector(pdev, IRQ_PHY_UP_DOWN_INDEX), int_phy_up_down_bcast_v3_hw, 0, DRV_NAME " phy", hisi_hba); if (rc) { @@ -2591,7 +2638,7 @@ static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba) return -ENOENT; } - rc = devm_request_irq(dev, pci_irq_vector(pdev, 2), + rc = devm_request_irq(dev, pci_irq_vector(pdev, IRQ_CHL_INDEX), int_chnl_int_v3_hw, 0, 
DRV_NAME " channel", hisi_hba); if (rc) { @@ -2599,7 +2646,7 @@ static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba) return -ENOENT; } - rc = devm_request_irq(dev, pci_irq_vector(pdev, 11), + rc = devm_request_irq(dev, pci_irq_vector(pdev, IRQ_AXI_INDEX), fatal_axi_int_v3_hw, 0, DRV_NAME " fatal", hisi_hba); if (rc) { @@ -2612,7 +2659,8 @@ static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba) for (i = 0; i < hisi_hba->cq_nvecs; i++) { struct hisi_sas_cq *cq = &hisi_hba->cq[i]; - int nr = hisi_sas_intr_conv ? 16 : 16 + i; + int nr = hisi_sas_intr_conv ? BASE_VECTORS_V3_HW : + BASE_VECTORS_V3_HW + i; unsigned long irqflags = hisi_sas_intr_conv ? IRQF_SHARED : IRQF_ONESHOT; @@ -2670,14 +2718,14 @@ static void interrupt_disable_v3_hw(struct hisi_hba *hisi_hba) struct pci_dev *pdev = hisi_hba->pci_dev; int i; - synchronize_irq(pci_irq_vector(pdev, 1)); - synchronize_irq(pci_irq_vector(pdev, 2)); - synchronize_irq(pci_irq_vector(pdev, 11)); + synchronize_irq(pci_irq_vector(pdev, IRQ_PHY_UP_DOWN_INDEX)); + synchronize_irq(pci_irq_vector(pdev, IRQ_CHL_INDEX)); + synchronize_irq(pci_irq_vector(pdev, IRQ_AXI_INDEX)); for (i = 0; i < hisi_hba->queue_count; i++) hisi_sas_write32(hisi_hba, OQ0_INT_SRC_MSK + 0x4 * i, 0x1); for (i = 0; i < hisi_hba->cq_nvecs; i++) - synchronize_irq(pci_irq_vector(pdev, i + 16)); + synchronize_irq(pci_irq_vector(pdev, i + BASE_VECTORS_V3_HW)); hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, 0xffffffff); hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK2, 0xffffffff); @@ -2709,7 +2757,7 @@ static int disable_host_v3_hw(struct hisi_hba *hisi_hba) hisi_sas_stop_phys(hisi_hba); - mdelay(10); + mdelay(HISI_SAS_DELAY_FOR_PHY_DISABLE); reg_val = hisi_sas_read32(hisi_hba, AXI_MASTER_CFG_BASE + AM_CTRL_GLOBAL); @@ -2846,13 +2894,13 @@ static ssize_t intr_coal_ticks_v3_hw_store(struct device *dev, u32 intr_coal_ticks; int ret; - ret = kstrtou32(buf, 10, &intr_coal_ticks); + ret = kstrtou32(buf, DECIMALISM_FLAG, &intr_coal_ticks); if (ret) { dev_err(dev, "Input data of interrupt coalesce unmatch\n"); return -EINVAL; } - if (intr_coal_ticks >= BIT(24)) { + if (intr_coal_ticks >= BIT(TICKS_BIT_INDEX)) { dev_err(dev, "intr_coal_ticks must be less than 2^24!\n"); return -EINVAL; } @@ -2885,13 +2933,13 @@ static ssize_t intr_coal_count_v3_hw_store(struct device *dev, u32 intr_coal_count; int ret; - ret = kstrtou32(buf, 10, &intr_coal_count); + ret = kstrtou32(buf, DECIMALISM_FLAG, &intr_coal_count); if (ret) { dev_err(dev, "Input data of interrupt coalesce unmatch\n"); return -EINVAL; } - if (intr_coal_count >= BIT(8)) { + if (intr_coal_count >= BIT(COUNT_BIT_INDEX)) { dev_err(dev, "intr_coal_count must be less than 2^8!\n"); return -EINVAL; } @@ -3023,7 +3071,7 @@ static const struct hisi_sas_debugfs_reg_lu debugfs_port_reg_lu[] = { static const struct hisi_sas_debugfs_reg debugfs_port_reg = { .lu = debugfs_port_reg_lu, - .count = 0x100, + .count = PORT_REG_LENGTH, .base_off = PORT_BASE, }; @@ -3097,7 +3145,7 @@ static const struct hisi_sas_debugfs_reg_lu debugfs_global_reg_lu[] = { static const struct hisi_sas_debugfs_reg debugfs_global_reg = { .lu = debugfs_global_reg_lu, - .count = 0x800, + .count = GLOBAL_REG_LENGTH, }; static const struct hisi_sas_debugfs_reg_lu debugfs_axi_reg_lu[] = { @@ -3110,7 +3158,7 @@ static const struct hisi_sas_debugfs_reg_lu debugfs_axi_reg_lu[] = { static const struct hisi_sas_debugfs_reg debugfs_axi_reg = { .lu = debugfs_axi_reg_lu, - .count = 0x61, + .count = AXI_REG_LENGTH, .base_off = AXI_MASTER_CFG_BASE, }; @@ -3127,7 +3175,7 @@ static const struct 
hisi_sas_debugfs_reg_lu debugfs_ras_reg_lu[] = { static const struct hisi_sas_debugfs_reg debugfs_ras_reg = { .lu = debugfs_ras_reg_lu, - .count = 0x10, + .count = RAS_REG_LENGTH, .base_off = RAS_BASE, }; @@ -3136,7 +3184,7 @@ static void debugfs_snapshot_prepare_v3_hw(struct hisi_hba *hisi_hba) struct Scsi_Host *shost = hisi_hba->shost; scsi_block_requests(shost); - wait_cmds_complete_timeout_v3_hw(hisi_hba, 100, 5000); + wait_cmds_complete_timeout_v3_hw(hisi_hba, WAIT_RETRY, WAIT_TMROUT); set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); hisi_sas_sync_cqs(hisi_hba); @@ -3177,7 +3225,7 @@ static void read_iost_itct_cache_v3_hw(struct hisi_hba *hisi_hba, return; } - memset(buf, 0, cache_dw_size * 4); + memset(buf, 0, cache_dw_size * BYTE_TO_DW); buf[0] = val; for (i = 1; i < cache_dw_size; i++) @@ -3224,7 +3272,7 @@ static void hisi_sas_bist_test_restore_v3_hw(struct hisi_hba *hisi_hba) reg_val = hisi_sas_phy_read32(hisi_hba, phy_no, PROG_PHY_LINK_RATE); /* init OOB link rate as 1.5 Gbits */ reg_val &= ~CFG_PROG_OOB_PHY_LINK_RATE_MSK; - reg_val |= (0x8 << CFG_PROG_OOB_PHY_LINK_RATE_OFF); + reg_val |= (SAS_LINK_RATE_1_5_GBPS << CFG_PROG_OOB_PHY_LINK_RATE_OFF); hisi_sas_phy_write32(hisi_hba, phy_no, PROG_PHY_LINK_RATE, reg_val); /* enable PHY */ @@ -3233,6 +3281,9 @@ static void hisi_sas_bist_test_restore_v3_hw(struct hisi_hba *hisi_hba) #define SAS_PHY_BIST_CODE_INIT 0x1 #define SAS_PHY_BIST_CODE1_INIT 0X80 +#define SAS_PHY_BIST_INIT_DELAY 100 +#define SAS_PHY_BIST_LOOP_TEST_0 1 +#define SAS_PHY_BIST_LOOP_TEST_1 2 static int debugfs_set_bist_v3_hw(struct hisi_hba *hisi_hba, bool enable) { u32 reg_val, mode_tmp; @@ -3251,12 +3302,13 @@ static int debugfs_set_bist_v3_hw(struct hisi_hba *hisi_hba, bool enable) ffe[FFE_SATA_1_5_GBPS], ffe[FFE_SATA_3_0_GBPS], ffe[FFE_SATA_6_0_GBPS], fix_code[FIXED_CODE], fix_code[FIXED_CODE_1]); - mode_tmp = path_mode ? 2 : 1; + mode_tmp = path_mode ? 
SAS_PHY_BIST_LOOP_TEST_1 : + SAS_PHY_BIST_LOOP_TEST_0; if (enable) { /* some preparations before bist test */ hisi_sas_bist_test_prep_v3_hw(hisi_hba); - /* set linkrate of bit test*/ + /* set linkrate of bit test */ reg_val = hisi_sas_phy_read32(hisi_hba, phy_no, PROG_PHY_LINK_RATE); reg_val &= ~CFG_PROG_OOB_PHY_LINK_RATE_MSK; @@ -3294,13 +3346,13 @@ static int debugfs_set_bist_v3_hw(struct hisi_hba *hisi_hba, bool enable) SAS_PHY_BIST_CODE1_INIT); } - mdelay(100); + mdelay(SAS_PHY_BIST_INIT_DELAY); reg_val |= (CFG_RX_BIST_EN_MSK | CFG_TX_BIST_EN_MSK); hisi_sas_phy_write32(hisi_hba, phy_no, SAS_PHY_BIST_CTRL, reg_val); /* clear error bit */ - mdelay(100); + mdelay(SAS_PHY_BIST_INIT_DELAY); hisi_sas_phy_read32(hisi_hba, phy_no, SAS_BIST_ERR_CNT); } else { /* disable bist test and recover it */ @@ -3354,7 +3406,7 @@ static const struct scsi_host_template sht_v3_hw = { .shost_groups = host_v3_hw_groups, .sdev_groups = sdev_groups_v3_hw, .tag_alloc_policy_rr = true, - .host_reset = hisi_sas_host_reset, + .host_reset = hisi_sas_host_reset, .host_tagset = 1, .mq_poll = queue_complete_v3_hw, }; @@ -3496,7 +3548,7 @@ static void debugfs_snapshot_port_reg_v3_hw(struct hisi_hba *hisi_hba) for (phy_cnt = 0; phy_cnt < hisi_hba->n_phy; phy_cnt++) { databuf = hisi_hba->debugfs_port_reg[dump_index][phy_cnt].data; for (i = 0; i < port->count; i++, databuf++) { - offset = port->base_off + 4 * i; + offset = port->base_off + HISI_SAS_REG_MEM_SIZE * i; *databuf = hisi_sas_phy_read32(hisi_hba, phy_cnt, offset); } @@ -3510,7 +3562,8 @@ static void debugfs_snapshot_global_reg_v3_hw(struct hisi_hba *hisi_hba) int i; for (i = 0; i < debugfs_global_reg.count; i++, databuf++) - *databuf = hisi_sas_read32(hisi_hba, 4 * i); + *databuf = hisi_sas_read32(hisi_hba, + HISI_SAS_REG_MEM_SIZE * i); } static void debugfs_snapshot_axi_reg_v3_hw(struct hisi_hba *hisi_hba) @@ -3521,7 +3574,9 @@ static void debugfs_snapshot_axi_reg_v3_hw(struct hisi_hba *hisi_hba) int i; for (i = 0; i < axi->count; i++, databuf++) - *databuf = hisi_sas_read32(hisi_hba, 4 * i + axi->base_off); + *databuf = hisi_sas_read32(hisi_hba, + HISI_SAS_REG_MEM_SIZE * i + + axi->base_off); } static void debugfs_snapshot_ras_reg_v3_hw(struct hisi_hba *hisi_hba) @@ -3532,7 +3587,9 @@ static void debugfs_snapshot_ras_reg_v3_hw(struct hisi_hba *hisi_hba) int i; for (i = 0; i < ras->count; i++, databuf++) - *databuf = hisi_sas_read32(hisi_hba, 4 * i + ras->base_off); + *databuf = hisi_sas_read32(hisi_hba, + HISI_SAS_REG_MEM_SIZE * i + + ras->base_off); } static void debugfs_snapshot_itct_reg_v3_hw(struct hisi_hba *hisi_hba) @@ -3595,12 +3652,11 @@ static void debugfs_print_reg_v3_hw(u32 *regs_val, struct seq_file *s, int i; for (i = 0; i < reg->count; i++) { - int off = i * 4; + int off = i * HISI_SAS_REG_MEM_SIZE; const char *name; name = debugfs_to_reg_name_v3_hw(off, reg->base_off, reg->lu); - if (name) seq_printf(s, "0x%08x 0x%08x %s\n", off, regs_val[i], name); @@ -3673,9 +3729,9 @@ static void debugfs_show_row_64_v3_hw(struct seq_file *s, int index, /* completion header size not fixed per HW version */ seq_printf(s, "index %04d:\n\t", index); - for (i = 1; i <= sz / 8; i++, ptr++) { + for (i = 1; i <= sz / BYTE_TO_DDW; i++, ptr++) { seq_printf(s, " 0x%016llx", le64_to_cpu(*ptr)); - if (!(i % 2)) + if (!(i % TWO_PARA_PER_LINE)) seq_puts(s, "\n\t"); } @@ -3689,9 +3745,9 @@ static void debugfs_show_row_32_v3_hw(struct seq_file *s, int index, /* completion header size not fixed per HW version */ seq_printf(s, "index %04d:\n\t", index); - for (i = 1; i <= sz / 
4; i++, ptr++) { + for (i = 1; i <= sz / BYTE_TO_DW; i++, ptr++) { seq_printf(s, " 0x%08x", le32_to_cpu(*ptr)); - if (!(i % 4)) + if (!(i % FOUR_PARA_PER_LINE)) seq_puts(s, "\n\t"); } seq_puts(s, "\n"); @@ -3776,7 +3832,7 @@ static int debugfs_iost_cache_v3_hw_show(struct seq_file *s, void *p) struct hisi_sas_debugfs_iost_cache *debugfs_iost_cache = s->private; struct hisi_sas_iost_itct_cache *iost_cache = debugfs_iost_cache->cache; - u32 cache_size = HISI_SAS_IOST_ITCT_CACHE_DW_SZ * 4; + u32 cache_size = HISI_SAS_IOST_ITCT_CACHE_DW_SZ * BYTE_TO_DW; int i, tab_idx; __le64 *iost; @@ -3824,7 +3880,7 @@ static int debugfs_itct_cache_v3_hw_show(struct seq_file *s, void *p) struct hisi_sas_debugfs_itct_cache *debugfs_itct_cache = s->private; struct hisi_sas_iost_itct_cache *itct_cache = debugfs_itct_cache->cache; - u32 cache_size = HISI_SAS_IOST_ITCT_CACHE_DW_SZ * 4; + u32 cache_size = HISI_SAS_IOST_ITCT_CACHE_DW_SZ * BYTE_TO_DW; int i, tab_idx; __le64 *itct; @@ -3853,12 +3909,12 @@ static void debugfs_create_files_v3_hw(struct hisi_hba *hisi_hba, int index) u64 *debugfs_timestamp; struct dentry *dump_dentry; struct dentry *dentry; - char name[256]; + char name[NAME_BUF_SIZE]; int p; int c; int d; - snprintf(name, 256, "%d", index); + snprintf(name, NAME_BUF_SIZE, "%d", index); dump_dentry = debugfs_create_dir(name, hisi_hba->debugfs_dump_dentry); @@ -3874,7 +3930,7 @@ static void debugfs_create_files_v3_hw(struct hisi_hba *hisi_hba, int index) /* Create port dir and files */ dentry = debugfs_create_dir("port", dump_dentry); for (p = 0; p < hisi_hba->n_phy; p++) { - snprintf(name, 256, "%d", p); + snprintf(name, NAME_BUF_SIZE, "%d", p); debugfs_create_file(name, 0400, dentry, &hisi_hba->debugfs_port_reg[index][p], @@ -3884,7 +3940,7 @@ static void debugfs_create_files_v3_hw(struct hisi_hba *hisi_hba, int index) /* Create CQ dir and files */ dentry = debugfs_create_dir("cq", dump_dentry); for (c = 0; c < hisi_hba->queue_count; c++) { - snprintf(name, 256, "%d", c); + snprintf(name, NAME_BUF_SIZE, "%d", c); debugfs_create_file(name, 0400, dentry, &hisi_hba->debugfs_cq[index][c], @@ -3894,7 +3950,7 @@ static void debugfs_create_files_v3_hw(struct hisi_hba *hisi_hba, int index) /* Create DQ dir and files */ dentry = debugfs_create_dir("dq", dump_dentry); for (d = 0; d < hisi_hba->queue_count; d++) { - snprintf(name, 256, "%d", d); + snprintf(name, NAME_BUF_SIZE, "%d", d); debugfs_create_file(name, 0400, dentry, &hisi_hba->debugfs_dq[index][d], @@ -3931,9 +3987,9 @@ static ssize_t debugfs_trigger_dump_v3_hw_write(struct file *file, size_t count, loff_t *ppos) { struct hisi_hba *hisi_hba = file->f_inode->i_private; - char buf[8]; + char buf[DUMP_BUF_SIZE]; - if (count > 8) + if (count > DUMP_BUF_SIZE) return -EFAULT; if (copy_from_user(buf, user_buf, count)) @@ -3997,7 +4053,7 @@ static ssize_t debugfs_bist_linkrate_v3_hw_write(struct file *filp, { struct seq_file *m = filp->private_data; struct hisi_hba *hisi_hba = m->private; - char kbuf[16] = {}, *pkbuf; + char kbuf[BIST_BUF_SIZE] = {}, *pkbuf; bool found = false; int i; @@ -4014,7 +4070,7 @@ static ssize_t debugfs_bist_linkrate_v3_hw_write(struct file *filp, for (i = 0; i < ARRAY_SIZE(debugfs_loop_linkrate_v3_hw); i++) { if (!strncmp(debugfs_loop_linkrate_v3_hw[i].name, - pkbuf, 16)) { + pkbuf, BIST_BUF_SIZE)) { hisi_hba->debugfs_bist_linkrate = debugfs_loop_linkrate_v3_hw[i].value; found = true; @@ -4072,7 +4128,7 @@ static ssize_t debugfs_bist_code_mode_v3_hw_write(struct file *filp, { struct seq_file *m = filp->private_data; struct hisi_hba 
*hisi_hba = m->private; - char kbuf[16] = {}, *pkbuf; + char kbuf[BIST_BUF_SIZE] = {}, *pkbuf; bool found = false; int i; @@ -4089,7 +4145,7 @@ static ssize_t debugfs_bist_code_mode_v3_hw_write(struct file *filp, for (i = 0; i < ARRAY_SIZE(debugfs_loop_code_mode_v3_hw); i++) { if (!strncmp(debugfs_loop_code_mode_v3_hw[i].name, - pkbuf, 16)) { + pkbuf, BIST_BUF_SIZE)) { hisi_hba->debugfs_bist_code_mode = debugfs_loop_code_mode_v3_hw[i].value; found = true; @@ -4204,7 +4260,7 @@ static ssize_t debugfs_bist_mode_v3_hw_write(struct file *filp, { struct seq_file *m = filp->private_data; struct hisi_hba *hisi_hba = m->private; - char kbuf[16] = {}, *pkbuf; + char kbuf[BIST_BUF_SIZE] = {}, *pkbuf; bool found = false; int i; @@ -4220,7 +4276,8 @@ static ssize_t debugfs_bist_mode_v3_hw_write(struct file *filp, pkbuf = strstrip(kbuf); for (i = 0; i < ARRAY_SIZE(debugfs_loop_modes_v3_hw); i++) { - if (!strncmp(debugfs_loop_modes_v3_hw[i].name, pkbuf, 16)) { + if (!strncmp(debugfs_loop_modes_v3_hw[i].name, pkbuf, + BIST_BUF_SIZE)) { hisi_hba->debugfs_bist_mode = debugfs_loop_modes_v3_hw[i].value; found = true; @@ -4499,8 +4556,9 @@ static int debugfs_fifo_data_v3_hw_show(struct seq_file *s, void *p) debugfs_read_fifo_data_v3_hw(phy); - debugfs_show_row_32_v3_hw(s, 0, HISI_SAS_FIFO_DATA_DW_SIZE * 4, - (__le32 *)phy->fifo.rd_data); + debugfs_show_row_32_v3_hw(s, 0, + HISI_SAS_FIFO_DATA_DW_SIZE * HISI_SAS_REG_MEM_SIZE, + (__le32 *)phy->fifo.rd_data); return 0; } @@ -4632,14 +4690,14 @@ static int debugfs_alloc_v3_hw(struct hisi_hba *hisi_hba, int dump_index) struct hisi_sas_debugfs_regs *regs = &hisi_hba->debugfs_regs[dump_index][r]; - sz = debugfs_reg_array_v3_hw[r]->count * 4; + sz = debugfs_reg_array_v3_hw[r]->count * HISI_SAS_REG_MEM_SIZE; regs->data = devm_kmalloc(dev, sz, GFP_KERNEL); if (!regs->data) goto fail; regs->hisi_hba = hisi_hba; } - sz = debugfs_port_reg.count * 4; + sz = debugfs_port_reg.count * HISI_SAS_REG_MEM_SIZE; for (p = 0; p < hisi_hba->n_phy; p++) { struct hisi_sas_debugfs_port *port = &hisi_hba->debugfs_port_reg[dump_index][p]; @@ -4749,11 +4807,11 @@ static void debugfs_phy_down_cnt_init_v3_hw(struct hisi_hba *hisi_hba) { struct dentry *dir = debugfs_create_dir("phy_down_cnt", hisi_hba->debugfs_dir); - char name[16]; + char name[NAME_BUF_SIZE]; int phy_no; for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) { - snprintf(name, 16, "%d", phy_no); + snprintf(name, NAME_BUF_SIZE, "%d", phy_no); debugfs_create_file(name, 0600, dir, &hisi_hba->phy[phy_no], &debugfs_phy_down_cnt_v3_hw_fops); @@ -4938,7 +4996,7 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id) shost->max_id = HISI_SAS_MAX_DEVICES; shost->max_lun = ~0; shost->max_channel = 1; - shost->max_cmd_len = 16; + shost->max_cmd_len = HISI_SAS_MAX_CDB_LEN; shost->can_queue = HISI_SAS_UNRESERVED_IPTT; shost->cmd_per_lun = HISI_SAS_UNRESERVED_IPTT; if (hisi_hba->iopoll_q_cnt) @@ -5016,12 +5074,13 @@ hisi_sas_v3_destroy_irqs(struct pci_dev *pdev, struct hisi_hba *hisi_hba) { int i; - devm_free_irq(&pdev->dev, pci_irq_vector(pdev, 1), hisi_hba); - devm_free_irq(&pdev->dev, pci_irq_vector(pdev, 2), hisi_hba); - devm_free_irq(&pdev->dev, pci_irq_vector(pdev, 11), hisi_hba); + devm_free_irq(&pdev->dev, pci_irq_vector(pdev, IRQ_PHY_UP_DOWN_INDEX), hisi_hba); + devm_free_irq(&pdev->dev, pci_irq_vector(pdev, IRQ_CHL_INDEX), hisi_hba); + devm_free_irq(&pdev->dev, pci_irq_vector(pdev, IRQ_AXI_INDEX), hisi_hba); for (i = 0; i < hisi_hba->cq_nvecs; i++) { struct hisi_sas_cq *cq = &hisi_hba->cq[i]; - int nr = 
hisi_sas_intr_conv ? 16 : 16 + i; + int nr = hisi_sas_intr_conv ? BASE_VECTORS_V3_HW : + BASE_VECTORS_V3_HW + i; devm_free_irq(&pdev->dev, pci_irq_vector(pdev, nr), cq); } @@ -5051,9 +5110,11 @@ static void hisi_sas_reset_prepare_v3_hw(struct pci_dev *pdev) { struct sas_ha_struct *sha = pci_get_drvdata(pdev); struct hisi_hba *hisi_hba = sha->lldd_ha; + struct Scsi_Host *shost = hisi_hba->shost; struct device *dev = hisi_hba->dev; int rc; + wait_event(shost->host_wait, !scsi_host_in_recovery(shost)); dev_info(dev, "FLR prepare\n"); down(&hisi_hba->sem); set_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags); diff --git a/drivers/scsi/isci/remote_device.c b/drivers/scsi/isci/remote_device.c index 287e1ba8ddd7..82deb6a83a8c 100644 --- a/drivers/scsi/isci/remote_device.c +++ b/drivers/scsi/isci/remote_device.c @@ -392,36 +392,6 @@ enum sci_status sci_remote_device_stop(struct isci_remote_device *idev, } } -enum sci_status sci_remote_device_reset(struct isci_remote_device *idev) -{ - struct sci_base_state_machine *sm = &idev->sm; - enum sci_remote_device_states state = sm->current_state_id; - - switch (state) { - case SCI_DEV_INITIAL: - case SCI_DEV_STOPPED: - case SCI_DEV_STARTING: - case SCI_SMP_DEV_IDLE: - case SCI_SMP_DEV_CMD: - case SCI_DEV_STOPPING: - case SCI_DEV_FAILED: - case SCI_DEV_RESETTING: - case SCI_DEV_FINAL: - default: - dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n", - __func__, dev_state_name(state)); - return SCI_FAILURE_INVALID_STATE; - case SCI_DEV_READY: - case SCI_STP_DEV_IDLE: - case SCI_STP_DEV_CMD: - case SCI_STP_DEV_NCQ: - case SCI_STP_DEV_NCQ_ERROR: - case SCI_STP_DEV_AWAIT_RESET: - sci_change_state(sm, SCI_DEV_RESETTING); - return SCI_SUCCESS; - } -} - enum sci_status sci_remote_device_frame_handler(struct isci_remote_device *idev, u32 frame_index) { diff --git a/drivers/scsi/isci/remote_device.h b/drivers/scsi/isci/remote_device.h index 561ae3f2cbbd..c1fdf45751cd 100644 --- a/drivers/scsi/isci/remote_device.h +++ b/drivers/scsi/isci/remote_device.h @@ -160,21 +160,6 @@ enum sci_status sci_remote_device_stop( u32 timeout); /** - * sci_remote_device_reset() - This method will reset the device making it - * ready for operation. This method must be called anytime the device is - * reset either through a SMP phy control or a port hard reset request. - * @remote_device: This parameter specifies the device to be reset. - * - * This method does not actually cause the device hardware to be reset. This - * method resets the software object so that it will be operational after a - * device hardware reset completes. An indication of whether the device reset - * was accepted. SCI_SUCCESS This value is returned if the device reset is - * started. - */ -enum sci_status sci_remote_device_reset( - struct isci_remote_device *idev); - -/** * enum sci_remote_device_states - This enumeration depicts all the states * for the common remote device state machine. * @SCI_DEV_INITIAL: Simply the initial state for the base remote device diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index 397216ff2c7e..54ee8ecec3b3 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. 
and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * @@ -291,6 +291,138 @@ buffer_done: return len; } +static ssize_t +lpfc_vmid_info_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; + struct lpfc_hba *phba = vport->phba; + struct lpfc_vmid *vmp; + int len = 0, i, j, k, cpu; + char hxstr[LPFC_MAX_VMID_SIZE * 3] = {0}; + struct timespec64 curr_tm; + struct lpfc_vmid_priority_range *vr; + u64 *lta, rct_acc = 0, max_lta = 0; + struct tm tm_val; + + ktime_get_ts64(&curr_tm); + + len += scnprintf(buf + len, PAGE_SIZE - len, "Key 'vmid':\n"); + + /* if enabled continue, else return */ + if (lpfc_is_vmid_enabled(phba)) { + len += scnprintf(buf + len, PAGE_SIZE - len, + "lpfc VMID Page: ON\n\n"); + } else { + len += scnprintf(buf + len, PAGE_SIZE - len, + "lpfc VMID Page: OFF\n\n"); + return len; + } + + /* if using priority tagging */ + if (vport->phba->pport->vmid_flag & LPFC_VMID_TYPE_PRIO) { + len += scnprintf(buf + len, PAGE_SIZE - len, + "VMID priority ranges:\n"); + vr = vport->vmid_priority.vmid_range; + for (i = 0; i < vport->vmid_priority.num_descriptors; ++i) { + len += scnprintf(buf + len, PAGE_SIZE - len, + "\t[x%x - x%x], qos: x%x\n", + vr->low, vr->high, vr->qos); + vr++; + } + } + + for (i = 0; i < phba->cfg_max_vmid; i++) { + vmp = &vport->vmid[i]; + max_lta = 0; + + /* only if the slot is used */ + if (!(vmp->flag & LPFC_VMID_SLOT_USED) || + !(vmp->flag & LPFC_VMID_REGISTERED)) + continue; + + /* if using priority tagging */ + if (vport->phba->pport->vmid_flag & LPFC_VMID_TYPE_PRIO) { + len += scnprintf(buf + len, PAGE_SIZE - len, + "VEM ID: %02x:%02x:%02x:%02x:" + "%02x:%02x:%02x:%02x:%02x:%02x:" + "%02x:%02x:%02x:%02x:%02x:%02x\n", + vport->lpfc_vmid_host_uuid[0], + vport->lpfc_vmid_host_uuid[1], + vport->lpfc_vmid_host_uuid[2], + vport->lpfc_vmid_host_uuid[3], + vport->lpfc_vmid_host_uuid[4], + vport->lpfc_vmid_host_uuid[5], + vport->lpfc_vmid_host_uuid[6], + vport->lpfc_vmid_host_uuid[7], + vport->lpfc_vmid_host_uuid[8], + vport->lpfc_vmid_host_uuid[9], + vport->lpfc_vmid_host_uuid[10], + vport->lpfc_vmid_host_uuid[11], + vport->lpfc_vmid_host_uuid[12], + vport->lpfc_vmid_host_uuid[13], + vport->lpfc_vmid_host_uuid[14], + vport->lpfc_vmid_host_uuid[15]); + } + + /* IO stats */ + len += scnprintf(buf + len, PAGE_SIZE - len, + "ID00 READs:%llx WRITEs:%llx\n", + vmp->io_rd_cnt, + vmp->io_wr_cnt); + for (j = 0, k = 0; j < strlen(vmp->host_vmid); j++, k += 3) + sprintf((char *)(hxstr + k), "%2x ", vmp->host_vmid[j]); + /* UUIDs */ + len += scnprintf(buf + len, PAGE_SIZE - len, "UUID:\n"); + len += scnprintf(buf + len, PAGE_SIZE - len, "%s\n", hxstr); + + len += scnprintf(buf + len, PAGE_SIZE - len, "String (%s)\n", + vmp->host_vmid); + + if (vport->phba->pport->vmid_flag & LPFC_VMID_TYPE_PRIO) + len += scnprintf(buf + len, PAGE_SIZE - len, + "CS_CTL VMID: 0x%x\n", + vmp->un.cs_ctl_vmid); + else + len += scnprintf(buf + len, PAGE_SIZE - len, + "Application id: 0x%x\n", + vmp->un.app_id); + + /* calculate the last access time */ + for_each_possible_cpu(cpu) { + lta = per_cpu_ptr(vmp->last_io_time, cpu); + if (!lta) + continue; + + /* if last access time is less than timeout */ + if (time_after((unsigned long)*lta, jiffies)) + continue; + + if (*lta > max_lta) + max_lta = *lta; + } + + rct_acc = jiffies_to_msecs(jiffies - max_lta) / 1000; + /* current time */ + 
time64_to_tm(ktime_get_real_seconds(), + -(sys_tz.tz_minuteswest * 60) - rct_acc, &tm_val); + + len += scnprintf(buf + len, PAGE_SIZE - len, + "Last Access Time :" + "%ld-%d-%dT%02d:%02d:%02d\n\n", + 1900 + tm_val.tm_year, tm_val.tm_mon + 1, + tm_val.tm_mday, tm_val.tm_hour, + tm_val.tm_min, tm_val.tm_sec); + + if (len >= PAGE_SIZE) + return len; + + memset(hxstr, 0, LPFC_MAX_VMID_SIZE * 3); + } + return len; +} + /** * lpfc_drvr_version_show - Return the Emulex driver string with version number * @dev: class unused variable. @@ -3011,6 +3143,7 @@ static DEVICE_ATTR(protocol, S_IRUGO, lpfc_sli4_protocol_show, NULL); static DEVICE_ATTR(lpfc_xlane_supported, S_IRUGO, lpfc_oas_supported_show, NULL); static DEVICE_ATTR(cmf_info, 0444, lpfc_cmf_info_show, NULL); +static DEVICE_ATTR_RO(lpfc_vmid_info); #define WWN_SZ 8 /** @@ -6117,6 +6250,7 @@ static struct attribute *lpfc_hba_attrs[] = { &dev_attr_lpfc_vmid_inactivity_timeout.attr, &dev_attr_lpfc_vmid_app_header.attr, &dev_attr_lpfc_vmid_priority_tagging.attr, + &dev_attr_lpfc_vmid_info.attr, NULL, }; diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c index c8f8496bbdf8..d61d979f9b77 100644 --- a/drivers/scsi/lpfc/lpfc_bsg.c +++ b/drivers/scsi/lpfc/lpfc_bsg.c @@ -2687,8 +2687,7 @@ static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi, evt->wait_time_stamp = jiffies; time_left = wait_event_interruptible_timeout( evt->wq, !list_empty(&evt->events_to_see), - msecs_to_jiffies(1000 * - ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT))); + secs_to_jiffies(phba->fc_ratov * 2 + LPFC_DRVR_TIMEOUT)); if (list_empty(&evt->events_to_see)) ret_val = (time_left) ? -EINTR : -ETIMEDOUT; else { @@ -3258,8 +3257,7 @@ lpfc_bsg_diag_loopback_run(struct bsg_job *job) evt->waiting = 1; time_left = wait_event_interruptible_timeout( evt->wq, !list_empty(&evt->events_to_see), - msecs_to_jiffies(1000 * - ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT))); + secs_to_jiffies(phba->fc_ratov * 2 + LPFC_DRVR_TIMEOUT)); evt->waiting = 0; if (list_empty(&evt->events_to_see)) { rc = (time_left) ? -EINTR : -ETIMEDOUT; diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index 179be6c5a43e..3d15a964f5c9 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -161,7 +161,7 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport) struct lpfc_hba *phba; struct lpfc_work_evt *evtp; unsigned long iflags; - bool nvme_reg = false; + bool drop_initial_node_ref = false; ndlp = ((struct lpfc_rport_data *)rport->dd_data)->pnode; if (!ndlp) @@ -188,8 +188,13 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport) spin_lock_irqsave(&ndlp->lock, iflags); ndlp->rport = NULL; - if (ndlp->fc4_xpt_flags & NVME_XPT_REGD) - nvme_reg = true; + /* Only 1 thread can drop the initial node reference. + * If not registered for NVME and NLP_DROPPED flag is + * clear, remove the initial reference. + */ + if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD)) + if (!test_and_set_bit(NLP_DROPPED, &ndlp->nlp_flag)) + drop_initial_node_ref = true; /* The scsi_transport is done with the rport so lpfc cannot * call to unregister. @@ -200,13 +205,16 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport) /* If NLP_XPT_REGD was cleared in lpfc_nlp_unreg_node, * unregister calls were made to the scsi and nvme * transports and refcnt was already decremented. Clear - * the NLP_XPT_REGD flag only if the NVME Rport is + * the NLP_XPT_REGD flag only if the NVME nrport is * confirmed unregistered. 
*/ - if (!nvme_reg && ndlp->fc4_xpt_flags & NLP_XPT_REGD) { - ndlp->fc4_xpt_flags &= ~NLP_XPT_REGD; + if (ndlp->fc4_xpt_flags & NLP_XPT_REGD) { + if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD)) + ndlp->fc4_xpt_flags &= ~NLP_XPT_REGD; spin_unlock_irqrestore(&ndlp->lock, iflags); - lpfc_nlp_put(ndlp); /* may free ndlp */ + + /* Release scsi transport reference */ + lpfc_nlp_put(ndlp); } else { spin_unlock_irqrestore(&ndlp->lock, iflags); } @@ -214,14 +222,8 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport) spin_unlock_irqrestore(&ndlp->lock, iflags); } - /* Only 1 thread can drop the initial node reference. If - * another thread has set NLP_DROPPED, this thread is done. - */ - if (nvme_reg || test_bit(NLP_DROPPED, &ndlp->nlp_flag)) - return; - - set_bit(NLP_DROPPED, &ndlp->nlp_flag); - lpfc_nlp_put(ndlp); + if (drop_initial_node_ref) + lpfc_nlp_put(ndlp); return; } @@ -4695,9 +4697,7 @@ lpfc_nlp_unreg_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) if (ndlp->fc4_xpt_flags & NVME_XPT_REGD) { vport->phba->nport_event_cnt++; if (vport->phba->nvmet_support == 0) { - /* Start devloss if target. */ - if (ndlp->nlp_type & NLP_NVME_TARGET) - lpfc_nvme_unregister_port(vport, ndlp); + lpfc_nvme_unregister_port(vport, ndlp); } else { /* NVMET has no upcall. */ lpfc_nlp_put(ndlp); @@ -5053,7 +5053,7 @@ lpfc_check_sli_ndlp(struct lpfc_hba *phba, case CMD_GEN_REQUEST64_CR: if (iocb->ndlp == ndlp) return 1; - fallthrough; + break; case CMD_ELS_REQUEST64_CR: if (remote_id == ndlp->nlp_DID) return 1; diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 90021653e59e..2400602a8561 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -1907,6 +1907,9 @@ lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action, uint32_t intr_mode; LPFC_MBOXQ_t *mboxq; + /* Notifying the transport that the targets are going offline. */ + lpfc_scsi_dev_block(phba); + if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >= LPFC_SLI_INTF_IF_TYPE_2) { /* diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c index b1adb9f59097..a6647dd360d1 100644 --- a/drivers/scsi/lpfc/lpfc_nvme.c +++ b/drivers/scsi/lpfc/lpfc_nvme.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * @@ -2508,7 +2508,10 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) "6031 RemotePort Registration failed " "err: %d, DID x%06x ref %u\n", ret, ndlp->nlp_DID, kref_read(&ndlp->kref)); - lpfc_nlp_put(ndlp); + + /* Only release reference if one was taken for this request */ + if (!oldrport) + lpfc_nlp_put(ndlp); } return ret; @@ -2614,7 +2617,8 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) * clear any rport state until the transport calls back. */ - if (ndlp->nlp_type & NLP_NVME_TARGET) { + if ((ndlp->nlp_type & NLP_NVME_TARGET) || + (remoteport->port_role & FC_PORT_ROLE_NVME_TARGET)) { /* No concern about the role change on the nvme remoteport. * The transport will update it. 
*/ diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 6574f9e74476..2ebb073e4ef3 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * @@ -3926,12 +3926,19 @@ void lpfc_poll_eratt(struct timer_list *t) uint64_t sli_intr, cnt; phba = from_timer(phba, t, eratt_poll); - if (!test_bit(HBA_SETUP, &phba->hba_flag)) - return; if (test_bit(FC_UNLOADING, &phba->pport->load_flag)) return; + if (phba->sli_rev == LPFC_SLI_REV4 && + !test_bit(HBA_SETUP, &phba->hba_flag)) { + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "0663 HBA still initializing 0x%lx, restart " + "timer\n", + phba->hba_flag); + goto restart_timer; + } + /* Here we will also keep track of interrupts per sec of the hba */ sli_intr = phba->sli.slistat.sli_intr; @@ -3950,13 +3957,16 @@ void lpfc_poll_eratt(struct timer_list *t) /* Check chip HA register for error event */ eratt = lpfc_sli_check_eratt(phba); - if (eratt) + if (eratt) { /* Tell the worker thread there is work to do */ lpfc_worker_wake_up(phba); - else - /* Restart the timer for next eratt poll */ - mod_timer(&phba->eratt_poll, - jiffies + secs_to_jiffies(phba->eratt_poll_interval)); + return; + } + +restart_timer: + /* Restart the timer for next eratt poll */ + mod_timer(&phba->eratt_poll, + jiffies + secs_to_jiffies(phba->eratt_poll_interval)); return; } @@ -6003,9 +6013,9 @@ lpfc_sli4_get_ctl_attr(struct lpfc_hba *phba) phba->sli4_hba.flash_id = bf_get(lpfc_cntl_attr_flash_id, cntl_attr); phba->sli4_hba.asic_rev = bf_get(lpfc_cntl_attr_asic_rev, cntl_attr); - memset(phba->BIOSVersion, 0, sizeof(phba->BIOSVersion)); - strlcat(phba->BIOSVersion, (char *)cntl_attr->bios_ver_str, + memcpy(phba->BIOSVersion, cntl_attr->bios_ver_str, sizeof(phba->BIOSVersion)); + phba->BIOSVersion[sizeof(phba->BIOSVersion) - 1] = '\0'; lpfc_printf_log(phba, KERN_INFO, LOG_SLI, "3086 lnk_type:%d, lnk_numb:%d, bios_ver:%s, " diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h index 638b50f35287..749688aa8a82 100644 --- a/drivers/scsi/lpfc/lpfc_version.h +++ b/drivers/scsi/lpfc/lpfc_version.h @@ -20,7 +20,7 @@ * included with this package. 
* *******************************************************************/ -#define LPFC_DRIVER_VERSION "14.4.0.8" +#define LPFC_DRIVER_VERSION "14.4.0.9" #define LPFC_DRIVER_NAME "lpfc" /* Used for SLI 2/3 */ diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c index cc56a7334319..2797aa75a689 100644 --- a/drivers/scsi/lpfc/lpfc_vport.c +++ b/drivers/scsi/lpfc/lpfc_vport.c @@ -505,7 +505,7 @@ lpfc_send_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) wait_event_timeout(waitq, !test_bit(NLP_WAIT_FOR_LOGO, &ndlp->save_flags), - msecs_to_jiffies(phba->fc_ratov * 2000)); + secs_to_jiffies(phba->fc_ratov * 2)); if (!test_bit(NLP_WAIT_FOR_LOGO, &ndlp->save_flags)) goto logo_cmpl; @@ -703,7 +703,7 @@ lpfc_vport_delete(struct fc_vport *fc_vport) wait_event_timeout(waitq, !test_bit(NLP_WAIT_FOR_DA_ID, &ndlp->save_flags), - msecs_to_jiffies(phba->fc_ratov * 2000)); + secs_to_jiffies(phba->fc_ratov * 2)); } lpfc_printf_vlog(vport, KERN_INFO, LOG_VPORT | LOG_ELS, diff --git a/drivers/scsi/mpi3mr/mpi3mr_os.c b/drivers/scsi/mpi3mr/mpi3mr_os.c index c186b892150f..ce444efd859e 100644 --- a/drivers/scsi/mpi3mr/mpi3mr_os.c +++ b/drivers/scsi/mpi3mr/mpi3mr_os.c @@ -985,6 +985,10 @@ static int mpi3mr_report_tgtdev_to_host(struct mpi3mr_ioc *mrioc, goto out; } } + dprint_event_bh(mrioc, + "exposed target device with handle(0x%04x), perst_id(%d)\n", + tgtdev->dev_handle, perst_id); + goto out; } else mpi3mr_report_tgtdev_to_sas_transport(mrioc, tgtdev); out: @@ -1344,9 +1348,9 @@ static void mpi3mr_devstatuschg_evt_bh(struct mpi3mr_ioc *mrioc, (struct mpi3_event_data_device_status_change *)fwevt->event_data; dev_handle = le16_to_cpu(evtdata->dev_handle); - ioc_info(mrioc, - "%s :device status change: handle(0x%04x): reason code(0x%x)\n", - __func__, dev_handle, evtdata->reason_code); + dprint_event_bh(mrioc, + "processing device status change event bottom half for handle(0x%04x), rc(0x%02x)\n", + dev_handle, evtdata->reason_code); switch (evtdata->reason_code) { case MPI3_EVENT_DEV_STAT_RC_HIDDEN: delete = 1; @@ -1365,8 +1369,13 @@ static void mpi3mr_devstatuschg_evt_bh(struct mpi3mr_ioc *mrioc, } tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle); - if (!tgtdev) + if (!tgtdev) { + dprint_event_bh(mrioc, + "processing device status change event bottom half,\n" + "cannot identify target device for handle(0x%04x), rc(0x%02x)\n", + dev_handle, evtdata->reason_code); goto out; + } if (uhide) { tgtdev->is_hidden = 0; if (!tgtdev->host_exposed) @@ -1406,12 +1415,17 @@ static void mpi3mr_devinfochg_evt_bh(struct mpi3mr_ioc *mrioc, perst_id = le16_to_cpu(dev_pg0->persistent_id); dev_handle = le16_to_cpu(dev_pg0->dev_handle); - ioc_info(mrioc, - "%s :Device info change: handle(0x%04x): persist_id(0x%x)\n", - __func__, dev_handle, perst_id); + dprint_event_bh(mrioc, + "processing device info change event bottom half for handle(0x%04x), perst_id(%d)\n", + dev_handle, perst_id); tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle); - if (!tgtdev) + if (!tgtdev) { + dprint_event_bh(mrioc, + "cannot identify target device for device info\n" + "change event handle(0x%04x), perst_id(%d)\n", + dev_handle, perst_id); goto out; + } mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0, false); if (!tgtdev->is_hidden && !tgtdev->host_exposed) mpi3mr_report_tgtdev_to_host(mrioc, perst_id); @@ -2012,8 +2026,11 @@ static void mpi3mr_fwevt_bh(struct mpi3mr_ioc *mrioc, mpi3mr_fwevt_del_from_list(mrioc, fwevt); mrioc->current_event = fwevt; - if (mrioc->stop_drv_processing) + if 
(mrioc->stop_drv_processing) { + dprint_event_bh(mrioc, "ignoring event(0x%02x) in the bottom half handler\n" + "due to stop_drv_processing\n", fwevt->event_id); goto out; + } if (mrioc->unrecoverable) { dprint_event_bh(mrioc, @@ -2025,6 +2042,9 @@ static void mpi3mr_fwevt_bh(struct mpi3mr_ioc *mrioc, if (!fwevt->process_evt) goto evt_ack; + dprint_event_bh(mrioc, "processing event(0x%02x) in the bottom half handler\n", + fwevt->event_id); + switch (fwevt->event_id) { case MPI3_EVENT_DEVICE_ADDED: { @@ -2763,6 +2783,9 @@ static void mpi3mr_devstatuschg_evt_th(struct mpi3mr_ioc *mrioc, goto out; dev_handle = le16_to_cpu(evtdata->dev_handle); + dprint_event_th(mrioc, + "device status change event top half with rc(0x%02x) for handle(0x%04x)\n", + evtdata->reason_code, dev_handle); switch (evtdata->reason_code) { case MPI3_EVENT_DEV_STAT_RC_INT_DEVICE_RESET_STRT: @@ -2786,8 +2809,12 @@ static void mpi3mr_devstatuschg_evt_th(struct mpi3mr_ioc *mrioc, } tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle); - if (!tgtdev) + if (!tgtdev) { + dprint_event_th(mrioc, + "processing device status change event could not identify device for handle(0x%04x)\n", + dev_handle); goto out; + } if (hide) tgtdev->is_hidden = hide; if (tgtdev->starget && tgtdev->starget->hostdata) { @@ -2863,13 +2890,13 @@ static void mpi3mr_energypackchg_evt_th(struct mpi3mr_ioc *mrioc, u16 shutdown_timeout = le16_to_cpu(evtdata->shutdown_timeout); if (shutdown_timeout <= 0) { - ioc_warn(mrioc, + dprint_event_th(mrioc, "%s :Invalid Shutdown Timeout received = %d\n", __func__, shutdown_timeout); return; } - ioc_info(mrioc, + dprint_event_th(mrioc, "%s :Previous Shutdown Timeout Value = %d New Shutdown Timeout Value = %d\n", __func__, mrioc->facts.shutdown_timeout, shutdown_timeout); mrioc->facts.shutdown_timeout = shutdown_timeout; @@ -2945,9 +2972,9 @@ void mpi3mr_add_event_wait_for_device_refresh(struct mpi3mr_ioc *mrioc) * @mrioc: Adapter instance reference * @event_reply: event data * - * Identify whteher the event has to handled and acknowledged - * and either process the event in the tophalf and/or schedule a - * bottom half through mpi3mr_fwevt_worker. + * Identifies whether the event has to be handled and acknowledged, + * and either processes the event in the top-half and/or schedule a + * bottom-half through mpi3mr_fwevt_worker(). 
* * Return: Nothing */ @@ -2974,9 +3001,11 @@ void mpi3mr_os_handle_events(struct mpi3mr_ioc *mrioc, struct mpi3_device_page0 *dev_pg0 = (struct mpi3_device_page0 *)event_reply->event_data; if (mpi3mr_create_tgtdev(mrioc, dev_pg0)) - ioc_err(mrioc, - "%s :Failed to add device in the device add event\n", - __func__); + dprint_event_th(mrioc, + "failed to process device added event for handle(0x%04x),\n" + "perst_id(%d) in the event top half handler\n", + le16_to_cpu(dev_pg0->dev_handle), + le16_to_cpu(dev_pg0->persistent_id)); else process_evt_bh = 1; break; @@ -3039,11 +3068,15 @@ void mpi3mr_os_handle_events(struct mpi3mr_ioc *mrioc, break; } if (process_evt_bh || ack_req) { + dprint_event_th(mrioc, + "scheduling bottom half handler for event(0x%02x),ack_required=%d\n", + evt_type, ack_req); sz = event_reply->event_data_length * 4; fwevt = mpi3mr_alloc_fwevt(sz); if (!fwevt) { - ioc_info(mrioc, "%s :failure at %s:%d/%s()!\n", - __func__, __FILE__, __LINE__, __func__); + dprint_event_th(mrioc, + "failed to schedule bottom half handler for\n" + "event(0x%02x), ack_required=%d\n", evt_type, ack_req); return; } diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c index 063b10dd8251..02fc204b9bf7 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c +++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c @@ -2869,8 +2869,9 @@ _ctl_get_mpt_mctp_passthru_adapter(int dev_index) if (ioc->facts.IOCCapabilities & MPI26_IOCFACTS_CAPABILITY_MCTP_PASSTHRU) { if (count == dev_index) { spin_unlock(&gioc_lock); - return 0; + return ioc; } + count++; } } spin_unlock(&gioc_lock); diff --git a/drivers/scsi/mvsas/mv_64xx.h b/drivers/scsi/mvsas/mv_64xx.h index c25a5dfe7889..749f616b21af 100644 --- a/drivers/scsi/mvsas/mv_64xx.h +++ b/drivers/scsi/mvsas/mv_64xx.h @@ -101,8 +101,8 @@ enum sas_sata_vsp_regs { VSR_PHY_MODE9 = 0x09, /* Test */ VSR_PHY_MODE10 = 0x0A, /* Power */ VSR_PHY_MODE11 = 0x0B, /* Phy Mode */ - VSR_PHY_VS0 = 0x0C, /* Vednor Specific 0 */ - VSR_PHY_VS1 = 0x0D, /* Vednor Specific 1 */ + VSR_PHY_VS0 = 0x0C, /* Vendor Specific 0 */ + VSR_PHY_VS1 = 0x0D, /* Vendor Specific 1 */ }; enum chip_register_bits { diff --git a/drivers/scsi/pm8001/pm8001_ctl.c b/drivers/scsi/pm8001/pm8001_ctl.c index 85ff95c6543a..7618f9cc9986 100644 --- a/drivers/scsi/pm8001/pm8001_ctl.c +++ b/drivers/scsi/pm8001/pm8001_ctl.c @@ -644,7 +644,7 @@ static DEVICE_ATTR(gsm_log, S_IRUGO, pm8001_ctl_gsm_log_show, NULL); #define FLASH_CMD_SET_NVMD 0x02 struct flash_command { - u8 command[8]; + u8 command[8] __nonstring; int code; }; diff --git a/drivers/scsi/qedi/qedi_dbg.c b/drivers/scsi/qedi/qedi_dbg.c index 2ebef4d20b5b..2f3e044b818f 100644 --- a/drivers/scsi/qedi/qedi_dbg.c +++ b/drivers/scsi/qedi/qedi_dbg.c @@ -103,25 +103,3 @@ qedi_dbg_info(struct qedi_dbg_ctx *qedi, const char *func, u32 line, ret: va_end(va); } - -int -qedi_create_sysfs_attr(struct Scsi_Host *shost, struct sysfs_bin_attrs *iter) -{ - int ret = 0; - - for (; iter->name; iter++) { - ret = sysfs_create_bin_file(&shost->shost_gendev.kobj, - iter->attr); - if (ret) - pr_err("Unable to create sysfs %s attr, err(%d).\n", - iter->name, ret); - } - return ret; -} - -void -qedi_remove_sysfs_attr(struct Scsi_Host *shost, struct sysfs_bin_attrs *iter) -{ - for (; iter->name; iter++) - sysfs_remove_bin_file(&shost->shost_gendev.kobj, iter->attr); -} diff --git a/drivers/scsi/qedi/qedi_dbg.h b/drivers/scsi/qedi/qedi_dbg.h index 5a1ec4542183..864951865869 100644 --- a/drivers/scsi/qedi/qedi_dbg.h +++ b/drivers/scsi/qedi/qedi_dbg.h @@ -87,18 +87,6 @@ void 
qedi_dbg_notice(struct qedi_dbg_ctx *qedi, const char *func, u32 line, void qedi_dbg_info(struct qedi_dbg_ctx *qedi, const char *func, u32 line, u32 info, const char *fmt, ...); -struct Scsi_Host; - -struct sysfs_bin_attrs { - char *name; - const struct bin_attribute *attr; -}; - -int qedi_create_sysfs_attr(struct Scsi_Host *shost, - struct sysfs_bin_attrs *iter); -void qedi_remove_sysfs_attr(struct Scsi_Host *shost, - struct sysfs_bin_attrs *iter); - /* DebugFS related code */ struct qedi_list_of_funcs { char *oper_str; diff --git a/drivers/scsi/qedi/qedi_gbl.h b/drivers/scsi/qedi/qedi_gbl.h index 772218445a56..5e10441f2e22 100644 --- a/drivers/scsi/qedi/qedi_gbl.h +++ b/drivers/scsi/qedi/qedi_gbl.h @@ -45,7 +45,6 @@ int qedi_iscsi_cleanup_task(struct iscsi_task *task, void qedi_iscsi_unmap_sg_list(struct qedi_cmd *cmd); void qedi_update_itt_map(struct qedi_ctx *qedi, u32 tid, u32 proto_itt, struct qedi_cmd *qedi_cmd); -void qedi_get_proto_itt(struct qedi_ctx *qedi, u32 tid, u32 *proto_itt); void qedi_get_task_tid(struct qedi_ctx *qedi, u32 itt, int16_t *tid); void qedi_process_iscsi_error(struct qedi_endpoint *ep, struct iscsi_eqe_data *data); diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c index e87885cc701c..b168bb2178e9 100644 --- a/drivers/scsi/qedi/qedi_main.c +++ b/drivers/scsi/qedi/qedi_main.c @@ -1877,14 +1877,6 @@ void qedi_get_task_tid(struct qedi_ctx *qedi, u32 itt, s16 *tid) WARN_ON(1); } -void qedi_get_proto_itt(struct qedi_ctx *qedi, u32 tid, u32 *proto_itt) -{ - *proto_itt = qedi->itt_map[tid].itt; - QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN, - "Get itt map tid [0x%x with proto itt[0x%x]", - tid, *proto_itt); -} - struct qedi_cmd *qedi_get_cmd_from_tid(struct qedi_ctx *qedi, u32 tid) { struct qedi_cmd *cmd = NULL; diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c index 691ef827a5ab..5136549005e7 100644 --- a/drivers/scsi/qla2xxx/qla_dbg.c +++ b/drivers/scsi/qla2xxx/qla_dbg.c @@ -2706,59 +2706,6 @@ ql_dump_buffer(uint level, scsi_qla_host_t *vha, uint id, const void *buf, } /* - * This function is for formatting and logging log messages. - * It is to be used when vha is available. It formats the message - * and logs it to the messages file. All the messages will be logged - * irrespective of value of ql2xextended_error_logging. - * parameters: - * level: The level of the log messages to be printed in the - * messages file. - * vha: Pointer to the scsi_qla_host_t - * id: This is a unique id for the level. It identifies the - * part of the code from where the message originated. - * msg: The message to be displayed. - */ -void -ql_log_qp(uint32_t level, struct qla_qpair *qpair, int32_t id, - const char *fmt, ...) -{ - va_list va; - struct va_format vaf; - char pbuf[128]; - - if (level > ql_errlev) - return; - - ql_ktrace(0, level, pbuf, NULL, qpair ? qpair->vha : NULL, id, fmt); - - if (!pbuf[0]) /* set by ql_ktrace */ - ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), NULL, - qpair ? qpair->vha : NULL, id); - - va_start(va, fmt); - - vaf.fmt = fmt; - vaf.va = &va; - - switch (level) { - case ql_log_fatal: /* FATAL LOG */ - pr_crit("%s%pV", pbuf, &vaf); - break; - case ql_log_warn: - pr_err("%s%pV", pbuf, &vaf); - break; - case ql_log_info: - pr_warn("%s%pV", pbuf, &vaf); - break; - default: - pr_info("%s%pV", pbuf, &vaf); - break; - } - - va_end(va); -} - -/* * This function is for formatting and logging debug information. * It is to be used when vha is available. It formats the message * and logs it to the messages file. 
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h index 54f0a412226f..5f4a8c9ae6ba 100644 --- a/drivers/scsi/qla2xxx/qla_dbg.h +++ b/drivers/scsi/qla2xxx/qla_dbg.h @@ -334,9 +334,6 @@ ql_log(uint, scsi_qla_host_t *vha, uint, const char *fmt, ...); void __attribute__((format (printf, 4, 5))) ql_log_pci(uint, struct pci_dev *pdev, uint, const char *fmt, ...); -void __attribute__((format (printf, 4, 5))) -ql_log_qp(uint32_t, struct qla_qpair *, int32_t, const char *fmt, ...); - /* Debug Levels */ /* The 0x40000000 is the max value any debug level can have * as ql2xextended_error_logging is of type signed int diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index e556f57c91af..03e50e8fc08d 100644 --- a/drivers/scsi/qla2xxx/qla_gbl.h +++ b/drivers/scsi/qla2xxx/qla_gbl.h @@ -164,10 +164,8 @@ extern int ql2xsmartsan; extern int ql2xallocfwdump; extern int ql2xextended_error_logging; extern int ql2xextended_error_logging_ktrace; -extern int ql2xiidmaenable; extern int ql2xmqsupport; extern int ql2xfwloadbin; -extern int ql2xetsenable; extern int ql2xshiftctondsd; extern int ql2xdbwr; extern int ql2xasynctmfenable; @@ -720,7 +718,6 @@ extern void *qla2x00_prep_ms_fdmi_iocb(scsi_qla_host_t *, uint32_t, uint32_t); extern void *qla24xx_prep_ms_fdmi_iocb(scsi_qla_host_t *, uint32_t, uint32_t); extern int qla2x00_fdmi_register(scsi_qla_host_t *); extern int qla2x00_gfpn_id(scsi_qla_host_t *, sw_info_t *); -extern int qla2x00_gpsc(scsi_qla_host_t *, sw_info_t *); extern size_t qla2x00_get_sym_node_name(scsi_qla_host_t *, uint8_t *, size_t); extern int qla2x00_chk_ms_status(scsi_qla_host_t *, ms_iocb_entry_t *, struct ct_sns_rsp *, const char *); @@ -822,7 +819,6 @@ extern int qlafx00_rescan_isp(scsi_qla_host_t *); /* PCI related functions */ extern int qla82xx_pci_config(struct scsi_qla_host *); extern int qla82xx_pci_mem_read_2M(struct qla_hw_data *, u64, void *, int); -extern int qla82xx_pci_region_offset(struct pci_dev *, int); extern int qla82xx_iospace_config(struct qla_hw_data *); /* Initialization related functions */ @@ -866,7 +862,6 @@ extern int qla82xx_rd_32(struct qla_hw_data *, ulong); /* ISP 8021 IDC */ extern void qla82xx_clear_drv_active(struct qla_hw_data *); -extern uint32_t qla82xx_wait_for_state_change(scsi_qla_host_t *, uint32_t); extern int qla82xx_idc_lock(struct qla_hw_data *); extern void qla82xx_idc_unlock(struct qla_hw_data *); extern int qla82xx_device_state_handler(scsi_qla_host_t *); diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c index d2bddca7045a..51c7cea71f90 100644 --- a/drivers/scsi/qla2xxx/qla_gs.c +++ b/drivers/scsi/qla2xxx/qla_gs.c @@ -2626,96 +2626,6 @@ qla2x00_port_speed_capability(uint16_t speed) } /** - * qla2x00_gpsc() - FCS Get Port Speed Capabilities (GPSC) query. - * @vha: HA context - * @list: switch info entries to populate - * - * Returns 0 on success. 
- */ -int -qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list) -{ - int rval; - uint16_t i; - struct qla_hw_data *ha = vha->hw; - ms_iocb_entry_t *ms_pkt; - struct ct_sns_req *ct_req; - struct ct_sns_rsp *ct_rsp; - struct ct_arg arg; - - if (!IS_IIDMA_CAPABLE(ha)) - return QLA_FUNCTION_FAILED; - if (!ha->flags.gpsc_supported) - return QLA_FUNCTION_FAILED; - - rval = qla2x00_mgmt_svr_login(vha); - if (rval) - return rval; - - arg.iocb = ha->ms_iocb; - arg.req_dma = ha->ct_sns_dma; - arg.rsp_dma = ha->ct_sns_dma; - arg.req_size = GPSC_REQ_SIZE; - arg.rsp_size = GPSC_RSP_SIZE; - arg.nport_handle = vha->mgmt_svr_loop_id; - - for (i = 0; i < ha->max_fibre_devices; i++) { - /* Issue GFPN_ID */ - /* Prepare common MS IOCB */ - ms_pkt = qla24xx_prep_ms_iocb(vha, &arg); - - /* Prepare CT request */ - ct_req = qla24xx_prep_ct_fm_req(ha->ct_sns, GPSC_CMD, - GPSC_RSP_SIZE); - ct_rsp = &ha->ct_sns->p.rsp; - - /* Prepare CT arguments -- port_name */ - memcpy(ct_req->req.gpsc.port_name, list[i].fabric_port_name, - WWN_SIZE); - - /* Execute MS IOCB */ - rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma, - sizeof(ms_iocb_entry_t)); - if (rval != QLA_SUCCESS) { - /*EMPTY*/ - ql_dbg(ql_dbg_disc, vha, 0x2059, - "GPSC issue IOCB failed (%d).\n", rval); - } else if ((rval = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, - "GPSC")) != QLA_SUCCESS) { - /* FM command unsupported? */ - if (rval == QLA_INVALID_COMMAND && - (ct_rsp->header.reason_code == - CT_REASON_INVALID_COMMAND_CODE || - ct_rsp->header.reason_code == - CT_REASON_COMMAND_UNSUPPORTED)) { - ql_dbg(ql_dbg_disc, vha, 0x205a, - "GPSC command unsupported, disabling " - "query.\n"); - ha->flags.gpsc_supported = 0; - rval = QLA_FUNCTION_FAILED; - break; - } - rval = QLA_FUNCTION_FAILED; - } else { - list->fp_speed = qla2x00_port_speed_capability( - be16_to_cpu(ct_rsp->rsp.gpsc.speed)); - ql_dbg(ql_dbg_disc, vha, 0x205b, - "GPSC ext entry - fpn " - "%8phN speeds=%04x speed=%04x.\n", - list[i].fabric_port_name, - be16_to_cpu(ct_rsp->rsp.gpsc.speeds), - be16_to_cpu(ct_rsp->rsp.gpsc.speed)); - } - - /* Last device exit. */ - if (list[i].d_id.b.rsvd_1 != 0) - break; - } - - return (rval); -} - -/** * qla2x00_gff_id() - SNS Get FC-4 Features (GFF_ID) query. * * @vha: HA context diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c index 6dfb70edb9a6..0cd3db8ed4ef 100644 --- a/drivers/scsi/qla2xxx/qla_nx.c +++ b/drivers/scsi/qla2xxx/qla_nx.c @@ -1099,11 +1099,6 @@ qla82xx_pinit_from_rom(scsi_qla_host_t *vha) unsigned offset, n; struct qla_hw_data *ha = vha->hw; - struct crb_addr_pair { - long addr; - long data; - }; - /* Halt all the individual PEGs and other blocks of the ISP */ qla82xx_rom_lock(ha); @@ -1595,25 +1590,6 @@ qla82xx_get_fw_offs(struct qla_hw_data *ha) return (u8 *)&ha->hablob->fw->data[offset]; } -/* PCI related functions */ -int qla82xx_pci_region_offset(struct pci_dev *pdev, int region) -{ - unsigned long val = 0; - u32 control; - - switch (region) { - case 0: - val = 0; - break; - case 1: - pci_read_config_dword(pdev, QLA82XX_PCI_REG_MSIX_TBL, &control); - val = control + QLA82XX_MSIX_TBL_SPACE; - break; - } - return val; -} - - int qla82xx_iospace_config(struct qla_hw_data *ha) { @@ -2934,32 +2910,6 @@ qla82xx_need_qsnt_handler(scsi_qla_host_t *vha) } } -/* -* qla82xx_wait_for_state_change -* Wait for device state to change from given current state -* -* Note: -* IDC lock must not be held upon entry -* -* Return: -* Changed device state. 
-*/ -uint32_t -qla82xx_wait_for_state_change(scsi_qla_host_t *vha, uint32_t curr_state) -{ - struct qla_hw_data *ha = vha->hw; - uint32_t dev_state; - - do { - msleep(1000); - qla82xx_idc_lock(ha); - dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); - qla82xx_idc_unlock(ha); - } while (dev_state == curr_state); - - return dev_state; -} - void qla8xxx_dev_failed_handler(scsi_qla_host_t *vha) { diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index b44d134e7105..288ce04fc2b1 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -176,12 +176,6 @@ MODULE_PARM_DESC(ql2xenablehba_err_chk, " 1 -- Error isolation enabled only for DIX Type 0\n" " 2 -- Error isolation enabled for all Types\n"); -int ql2xiidmaenable = 1; -module_param(ql2xiidmaenable, int, S_IRUGO); -MODULE_PARM_DESC(ql2xiidmaenable, - "Enables iIDMA settings " - "Default is 1 - perform iIDMA. 0 - no iIDMA."); - int ql2xmqsupport = 1; module_param(ql2xmqsupport, int, S_IRUGO); MODULE_PARM_DESC(ql2xmqsupport, @@ -199,12 +193,6 @@ MODULE_PARM_DESC(ql2xfwloadbin, " 1 -- load firmware from flash.\n" " 0 -- use default semantics.\n"); -int ql2xetsenable; -module_param(ql2xetsenable, int, S_IRUGO); -MODULE_PARM_DESC(ql2xetsenable, - "Enables firmware ETS burst." - "Default is 0 - skip ETS enablement."); - int ql2xdbwr = 1; module_param(ql2xdbwr, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(ql2xdbwr, diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index 11eadb3bd36e..1e81582085e3 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -1454,50 +1454,6 @@ static struct fc_port *qlt_create_sess( return sess; } -/* - * max_gen - specifies maximum session generation - * at which this deletion requestion is still valid - */ -void -qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport, int max_gen) -{ - struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; - struct fc_port *sess = fcport; - unsigned long flags; - - if (!vha->hw->tgt.tgt_ops) - return; - - if (!tgt) - return; - - spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); - if (tgt->tgt_stop) { - spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); - return; - } - if (!sess->se_sess) { - spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); - return; - } - - if (max_gen - sess->generation < 0) { - spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf092, - "Ignoring stale deletion request for se_sess %p / sess %p" - " for port %8phC, req_gen %d, sess_gen %d\n", - sess->se_sess, sess, sess->port_name, max_gen, - sess->generation); - return; - } - - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf008, "qla_tgt_fc_port_deleted %p", sess); - - sess->local = 1; - spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); - qlt_schedule_sess_for_deletion(sess); -} - static inline int test_tgt_sess_count(struct qla_tgt *tgt) { struct qla_hw_data *ha = tgt->ha; @@ -5539,81 +5495,6 @@ qlt_alloc_qfull_cmd(struct scsi_qla_host *vha, spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags); } -int -qlt_free_qfull_cmds(struct qla_qpair *qpair) -{ - struct scsi_qla_host *vha = qpair->vha; - struct qla_hw_data *ha = vha->hw; - unsigned long flags; - struct qla_tgt_cmd *cmd, *tcmd; - struct list_head free_list, q_full_list; - int rc = 0; - - if (list_empty(&ha->tgt.q_full_list)) - return 0; - - INIT_LIST_HEAD(&free_list); - INIT_LIST_HEAD(&q_full_list); - - spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags); - if (list_empty(&ha->tgt.q_full_list)) { - 
spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags); - return 0; - } - - list_splice_init(&vha->hw->tgt.q_full_list, &q_full_list); - spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags); - - spin_lock_irqsave(qpair->qp_lock_ptr, flags); - list_for_each_entry_safe(cmd, tcmd, &q_full_list, cmd_list) { - if (cmd->q_full) - /* cmd->state is a borrowed field to hold status */ - rc = __qlt_send_busy(qpair, &cmd->atio, cmd->state); - else if (cmd->term_exchg) - rc = __qlt_send_term_exchange(qpair, NULL, &cmd->atio); - - if (rc == -ENOMEM) - break; - - if (cmd->q_full) - ql_dbg(ql_dbg_io, vha, 0x3006, - "%s: busy sent for ox_id[%04x]\n", __func__, - be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id)); - else if (cmd->term_exchg) - ql_dbg(ql_dbg_io, vha, 0x3007, - "%s: Term exchg sent for ox_id[%04x]\n", __func__, - be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id)); - else - ql_dbg(ql_dbg_io, vha, 0x3008, - "%s: Unexpected cmd in QFull list %p\n", __func__, - cmd); - - list_move_tail(&cmd->cmd_list, &free_list); - - /* piggy back on hardware_lock for protection */ - vha->hw->tgt.num_qfull_cmds_alloc--; - } - spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); - - cmd = NULL; - - list_for_each_entry_safe(cmd, tcmd, &free_list, cmd_list) { - list_del(&cmd->cmd_list); - /* This cmd was never sent to TCM. There is no need - * to schedule free or call free_cmd - */ - qlt_free_cmd(cmd); - } - - if (!list_empty(&q_full_list)) { - spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags); - list_splice(&q_full_list, &vha->hw->tgt.q_full_list); - spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags); - } - - return rc; -} - static void qlt_send_busy(struct qla_qpair *qpair, struct atio_from_isp *atio, uint16_t status) @@ -7091,16 +6972,6 @@ qlt_81xx_config_nvram_stage2(struct scsi_qla_host *vha, } void -qlt_83xx_iospace_config(struct qla_hw_data *ha) -{ - if (!QLA_TGT_MODE_ENABLED()) - return; - - ha->msix_count += 1; /* For ATIO Q */ -} - - -void qlt_modify_vp_config(struct scsi_qla_host *vha, struct vp_config_entry_24xx *vpmod) { diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h index 354fca2e7feb..15a59c125c53 100644 --- a/drivers/scsi/qla2xxx/qla_target.h +++ b/drivers/scsi/qla2xxx/qla_target.h @@ -1014,7 +1014,6 @@ extern int qlt_lport_register(void *, u64, u64, u64, extern void qlt_lport_deregister(struct scsi_qla_host *); extern void qlt_unreg_sess(struct fc_port *); extern void qlt_fc_port_added(struct scsi_qla_host *, fc_port_t *); -extern void qlt_fc_port_deleted(struct scsi_qla_host *, fc_port_t *, int); extern int __init qlt_init(void); extern void qlt_exit(void); extern void qlt_free_session_done(struct work_struct *); @@ -1082,8 +1081,6 @@ extern void qlt_mem_free(struct qla_hw_data *); extern int qlt_stop_phase1(struct qla_tgt *); extern void qlt_stop_phase2(struct qla_tgt *); extern irqreturn_t qla83xx_msix_atio_q(int, void *); -extern void qlt_83xx_iospace_config(struct qla_hw_data *); -extern int qlt_free_qfull_cmds(struct qla_qpair *); extern void qlt_logo_completion_handler(fc_port_t *, int); extern void qlt_do_generation_tick(struct scsi_qla_host *, int *); diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c index 47adff9f0506..da2fc66ffedd 100644 --- a/drivers/scsi/qla4xxx/ql4_nx.c +++ b/drivers/scsi/qla4xxx/ql4_nx.c @@ -973,11 +973,6 @@ qla4_82xx_pinit_from_rom(struct scsi_qla_host *ha, int verbose) unsigned long off; unsigned offset, n; - struct crb_addr_pair { - long addr; - long data; - }; - /* Halt all the indiviual PEGs and 
other blocks of the ISP */ qla4_82xx_rom_lock(ha); diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index f0eec4708ddd..aef33d1e346a 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c @@ -162,7 +162,7 @@ static const char *sdebug_version_date = "20210520"; #define DEF_VPD_USE_HOSTNO 1 #define DEF_WRITESAME_LENGTH 0xFFFF #define DEF_ATOMIC_WR 0 -#define DEF_ATOMIC_WR_MAX_LENGTH 8192 +#define DEF_ATOMIC_WR_MAX_LENGTH 128 #define DEF_ATOMIC_WR_ALIGN 2 #define DEF_ATOMIC_WR_GRAN 2 #define DEF_ATOMIC_WR_MAX_LENGTH_BNDRY (DEF_ATOMIC_WR_MAX_LENGTH) @@ -294,6 +294,14 @@ struct tape_block { #define FF_SA (F_SA_HIGH | F_SA_LOW) #define F_LONG_DELAY (F_SSU_DELAY | F_SYNC_DELAY) +/* Device selection bit mask */ +#define DS_ALL 0xffffffff +#define DS_SBC (1 << TYPE_DISK) +#define DS_SSC (1 << TYPE_TAPE) +#define DS_ZBC (1 << TYPE_ZBC) + +#define DS_NO_SSC (DS_ALL & ~DS_SSC) + #define SDEBUG_MAX_PARTS 4 #define SDEBUG_MAX_CMD_LEN 32 @@ -472,6 +480,7 @@ struct opcode_info_t { /* for terminating element */ u8 opcode; /* if num_attached > 0, preferred */ u16 sa; /* service action */ + u32 devsel; /* device type mask for this definition */ u32 flags; /* OR-ed set of SDEB_F_* */ int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *); const struct opcode_info_t *arrp; /* num_attached elements or NULL */ @@ -519,7 +528,8 @@ enum sdeb_opcode_index { SDEB_I_WRITE_FILEMARKS = 35, SDEB_I_SPACE = 36, SDEB_I_FORMAT_MEDIUM = 37, - SDEB_I_LAST_ELEM_P1 = 38, /* keep this last (previous + 1) */ + SDEB_I_ERASE = 38, + SDEB_I_LAST_ELEM_P1 = 39, /* keep this last (previous + 1) */ }; @@ -530,7 +540,7 @@ static const unsigned char opcode_ind_arr[256] = { SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0, SDEB_I_WRITE_FILEMARKS, SDEB_I_SPACE, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE, SDEB_I_RELEASE, - 0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG, + 0, SDEB_I_ERASE, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG, SDEB_I_ALLOW_REMOVAL, 0, /* 0x20; 0x20->0x3f: 10 byte cdbs */ 0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0, @@ -585,7 +595,9 @@ static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *); static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *); static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *); static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *); +static int resp_read_tape(struct scsi_cmnd *, struct sdebug_dev_info *); static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *); +static int resp_write_tape(struct scsi_cmnd *, struct sdebug_dev_info *); static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *); static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *); static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *); @@ -613,8 +625,10 @@ static int resp_read_blklimits(struct scsi_cmnd *, struct sdebug_dev_info *); static int resp_locate(struct scsi_cmnd *, struct sdebug_dev_info *); static int resp_write_filemarks(struct scsi_cmnd *, struct sdebug_dev_info *); static int resp_space(struct scsi_cmnd *, struct sdebug_dev_info *); +static int resp_read_position(struct scsi_cmnd *, struct sdebug_dev_info *); static int resp_rewind(struct scsi_cmnd *, struct sdebug_dev_info *); static int resp_format_medium(struct scsi_cmnd *, struct sdebug_dev_info *); +static int resp_erase(struct scsi_cmnd *, struct sdebug_dev_info *); static int sdebug_do_add_host(bool mk_new_store); static int sdebug_add_host_helper(int 
per_host_idx); @@ -629,113 +643,121 @@ static void sdebug_erase_all_stores(bool apart_from_first); * should be placed in opcode_info_arr[], the others should be placed here. */ static const struct opcode_info_t msense_iarr[] = { - {0, 0x1a, 0, F_D_IN, NULL, NULL, + {0, 0x1a, 0, DS_ALL, F_D_IN, NULL, NULL, {6, 0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, }; static const struct opcode_info_t mselect_iarr[] = { - {0, 0x15, 0, F_D_OUT, NULL, NULL, + {0, 0x15, 0, DS_ALL, F_D_OUT, NULL, NULL, {6, 0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, }; static const struct opcode_info_t read_iarr[] = { - {0, 0x28, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(10) */ + {0, 0x28, 0, DS_NO_SSC, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(10) */ {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} }, - {0, 0x8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) */ + {0, 0x8, 0, DS_NO_SSC, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) disk */ {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, - {0, 0xa8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(12) */ + {0, 0x8, 0, DS_SSC, F_D_IN | FF_MEDIA_IO, resp_read_tape, NULL, /* READ(6) tape */ + {6, 0x03, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, + {0, 0xa8, 0, DS_NO_SSC, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(12) */ {12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xc7, 0, 0, 0, 0} }, }; static const struct opcode_info_t write_iarr[] = { - {0, 0x2a, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0, /* WRITE(10) */ + {0, 0x2a, 0, DS_NO_SSC, F_D_OUT | FF_MEDIA_IO, resp_write_dt0, /* WRITE(10) */ NULL, {10, 0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} }, - {0, 0xa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0, /* WRITE(6) */ + {0, 0xa, 0, DS_NO_SSC, F_D_OUT | FF_MEDIA_IO, resp_write_dt0, /* WRITE(6) disk */ NULL, {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, - {0, 0xaa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0, /* WRITE(12) */ + {0, 0xa, 0, DS_SSC, F_D_OUT | FF_MEDIA_IO, resp_write_tape, /* WRITE(6) tape */ + NULL, {6, 0x01, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0} }, + {0, 0xaa, 0, DS_NO_SSC, F_D_OUT | FF_MEDIA_IO, resp_write_dt0, /* WRITE(12) */ NULL, {12, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xc7, 0, 0, 0, 0} }, }; static const struct opcode_info_t verify_iarr[] = { - {0, 0x2f, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,/* VERIFY(10) */ + {0, 0x2f, 0, DS_NO_SSC, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,/* VERIFY(10) */ NULL, {10, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} }, }; static const struct opcode_info_t sa_in_16_iarr[] = { - {0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL, + {0, 0x9e, 0x12, DS_NO_SSC, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL, {16, 0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0xc7} }, /* GET LBA STATUS(16) */ - {0, 0x9e, 0x16, F_SA_LOW | F_D_IN, resp_get_stream_status, NULL, + {0, 0x9e, 0x16, DS_NO_SSC, F_SA_LOW | F_D_IN, resp_get_stream_status, NULL, {16, 0x16, 0, 0, 0xff, 0xff, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0} }, /* GET STREAM STATUS */ }; static const struct opcode_info_t vl_iarr[] = { /* VARIABLE LENGTH */ - {0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0, + {0, 0x7f, 0xb, DS_NO_SSC, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0, NULL, {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0xb, 0xfa, 0, 0xff, 0xff, 0xff, 
0xff} }, /* WRITE(32) */ - {0, 0x7f, 0x11, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat, + {0, 0x7f, 0x11, DS_NO_SSC, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat, NULL, {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x11, 0xf8, 0, 0xff, 0xff, 0x0, 0x0} }, /* WRITE SCATTERED(32) */ }; static const struct opcode_info_t maint_in_iarr[] = { /* MAINT IN */ - {0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL, + {0, 0xa3, 0xc, DS_ALL, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL, {12, 0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0, 0, 0} }, /* REPORT SUPPORTED OPERATION CODES */ - {0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL, + {0, 0xa3, 0xd, DS_ALL, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL, {12, 0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0, 0, 0} }, /* REPORTED SUPPORTED TASK MANAGEMENT FUNCTIONS */ }; static const struct opcode_info_t write_same_iarr[] = { - {0, 0x93, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL, + {0, 0x93, 0, DS_NO_SSC, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL, {16, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* WRITE SAME(16) */ }; static const struct opcode_info_t reserve_iarr[] = { - {0, 0x16, 0, F_D_OUT, NULL, NULL, /* RESERVE(6) */ + {0, 0x16, 0, DS_ALL, F_D_OUT, NULL, NULL, /* RESERVE(6) */ {6, 0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, }; static const struct opcode_info_t release_iarr[] = { - {0, 0x17, 0, F_D_OUT, NULL, NULL, /* RELEASE(6) */ + {0, 0x17, 0, DS_ALL, F_D_OUT, NULL, NULL, /* RELEASE(6) */ {6, 0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, }; static const struct opcode_info_t sync_cache_iarr[] = { - {0, 0x91, 0, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, NULL, + {0, 0x91, 0, DS_NO_SSC, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, NULL, {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* SYNC_CACHE (16) */ }; static const struct opcode_info_t pre_fetch_iarr[] = { - {0, 0x90, 0, F_SYNC_DELAY | FF_MEDIA_IO, resp_pre_fetch, NULL, + {0, 0x90, 0, DS_NO_SSC, F_SYNC_DELAY | FF_MEDIA_IO, resp_pre_fetch, NULL, {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* PRE-FETCH (16) */ + {0, 0x34, 0, DS_SSC, F_SYNC_DELAY | FF_MEDIA_IO, resp_read_position, NULL, + {10, 0x1f, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xc7, 0, 0, + 0, 0, 0, 0} }, /* READ POSITION (10) */ }; static const struct opcode_info_t zone_out_iarr[] = { /* ZONE OUT(16) */ - {0, 0x94, 0x1, F_SA_LOW | F_M_ACCESS, resp_close_zone, NULL, + {0, 0x94, 0x1, DS_NO_SSC, F_SA_LOW | F_M_ACCESS, resp_close_zone, NULL, {16, 0x1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} }, /* CLOSE ZONE */ - {0, 0x94, 0x2, F_SA_LOW | F_M_ACCESS, resp_finish_zone, NULL, + {0, 0x94, 0x2, DS_NO_SSC, F_SA_LOW | F_M_ACCESS, resp_finish_zone, NULL, {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} }, /* FINISH ZONE */ - {0, 0x94, 0x4, F_SA_LOW | F_M_ACCESS, resp_rwp_zone, NULL, + {0, 0x94, 0x4, DS_NO_SSC, F_SA_LOW | F_M_ACCESS, resp_rwp_zone, NULL, {16, 0x4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} }, /* RESET WRITE POINTER */ }; static const struct opcode_info_t zone_in_iarr[] = { /* ZONE IN(16) */ - {0, 0x95, 0x6, F_SA_LOW | F_D_IN | F_M_ACCESS, NULL, NULL, + {0, 0x95, 0x6, DS_NO_SSC, F_SA_LOW | F_D_IN | F_M_ACCESS, NULL, NULL, {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* REPORT ZONES */ }; @@ -746,130 +768,132 @@ static const struct opcode_info_t zone_in_iarr[] = { /* ZONE IN(16) */ * REPORT SUPPORTED OPERATION CODES. */ static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = { /* 0 */ - {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* unknown opcodes */ + {0, 0, 0, DS_ALL, F_INV_OP | FF_RESPOND, NULL, NULL, /* unknown opcodes */ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, - {0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL, /* INQUIRY */ + {0, 0x12, 0, DS_ALL, FF_RESPOND | F_D_IN, resp_inquiry, NULL, /* INQUIRY */ {6, 0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, - {0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL, + {0, 0xa0, 0, DS_ALL, FF_RESPOND | F_D_IN, resp_report_luns, NULL, {12, 0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0, 0, 0} }, /* REPORT LUNS */ - {0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL, + {0, 0x3, 0, DS_ALL, FF_RESPOND | F_D_IN, resp_requests, NULL, {6, 0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, - {0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */ + {0, 0x0, 0, DS_ALL, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */ {6, 0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, /* 5 */ - {ARRAY_SIZE(msense_iarr), 0x5a, 0, F_D_IN, /* MODE SENSE(10) */ + {ARRAY_SIZE(msense_iarr), 0x5a, 0, DS_ALL, F_D_IN, /* MODE SENSE(10) */ resp_mode_sense, msense_iarr, {10, 0xf8, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} }, - {ARRAY_SIZE(mselect_iarr), 0x55, 0, F_D_OUT, /* MODE SELECT(10) */ + {ARRAY_SIZE(mselect_iarr), 0x55, 0, DS_ALL, F_D_OUT, /* MODE SELECT(10) */ resp_mode_select, mselect_iarr, {10, 0xf1, 0, 0, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} }, - {0, 0x4d, 0, F_D_IN, resp_log_sense, NULL, /* LOG SENSE */ + {0, 0x4d, 0, DS_NO_SSC, F_D_IN, resp_log_sense, NULL, /* LOG SENSE */ {10, 0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} }, - {0, 0x25, 0, F_D_IN, resp_readcap, NULL, /* READ CAPACITY(10) */ + {0, 0x25, 0, DS_NO_SSC, F_D_IN, resp_readcap, NULL, /* READ CAPACITY(10) */ {10, 0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0, 0, 0} }, - {ARRAY_SIZE(read_iarr), 0x88, 0, F_D_IN | FF_MEDIA_IO, /* READ(16) */ + {ARRAY_SIZE(read_iarr), 0x88, 0, DS_NO_SSC, F_D_IN | FF_MEDIA_IO, /* READ(16) */ resp_read_dt0, read_iarr, {16, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} }, /* 10 */ - {ARRAY_SIZE(write_iarr), 0x8a, 0, F_D_OUT | FF_MEDIA_IO, + {ARRAY_SIZE(write_iarr), 0x8a, 0, DS_NO_SSC, F_D_OUT | FF_MEDIA_IO, resp_write_dt0, write_iarr, /* WRITE(16) */ {16, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} }, - {0, 0x1b, 0, F_SSU_DELAY, resp_start_stop, NULL,/* START STOP UNIT */ + {0, 0x1b, 0, DS_ALL, F_SSU_DELAY, resp_start_stop, NULL,/* START STOP UNIT */ {6, 0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, - {ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN, + {ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, DS_NO_SSC, F_SA_LOW | F_D_IN, resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */ {16, 0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x1, 0xc7} }, - {0, 0x9f, 0x12, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat, + {0, 0x9f, 0x12, DS_NO_SSC, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat, NULL, {16, 0x12, 0xf9, 0x0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} }, /* 
SA_OUT(16), WRITE SCAT(16) */ - {ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, F_SA_LOW | F_D_IN, + {ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, DS_ALL, F_SA_LOW | F_D_IN, resp_report_tgtpgs, /* MAINT IN, REPORT TARGET PORT GROUPS */ maint_in_iarr, {12, 0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0, 0, 0} }, /* 15 */ - {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */ + {0, 0, 0, DS_ALL, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, - {ARRAY_SIZE(verify_iarr), 0x8f, 0, + {ARRAY_SIZE(verify_iarr), 0x8f, 0, DS_NO_SSC, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify, /* VERIFY(16) */ verify_iarr, {16, 0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, - {ARRAY_SIZE(vl_iarr), 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_MEDIA_IO, + {ARRAY_SIZE(vl_iarr), 0x7f, 0x9, DS_NO_SSC, F_SA_HIGH | F_D_IN | FF_MEDIA_IO, resp_read_dt0, vl_iarr, /* VARIABLE LENGTH, READ(32) */ {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff, 0xff, 0xff} }, - {ARRAY_SIZE(reserve_iarr), 0x56, 0, F_D_OUT, + {ARRAY_SIZE(reserve_iarr), 0x56, 0, DS_ALL, F_D_OUT, NULL, reserve_iarr, /* RESERVE(10) <no response function> */ {10, 0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} }, - {ARRAY_SIZE(release_iarr), 0x57, 0, F_D_OUT, + {ARRAY_SIZE(release_iarr), 0x57, 0, DS_ALL, F_D_OUT, NULL, release_iarr, /* RELEASE(10) <no response function> */ {10, 0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} }, /* 20 */ - {0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */ + {0, 0x1e, 0, DS_ALL, 0, NULL, NULL, /* ALLOW REMOVAL */ {6, 0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, - {0, 0x1, 0, 0, resp_rewind, NULL, + {0, 0x1, 0, DS_SSC, 0, resp_rewind, NULL, {6, 0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, - {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */ + {0, 0, 0, DS_NO_SSC, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, - {0, 0x1d, F_D_OUT, 0, NULL, NULL, /* SEND DIAGNOSTIC */ + {0, 0x1d, 0, DS_ALL, F_D_OUT, NULL, NULL, /* SEND DIAGNOSTIC */ {6, 0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, - {0, 0x42, 0, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL, /* UNMAP */ + {0, 0x42, 0, DS_NO_SSC, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL, /* UNMAP */ {10, 0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} }, /* 25 */ - {0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL, + {0, 0x3b, 0, DS_NO_SSC, F_D_OUT_MAYBE, resp_write_buffer, NULL, {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} }, /* WRITE_BUFFER */ - {ARRAY_SIZE(write_same_iarr), 0x41, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, + {ARRAY_SIZE(write_same_iarr), 0x41, 0, DS_NO_SSC, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_10, write_same_iarr, /* WRITE SAME(10) */ {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} }, - {ARRAY_SIZE(sync_cache_iarr), 0x35, 0, F_SYNC_DELAY | F_M_ACCESS, + {ARRAY_SIZE(sync_cache_iarr), 0x35, 0, DS_NO_SSC, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, sync_cache_iarr, {10, 0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} }, /* SYNC_CACHE (10) */ - {0, 0x89, 0, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL, + {0, 0x89, 0, DS_NO_SSC, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL, {16, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0x3f, 0xc7} }, /* COMPARE AND WRITE */ - {ARRAY_SIZE(pre_fetch_iarr), 0x34, 0, F_SYNC_DELAY | FF_MEDIA_IO, + {ARRAY_SIZE(pre_fetch_iarr), 
0x34, 0, DS_NO_SSC, F_SYNC_DELAY | FF_MEDIA_IO, resp_pre_fetch, pre_fetch_iarr, {10, 0x2, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} }, /* PRE-FETCH (10) */ /* READ POSITION (10) */ /* 30 */ - {ARRAY_SIZE(zone_out_iarr), 0x94, 0x3, F_SA_LOW | F_M_ACCESS, + {ARRAY_SIZE(zone_out_iarr), 0x94, 0x3, DS_NO_SSC, F_SA_LOW | F_M_ACCESS, resp_open_zone, zone_out_iarr, /* ZONE_OUT(16), OPEN ZONE) */ {16, 0x3 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x0, 0x0, 0xff, 0xff, 0x1, 0xc7} }, - {ARRAY_SIZE(zone_in_iarr), 0x95, 0x0, F_SA_LOW | F_M_ACCESS, + {ARRAY_SIZE(zone_in_iarr), 0x95, 0x0, DS_NO_SSC, F_SA_LOW | F_M_ACCESS, resp_report_zones, zone_in_iarr, /* ZONE_IN(16), REPORT ZONES) */ {16, 0x0 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xc7} }, /* 32 */ - {0, 0x0, 0x0, F_D_OUT | FF_MEDIA_IO, + {0, 0x9c, 0x0, DS_NO_SSC, F_D_OUT | FF_MEDIA_IO, resp_atomic_write, NULL, /* ATOMIC WRITE 16 */ {16, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff} }, - {0, 0x05, 0, F_D_IN, resp_read_blklimits, NULL, /* READ BLOCK LIMITS (6) */ + {0, 0x05, 0, DS_SSC, F_D_IN, resp_read_blklimits, NULL, /* READ BLOCK LIMITS (6) */ {6, 0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, - {0, 0x2b, 0, F_D_UNKN, resp_locate, NULL, /* LOCATE (10) */ + {0, 0x2b, 0, DS_SSC, F_D_UNKN, resp_locate, NULL, /* LOCATE (10) */ {10, 0x07, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} }, - {0, 0x10, 0, F_D_IN, resp_write_filemarks, NULL, /* WRITE FILEMARKS (6) */ + {0, 0x10, 0, DS_SSC, F_D_IN, resp_write_filemarks, NULL, /* WRITE FILEMARKS (6) */ {6, 0x01, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, - {0, 0x11, 0, F_D_IN, resp_space, NULL, /* SPACE (6) */ + {0, 0x11, 0, DS_SSC, F_D_IN, resp_space, NULL, /* SPACE (6) */ {6, 0x07, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, - {0, 0x4, 0, 0, resp_format_medium, NULL, /* FORMAT MEDIUM (6) */ + {0, 0x4, 0, DS_SSC, 0, resp_format_medium, NULL, /* FORMAT MEDIUM (6) */ {6, 0x3, 0x7, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, -/* 38 */ + {0, 0x19, 0, DS_SSC, F_D_IN, resp_erase, NULL, /* ERASE (6) */ + {6, 0x03, 0x33, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, +/* 39 */ /* sentinel */ - {0xff, 0, 0, 0, NULL, NULL, /* terminating element */ + {0xff, 0, 0, 0, 0, NULL, NULL, /* terminating element */ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, }; @@ -1015,6 +1039,19 @@ static const int condition_met_result = SAM_STAT_CONDITION_MET; static struct dentry *sdebug_debugfs_root; static ASYNC_DOMAIN_EXCLUSIVE(sdebug_async_domain); +static u32 sdebug_get_devsel(struct scsi_device *sdp) +{ + unsigned char devtype = sdp->type; + u32 devsel; + + if (devtype < 32) + devsel = (1 << devtype); + else + devsel = DS_ALL; + + return devsel; +} + static void sdebug_err_free(struct rcu_head *head) { struct sdebug_err_inject *inject = @@ -2032,13 +2069,19 @@ static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) unsigned char *cmd = scp->cmnd; u32 alloc_len, n; int ret; - bool have_wlun, is_disk, is_zbc, is_disk_zbc; + bool have_wlun, is_disk, is_zbc, is_disk_zbc, is_tape; alloc_len = get_unaligned_be16(cmd + 3); arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC); if (! 
arr) return DID_REQUEUE << 16; - is_disk = (sdebug_ptype == TYPE_DISK); + if (scp->device->type >= 32) { + is_disk = (sdebug_ptype == TYPE_DISK); + is_tape = (sdebug_ptype == TYPE_TAPE); + } else { + is_disk = (scp->device->type == TYPE_DISK); + is_tape = (scp->device->type == TYPE_TAPE); + } is_zbc = devip->zoned; is_disk_zbc = (is_disk || is_zbc); have_wlun = scsi_is_wlun(scp->device->lun); @@ -2047,7 +2090,8 @@ static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL)) pq_pdt = 0x7f; /* not present, PQ=3, PDT=0x1f */ else - pq_pdt = (sdebug_ptype & 0x1f); + pq_pdt = ((scp->device->type >= 32 ? + sdebug_ptype : scp->device->type) & 0x1f); arr[0] = pq_pdt; if (0x2 & cmd[1]) { /* CMDDT bit set */ mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1); @@ -2170,7 +2214,7 @@ static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) if (is_disk) { /* SBC-4 no version claimed */ put_unaligned_be16(0x600, arr + n); n += 2; - } else if (sdebug_ptype == TYPE_TAPE) { /* SSC-4 rev 3 */ + } else if (is_tape) { /* SSC-4 rev 3 */ put_unaligned_be16(0x525, arr + n); n += 2; } else if (is_zbc) { /* ZBC BSR INCITS 536 revision 05 */ @@ -2279,7 +2323,7 @@ static int resp_start_stop(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) changing = (stopped_state != want_stop); if (changing) atomic_xchg(&devip->stopped, want_stop); - if (sdebug_ptype == TYPE_TAPE && !want_stop) { + if (scp->device->type == TYPE_TAPE && !want_stop) { int i; set_bit(SDEBUG_UA_NOT_READY_TO_READY, devip->uas_bm); /* not legal! */ @@ -2454,11 +2498,12 @@ static int resp_rsup_opcodes(struct scsi_cmnd *scp, u8 reporting_opts, req_opcode, sdeb_i, supp; u16 req_sa, u; u32 alloc_len, a_len; - int k, offset, len, errsts, count, bump, na; + int k, offset, len, errsts, bump, na; const struct opcode_info_t *oip; const struct opcode_info_t *r_oip; u8 *arr; u8 *cmd = scp->cmnd; + u32 devsel = sdebug_get_devsel(scp->device); rctd = !!(cmd[2] & 0x80); reporting_opts = cmd[2] & 0x7; @@ -2481,34 +2526,30 @@ static int resp_rsup_opcodes(struct scsi_cmnd *scp, } switch (reporting_opts) { case 0: /* all commands */ - /* count number of commands */ - for (count = 0, oip = opcode_info_arr; - oip->num_attached != 0xff; ++oip) { - if (F_INV_OP & oip->flags) - continue; - count += (oip->num_attached + 1); - } bump = rctd ? 
20 : 8; - put_unaligned_be32(count * bump, arr); for (offset = 4, oip = opcode_info_arr; oip->num_attached != 0xff && offset < a_len; ++oip) { if (F_INV_OP & oip->flags) continue; + if ((devsel & oip->devsel) != 0) { + arr[offset] = oip->opcode; + put_unaligned_be16(oip->sa, arr + offset + 2); + if (rctd) + arr[offset + 5] |= 0x2; + if (FF_SA & oip->flags) + arr[offset + 5] |= 0x1; + put_unaligned_be16(oip->len_mask[0], arr + offset + 6); + if (rctd) + put_unaligned_be16(0xa, arr + offset + 8); + offset += bump; + } na = oip->num_attached; - arr[offset] = oip->opcode; - put_unaligned_be16(oip->sa, arr + offset + 2); - if (rctd) - arr[offset + 5] |= 0x2; - if (FF_SA & oip->flags) - arr[offset + 5] |= 0x1; - put_unaligned_be16(oip->len_mask[0], arr + offset + 6); - if (rctd) - put_unaligned_be16(0xa, arr + offset + 8); r_oip = oip; for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) { if (F_INV_OP & oip->flags) continue; - offset += bump; + if ((devsel & oip->devsel) == 0) + continue; arr[offset] = oip->opcode; put_unaligned_be16(oip->sa, arr + offset + 2); if (rctd) @@ -2516,14 +2557,15 @@ static int resp_rsup_opcodes(struct scsi_cmnd *scp, if (FF_SA & oip->flags) arr[offset + 5] |= 0x1; put_unaligned_be16(oip->len_mask[0], - arr + offset + 6); + arr + offset + 6); if (rctd) put_unaligned_be16(0xa, arr + offset + 8); + offset += bump; } oip = r_oip; - offset += bump; } + put_unaligned_be32(offset - 4, arr); break; case 1: /* one command: opcode only */ case 2: /* one command: opcode plus service action */ @@ -2549,13 +2591,15 @@ static int resp_rsup_opcodes(struct scsi_cmnd *scp, return check_condition_result; } if (0 == (FF_SA & oip->flags) && - req_opcode == oip->opcode) + (devsel & oip->devsel) != 0 && + req_opcode == oip->opcode) supp = 3; else if (0 == (FF_SA & oip->flags)) { na = oip->num_attached; for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) { - if (req_opcode == oip->opcode) + if (req_opcode == oip->opcode && + (devsel & oip->devsel) != 0) break; } supp = (k >= na) ? 1 : 3; @@ -2563,7 +2607,8 @@ static int resp_rsup_opcodes(struct scsi_cmnd *scp, na = oip->num_attached; for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) { - if (req_sa == oip->sa) + if (req_sa == oip->sa && + (devsel & oip->devsel) != 0) break; } supp = (k >= na) ? 1 : 3; @@ -2914,9 +2959,9 @@ static int resp_mode_sense(struct scsi_cmnd *scp, subpcode = cmd[3]; msense_6 = (MODE_SENSE == cmd[0]); llbaa = msense_6 ? false : !!(cmd[1] & 0x10); - is_disk = (sdebug_ptype == TYPE_DISK); + is_disk = (scp->device->type == TYPE_DISK); is_zbc = devip->zoned; - is_tape = (sdebug_ptype == TYPE_TAPE); + is_tape = (scp->device->type == TYPE_TAPE); if ((is_disk || is_zbc || is_tape) && !dbd) bd_len = llbaa ? 16 : 8; else @@ -3131,7 +3176,7 @@ static int resp_mode_select(struct scsi_cmnd *scp, md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2); bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6); off = (mselect6 ? 
4 : 8); - if (sdebug_ptype == TYPE_TAPE) { + if (scp->device->type == TYPE_TAPE) { int blksize; if (bd_len != 8) { @@ -3196,7 +3241,7 @@ static int resp_mode_select(struct scsi_cmnd *scp, } break; case 0xf: /* Compression mode page */ - if (sdebug_ptype != TYPE_TAPE) + if (scp->device->type != TYPE_TAPE) goto bad_pcode; if ((arr[off + 2] & 0x40) != 0) { devip->tape_dce = (arr[off + 2] & 0x80) != 0; @@ -3204,7 +3249,7 @@ static int resp_mode_select(struct scsi_cmnd *scp, } break; case 0x11: /* Medium Partition Mode Page (tape) */ - if (sdebug_ptype == TYPE_TAPE) { + if (scp->device->type == TYPE_TAPE) { int fld; fld = process_medium_part_m_pg(devip, &arr[off], pg_len); @@ -3563,6 +3608,30 @@ is_eop: return check_condition_result; } +enum {SDEBUG_READ_POSITION_ARR_SZ = 20}; +static int resp_read_position(struct scsi_cmnd *scp, + struct sdebug_dev_info *devip) +{ + u8 *cmd = scp->cmnd; + int all_length; + unsigned char arr[20]; + unsigned int pos; + + all_length = get_unaligned_be16(cmd + 7); + if ((cmd[1] & 0xfe) != 0 || + all_length != 0) { /* only short form */ + mk_sense_invalid_fld(scp, SDEB_IN_CDB, + all_length ? 7 : 1, 0); + return check_condition_result; + } + memset(arr, 0, SDEBUG_READ_POSITION_ARR_SZ); + arr[1] = devip->tape_partition; + pos = devip->tape_location[devip->tape_partition]; + put_unaligned_be32(pos, arr + 4); + put_unaligned_be32(pos, arr + 8); + return fill_from_dev_buffer(scp, arr, SDEBUG_READ_POSITION_ARR_SZ); +} + static int resp_rewind(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) { @@ -3604,10 +3673,6 @@ static int resp_format_medium(struct scsi_cmnd *scp, int res = 0; unsigned char *cmd = scp->cmnd; - if (sdebug_ptype != TYPE_TAPE) { - mk_sense_invalid_fld(scp, SDEB_IN_CDB, 0, -1); - return check_condition_result; - } if (cmd[2] > 2) { mk_sense_invalid_fld(scp, SDEB_IN_DATA, 2, -1); return check_condition_result; @@ -3631,6 +3696,19 @@ static int resp_format_medium(struct scsi_cmnd *scp, return 0; } +static int resp_erase(struct scsi_cmnd *scp, + struct sdebug_dev_info *devip) +{ + int partition = devip->tape_partition; + int pos = devip->tape_location[partition]; + struct tape_block *blp; + + blp = devip->tape_blocks[partition] + pos; + blp->fl_size = TAPE_BLOCK_EOD_FLAG; + + return 0; +} + static inline bool sdebug_dev_is_zoned(struct sdebug_dev_info *devip) { return devip->nr_zones != 0; @@ -4467,9 +4545,6 @@ static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) u8 *cmd = scp->cmnd; bool meta_data_locked = false; - if (sdebug_ptype == TYPE_TAPE) - return resp_read_tape(scp, devip); - switch (cmd[0]) { case READ_16: ei_lba = 0; @@ -4839,9 +4914,6 @@ static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) u8 *cmd = scp->cmnd; bool meta_data_locked = false; - if (sdebug_ptype == TYPE_TAPE) - return resp_write_tape(scp, devip); - switch (cmd[0]) { case WRITE_16: ei_lba = 0; @@ -5573,7 +5645,6 @@ static int resp_sync_cache(struct scsi_cmnd *scp, * * The pcode 0x34 is also used for READ POSITION by tape devices. 
*/ -enum {SDEBUG_READ_POSITION_ARR_SZ = 20}; static int resp_pre_fetch(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) { @@ -5585,31 +5656,6 @@ static int resp_pre_fetch(struct scsi_cmnd *scp, struct sdeb_store_info *sip = devip2sip(devip, true); u8 *fsp = sip->storep; - if (sdebug_ptype == TYPE_TAPE) { - if (cmd[0] == PRE_FETCH) { /* READ POSITION (10) */ - int all_length; - unsigned char arr[20]; - unsigned int pos; - - all_length = get_unaligned_be16(cmd + 7); - if ((cmd[1] & 0xfe) != 0 || - all_length != 0) { /* only short form */ - mk_sense_invalid_fld(scp, SDEB_IN_CDB, - all_length ? 7 : 1, 0); - return check_condition_result; - } - memset(arr, 0, SDEBUG_READ_POSITION_ARR_SZ); - arr[1] = devip->tape_partition; - pos = devip->tape_location[devip->tape_partition]; - put_unaligned_be32(pos, arr + 4); - put_unaligned_be32(pos, arr + 8); - return fill_from_dev_buffer(scp, arr, - SDEBUG_READ_POSITION_ARR_SZ); - } - mk_sense_invalid_opcode(scp); - return check_condition_result; - } - if (cmd[0] == PRE_FETCH) { /* 10 byte cdb */ lba = get_unaligned_be32(cmd + 2); nblks = get_unaligned_be16(cmd + 7); @@ -6645,7 +6691,7 @@ static void scsi_debug_sdev_destroy(struct scsi_device *sdp) debugfs_remove(devip->debugfs_entry); - if (sdebug_ptype == TYPE_TAPE) { + if (sdp->type == TYPE_TAPE) { kfree(devip->tape_blocks[0]); devip->tape_blocks[0] = NULL; } @@ -6833,18 +6879,16 @@ static int sdebug_fail_lun_reset(struct scsi_cmnd *cmnd) static void scsi_tape_reset_clear(struct sdebug_dev_info *devip) { - if (sdebug_ptype == TYPE_TAPE) { - int i; + int i; - devip->tape_blksize = TAPE_DEF_BLKSIZE; - devip->tape_density = TAPE_DEF_DENSITY; - devip->tape_partition = 0; - devip->tape_dce = 0; - for (i = 0; i < TAPE_MAX_PARTITIONS; i++) - devip->tape_location[i] = 0; - devip->tape_pending_nbr_partitions = -1; - /* Don't reset partitioning? */ - } + devip->tape_blksize = TAPE_DEF_BLKSIZE; + devip->tape_density = TAPE_DEF_DENSITY; + devip->tape_partition = 0; + devip->tape_dce = 0; + for (i = 0; i < TAPE_MAX_PARTITIONS; i++) + devip->tape_location[i] = 0; + devip->tape_pending_nbr_partitions = -1; + /* Don't reset partitioning? 
*/ } static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt) @@ -6862,7 +6906,8 @@ static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt) scsi_debug_stop_all_queued(sdp); if (devip) { set_bit(SDEBUG_UA_POR, devip->uas_bm); - scsi_tape_reset_clear(devip); + if (SCpnt->device->type == TYPE_TAPE) + scsi_tape_reset_clear(devip); } if (sdebug_fail_lun_reset(SCpnt)) { @@ -6901,7 +6946,8 @@ static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt) list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) { if (devip->target == sdp->id) { set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm); - scsi_tape_reset_clear(devip); + if (SCpnt->device->type == TYPE_TAPE) + scsi_tape_reset_clear(devip); ++k; } } @@ -6933,7 +6979,8 @@ static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt) list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) { set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm); - scsi_tape_reset_clear(devip); + if (SCpnt->device->type == TYPE_TAPE) + scsi_tape_reset_clear(devip); ++k; } @@ -6957,7 +7004,8 @@ static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt) list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) { set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm); - scsi_tape_reset_clear(devip); + if (SCpnt->device->type == TYPE_TAPE) + scsi_tape_reset_clear(devip); ++k; } } @@ -9173,6 +9221,7 @@ static int scsi_debug_queuecommand(struct Scsi_Host *shost, u32 flags; u16 sa; u8 opcode = cmd[0]; + u32 devsel = sdebug_get_devsel(scp->device); bool has_wlun_rl; bool inject_now; int ret = 0; @@ -9252,12 +9301,14 @@ static int scsi_debug_queuecommand(struct Scsi_Host *shost, else sa = get_unaligned_be16(cmd + 8); for (k = 0; k <= na; oip = r_oip->arrp + k++) { - if (opcode == oip->opcode && sa == oip->sa) + if (opcode == oip->opcode && sa == oip->sa && + (devsel & oip->devsel) != 0) break; } } else { /* since no service action only check opcode */ for (k = 0; k <= na; oip = r_oip->arrp + k++) { - if (opcode == oip->opcode) + if (opcode == oip->opcode && + (devsel & oip->devsel) != 0) break; } } diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c index 90f1393a23f8..a348df895dca 100644 --- a/drivers/scsi/scsi_devinfo.c +++ b/drivers/scsi/scsi_devinfo.c @@ -486,33 +486,6 @@ static struct scsi_dev_info_list *scsi_dev_info_list_find(const char *vendor, } /** - * scsi_dev_info_list_del_keyed - remove one dev_info list entry. - * @vendor: vendor string - * @model: model (product) string - * @key: specify list to use - * - * Description: - * Remove and destroy one dev_info entry for @vendor, @model - * in list specified by @key. - * - * Returns: 0 OK, -error on failure. - **/ -int scsi_dev_info_list_del_keyed(char *vendor, char *model, - enum scsi_devinfo_key key) -{ - struct scsi_dev_info_list *found; - - found = scsi_dev_info_list_find(vendor, model, key); - if (IS_ERR(found)) - return PTR_ERR(found); - - list_del(&found->dev_info_list); - kfree(found); - return 0; -} -EXPORT_SYMBOL(scsi_dev_info_list_del_keyed); - -/** * scsi_dev_info_list_add_str - parse dev_list and add to the scsi_dev_info_list. 
* @dev_list: string of device flags to add * diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h index 9fc397a9ce7a..5b2b19f5e8ec 100644 --- a/drivers/scsi/scsi_priv.h +++ b/drivers/scsi/scsi_priv.h @@ -79,8 +79,6 @@ extern int scsi_dev_info_list_add_keyed(int compatible, char *vendor, char *model, char *strflags, blist_flags_t flags, enum scsi_devinfo_key key); -extern int scsi_dev_info_list_del_keyed(char *vendor, char *model, - enum scsi_devinfo_key key); extern int scsi_dev_info_add_list(enum scsi_devinfo_key key, const char *name); extern int scsi_dev_info_remove_list(enum scsi_devinfo_key key); diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c index 082f76e76721..6b165a3ec6de 100644 --- a/drivers/scsi/scsi_transport_fc.c +++ b/drivers/scsi/scsi_transport_fc.c @@ -3509,7 +3509,7 @@ fc_remote_port_rolechg(struct fc_rport *rport, u32 roles) * state as the LLDD would not have had an rport * reference to pass us. * - * Take no action on the del_timer failure as the state + * Take no action on the timer_delete() failure as the state * machine state change will validate the * transaction. */ diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 950d8c9fb884..3f6e87705b62 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -3215,7 +3215,7 @@ static bool sd_is_perm_stream(struct scsi_disk *sdkp, unsigned int stream_id) return false; if (get_unaligned_be32(&buf.h.len) < sizeof(struct scsi_stream_status)) return false; - return buf.h.stream_status[0].perm; + return buf.s.perm; } static void sd_read_io_hints(struct scsi_disk *sdkp, unsigned char *buffer) diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index effb7e768165..3c02a5f7b5f3 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c @@ -1658,8 +1658,7 @@ static void register_sg_sysctls(void) static void unregister_sg_sysctls(void) { - if (hdr) - unregister_sysctl_table(hdr); + unregister_sysctl_table(hdr); } #else #define register_sg_sysctls() do { } while (0) diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c index 8a26eca4fdc9..1d784ee7671c 100644 --- a/drivers/scsi/smartpqi/smartpqi_init.c +++ b/drivers/scsi/smartpqi/smartpqi_init.c @@ -33,11 +33,11 @@ #define BUILD_TIMESTAMP #endif -#define DRIVER_VERSION "2.1.30-031" +#define DRIVER_VERSION "2.1.34-035" #define DRIVER_MAJOR 2 #define DRIVER_MINOR 1 -#define DRIVER_RELEASE 30 -#define DRIVER_REVISION 31 +#define DRIVER_RELEASE 34 +#define DRIVER_REVISION 35 #define DRIVER_NAME "Microchip SmartPQI Driver (v" \ DRIVER_VERSION BUILD_TIMESTAMP ")" @@ -68,6 +68,7 @@ static struct pqi_cmd_priv *pqi_cmd_priv(struct scsi_cmnd *cmd) static void pqi_verify_structures(void); static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info, enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason); +static void pqi_take_ctrl_devices_offline(struct pqi_ctrl_info *ctrl_info); static void pqi_ctrl_offline_worker(struct work_struct *work); static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info); static void pqi_scan_start(struct Scsi_Host *shost); @@ -2011,18 +2012,31 @@ static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info, PQI_DEV_INFO_BUFFER_LENGTH - count, "-:-"); - if (pqi_is_logical_device(device)) + if (pqi_is_logical_device(device)) { count += scnprintf(buffer + count, PQI_DEV_INFO_BUFFER_LENGTH - count, " %08x%08x", *((u32 *)&device->scsi3addr), *((u32 *)&device->scsi3addr[4])); - else + } else if (ctrl_info->rpl_extended_format_4_5_supported) { + if (device->device_type == 
SA_DEVICE_TYPE_NVME) + count += scnprintf(buffer + count, + PQI_DEV_INFO_BUFFER_LENGTH - count, + " %016llx%016llx", + get_unaligned_be64(&device->wwid[0]), + get_unaligned_be64(&device->wwid[8])); + else + count += scnprintf(buffer + count, + PQI_DEV_INFO_BUFFER_LENGTH - count, + " %016llx", + get_unaligned_be64(&device->wwid[0])); + } else { count += scnprintf(buffer + count, PQI_DEV_INFO_BUFFER_LENGTH - count, - " %016llx%016llx", - get_unaligned_be64(&device->wwid[0]), - get_unaligned_be64(&device->wwid[8])); + " %016llx", + get_unaligned_be64(&device->wwid[0])); + } + count += scnprintf(buffer + count, PQI_DEV_INFO_BUFFER_LENGTH - count, " %s %.8s %.16s ", @@ -5990,7 +6004,7 @@ static bool pqi_is_parity_write_stream(struct pqi_ctrl_info *ctrl_info, pqi_stream_data->next_lba = rmd.first_block + rmd.block_cnt; pqi_stream_data->last_accessed = jiffies; - per_cpu_ptr(device->raid_io_stats, smp_processor_id())->write_stream_cnt++; + per_cpu_ptr(device->raid_io_stats, raw_smp_processor_id())->write_stream_cnt++; return true; } @@ -6069,7 +6083,7 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scm rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device, scmd, queue_group); if (rc == 0 || rc == SCSI_MLQUEUE_HOST_BUSY) { raid_bypassed = true; - per_cpu_ptr(device->raid_io_stats, smp_processor_id())->raid_bypass_cnt++; + per_cpu_ptr(device->raid_io_stats, raw_smp_processor_id())->raid_bypass_cnt++; } } if (!raid_bypassed) @@ -9129,6 +9143,7 @@ static void pqi_take_ctrl_offline_deferred(struct pqi_ctrl_info *ctrl_info) pqi_ctrl_wait_until_quiesced(ctrl_info); pqi_fail_all_outstanding_requests(ctrl_info); pqi_ctrl_unblock_requests(ctrl_info); + pqi_take_ctrl_devices_offline(ctrl_info); } static void pqi_ctrl_offline_worker(struct work_struct *work) @@ -9203,6 +9218,27 @@ static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info, schedule_work(&ctrl_info->ctrl_offline_work); } +static void pqi_take_ctrl_devices_offline(struct pqi_ctrl_info *ctrl_info) +{ + int rc; + unsigned long flags; + struct pqi_scsi_dev *device; + + spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); + list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) { + rc = list_is_last(&device->scsi_device_list_entry, &ctrl_info->scsi_device_list); + if (rc) + continue; + + /* + * Is the sdev pointer NULL? 
+ */ + if (device->sdev) + scsi_device_set_state(device->sdev, SDEV_OFFLINE); + } + spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); +} + static void pqi_print_ctrl_info(struct pci_dev *pci_dev, const struct pci_device_id *id) { @@ -9711,6 +9747,10 @@ static const struct pci_device_id pqi_pci_id_table[] = { }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1bd4, 0x00a3) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 0x1ff9, 0x00a1) }, { @@ -10047,6 +10087,30 @@ static const struct pci_device_id pqi_pci_id_table[] = { }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x207d, 0x4044) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x207d, 0x4054) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x207d, 0x4084) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x207d, 0x4094) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x207d, 0x4140) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x207d, 0x4240) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADVANTECH, 0x8312) }, { @@ -10263,6 +10327,14 @@ static const struct pci_device_id pqi_pci_id_table[] = { }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1018, 0x8238) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1f3f, 0x0610) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_LENOVO, 0x0220) }, { @@ -10271,10 +10343,30 @@ static const struct pci_device_id pqi_pci_id_table[] = { }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_LENOVO, 0x0222) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_LENOVO, 0x0223) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_LENOVO, 0x0224) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_LENOVO, 0x0225) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_LENOVO, 0x0520) }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_LENOVO, 0x0521) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_LENOVO, 0x0522) }, { @@ -10295,6 +10387,26 @@ static const struct pci_device_id pqi_pci_id_table[] = { }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_LENOVO, 0x0624) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_LENOVO, 0x0625) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_LENOVO, 0x0626) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_LENOVO, 0x0627) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_LENOVO, 0x0628) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 0x1014, 0x0718) }, { @@ -10323,6 +10435,10 @@ static const struct pci_device_id pqi_pci_id_table[] = { }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1ded, 0x3301) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 0x1ff9, 0x0045) }, { @@ -10471,6 +10587,10 @@ static const struct pci_device_id pqi_pci_id_table[] = { }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1f51, 0x100b) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 0x1f51, 0x100e) }, { diff --git a/drivers/soc/Kconfig b/drivers/soc/Kconfig index 6a8daeb8c4b9..a2d65adffb80 100644 --- a/drivers/soc/Kconfig +++ b/drivers/soc/Kconfig @@ -23,11 +23,13 @@ source "drivers/soc/qcom/Kconfig" source "drivers/soc/renesas/Kconfig" source "drivers/soc/rockchip/Kconfig" source "drivers/soc/samsung/Kconfig" +source "drivers/soc/sophgo/Kconfig" source 
"drivers/soc/sunxi/Kconfig" source "drivers/soc/tegra/Kconfig" source "drivers/soc/ti/Kconfig" source "drivers/soc/ux500/Kconfig" source "drivers/soc/versatile/Kconfig" +source "drivers/soc/vt8500/Kconfig" source "drivers/soc/xilinx/Kconfig" endmenu diff --git a/drivers/soc/Makefile b/drivers/soc/Makefile index 2037a8695cb2..c9e689080ceb 100644 --- a/drivers/soc/Makefile +++ b/drivers/soc/Makefile @@ -29,9 +29,11 @@ obj-y += qcom/ obj-y += renesas/ obj-y += rockchip/ obj-$(CONFIG_SOC_SAMSUNG) += samsung/ +obj-y += sophgo/ obj-y += sunxi/ obj-$(CONFIG_ARCH_TEGRA) += tegra/ obj-y += ti/ obj-$(CONFIG_ARCH_U8500) += ux500/ obj-y += versatile/ +obj-y += vt8500/ obj-y += xilinx/ diff --git a/drivers/soc/amlogic/meson-clk-measure.c b/drivers/soc/amlogic/meson-clk-measure.c index a6453ffeb753..d862e30a244e 100644 --- a/drivers/soc/amlogic/meson-clk-measure.c +++ b/drivers/soc/amlogic/meson-clk-measure.c @@ -14,11 +14,6 @@ static DEFINE_MUTEX(measure_lock); -#define MSR_CLK_DUTY 0x0 -#define MSR_CLK_REG0 0x4 -#define MSR_CLK_REG1 0x8 -#define MSR_CLK_REG2 0xc - #define MSR_DURATION GENMASK(15, 0) #define MSR_ENABLE BIT(16) #define MSR_CONT BIT(17) /* continuous measurement */ @@ -33,23 +28,34 @@ static DEFINE_MUTEX(measure_lock); #define DIV_STEP 32 #define DIV_MAX 640 -#define CLK_MSR_MAX 128 - struct meson_msr_id { struct meson_msr *priv; unsigned int id; const char *name; }; +struct msr_reg_offset { + unsigned int duty_val; + unsigned int freq_ctrl; + unsigned int duty_ctrl; + unsigned int freq_val; +}; + +struct meson_msr_data { + struct meson_msr_id *msr_table; + unsigned int msr_count; + const struct msr_reg_offset *reg; +}; + struct meson_msr { struct regmap *regmap; - struct meson_msr_id msr_table[CLK_MSR_MAX]; + struct meson_msr_data data; }; #define CLK_MSR_ID(__id, __name) \ [__id] = {.id = __id, .name = __name,} -static struct meson_msr_id clk_msr_m8[CLK_MSR_MAX] = { +static const struct meson_msr_id clk_msr_m8[] = { CLK_MSR_ID(0, "ring_osc_out_ee0"), CLK_MSR_ID(1, "ring_osc_out_ee1"), CLK_MSR_ID(2, "ring_osc_out_ee2"), @@ -98,7 +104,7 @@ static struct meson_msr_id clk_msr_m8[CLK_MSR_MAX] = { CLK_MSR_ID(63, "mipi_csi_cfg"), }; -static struct meson_msr_id clk_msr_gx[CLK_MSR_MAX] = { +static const struct meson_msr_id clk_msr_gx[] = { CLK_MSR_ID(0, "ring_osc_out_ee_0"), CLK_MSR_ID(1, "ring_osc_out_ee_1"), CLK_MSR_ID(2, "ring_osc_out_ee_2"), @@ -168,7 +174,7 @@ static struct meson_msr_id clk_msr_gx[CLK_MSR_MAX] = { CLK_MSR_ID(82, "ge2d"), }; -static struct meson_msr_id clk_msr_axg[CLK_MSR_MAX] = { +static const struct meson_msr_id clk_msr_axg[] = { CLK_MSR_ID(0, "ring_osc_out_ee_0"), CLK_MSR_ID(1, "ring_osc_out_ee_1"), CLK_MSR_ID(2, "ring_osc_out_ee_2"), @@ -242,7 +248,7 @@ static struct meson_msr_id clk_msr_axg[CLK_MSR_MAX] = { CLK_MSR_ID(109, "audio_locker_in"), }; -static struct meson_msr_id clk_msr_g12a[CLK_MSR_MAX] = { +static const struct meson_msr_id clk_msr_g12a[] = { CLK_MSR_ID(0, "ring_osc_out_ee_0"), CLK_MSR_ID(1, "ring_osc_out_ee_1"), CLK_MSR_ID(2, "ring_osc_out_ee_2"), @@ -358,7 +364,7 @@ static struct meson_msr_id clk_msr_g12a[CLK_MSR_MAX] = { CLK_MSR_ID(122, "audio_pdm_dclk"), }; -static struct meson_msr_id clk_msr_sm1[CLK_MSR_MAX] = { +static const struct meson_msr_id clk_msr_sm1[] = { CLK_MSR_ID(0, "ring_osc_out_ee_0"), CLK_MSR_ID(1, "ring_osc_out_ee_1"), CLK_MSR_ID(2, "ring_osc_out_ee_2"), @@ -488,10 +494,304 @@ static struct meson_msr_id clk_msr_sm1[CLK_MSR_MAX] = { CLK_MSR_ID(127, "csi2_data"), }; +static const struct meson_msr_id clk_msr_c3[] = { + CLK_MSR_ID(0, 
"sys_clk"), + CLK_MSR_ID(1, "axi_clk"), + CLK_MSR_ID(2, "rtc_clk"), + CLK_MSR_ID(3, "p20_usb2_ckout"), + CLK_MSR_ID(4, "eth_mpll_test"), + CLK_MSR_ID(5, "sys_pll"), + CLK_MSR_ID(6, "cpu_clk_div16"), + CLK_MSR_ID(7, "ts_pll"), + CLK_MSR_ID(8, "fclk_div2"), + CLK_MSR_ID(9, "fclk_div2p5"), + CLK_MSR_ID(10, "fclk_div3"), + CLK_MSR_ID(11, "fclk_div4"), + CLK_MSR_ID(12, "fclk_div5"), + CLK_MSR_ID(13, "fclk_div7"), + CLK_MSR_ID(15, "fclk_50m"), + CLK_MSR_ID(16, "sys_oscin32k_i"), + CLK_MSR_ID(17, "mclk_pll"), + CLK_MSR_ID(19, "hifi_pll"), + CLK_MSR_ID(20, "gp0_pll"), + CLK_MSR_ID(21, "gp1_pll"), + CLK_MSR_ID(22, "eth_mppll_50m_ckout"), + CLK_MSR_ID(23, "sys_pll_div16"), + CLK_MSR_ID(24, "ddr_dpll_pt_clk"), + CLK_MSR_ID(26, "nna_core"), + CLK_MSR_ID(27, "rtc_sec_pulse_out"), + CLK_MSR_ID(28, "rtc_osc_clk_out"), + CLK_MSR_ID(29, "debug_in_clk"), + CLK_MSR_ID(30, "mod_eth_phy_ref_clk"), + CLK_MSR_ID(31, "mod_eth_tx_clk"), + CLK_MSR_ID(32, "eth_125m"), + CLK_MSR_ID(33, "eth_rmii"), + CLK_MSR_ID(34, "co_clkin_to_mac"), + CLK_MSR_ID(36, "co_rx_clk"), + CLK_MSR_ID(37, "co_tx_clk"), + CLK_MSR_ID(38, "eth_phy_rxclk"), + CLK_MSR_ID(39, "eth_phy_plltxclk"), + CLK_MSR_ID(40, "ephy_test_clk"), + CLK_MSR_ID(66, "vapb"), + CLK_MSR_ID(67, "ge2d"), + CLK_MSR_ID(68, "dewarpa"), + CLK_MSR_ID(70, "mipi_dsi_meas"), + CLK_MSR_ID(71, "dsi_phy"), + CLK_MSR_ID(79, "rama"), + CLK_MSR_ID(94, "vc9000e_core"), + CLK_MSR_ID(95, "vc9000e_sys"), + CLK_MSR_ID(96, "vc9000e_aclk"), + CLK_MSR_ID(97, "hcodec"), + CLK_MSR_ID(106, "deskew_pll_clk_div32_out"), + CLK_MSR_ID(107, "mipi_csi_phy_clk_out[0]"), + CLK_MSR_ID(108, "mipi_csi_phy_clk_out[1]"), + CLK_MSR_ID(110, "spifc"), + CLK_MSR_ID(111, "saradc"), + CLK_MSR_ID(112, "ts"), + CLK_MSR_ID(113, "sd_emmc_c"), + CLK_MSR_ID(114, "sd_emmc_b"), + CLK_MSR_ID(115, "sd_emmc_a"), + CLK_MSR_ID(116, "gpio_msr_clk"), + CLK_MSR_ID(117, "spicc_b"), + CLK_MSR_ID(118, "spicc_a"), + CLK_MSR_ID(122, "mod_audio_pdm_dclk_o"), + CLK_MSR_ID(124, "o_earcrx_dmac_clk"), + CLK_MSR_ID(125, "o_earcrx_cmdc_clk"), + CLK_MSR_ID(126, "o_earctx_dmac_clk"), + CLK_MSR_ID(127, "o_earctx_cmdc_clk"), + CLK_MSR_ID(128, "o_tohdmitx_bclk"), + CLK_MSR_ID(129, "o_tohdmitx_mclk"), + CLK_MSR_ID(130, "o_tohdmitx_spdif_clk"), + CLK_MSR_ID(131, "o_toacodec_bclk"), + CLK_MSR_ID(132, "o_toacodec_mclk"), + CLK_MSR_ID(133, "o_spdifout_b_mst_clk"), + CLK_MSR_ID(134, "o_spdifout_mst_clk"), + CLK_MSR_ID(135, "o_spdifin_mst_clk"), + CLK_MSR_ID(136, "o_audio_mclk"), + CLK_MSR_ID(137, "o_vad_clk"), + CLK_MSR_ID(138, "o_tdmout_d_sclk"), + CLK_MSR_ID(139, "o_tdmout_c_sclk"), + CLK_MSR_ID(140, "o_tdmout_b_sclk"), + CLK_MSR_ID(141, "o_tdmout_a_sclk"), + CLK_MSR_ID(142, "o_tdminb_1b_sclk"), + CLK_MSR_ID(143, "o_tdmin_1b_sclk"), + CLK_MSR_ID(144, "o_tdmin_d_sclk"), + CLK_MSR_ID(145, "o_tdmin_c_sclk"), + CLK_MSR_ID(146, "o_tdmin_b_sclk"), + CLK_MSR_ID(147, "o_tdmin_a_sclk"), + CLK_MSR_ID(148, "o_resampleb_clk"), + CLK_MSR_ID(149, "o_resamplea_clk"), + CLK_MSR_ID(150, "o_pdmb_sysclk"), + CLK_MSR_ID(151, "o_pdmb_dclk"), + CLK_MSR_ID(152, "o_pdm_sysclk"), + CLK_MSR_ID(153, "o_pdm_dclk"), + CLK_MSR_ID(154, "c_alockerb_out_clk"), + CLK_MSR_ID(155, "c_alockerb_in_clk"), + CLK_MSR_ID(156, "c_alocker_out_clk"), + CLK_MSR_ID(157, "c_alocker_in_clk"), + CLK_MSR_ID(158, "audio_mst_clk[34]"), + CLK_MSR_ID(159, "audio_mst_clk[35]"), + CLK_MSR_ID(160, "pwm_n"), + CLK_MSR_ID(161, "pwm_m"), + CLK_MSR_ID(162, "pwm_l"), + CLK_MSR_ID(163, "pwm_k"), + CLK_MSR_ID(164, "pwm_j"), + CLK_MSR_ID(165, "pwm_i"), + CLK_MSR_ID(166, "pwm_h"), + CLK_MSR_ID(167, "pwm_g"), + 
CLK_MSR_ID(168, "pwm_f"), + CLK_MSR_ID(169, "pwm_e"), + CLK_MSR_ID(170, "pwm_d"), + CLK_MSR_ID(171, "pwm_c"), + CLK_MSR_ID(172, "pwm_b"), + CLK_MSR_ID(173, "pwm_a"), + CLK_MSR_ID(174, "AU_DAC1_CLK_TO_GPIO"), + CLK_MSR_ID(175, "AU_ADC_CLK_TO_GPIO"), + CLK_MSR_ID(176, "rng_ring_osc_clk[0]"), + CLK_MSR_ID(177, "rng_ring_osc_clk[1]"), + CLK_MSR_ID(178, "rng_ring_osc_clk[2]"), + CLK_MSR_ID(179, "rng_ring_osc_clk[3]"), + CLK_MSR_ID(180, "sys_cpu_ring_osc_clk[0]"), + CLK_MSR_ID(181, "sys_cpu_ring_osc_clk[1]"), + CLK_MSR_ID(182, "sys_cpu_ring_osc_clk[2]"), + CLK_MSR_ID(183, "sys_cpu_ring_osc_clk[3]"), + CLK_MSR_ID(184, "sys_cpu_ring_osc_clk[4]"), + CLK_MSR_ID(185, "sys_cpu_ring_osc_clk[5]"), + CLK_MSR_ID(186, "sys_cpu_ring_osc_clk[6]"), + CLK_MSR_ID(187, "sys_cpu_ring_osc_clk[7]"), + CLK_MSR_ID(188, "sys_cpu_ring_osc_clk[8]"), + CLK_MSR_ID(189, "sys_cpu_ring_osc_clk[9]"), + CLK_MSR_ID(190, "sys_cpu_ring_osc_clk[10]"), + CLK_MSR_ID(191, "sys_cpu_ring_osc_clk[11]"), + CLK_MSR_ID(192, "am_ring_osc_clk_out[12](dmc)"), + CLK_MSR_ID(193, "am_ring_osc_clk_out[13](rama)"), + CLK_MSR_ID(194, "am_ring_osc_clk_out[14](nna)"), + CLK_MSR_ID(195, "am_ring_osc_clk_out[15](nna)"), + CLK_MSR_ID(200, "rng_ring_osc_clk_1[0]"), + CLK_MSR_ID(201, "rng_ring_osc_clk_1[1]"), + CLK_MSR_ID(202, "rng_ring_osc_clk_1[2]"), + CLK_MSR_ID(203, "rng_ring_osc_clk_1[3]"), + +}; + +static const struct meson_msr_id clk_msr_s4[] = { + CLK_MSR_ID(0, "sys_clk"), + CLK_MSR_ID(1, "axi_clk"), + CLK_MSR_ID(2, "rtc_clk"), + CLK_MSR_ID(5, "mali"), + CLK_MSR_ID(6, "cpu_clk_div16"), + CLK_MSR_ID(7, "ceca_clk"), + CLK_MSR_ID(8, "cecb_clk"), + CLK_MSR_ID(10, "fclk_div5"), + CLK_MSR_ID(11, "mpll0"), + CLK_MSR_ID(12, "mpll1"), + CLK_MSR_ID(13, "mpll2"), + CLK_MSR_ID(14, "mpll3"), + CLK_MSR_ID(15, "fclk_50m"), + CLK_MSR_ID(16, "pcie_clk_inp"), + CLK_MSR_ID(17, "pcie_clk_inn"), + CLK_MSR_ID(18, "mpll_clk_test_out"), + CLK_MSR_ID(19, "hifi_pll"), + CLK_MSR_ID(20, "gp0_pll"), + CLK_MSR_ID(21, "gp1_pll"), + CLK_MSR_ID(22, "eth_mppll_50m_ckout"), + CLK_MSR_ID(23, "sys_pll_div16"), + CLK_MSR_ID(24, "ddr_dpll_pt_clk"), + CLK_MSR_ID(30, "mod_eth_phy_ref_clk"), + CLK_MSR_ID(31, "mod_eth_tx_clk"), + CLK_MSR_ID(32, "eth_125m"), + CLK_MSR_ID(33, "eth_rmii"), + CLK_MSR_ID(34, "co_clkin_to_mac"), + CLK_MSR_ID(35, "mod_eth_rx_clk_rmii"), + CLK_MSR_ID(36, "co_rx_clk"), + CLK_MSR_ID(37, "co_tx_clk"), + CLK_MSR_ID(38, "eth_phy_rxclk"), + CLK_MSR_ID(39, "eth_phy_plltxclk"), + CLK_MSR_ID(40, "ephy_test_clk"), + CLK_MSR_ID(50, "vid_pll_div_clk_out"), + CLK_MSR_ID(51, "enci"), + CLK_MSR_ID(52, "encp"), + CLK_MSR_ID(53, "encl"), + CLK_MSR_ID(54, "vdac"), + CLK_MSR_ID(55, "cdac_clk_c"), + CLK_MSR_ID(56, "mod_tcon_clko"), + CLK_MSR_ID(57, "lcd_an_clk_ph2"), + CLK_MSR_ID(58, "lcd_an_clk_ph3"), + CLK_MSR_ID(59, "hdmitx_pixel"), + CLK_MSR_ID(60, "vdin_meas"), + CLK_MSR_ID(61, "vpu"), + CLK_MSR_ID(62, "vpu_clkb"), + CLK_MSR_ID(63, "vpu_clkb_tmp"), + CLK_MSR_ID(64, "vpu_clkc"), + CLK_MSR_ID(65, "vid_lock"), + CLK_MSR_ID(66, "vapb"), + CLK_MSR_ID(67, "ge2d"), + CLK_MSR_ID(68, "cts_hdcp22_esmclk"), + CLK_MSR_ID(69, "cts_hdcp22_skpclk"), + CLK_MSR_ID(76, "hdmitx_tmds"), + CLK_MSR_ID(77, "hdmitx_sys_clk"), + CLK_MSR_ID(78, "hdmitx_fe_clk"), + CLK_MSR_ID(79, "rama"), + CLK_MSR_ID(93, "vdec"), + CLK_MSR_ID(99, "hevcf"), + CLK_MSR_ID(100, "demod_core"), + CLK_MSR_ID(101, "adc_extclk_in"), + CLK_MSR_ID(102, "cts_demod_core_t2_clk"), + CLK_MSR_ID(103, "adc_dpll_intclk"), + CLK_MSR_ID(104, "adc_dpll_clk_b3"), + CLK_MSR_ID(105, "s2_adc_clk"), + CLK_MSR_ID(106, "deskew_pll_clk_div32_out"), 
+ CLK_MSR_ID(110, "sc"), + CLK_MSR_ID(111, "sar_adc"), + CLK_MSR_ID(113, "sd_emmc_c"), + CLK_MSR_ID(114, "sd_emmc_b"), + CLK_MSR_ID(115, "sd_emmc_a"), + CLK_MSR_ID(116, "gpio_msr_clk"), + CLK_MSR_ID(118, "spicc0"), + CLK_MSR_ID(121, "ts"), + CLK_MSR_ID(130, "audio_vad_clk"), + CLK_MSR_ID(131, "acodec_dac_clk_x128"), + CLK_MSR_ID(132, "audio_locker_in_clk"), + CLK_MSR_ID(133, "audio_locker_out_clk"), + CLK_MSR_ID(134, "audio_tdmout_c_sclk"), + CLK_MSR_ID(135, "audio_tdmout_b_sclk"), + CLK_MSR_ID(136, "audio_tdmout_a_sclk"), + CLK_MSR_ID(137, "audio_tdmin_lb_sclk"), + CLK_MSR_ID(138, "audio_tdmin_c_sclk"), + CLK_MSR_ID(139, "audio_tdmin_b_sclk"), + CLK_MSR_ID(140, "audio_tdmin_a_sclk"), + CLK_MSR_ID(141, "audio_resamplea_clk"), + CLK_MSR_ID(142, "audio_pdm_sysclk"), + CLK_MSR_ID(143, "audio_spdifout_b_mst_clk"), + CLK_MSR_ID(144, "audio_spdifout_mst_clk"), + CLK_MSR_ID(145, "audio_spdifin_mst_clk"), + CLK_MSR_ID(146, "audio_pdm_dclk"), + CLK_MSR_ID(147, "audio_resampleb_clk"), + CLK_MSR_ID(160, "pwm_j"), + CLK_MSR_ID(161, "pwm_i"), + CLK_MSR_ID(162, "pwm_h"), + CLK_MSR_ID(163, "pwm_g"), + CLK_MSR_ID(164, "pwm_f"), + CLK_MSR_ID(165, "pwm_e"), + CLK_MSR_ID(166, "pwm_d"), + CLK_MSR_ID(167, "pwm_c"), + CLK_MSR_ID(168, "pwm_b"), + CLK_MSR_ID(169, "pwm_a"), + CLK_MSR_ID(176, "rng_ring_0"), + CLK_MSR_ID(177, "rng_ring_1"), + CLK_MSR_ID(178, "rng_ring_2"), + CLK_MSR_ID(179, "rng_ring_3"), + CLK_MSR_ID(180, "dmc_osc_ring(LVT16)"), + CLK_MSR_ID(181, "gpu_osc_ring0(LVT16)"), + CLK_MSR_ID(182, "gpu_osc_ring1(ULVT16)"), + CLK_MSR_ID(183, "gpu_osc_ring2(SLVT16)"), + CLK_MSR_ID(184, "vpu_osc_ring0(SVT24)"), + CLK_MSR_ID(185, "vpu_osc_ring1(LVT20)"), + CLK_MSR_ID(186, "vpu_osc_ring2(LVT16)"), + CLK_MSR_ID(187, "dos_osc_ring0(SVT24)"), + CLK_MSR_ID(188, "dos_osc_ring1(SVT16)"), + CLK_MSR_ID(189, "dos_osc_ring2(LVT16)"), + CLK_MSR_ID(190, "dos_osc_ring3(ULVT20)"), + CLK_MSR_ID(192, "axi_sram_osc_ring(SVT16)"), + CLK_MSR_ID(193, "demod_osc_ring0"), + CLK_MSR_ID(194, "demod_osc_ring1"), + CLK_MSR_ID(195, "sar_osc_ring"), + CLK_MSR_ID(196, "sys_cpu_osc_ring0"), + CLK_MSR_ID(197, "sys_cpu_osc_ring1"), + CLK_MSR_ID(198, "sys_cpu_osc_ring2"), + CLK_MSR_ID(199, "sys_cpu_osc_ring3"), + CLK_MSR_ID(200, "sys_cpu_osc_ring4"), + CLK_MSR_ID(201, "sys_cpu_osc_ring5"), + CLK_MSR_ID(202, "sys_cpu_osc_ring6"), + CLK_MSR_ID(203, "sys_cpu_osc_ring7"), + CLK_MSR_ID(204, "sys_cpu_osc_ring8"), + CLK_MSR_ID(205, "sys_cpu_osc_ring9"), + CLK_MSR_ID(206, "sys_cpu_osc_ring10"), + CLK_MSR_ID(207, "sys_cpu_osc_ring11"), + CLK_MSR_ID(208, "sys_cpu_osc_ring12"), + CLK_MSR_ID(209, "sys_cpu_osc_ring13"), + CLK_MSR_ID(210, "sys_cpu_osc_ring14"), + CLK_MSR_ID(211, "sys_cpu_osc_ring15"), + CLK_MSR_ID(212, "sys_cpu_osc_ring16"), + CLK_MSR_ID(213, "sys_cpu_osc_ring17"), + CLK_MSR_ID(214, "sys_cpu_osc_ring18"), + CLK_MSR_ID(215, "sys_cpu_osc_ring19"), + CLK_MSR_ID(216, "sys_cpu_osc_ring20"), + CLK_MSR_ID(217, "sys_cpu_osc_ring21"), + CLK_MSR_ID(218, "sys_cpu_osc_ring22"), + CLK_MSR_ID(219, "sys_cpu_osc_ring23"), + CLK_MSR_ID(220, "sys_cpu_osc_ring24"), + CLK_MSR_ID(221, "sys_cpu_osc_ring25"), + CLK_MSR_ID(222, "sys_cpu_osc_ring26"), + CLK_MSR_ID(223, "sys_cpu_osc_ring27"), + +}; + static int meson_measure_id(struct meson_msr_id *clk_msr_id, - unsigned int duration) + unsigned int duration) { struct meson_msr *priv = clk_msr_id->priv; + const struct msr_reg_offset *reg = priv->data.reg; unsigned int val; int ret; @@ -499,22 +799,22 @@ static int meson_measure_id(struct meson_msr_id *clk_msr_id, if (ret) return ret; - regmap_write(priv->regmap, 
MSR_CLK_REG0, 0); + regmap_write(priv->regmap, reg->freq_ctrl, 0); /* Set measurement duration */ - regmap_update_bits(priv->regmap, MSR_CLK_REG0, MSR_DURATION, + regmap_update_bits(priv->regmap, reg->freq_ctrl, MSR_DURATION, FIELD_PREP(MSR_DURATION, duration - 1)); /* Set ID */ - regmap_update_bits(priv->regmap, MSR_CLK_REG0, MSR_CLK_SRC, + regmap_update_bits(priv->regmap, reg->freq_ctrl, MSR_CLK_SRC, FIELD_PREP(MSR_CLK_SRC, clk_msr_id->id)); /* Enable & Start */ - regmap_update_bits(priv->regmap, MSR_CLK_REG0, + regmap_update_bits(priv->regmap, reg->freq_ctrl, MSR_RUN | MSR_ENABLE, MSR_RUN | MSR_ENABLE); - ret = regmap_read_poll_timeout(priv->regmap, MSR_CLK_REG0, + ret = regmap_read_poll_timeout(priv->regmap, reg->freq_ctrl, val, !(val & MSR_BUSY), 10, 10000); if (ret) { mutex_unlock(&measure_lock); @@ -522,10 +822,10 @@ static int meson_measure_id(struct meson_msr_id *clk_msr_id, } /* Disable */ - regmap_update_bits(priv->regmap, MSR_CLK_REG0, MSR_ENABLE, 0); + regmap_update_bits(priv->regmap, reg->freq_ctrl, MSR_ENABLE, 0); /* Get the value in multiple of gate time counts */ - regmap_read(priv->regmap, MSR_CLK_REG2, &val); + regmap_read(priv->regmap, reg->freq_val, &val); mutex_unlock(&measure_lock); @@ -573,13 +873,14 @@ DEFINE_SHOW_ATTRIBUTE(clk_msr); static int clk_msr_summary_show(struct seq_file *s, void *data) { struct meson_msr_id *msr_table = s->private; + unsigned int msr_count = msr_table->priv->data.msr_count; unsigned int precision = 0; int val, i; seq_puts(s, " clock rate precision\n"); seq_puts(s, "---------------------------------------------\n"); - for (i = 0 ; i < CLK_MSR_MAX ; ++i) { + for (i = 0 ; i < msr_count ; ++i) { if (!msr_table[i].name) continue; @@ -595,18 +896,18 @@ static int clk_msr_summary_show(struct seq_file *s, void *data) } DEFINE_SHOW_ATTRIBUTE(clk_msr_summary); -static const struct regmap_config meson_clk_msr_regmap_config = { +static struct regmap_config meson_clk_msr_regmap_config = { .reg_bits = 32, .val_bits = 32, .reg_stride = 4, - .max_register = MSR_CLK_REG2, }; static int meson_msr_probe(struct platform_device *pdev) { - const struct meson_msr_id *match_data; + const struct meson_msr_data *match_data; struct meson_msr *priv; struct dentry *root, *clks; + struct resource *res; void __iomem *base; int i; @@ -621,60 +922,142 @@ static int meson_msr_probe(struct platform_device *pdev) return -ENODEV; } - memcpy(priv->msr_table, match_data, sizeof(priv->msr_table)); + priv->data.msr_table = devm_kcalloc(&pdev->dev, + match_data->msr_count, + sizeof(struct meson_msr_id), + GFP_KERNEL); + if (!priv->data.msr_table) + return -ENOMEM; - base = devm_platform_ioremap_resource(pdev, 0); + memcpy(priv->data.msr_table, match_data->msr_table, + match_data->msr_count * sizeof(struct meson_msr_id)); + priv->data.msr_count = match_data->msr_count; + + base = devm_platform_get_and_ioremap_resource(pdev, 0, &res); if (IS_ERR(base)) return PTR_ERR(base); + meson_clk_msr_regmap_config.max_register = resource_size(res) - 4; priv->regmap = devm_regmap_init_mmio(&pdev->dev, base, &meson_clk_msr_regmap_config); if (IS_ERR(priv->regmap)) return PTR_ERR(priv->regmap); + priv->data.reg = devm_kzalloc(&pdev->dev, sizeof(struct msr_reg_offset), + GFP_KERNEL); + if (!priv->data.reg) + return -ENOMEM; + + memcpy((void *)priv->data.reg, match_data->reg, + sizeof(struct msr_reg_offset)); + root = debugfs_create_dir("meson-clk-msr", NULL); clks = debugfs_create_dir("clks", root); debugfs_create_file("measure_summary", 0444, root, - priv->msr_table, &clk_msr_summary_fops); + 
priv->data.msr_table, &clk_msr_summary_fops); - for (i = 0 ; i < CLK_MSR_MAX ; ++i) { - if (!priv->msr_table[i].name) + for (i = 0 ; i < priv->data.msr_count ; ++i) { + if (!priv->data.msr_table[i].name) continue; - priv->msr_table[i].priv = priv; + priv->data.msr_table[i].priv = priv; - debugfs_create_file(priv->msr_table[i].name, 0444, clks, - &priv->msr_table[i], &clk_msr_fops); + debugfs_create_file(priv->data.msr_table[i].name, 0444, clks, + &priv->data.msr_table[i], &clk_msr_fops); } return 0; } +static const struct msr_reg_offset msr_reg_offset = { + .duty_val = 0x0, + .freq_ctrl = 0x4, + .duty_ctrl = 0x8, + .freq_val = 0xc, +}; + +static const struct meson_msr_data clk_msr_gx_data = { + .msr_table = (void *)clk_msr_gx, + .msr_count = ARRAY_SIZE(clk_msr_gx), + .reg = &msr_reg_offset, +}; + +static const struct meson_msr_data clk_msr_m8_data = { + .msr_table = (void *)clk_msr_m8, + .msr_count = ARRAY_SIZE(clk_msr_m8), + .reg = &msr_reg_offset, +}; + +static const struct meson_msr_data clk_msr_axg_data = { + .msr_table = (void *)clk_msr_axg, + .msr_count = ARRAY_SIZE(clk_msr_axg), + .reg = &msr_reg_offset, +}; + +static const struct meson_msr_data clk_msr_g12a_data = { + .msr_table = (void *)clk_msr_g12a, + .msr_count = ARRAY_SIZE(clk_msr_g12a), + .reg = &msr_reg_offset, +}; + +static const struct meson_msr_data clk_msr_sm1_data = { + .msr_table = (void *)clk_msr_sm1, + .msr_count = ARRAY_SIZE(clk_msr_sm1), + .reg = &msr_reg_offset, +}; + +static const struct msr_reg_offset msr_reg_offset_v2 = { + .freq_ctrl = 0x0, + .duty_ctrl = 0x4, + .freq_val = 0x8, + .duty_val = 0x18, +}; + +static const struct meson_msr_data clk_msr_c3_data = { + .msr_table = (void *)clk_msr_c3, + .msr_count = ARRAY_SIZE(clk_msr_c3), + .reg = &msr_reg_offset_v2, +}; + +static const struct meson_msr_data clk_msr_s4_data = { + .msr_table = (void *)clk_msr_s4, + .msr_count = ARRAY_SIZE(clk_msr_s4), + .reg = &msr_reg_offset_v2, +}; + static const struct of_device_id meson_msr_match_table[] = { { .compatible = "amlogic,meson-gx-clk-measure", - .data = (void *)clk_msr_gx, + .data = &clk_msr_gx_data, }, { .compatible = "amlogic,meson8-clk-measure", - .data = (void *)clk_msr_m8, + .data = &clk_msr_m8_data, }, { .compatible = "amlogic,meson8b-clk-measure", - .data = (void *)clk_msr_m8, + .data = &clk_msr_m8_data, }, { .compatible = "amlogic,meson-axg-clk-measure", - .data = (void *)clk_msr_axg, + .data = &clk_msr_axg_data, }, { .compatible = "amlogic,meson-g12a-clk-measure", - .data = (void *)clk_msr_g12a, + .data = &clk_msr_g12a_data, }, { .compatible = "amlogic,meson-sm1-clk-measure", - .data = (void *)clk_msr_sm1, + .data = &clk_msr_sm1_data, + }, + { + .compatible = "amlogic,c3-clk-measure", + .data = &clk_msr_c3_data, + }, + { + .compatible = "amlogic,s4-clk-measure", + .data = &clk_msr_s4_data, }, { /* sentinel */ } }; diff --git a/drivers/soc/aspeed/aspeed-lpc-snoop.c b/drivers/soc/aspeed/aspeed-lpc-snoop.c index 9ab5ba9cf1d6..ef8f355589a5 100644 --- a/drivers/soc/aspeed/aspeed-lpc-snoop.c +++ b/drivers/soc/aspeed/aspeed-lpc-snoop.c @@ -166,7 +166,7 @@ static int aspeed_lpc_snoop_config_irq(struct aspeed_lpc_snoop *lpc_snoop, int rc; lpc_snoop->irq = platform_get_irq(pdev, 0); - if (!lpc_snoop->irq) + if (lpc_snoop->irq < 0) return -ENODEV; rc = devm_request_irq(dev, lpc_snoop->irq, @@ -200,11 +200,15 @@ static int aspeed_lpc_enable_snoop(struct aspeed_lpc_snoop *lpc_snoop, lpc_snoop->chan[channel].miscdev.minor = MISC_DYNAMIC_MINOR; lpc_snoop->chan[channel].miscdev.name = devm_kasprintf(dev, GFP_KERNEL, "%s%d", 
DEVICE_NAME, channel); + if (!lpc_snoop->chan[channel].miscdev.name) { + rc = -ENOMEM; + goto err_free_fifo; + } lpc_snoop->chan[channel].miscdev.fops = &snoop_fops; lpc_snoop->chan[channel].miscdev.parent = dev; rc = misc_register(&lpc_snoop->chan[channel].miscdev); if (rc) - return rc; + goto err_free_fifo; /* Enable LPC snoop channel at requested port */ switch (channel) { @@ -221,7 +225,8 @@ static int aspeed_lpc_enable_snoop(struct aspeed_lpc_snoop *lpc_snoop, hicrb_en = HICRB_ENSNP1D; break; default: - return -EINVAL; + rc = -EINVAL; + goto err_misc_deregister; } regmap_update_bits(lpc_snoop->regmap, HICR5, hicr5_en, hicr5_en); @@ -231,6 +236,12 @@ static int aspeed_lpc_enable_snoop(struct aspeed_lpc_snoop *lpc_snoop, regmap_update_bits(lpc_snoop->regmap, HICRB, hicrb_en, hicrb_en); + return 0; + +err_misc_deregister: + misc_deregister(&lpc_snoop->chan[channel].miscdev); +err_free_fifo: + kfifo_free(&lpc_snoop->chan[channel].fifo); return rc; } diff --git a/drivers/soc/fsl/Kconfig b/drivers/soc/fsl/Kconfig index a1e0bc8c1757..47870e29c290 100644 --- a/drivers/soc/fsl/Kconfig +++ b/drivers/soc/fsl/Kconfig @@ -36,7 +36,7 @@ config FSL_MC_DPIO config DPAA2_CONSOLE tristate "QorIQ DPAA2 console driver" depends on OF && (ARCH_LAYERSCAPE || COMPILE_TEST) - default y + default ARCH_LAYERSCAPE help Console driver for DPAA2 platforms. Exports 2 char devices, /dev/dpaa2_mc_console and /dev/dpaa2_aiop_console, diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c index 4dc8aba33d9b..9be240999f87 100644 --- a/drivers/soc/fsl/qbman/qman.c +++ b/drivers/soc/fsl/qbman/qman.c @@ -1270,7 +1270,7 @@ static int qman_create_portal(struct qman_portal *portal, qm_dqrr_set_ithresh(p, QMAN_PIRQ_DQRR_ITHRESH); qm_mr_set_ithresh(p, QMAN_PIRQ_MR_ITHRESH); qm_out(p, QM_REG_ITPR, QMAN_PIRQ_IPERIOD); - portal->cgrs = kmalloc_array(2, sizeof(*cgrs), GFP_KERNEL); + portal->cgrs = kmalloc_array(2, sizeof(*portal->cgrs), GFP_KERNEL); if (!portal->cgrs) goto fail_cgrs; /* initial snapshot is no-depletion */ diff --git a/drivers/soc/fsl/qe/qe_ic.c b/drivers/soc/fsl/qe/qe_ic.c index e4b6ff2cc76b..4068b501a3a3 100644 --- a/drivers/soc/fsl/qe/qe_ic.c +++ b/drivers/soc/fsl/qe/qe_ic.c @@ -232,11 +232,6 @@ static inline void qe_ic_write(__be32 __iomem *base, unsigned int reg, iowrite32be(value, base + (reg >> 2)); } -static inline struct qe_ic *qe_ic_from_irq(unsigned int virq) -{ - return irq_get_chip_data(virq); -} - static inline struct qe_ic *qe_ic_from_irq_data(struct irq_data *d) { return irq_data_get_irq_chip_data(d); @@ -455,13 +450,11 @@ static int qe_ic_init(struct platform_device *pdev) qe_ic_write(qe_ic->regs, QEIC_CICR, 0); - irq_set_handler_data(qe_ic->virq_low, qe_ic); - irq_set_chained_handler(qe_ic->virq_low, low_handler); + irq_set_chained_handler_and_data(qe_ic->virq_low, low_handler, qe_ic); - if (high_handler) { - irq_set_handler_data(qe_ic->virq_high, qe_ic); - irq_set_chained_handler(qe_ic->virq_high, high_handler); - } + if (high_handler) + irq_set_chained_handler_and_data(qe_ic->virq_high, + high_handler, qe_ic); return 0; } static const struct of_device_id qe_ic_ids[] = { diff --git a/drivers/soc/hisilicon/kunpeng_hccs.c b/drivers/soc/hisilicon/kunpeng_hccs.c index 444a8f59b7da..7fc353732d55 100644 --- a/drivers/soc/hisilicon/kunpeng_hccs.c +++ b/drivers/soc/hisilicon/kunpeng_hccs.c @@ -167,10 +167,6 @@ static void hccs_pcc_rx_callback(struct mbox_client *cl, void *mssg) static void hccs_unregister_pcc_channel(struct hccs_dev *hdev) { - struct hccs_mbox_client_info *cl_info = 
&hdev->cl_info; - - if (cl_info->pcc_comm_addr) - iounmap(cl_info->pcc_comm_addr); pcc_mbox_free_channel(hdev->cl_info.pcc_chan); } @@ -179,6 +175,7 @@ static int hccs_register_pcc_channel(struct hccs_dev *hdev) struct hccs_mbox_client_info *cl_info = &hdev->cl_info; struct mbox_client *cl = &cl_info->client; struct pcc_mbox_chan *pcc_chan; + struct mbox_chan *mbox_chan; struct device *dev = hdev->dev; int rc; @@ -196,7 +193,7 @@ static int hccs_register_pcc_channel(struct hccs_dev *hdev) goto out; } cl_info->pcc_chan = pcc_chan; - cl_info->mbox_chan = pcc_chan->mchan; + mbox_chan = pcc_chan->mchan; /* * pcc_chan->latency is just a nominal value. In reality the remote @@ -206,34 +203,24 @@ static int hccs_register_pcc_channel(struct hccs_dev *hdev) cl_info->deadline_us = HCCS_PCC_CMD_WAIT_RETRIES_NUM * pcc_chan->latency; if (!hdev->verspec_data->has_txdone_irq && - cl_info->mbox_chan->mbox->txdone_irq) { + mbox_chan->mbox->txdone_irq) { dev_err(dev, "PCC IRQ in PCCT is enabled.\n"); rc = -EINVAL; goto err_mbx_channel_free; } else if (hdev->verspec_data->has_txdone_irq && - !cl_info->mbox_chan->mbox->txdone_irq) { + !mbox_chan->mbox->txdone_irq) { dev_err(dev, "PCC IRQ in PCCT isn't supported.\n"); rc = -EINVAL; goto err_mbx_channel_free; } - if (!pcc_chan->shmem_base_addr || - pcc_chan->shmem_size != HCCS_PCC_SHARE_MEM_BYTES) { - dev_err(dev, "The base address or size (%llu) of PCC communication region is invalid.\n", - pcc_chan->shmem_size); + if (pcc_chan->shmem_size != HCCS_PCC_SHARE_MEM_BYTES) { + dev_err(dev, "Base size (%llu) of PCC communication region must be %d bytes.\n", + pcc_chan->shmem_size, HCCS_PCC_SHARE_MEM_BYTES); rc = -EINVAL; goto err_mbx_channel_free; } - cl_info->pcc_comm_addr = ioremap(pcc_chan->shmem_base_addr, - pcc_chan->shmem_size); - if (!cl_info->pcc_comm_addr) { - dev_err(dev, "Failed to ioremap PCC communication region for channel-%u.\n", - hdev->chan_id); - rc = -ENOMEM; - goto err_mbx_channel_free; - } - return 0; err_mbx_channel_free: @@ -246,7 +233,7 @@ static int hccs_wait_cmd_complete_by_poll(struct hccs_dev *hdev) { struct hccs_mbox_client_info *cl_info = &hdev->cl_info; struct acpi_pcct_shared_memory __iomem *comm_base = - cl_info->pcc_comm_addr; + cl_info->pcc_chan->shmem; u16 status; int ret; @@ -289,7 +276,7 @@ static inline void hccs_fill_pcc_shared_mem_region(struct hccs_dev *hdev, .status = 0, }; - memcpy_toio(hdev->cl_info.pcc_comm_addr, (void *)&tmp, + memcpy_toio(hdev->cl_info.pcc_chan->shmem, (void *)&tmp, sizeof(struct acpi_pcct_shared_memory)); /* Copy the message to the PCC comm space */ @@ -309,7 +296,7 @@ static inline void hccs_fill_ext_pcc_shared_mem_region(struct hccs_dev *hdev, .command = cmd, }; - memcpy_toio(hdev->cl_info.pcc_comm_addr, (void *)&tmp, + memcpy_toio(hdev->cl_info.pcc_chan->shmem, (void *)&tmp, sizeof(struct acpi_pcct_ext_pcc_shared_memory)); /* Copy the message to the PCC comm space */ @@ -321,12 +308,13 @@ static int hccs_pcc_cmd_send(struct hccs_dev *hdev, u8 cmd, { const struct hccs_verspecific_data *verspec_data = hdev->verspec_data; struct hccs_mbox_client_info *cl_info = &hdev->cl_info; + struct mbox_chan *mbox_chan = cl_info->pcc_chan->mchan; struct hccs_fw_inner_head *fw_inner_head; void __iomem *comm_space; u16 space_size; int ret; - comm_space = cl_info->pcc_comm_addr + verspec_data->shared_mem_size; + comm_space = cl_info->pcc_chan->shmem + verspec_data->shared_mem_size; space_size = HCCS_PCC_SHARE_MEM_BYTES - verspec_data->shared_mem_size; verspec_data->fill_pcc_shared_mem(hdev, cmd, desc, comm_space, 
space_size); @@ -334,7 +322,7 @@ static int hccs_pcc_cmd_send(struct hccs_dev *hdev, u8 cmd, reinit_completion(&cl_info->done); /* Ring doorbell */ - ret = mbox_send_message(cl_info->mbox_chan, &cmd); + ret = mbox_send_message(mbox_chan, &cmd); if (ret < 0) { dev_err(hdev->dev, "Send PCC mbox message failed, ret = %d.\n", ret); @@ -356,9 +344,9 @@ static int hccs_pcc_cmd_send(struct hccs_dev *hdev, u8 cmd, end: if (verspec_data->has_txdone_irq) - mbox_chan_txdone(cl_info->mbox_chan, ret); + mbox_chan_txdone(mbox_chan, ret); else - mbox_client_txdone(cl_info->mbox_chan, ret); + mbox_client_txdone(mbox_chan, ret); return ret; } diff --git a/drivers/soc/hisilicon/kunpeng_hccs.h b/drivers/soc/hisilicon/kunpeng_hccs.h index dc267136919b..f0a9a5618d97 100644 --- a/drivers/soc/hisilicon/kunpeng_hccs.h +++ b/drivers/soc/hisilicon/kunpeng_hccs.h @@ -60,10 +60,8 @@ struct hccs_chip_info { struct hccs_mbox_client_info { struct mbox_client client; - struct mbox_chan *mbox_chan; struct pcc_mbox_chan *pcc_chan; u64 deadline_us; - void __iomem *pcc_comm_addr; struct completion done; }; diff --git a/drivers/soc/imx/soc-imx8m.c b/drivers/soc/imx/soc-imx8m.c index 3ed8161d7d28..04a1b60f2f2b 100644 --- a/drivers/soc/imx/soc-imx8m.c +++ b/drivers/soc/imx/soc-imx8m.c @@ -24,13 +24,21 @@ #define OCOTP_UID_HIGH 0x420 #define IMX8MP_OCOTP_UID_OFFSET 0x10 +#define IMX8MP_OCOTP_UID_HIGH 0xE00 /* Same as ANADIG_DIGPROG_IMX7D */ #define ANADIG_DIGPROG_IMX8MM 0x800 struct imx8_soc_data { char *name; - int (*soc_revision)(u32 *socrev, u64 *socuid); + const char *ocotp_compatible; + int (*soc_revision)(struct platform_device *pdev, u32 *socrev); + int (*soc_uid)(struct platform_device *pdev, u64 *socuid); +}; + +struct imx8_soc_drvdata { + void __iomem *ocotp_base; + struct clk *clk; }; #ifdef CONFIG_HAVE_ARM_SMCCC @@ -49,30 +57,24 @@ static u32 imx8mq_soc_revision_from_atf(void) static inline u32 imx8mq_soc_revision_from_atf(void) { return 0; }; #endif -static int imx8mq_soc_revision(u32 *socrev, u64 *socuid) +static int imx8m_soc_uid(struct platform_device *pdev, u64 *socuid) { - struct device_node *np __free(device_node) = - of_find_compatible_node(NULL, NULL, "fsl,imx8mq-ocotp"); - void __iomem *ocotp_base; - u32 magic; - u32 rev; - struct clk *clk; - int ret; + struct imx8_soc_drvdata *drvdata = platform_get_drvdata(pdev); + void __iomem *ocotp_base = drvdata->ocotp_base; - if (!np) - return -EINVAL; - - ocotp_base = of_iomap(np, 0); - if (!ocotp_base) - return -EINVAL; + *socuid = readl_relaxed(ocotp_base + OCOTP_UID_HIGH); + *socuid <<= 32; + *socuid |= readl_relaxed(ocotp_base + OCOTP_UID_LOW); - clk = of_clk_get_by_name(np, NULL); - if (IS_ERR(clk)) { - ret = PTR_ERR(clk); - goto err_clk; - } + return 0; +} - clk_prepare_enable(clk); +static int imx8mq_soc_revision(struct platform_device *pdev, u32 *socrev) +{ + struct imx8_soc_drvdata *drvdata = platform_get_drvdata(pdev); + void __iomem *ocotp_base = drvdata->ocotp_base; + u32 magic; + u32 rev; /* * SOC revision on older imx8mq is not available in fuses so query @@ -85,98 +87,109 @@ static int imx8mq_soc_revision(u32 *socrev, u64 *socuid) rev = REV_B1; } - *socuid = readl_relaxed(ocotp_base + OCOTP_UID_HIGH); - *socuid <<= 32; - *socuid |= readl_relaxed(ocotp_base + OCOTP_UID_LOW); - *socrev = rev; - clk_disable_unprepare(clk); - clk_put(clk); - iounmap(ocotp_base); - return 0; +} -err_clk: - iounmap(ocotp_base); - return ret; +static int imx8mp_soc_uid(struct platform_device *pdev, u64 *socuid) +{ + struct imx8_soc_drvdata *drvdata = 
platform_get_drvdata(pdev); + void __iomem *ocotp_base = drvdata->ocotp_base; + + socuid[0] = readl_relaxed(ocotp_base + OCOTP_UID_HIGH + IMX8MP_OCOTP_UID_OFFSET); + socuid[0] <<= 32; + socuid[0] |= readl_relaxed(ocotp_base + OCOTP_UID_LOW + IMX8MP_OCOTP_UID_OFFSET); + + socuid[1] = readl_relaxed(ocotp_base + IMX8MP_OCOTP_UID_HIGH + 0x10); + socuid[1] <<= 32; + socuid[1] |= readl_relaxed(ocotp_base + IMX8MP_OCOTP_UID_HIGH); + + return 0; } -static int imx8mm_soc_uid(u64 *socuid) +static int imx8mm_soc_revision(struct platform_device *pdev, u32 *socrev) { struct device_node *np __free(device_node) = - of_find_compatible_node(NULL, NULL, "fsl,imx8mm-ocotp"); - void __iomem *ocotp_base; - struct clk *clk; - int ret = 0; - u32 offset = of_machine_is_compatible("fsl,imx8mp") ? - IMX8MP_OCOTP_UID_OFFSET : 0; + of_find_compatible_node(NULL, NULL, "fsl,imx8mm-anatop"); + void __iomem *anatop_base; if (!np) return -EINVAL; - ocotp_base = of_iomap(np, 0); - if (!ocotp_base) + anatop_base = of_iomap(np, 0); + if (!anatop_base) return -EINVAL; - clk = of_clk_get_by_name(np, NULL); - if (IS_ERR(clk)) { - ret = PTR_ERR(clk); - goto err_clk; - } - - clk_prepare_enable(clk); - - *socuid = readl_relaxed(ocotp_base + OCOTP_UID_HIGH + offset); - *socuid <<= 32; - *socuid |= readl_relaxed(ocotp_base + OCOTP_UID_LOW + offset); + *socrev = readl_relaxed(anatop_base + ANADIG_DIGPROG_IMX8MM); - clk_disable_unprepare(clk); - clk_put(clk); + iounmap(anatop_base); -err_clk: - iounmap(ocotp_base); - return ret; + return 0; } -static int imx8mm_soc_revision(u32 *socrev, u64 *socuid) +static int imx8m_soc_prepare(struct platform_device *pdev, const char *ocotp_compatible) { struct device_node *np __free(device_node) = - of_find_compatible_node(NULL, NULL, "fsl,imx8mm-anatop"); - void __iomem *anatop_base; + of_find_compatible_node(NULL, NULL, ocotp_compatible); + struct imx8_soc_drvdata *drvdata = platform_get_drvdata(pdev); + int ret = 0; if (!np) return -EINVAL; - anatop_base = of_iomap(np, 0); - if (!anatop_base) + drvdata->ocotp_base = of_iomap(np, 0); + if (!drvdata->ocotp_base) return -EINVAL; - *socrev = readl_relaxed(anatop_base + ANADIG_DIGPROG_IMX8MM); + drvdata->clk = of_clk_get_by_name(np, NULL); + if (IS_ERR(drvdata->clk)) { + ret = PTR_ERR(drvdata->clk); + goto err_clk; + } - iounmap(anatop_base); + return clk_prepare_enable(drvdata->clk); + +err_clk: + iounmap(drvdata->ocotp_base); + return ret; +} + +static void imx8m_soc_unprepare(struct platform_device *pdev) +{ + struct imx8_soc_drvdata *drvdata = platform_get_drvdata(pdev); - return imx8mm_soc_uid(socuid); + clk_disable_unprepare(drvdata->clk); + clk_put(drvdata->clk); + iounmap(drvdata->ocotp_base); } static const struct imx8_soc_data imx8mq_soc_data = { .name = "i.MX8MQ", + .ocotp_compatible = "fsl,imx8mq-ocotp", .soc_revision = imx8mq_soc_revision, + .soc_uid = imx8m_soc_uid, }; static const struct imx8_soc_data imx8mm_soc_data = { .name = "i.MX8MM", + .ocotp_compatible = "fsl,imx8mm-ocotp", .soc_revision = imx8mm_soc_revision, + .soc_uid = imx8m_soc_uid, }; static const struct imx8_soc_data imx8mn_soc_data = { .name = "i.MX8MN", + .ocotp_compatible = "fsl,imx8mm-ocotp", .soc_revision = imx8mm_soc_revision, + .soc_uid = imx8m_soc_uid, }; static const struct imx8_soc_data imx8mp_soc_data = { .name = "i.MX8MP", + .ocotp_compatible = "fsl,imx8mm-ocotp", .soc_revision = imx8mm_soc_revision, + .soc_uid = imx8mp_soc_uid, }; static __maybe_unused const struct of_device_id imx8_soc_match[] = { @@ -207,17 +220,24 @@ static int imx8m_soc_probe(struct 
platform_device *pdev) struct soc_device_attribute *soc_dev_attr; struct platform_device *cpufreq_dev; const struct imx8_soc_data *data; + struct imx8_soc_drvdata *drvdata; struct device *dev = &pdev->dev; const struct of_device_id *id; struct soc_device *soc_dev; u32 soc_rev = 0; - u64 soc_uid = 0; + u64 soc_uid[2] = {0, 0}; int ret; soc_dev_attr = devm_kzalloc(dev, sizeof(*soc_dev_attr), GFP_KERNEL); if (!soc_dev_attr) return -ENOMEM; + drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL); + if (!drvdata) + return -ENOMEM; + + platform_set_drvdata(pdev, drvdata); + soc_dev_attr->family = "Freescale i.MX"; ret = of_property_read_string(of_root, "model", &soc_dev_attr->machine); @@ -231,18 +251,37 @@ static int imx8m_soc_probe(struct platform_device *pdev) data = id->data; if (data) { soc_dev_attr->soc_id = data->name; + ret = imx8m_soc_prepare(pdev, data->ocotp_compatible); + if (ret) + return ret; + if (data->soc_revision) { - ret = data->soc_revision(&soc_rev, &soc_uid); - if (ret) + ret = data->soc_revision(pdev, &soc_rev); + if (ret) { + imx8m_soc_unprepare(pdev); + return ret; + } + } + if (data->soc_uid) { + ret = data->soc_uid(pdev, soc_uid); + if (ret) { + imx8m_soc_unprepare(pdev); return ret; + } } + imx8m_soc_unprepare(pdev); } soc_dev_attr->revision = imx8_revision(dev, soc_rev); if (!soc_dev_attr->revision) return -ENOMEM; - soc_dev_attr->serial_number = devm_kasprintf(dev, GFP_KERNEL, "%016llX", soc_uid); + if (soc_uid[1]) + soc_dev_attr->serial_number = devm_kasprintf(dev, GFP_KERNEL, "%016llX%016llX", + soc_uid[1], soc_uid[0]); + else + soc_dev_attr->serial_number = devm_kasprintf(dev, GFP_KERNEL, "%016llX", + soc_uid[0]); if (!soc_dev_attr->serial_number) return -ENOMEM; diff --git a/drivers/soc/mediatek/mtk-dvfsrc.c b/drivers/soc/mediatek/mtk-dvfsrc.c index 83bf46fdcf2d..41add5636b03 100644 --- a/drivers/soc/mediatek/mtk-dvfsrc.c +++ b/drivers/soc/mediatek/mtk-dvfsrc.c @@ -446,6 +446,46 @@ static int mtk_dvfsrc_probe(struct platform_device *pdev) return 0; } +static const struct dvfsrc_bw_constraints dvfsrc_bw_constr_v1 = { 0, 0, 0 }; +static const struct dvfsrc_bw_constraints dvfsrc_bw_constr_v2 = { + .max_dram_nom_bw = 255, + .max_dram_peak_bw = 255, + .max_dram_hrt_bw = 1023, +}; + +static const struct dvfsrc_opp dvfsrc_opp_mt6893_lp4[] = { + { 0, 0 }, { 1, 0 }, { 2, 0 }, { 3, 0 }, + { 0, 1 }, { 1, 1 }, { 2, 1 }, { 3, 1 }, + { 0, 2 }, { 1, 2 }, { 2, 2 }, { 3, 2 }, + { 0, 3 }, { 1, 3 }, { 2, 3 }, { 3, 3 }, + { 1, 4 }, { 2, 4 }, { 3, 4 }, { 2, 5 }, + { 3, 5 }, { 3, 6 }, { 4, 6 }, { 4, 7 }, +}; + +static const struct dvfsrc_opp_desc dvfsrc_opp_mt6893_desc[] = { + [0] = { + .opps = dvfsrc_opp_mt6893_lp4, + .num_opp = ARRAY_SIZE(dvfsrc_opp_mt6893_lp4), + } +}; + +static const struct dvfsrc_soc_data mt6893_data = { + .opps_desc = dvfsrc_opp_mt6893_desc, + .regs = dvfsrc_mt8195_regs, + .get_target_level = dvfsrc_get_target_level_v2, + .get_current_level = dvfsrc_get_current_level_v2, + .get_vcore_level = dvfsrc_get_vcore_level_v2, + .get_vscp_level = dvfsrc_get_vscp_level_v2, + .set_dram_bw = dvfsrc_set_dram_bw_v1, + .set_dram_peak_bw = dvfsrc_set_dram_peak_bw_v1, + .set_dram_hrt_bw = dvfsrc_set_dram_hrt_bw_v1, + .set_vcore_level = dvfsrc_set_vcore_level_v2, + .set_vscp_level = dvfsrc_set_vscp_level_v2, + .wait_for_opp_level = dvfsrc_wait_for_opp_level_v2, + .wait_for_vcore_level = dvfsrc_wait_for_vcore_level_v1, + .bw_constraints = &dvfsrc_bw_constr_v2, +}; + static const struct dvfsrc_opp dvfsrc_opp_mt8183_lp4[] = { { 0, 0 }, { 0, 1 }, { 0, 2 }, { 1, 2 }, }; @@ -469,8 
+509,6 @@ static const struct dvfsrc_opp_desc dvfsrc_opp_mt8183_desc[] = { } }; -static const struct dvfsrc_bw_constraints dvfsrc_bw_constr_mt8183 = { 0, 0, 0 }; - static const struct dvfsrc_soc_data mt8183_data = { .opps_desc = dvfsrc_opp_mt8183_desc, .regs = dvfsrc_mt8183_regs, @@ -482,7 +520,7 @@ static const struct dvfsrc_soc_data mt8183_data = { .set_vcore_level = dvfsrc_set_vcore_level_v1, .wait_for_opp_level = dvfsrc_wait_for_opp_level_v1, .wait_for_vcore_level = dvfsrc_wait_for_vcore_level_v1, - .bw_constraints = &dvfsrc_bw_constr_mt8183, + .bw_constraints = &dvfsrc_bw_constr_v1, }; static const struct dvfsrc_opp dvfsrc_opp_mt8195_lp4[] = { @@ -501,12 +539,6 @@ static const struct dvfsrc_opp_desc dvfsrc_opp_mt8195_desc[] = { } }; -static const struct dvfsrc_bw_constraints dvfsrc_bw_constr_mt8195 = { - .max_dram_nom_bw = 255, - .max_dram_peak_bw = 255, - .max_dram_hrt_bw = 1023, -}; - static const struct dvfsrc_soc_data mt8195_data = { .opps_desc = dvfsrc_opp_mt8195_desc, .regs = dvfsrc_mt8195_regs, @@ -521,10 +553,11 @@ static const struct dvfsrc_soc_data mt8195_data = { .set_vscp_level = dvfsrc_set_vscp_level_v2, .wait_for_opp_level = dvfsrc_wait_for_opp_level_v2, .wait_for_vcore_level = dvfsrc_wait_for_vcore_level_v1, - .bw_constraints = &dvfsrc_bw_constr_mt8195, + .bw_constraints = &dvfsrc_bw_constr_v2, }; static const struct of_device_id mtk_dvfsrc_of_match[] = { + { .compatible = "mediatek,mt6893-dvfsrc", .data = &mt6893_data }, { .compatible = "mediatek,mt8183-dvfsrc", .data = &mt8183_data }, { .compatible = "mediatek,mt8195-dvfsrc", .data = &mt8195_data }, { /* sentinel */ } diff --git a/drivers/soc/qcom/ice.c b/drivers/soc/qcom/ice.c index 2310afa77b76..c467b55b4174 100644 --- a/drivers/soc/qcom/ice.c +++ b/drivers/soc/qcom/ice.c @@ -21,20 +21,63 @@ #include <soc/qcom/ice.h> -#define AES_256_XTS_KEY_SIZE 64 +#define AES_256_XTS_KEY_SIZE 64 /* for raw keys only */ +#define QCOM_ICE_HWKM_WRAPPED_KEY_SIZE 100 /* assuming HWKM v2 */ /* QCOM ICE registers */ + +#define QCOM_ICE_REG_CONTROL 0x0000 +#define QCOM_ICE_LEGACY_MODE_ENABLED BIT(0) + #define QCOM_ICE_REG_VERSION 0x0008 + #define QCOM_ICE_REG_FUSE_SETTING 0x0010 +#define QCOM_ICE_FUSE_SETTING_MASK BIT(0) +#define QCOM_ICE_FORCE_HW_KEY0_SETTING_MASK BIT(1) +#define QCOM_ICE_FORCE_HW_KEY1_SETTING_MASK BIT(2) + #define QCOM_ICE_REG_BIST_STATUS 0x0070 +#define QCOM_ICE_BIST_STATUS_MASK GENMASK(31, 28) + #define QCOM_ICE_REG_ADVANCED_CONTROL 0x1000 -/* BIST ("built-in self-test") status flags */ -#define QCOM_ICE_BIST_STATUS_MASK GENMASK(31, 28) +#define QCOM_ICE_REG_CRYPTOCFG_BASE 0x4040 +#define QCOM_ICE_REG_CRYPTOCFG_SIZE 0x80 +#define QCOM_ICE_REG_CRYPTOCFG(slot) (QCOM_ICE_REG_CRYPTOCFG_BASE + \ + QCOM_ICE_REG_CRYPTOCFG_SIZE * (slot)) +union crypto_cfg { + __le32 regval; + struct { + u8 dusize; + u8 capidx; + u8 reserved; +#define QCOM_ICE_HWKM_CFG_ENABLE_VAL BIT(7) + u8 cfge; + }; +}; -#define QCOM_ICE_FUSE_SETTING_MASK 0x1 -#define QCOM_ICE_FORCE_HW_KEY0_SETTING_MASK 0x2 -#define QCOM_ICE_FORCE_HW_KEY1_SETTING_MASK 0x4 +/* QCOM ICE HWKM (Hardware Key Manager) registers */ + +#define HWKM_OFFSET 0x8000 + +#define QCOM_ICE_REG_HWKM_TZ_KM_CTL (HWKM_OFFSET + 0x1000) +#define QCOM_ICE_HWKM_DISABLE_CRC_CHECKS_VAL (BIT(1) | BIT(2)) + +#define QCOM_ICE_REG_HWKM_TZ_KM_STATUS (HWKM_OFFSET + 0x1004) +#define QCOM_ICE_HWKM_KT_CLEAR_DONE BIT(0) +#define QCOM_ICE_HWKM_BOOT_CMD_LIST0_DONE BIT(1) +#define QCOM_ICE_HWKM_BOOT_CMD_LIST1_DONE BIT(2) +#define QCOM_ICE_HWKM_CRYPTO_BIST_DONE_V2 BIT(7) +#define QCOM_ICE_HWKM_BIST_DONE_V2 
BIT(9) + +#define QCOM_ICE_REG_HWKM_BANK0_BANKN_IRQ_STATUS (HWKM_OFFSET + 0x2008) +#define QCOM_ICE_HWKM_RSP_FIFO_CLEAR_VAL BIT(3) + +#define QCOM_ICE_REG_HWKM_BANK0_BBAC_0 (HWKM_OFFSET + 0x5000) +#define QCOM_ICE_REG_HWKM_BANK0_BBAC_1 (HWKM_OFFSET + 0x5004) +#define QCOM_ICE_REG_HWKM_BANK0_BBAC_2 (HWKM_OFFSET + 0x5008) +#define QCOM_ICE_REG_HWKM_BANK0_BBAC_3 (HWKM_OFFSET + 0x500C) +#define QCOM_ICE_REG_HWKM_BANK0_BBAC_4 (HWKM_OFFSET + 0x5010) #define qcom_ice_writel(engine, val, reg) \ writel((val), (engine)->base + (reg)) @@ -42,11 +85,18 @@ #define qcom_ice_readl(engine, reg) \ readl((engine)->base + (reg)) +static bool qcom_ice_use_wrapped_keys; +module_param_named(use_wrapped_keys, qcom_ice_use_wrapped_keys, bool, 0660); +MODULE_PARM_DESC(use_wrapped_keys, + "Support wrapped keys instead of raw keys, if available on the platform"); + struct qcom_ice { struct device *dev; void __iomem *base; struct clk *core_clk; + bool use_hwkm; + bool hwkm_init_complete; }; static bool qcom_ice_check_supported(struct qcom_ice *ice) @@ -76,6 +126,35 @@ static bool qcom_ice_check_supported(struct qcom_ice *ice) return false; } + /* + * Check for HWKM support and decide whether to use it or not. ICE + * v3.2.1 and later have HWKM v2. ICE v3.2.0 has HWKM v1. Earlier ICE + * versions don't have HWKM at all. However, for HWKM to be fully + * usable by Linux, the TrustZone software also needs to support certain + * SCM calls including the ones to generate and prepare keys. That + * effectively makes the earliest supported SoC be SM8650, which has + * HWKM v2. Therefore, this driver doesn't include support for HWKM v1, + * and it checks for the SCM call support before it decides to use HWKM. + * + * Also, since HWKM and legacy mode are mutually exclusive, and + * ICE-capable storage driver(s) need to know early on whether to + * advertise support for raw keys or wrapped keys, HWKM cannot be used + * unconditionally. A module parameter is used to opt into using it. + */ + if ((major >= 4 || + (major == 3 && (minor >= 3 || (minor == 2 && step >= 1)))) && + qcom_scm_has_wrapped_key_support()) { + if (qcom_ice_use_wrapped_keys) { + dev_info(dev, "Using HWKM. Supporting wrapped keys only.\n"); + ice->use_hwkm = true; + } else { + dev_info(dev, "Not using HWKM. Supporting raw keys only.\n"); + } + } else if (qcom_ice_use_wrapped_keys) { + dev_warn(dev, "A supported HWKM is not present. Ignoring qcom_ice.use_wrapped_keys=1.\n"); + } else { + dev_info(dev, "A supported HWKM is not present. Supporting raw keys only.\n"); + } return true; } @@ -123,17 +202,71 @@ static int qcom_ice_wait_bist_status(struct qcom_ice *ice) err = readl_poll_timeout(ice->base + QCOM_ICE_REG_BIST_STATUS, regval, !(regval & QCOM_ICE_BIST_STATUS_MASK), 50, 5000); - if (err) + if (err) { dev_err(ice->dev, "Timed out waiting for ICE self-test to complete\n"); + return err; + } - return err; + if (ice->use_hwkm && + qcom_ice_readl(ice, QCOM_ICE_REG_HWKM_TZ_KM_STATUS) != + (QCOM_ICE_HWKM_KT_CLEAR_DONE | + QCOM_ICE_HWKM_BOOT_CMD_LIST0_DONE | + QCOM_ICE_HWKM_BOOT_CMD_LIST1_DONE | + QCOM_ICE_HWKM_CRYPTO_BIST_DONE_V2 | + QCOM_ICE_HWKM_BIST_DONE_V2)) { + dev_err(ice->dev, "HWKM self-test error!\n"); + /* + * Too late to revoke use_hwkm here, as it was already + * propagated up the stack into the crypto capabilities. 
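For reference, the HWKM gating condition in qcom_ice_check_supported() above reduces to a small version predicate: HWKM is only considered on ICE v3.2.1 and later (HWKM v2), with the SCM wrapped-key probe and the module parameter handled separately. The snippet below is a standalone restatement of that predicate (not taken from the patch) with the boundary cases checked explicitly.

#include <assert.h>
#include <stdbool.h>

/* Restates the version check in qcom_ice_check_supported(); the SCM
 * wrapped-key support test and qcom_ice.use_wrapped_keys are separate. */
static bool ice_has_hwkm_v2(unsigned int major, unsigned int minor, unsigned int step)
{
        return major >= 4 ||
               (major == 3 && (minor >= 3 || (minor == 2 && step >= 1)));
}

int main(void)
{
        assert(!ice_has_hwkm_v2(3, 2, 0));      /* ICE v3.2.0: HWKM v1, not used by this driver */
        assert(ice_has_hwkm_v2(3, 2, 1));       /* earliest version with HWKM v2 */
        assert(ice_has_hwkm_v2(4, 0, 0));
        return 0;
}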
+ */ + } + return 0; +} + +static void qcom_ice_hwkm_init(struct qcom_ice *ice) +{ + u32 regval; + + if (!ice->use_hwkm) + return; + + BUILD_BUG_ON(QCOM_ICE_HWKM_WRAPPED_KEY_SIZE > + BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE); + /* + * When ICE is in HWKM mode, it only supports wrapped keys. + * When ICE is in legacy mode, it only supports raw keys. + * + * Put ICE in HWKM mode. ICE defaults to legacy mode. + */ + regval = qcom_ice_readl(ice, QCOM_ICE_REG_CONTROL); + regval &= ~QCOM_ICE_LEGACY_MODE_ENABLED; + qcom_ice_writel(ice, regval, QCOM_ICE_REG_CONTROL); + + /* Disable CRC checks. This HWKM feature is not used. */ + qcom_ice_writel(ice, QCOM_ICE_HWKM_DISABLE_CRC_CHECKS_VAL, + QCOM_ICE_REG_HWKM_TZ_KM_CTL); + + /* + * Allow the HWKM slave to read and write the keyslots in the ICE HWKM + * slave. Without this, TrustZone cannot program keys into ICE. + */ + qcom_ice_writel(ice, GENMASK(31, 0), QCOM_ICE_REG_HWKM_BANK0_BBAC_0); + qcom_ice_writel(ice, GENMASK(31, 0), QCOM_ICE_REG_HWKM_BANK0_BBAC_1); + qcom_ice_writel(ice, GENMASK(31, 0), QCOM_ICE_REG_HWKM_BANK0_BBAC_2); + qcom_ice_writel(ice, GENMASK(31, 0), QCOM_ICE_REG_HWKM_BANK0_BBAC_3); + qcom_ice_writel(ice, GENMASK(31, 0), QCOM_ICE_REG_HWKM_BANK0_BBAC_4); + + /* Clear the HWKM response FIFO. */ + qcom_ice_writel(ice, QCOM_ICE_HWKM_RSP_FIFO_CLEAR_VAL, + QCOM_ICE_REG_HWKM_BANK0_BANKN_IRQ_STATUS); + ice->hwkm_init_complete = true; } int qcom_ice_enable(struct qcom_ice *ice) { qcom_ice_low_power_mode_enable(ice); qcom_ice_optimization_enable(ice); - + qcom_ice_hwkm_init(ice); return qcom_ice_wait_bist_status(ice); } EXPORT_SYMBOL_GPL(qcom_ice_enable); @@ -149,7 +282,7 @@ int qcom_ice_resume(struct qcom_ice *ice) err); return err; } - + qcom_ice_hwkm_init(ice); return qcom_ice_wait_bist_status(ice); } EXPORT_SYMBOL_GPL(qcom_ice_resume); @@ -157,15 +290,58 @@ EXPORT_SYMBOL_GPL(qcom_ice_resume); int qcom_ice_suspend(struct qcom_ice *ice) { clk_disable_unprepare(ice->core_clk); + ice->hwkm_init_complete = false; return 0; } EXPORT_SYMBOL_GPL(qcom_ice_suspend); -int qcom_ice_program_key(struct qcom_ice *ice, - u8 algorithm_id, u8 key_size, - const u8 crypto_key[], u8 data_unit_size, - int slot) +static unsigned int translate_hwkm_slot(struct qcom_ice *ice, unsigned int slot) +{ + return slot * 2; +} + +static int qcom_ice_program_wrapped_key(struct qcom_ice *ice, unsigned int slot, + const struct blk_crypto_key *bkey) +{ + struct device *dev = ice->dev; + union crypto_cfg cfg = { + .dusize = bkey->crypto_cfg.data_unit_size / 512, + .capidx = QCOM_SCM_ICE_CIPHER_AES_256_XTS, + .cfge = QCOM_ICE_HWKM_CFG_ENABLE_VAL, + }; + int err; + + if (!ice->use_hwkm) { + dev_err_ratelimited(dev, "Got wrapped key when not using HWKM\n"); + return -EINVAL; + } + if (!ice->hwkm_init_complete) { + dev_err_ratelimited(dev, "HWKM not yet initialized\n"); + return -EINVAL; + } + + /* Clear CFGE before programming the key. */ + qcom_ice_writel(ice, 0x0, QCOM_ICE_REG_CRYPTOCFG(slot)); + + /* Call into TrustZone to program the wrapped key using HWKM. */ + err = qcom_scm_ice_set_key(translate_hwkm_slot(ice, slot), bkey->bytes, + bkey->size, cfg.capidx, cfg.dusize); + if (err) { + dev_err_ratelimited(dev, + "qcom_scm_ice_set_key failed; err=%d, slot=%u\n", + err, slot); + return err; + } + + /* Set CFGE after programming the key. 
*/ + qcom_ice_writel(ice, le32_to_cpu(cfg.regval), + QCOM_ICE_REG_CRYPTOCFG(slot)); + return 0; +} + +int qcom_ice_program_key(struct qcom_ice *ice, unsigned int slot, + const struct blk_crypto_key *blk_key) { struct device *dev = ice->dev; union { @@ -176,15 +352,26 @@ int qcom_ice_program_key(struct qcom_ice *ice, int err; /* Only AES-256-XTS has been tested so far. */ - if (algorithm_id != QCOM_ICE_CRYPTO_ALG_AES_XTS || - key_size != QCOM_ICE_CRYPTO_KEY_SIZE_256) { - dev_err_ratelimited(dev, - "Unhandled crypto capability; algorithm_id=%d, key_size=%d\n", - algorithm_id, key_size); + if (blk_key->crypto_cfg.crypto_mode != + BLK_ENCRYPTION_MODE_AES_256_XTS) { + dev_err_ratelimited(dev, "Unsupported crypto mode: %d\n", + blk_key->crypto_cfg.crypto_mode); + return -EINVAL; + } + + if (blk_key->crypto_cfg.key_type == BLK_CRYPTO_KEY_TYPE_HW_WRAPPED) + return qcom_ice_program_wrapped_key(ice, slot, blk_key); + + if (ice->use_hwkm) { + dev_err_ratelimited(dev, "Got raw key when using HWKM\n"); return -EINVAL; } - memcpy(key.bytes, crypto_key, AES_256_XTS_KEY_SIZE); + if (blk_key->size != AES_256_XTS_KEY_SIZE) { + dev_err_ratelimited(dev, "Incorrect key size\n"); + return -EINVAL; + } + memcpy(key.bytes, blk_key->bytes, AES_256_XTS_KEY_SIZE); /* The SCM call requires that the key words are encoded in big endian */ for (i = 0; i < ARRAY_SIZE(key.words); i++) @@ -192,7 +379,7 @@ int qcom_ice_program_key(struct qcom_ice *ice, err = qcom_scm_ice_set_key(slot, key.bytes, AES_256_XTS_KEY_SIZE, QCOM_SCM_ICE_CIPHER_AES_256_XTS, - data_unit_size); + blk_key->crypto_cfg.data_unit_size / 512); memzero_explicit(&key, sizeof(key)); @@ -202,10 +389,131 @@ EXPORT_SYMBOL_GPL(qcom_ice_program_key); int qcom_ice_evict_key(struct qcom_ice *ice, int slot) { + if (ice->hwkm_init_complete) + slot = translate_hwkm_slot(ice, slot); return qcom_scm_ice_invalidate_key(slot); } EXPORT_SYMBOL_GPL(qcom_ice_evict_key); +/** + * qcom_ice_get_supported_key_type() - Get the supported key type + * @ice: ICE driver data + * + * Return: the blk-crypto key type that the ICE driver is configured to use. + * This is the key type that ICE-capable storage drivers should advertise as + * supported in the crypto capabilities of any disks they register. + */ +enum blk_crypto_key_type qcom_ice_get_supported_key_type(struct qcom_ice *ice) +{ + if (ice->use_hwkm) + return BLK_CRYPTO_KEY_TYPE_HW_WRAPPED; + return BLK_CRYPTO_KEY_TYPE_RAW; +} +EXPORT_SYMBOL_GPL(qcom_ice_get_supported_key_type); + +/** + * qcom_ice_derive_sw_secret() - Derive software secret from wrapped key + * @ice: ICE driver data + * @eph_key: an ephemerally-wrapped key + * @eph_key_size: size of @eph_key in bytes + * @sw_secret: output buffer for the software secret + * + * Use HWKM to derive the "software secret" from a hardware-wrapped key that is + * given in ephemerally-wrapped form. + * + * Return: 0 on success; -EBADMSG if the given ephemerally-wrapped key is + * invalid; or another -errno value. 
+ */ +int qcom_ice_derive_sw_secret(struct qcom_ice *ice, + const u8 *eph_key, size_t eph_key_size, + u8 sw_secret[BLK_CRYPTO_SW_SECRET_SIZE]) +{ + int err = qcom_scm_derive_sw_secret(eph_key, eph_key_size, + sw_secret, + BLK_CRYPTO_SW_SECRET_SIZE); + if (err == -EIO || err == -EINVAL) + err = -EBADMSG; /* probably invalid key */ + return err; +} +EXPORT_SYMBOL_GPL(qcom_ice_derive_sw_secret); + +/** + * qcom_ice_generate_key() - Generate a wrapped key for inline encryption + * @ice: ICE driver data + * @lt_key: output buffer for the long-term wrapped key + * + * Use HWKM to generate a new key and return it as a long-term wrapped key. + * + * Return: the size of the resulting wrapped key on success; -errno on failure. + */ +int qcom_ice_generate_key(struct qcom_ice *ice, + u8 lt_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE]) +{ + int err; + + err = qcom_scm_generate_ice_key(lt_key, QCOM_ICE_HWKM_WRAPPED_KEY_SIZE); + if (err) + return err; + + return QCOM_ICE_HWKM_WRAPPED_KEY_SIZE; +} +EXPORT_SYMBOL_GPL(qcom_ice_generate_key); + +/** + * qcom_ice_prepare_key() - Prepare a wrapped key for inline encryption + * @ice: ICE driver data + * @lt_key: a long-term wrapped key + * @lt_key_size: size of @lt_key in bytes + * @eph_key: output buffer for the ephemerally-wrapped key + * + * Use HWKM to re-wrap a long-term wrapped key with the per-boot ephemeral key. + * + * Return: the size of the resulting wrapped key on success; -EBADMSG if the + * given long-term wrapped key is invalid; or another -errno value. + */ +int qcom_ice_prepare_key(struct qcom_ice *ice, + const u8 *lt_key, size_t lt_key_size, + u8 eph_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE]) +{ + int err; + + err = qcom_scm_prepare_ice_key(lt_key, lt_key_size, + eph_key, QCOM_ICE_HWKM_WRAPPED_KEY_SIZE); + if (err == -EIO || err == -EINVAL) + err = -EBADMSG; /* probably invalid key */ + if (err) + return err; + + return QCOM_ICE_HWKM_WRAPPED_KEY_SIZE; +} +EXPORT_SYMBOL_GPL(qcom_ice_prepare_key); + +/** + * qcom_ice_import_key() - Import a raw key for inline encryption + * @ice: ICE driver data + * @raw_key: the raw key to import + * @raw_key_size: size of @raw_key in bytes + * @lt_key: output buffer for the long-term wrapped key + * + * Use HWKM to import a raw key and return it as a long-term wrapped key. + * + * Return: the size of the resulting wrapped key on success; -errno on failure. 
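Taken together, these helpers (plus qcom_ice_import_key(), whose body follows) define the wrapped-key lifecycle an ICE-capable storage driver is expected to drive: generate or import a long-term key once, re-wrap it with the per-boot ephemeral key, then program the result into a keyslot. The sketch below only illustrates that ordering using the functions added by this patch; it assumes the prototypes are exported through <soc/qcom/ice.h>, and the blk-crypto plumbing, error handling and the origin of ice, slot and bkey are left out. It is not how any particular storage driver wires this up.

#include <linux/blk-crypto.h>
#include <linux/errno.h>
#include <soc/qcom/ice.h>

/*
 * Illustrative only (not part of the patch): one possible ordering of the
 * wrapped-key helpers. In practice blk-crypto drives these steps through the
 * storage controller's crypto profile rather than from a single function.
 */
static int example_wrapped_key_flow(struct qcom_ice *ice, unsigned int slot,
                                    const struct blk_crypto_key *bkey)
{
        u8 lt_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE];
        u8 eph_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE];
        int lt_size, eph_size;

        if (qcom_ice_get_supported_key_type(ice) != BLK_CRYPTO_KEY_TYPE_HW_WRAPPED)
                return -EOPNOTSUPP;             /* raw-key (legacy) mode */

        /* Long-term key, normally generated once and stored by upper layers. */
        lt_size = qcom_ice_generate_key(ice, lt_key);
        if (lt_size < 0)
                return lt_size;

        /* Re-wrap with the per-boot ephemeral key before use. */
        eph_size = qcom_ice_prepare_key(ice, lt_key, lt_size, eph_key);
        if (eph_size < 0)
                return eph_size;

        /* blk-crypto would build @bkey from eph_key; assumed already done. */
        return qcom_ice_program_key(ice, slot, bkey);
}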
+ */ +int qcom_ice_import_key(struct qcom_ice *ice, + const u8 *raw_key, size_t raw_key_size, + u8 lt_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE]) +{ + int err; + + err = qcom_scm_import_ice_key(raw_key, raw_key_size, + lt_key, QCOM_ICE_HWKM_WRAPPED_KEY_SIZE); + if (err) + return err; + + return QCOM_ICE_HWKM_WRAPPED_KEY_SIZE; +} +EXPORT_SYMBOL_GPL(qcom_ice_import_key); + static struct qcom_ice *qcom_ice_create(struct device *dev, void __iomem *base) { diff --git a/drivers/soc/qcom/llcc-qcom.c b/drivers/soc/qcom/llcc-qcom.c index 56823b6a2fac..192edc3f64dc 100644 --- a/drivers/soc/qcom/llcc-qcom.c +++ b/drivers/soc/qcom/llcc-qcom.c @@ -35,6 +35,11 @@ #define ATTR0_RES_WAYS_MASK GENMASK(15, 0) #define ATTR0_BONUS_WAYS_MASK GENMASK(31, 16) #define ATTR0_BONUS_WAYS_SHIFT 16 +#define ATTR2_PROBE_TARGET_WAYS_MASK BIT(4) +#define ATTR2_FIXED_SIZE_MASK BIT(8) +#define ATTR2_PRIORITY_MASK GENMASK(14, 12) +#define ATTR2_PARENT_SCID_MASK GENMASK(21, 16) +#define ATTR2_IN_A_GROUP_MASK BIT(24) #define LLCC_STATUS_READ_DELAY 100 #define CACHE_LINE_SIZE_SHIFT 6 @@ -49,6 +54,10 @@ #define LLCC_TRP_ATTR0_CFGn(n) (0x21000 + SZ_8 * n) #define LLCC_TRP_ATTR1_CFGn(n) (0x21004 + SZ_8 * n) #define LLCC_TRP_ATTR2_CFGn(n) (0x21100 + SZ_4 * n) +#define LLCC_V6_TRP_ATTR0_CFGn(n) (cfg->reg_offset[LLCC_TRP_ATTR0_CFG] + SZ_64 * (n)) +#define LLCC_V6_TRP_ATTR1_CFGn(n) (cfg->reg_offset[LLCC_TRP_ATTR1_CFG] + SZ_64 * (n)) +#define LLCC_V6_TRP_ATTR2_CFGn(n) (cfg->reg_offset[LLCC_TRP_ATTR2_CFG] + SZ_64 * (n)) +#define LLCC_V6_TRP_ATTR3_CFGn(n) (cfg->reg_offset[LLCC_TRP_ATTR3_CFG] + SZ_64 * (n)) #define LLCC_TRP_SCID_DIS_CAP_ALLOC 0x21f00 #define LLCC_TRP_PCB_ACT 0x21f04 @@ -66,6 +75,7 @@ #define LLCC_VERSION_2_0_0_0 0x02000000 #define LLCC_VERSION_2_1_0_0 0x02010000 #define LLCC_VERSION_4_1_0_0 0x04010000 +#define LLCC_VERSION_6_0_0_0 0X06000000 /** * struct llcc_slice_config - Data associated with the llcc slice @@ -106,6 +116,7 @@ * ovcap_en. * @vict_prio: When current scid is under-capacity, allocate over other * lower-than victim priority-line threshold scid. + * @parent_slice_id: For grouped slices, specifies the slice id of the parent. 
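The grouping, priority and fixed-size knobs land in the per-slice ATTR2 register, and each boolean knob lands in a per-slice bit of a register shared by 32 slices, as programmed by _qcom_llcc_cfg_program_v6() further below. The following standalone arithmetic check (not from the patch) uses the sm8750 LLCC_CAMRTIP slice added below to make the encoding concrete.

#include <assert.h>
#include <stdint.h>

/*
 * Worked example: ATTR2 value for the sm8750 LLCC_CAMRTIP slice
 * (slice_id 13, priority 4, fixed_size, grouped under parent slice 33,
 * cache_mode 0), following the ATTR2_* field layout defined above, plus
 * the per-slice bit addressing used for the boolean controls.
 */
int main(void)
{
        uint32_t attr2 = 0;

        attr2 |= 0u << 0;       /* cache_mode (unset for this slice) */
        attr2 |= 0u << 4;       /* ATTR2_PROBE_TARGET_WAYS_MASK */
        attr2 |= 1u << 8;       /* ATTR2_FIXED_SIZE_MASK */
        attr2 |= 4u << 12;      /* ATTR2_PRIORITY_MASK */
        attr2 |= 33u << 16;     /* ATTR2_PARENT_SCID_MASK (parent_slice_id) */
        attr2 |= 1u << 24;      /* ATTR2_IN_A_GROUP_MASK */
        assert(attr2 == 0x01214100);

        /* Per-slice bit placement, e.g. slice_id 35 (LLCC_AUDIO on sm8750): */
        assert(35 % 32 == 3);           /* slice_offset: bit 3 ...              */
        assert((35 / 32) * 4 == 4);     /* reg_offset: ... in the next register */

        return 0;
}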
*/ struct llcc_slice_config { u32 usecase_id; @@ -130,6 +141,7 @@ struct llcc_slice_config { bool ovcap_en; bool ovcap_prio; bool vict_prio; + u32 parent_slice_id; }; struct qcom_llcc_config { @@ -153,6 +165,21 @@ struct qcom_sct_config { enum llcc_reg_offset { LLCC_COMMON_HW_INFO, LLCC_COMMON_STATUS0, + LLCC_TRP_ATTR0_CFG, + LLCC_TRP_ATTR1_CFG, + LLCC_TRP_ATTR2_CFG, + LLCC_TRP_ATTR3_CFG, + LLCC_TRP_SID_DIS_CAP_ALLOC, + LLCC_TRP_ALGO_STALE_EN, + LLCC_TRP_ALGO_STALE_CAP_EN, + LLCC_TRP_ALGO_MRU0, + LLCC_TRP_ALGO_MRU1, + LLCC_TRP_ALGO_ALLOC0, + LLCC_TRP_ALGO_ALLOC1, + LLCC_TRP_ALGO_ALLOC2, + LLCC_TRP_ALGO_ALLOC3, + LLCC_TRP_WRS_EN, + LLCC_TRP_WRS_CACHEABLE_EN, }; static const struct llcc_slice_config ipq5424_data[] = { @@ -2662,6 +2689,263 @@ static const struct llcc_slice_config sm8650_data[] = { }, }; +static const struct llcc_slice_config sm8750_data[] = { + { + .usecase_id = LLCC_CPUSS, + .slice_id = 1, + .max_cap = 5120, + .priority = 1, + .bonus_ways = 0xffffffff, + .activate_on_init = true, + .write_scid_en = true, + }, { + .usecase_id = LLCC_MDMHPFX, + .slice_id = 24, + .max_cap = 1024, + .priority = 5, + .fixed_size = true, + .bonus_ways = 0xffffffff, + }, { + .usecase_id = LLCC_VIDSC0, + .slice_id = 2, + .max_cap = 512, + .priority = 4, + .fixed_size = true, + .bonus_ways = 0xffffffff, + }, { + .usecase_id = LLCC_AUDIO, + .slice_id = 35, + .max_cap = 512, + .priority = 1, + .fixed_size = true, + .bonus_ways = 0xffffffff, + }, { + .usecase_id = LLCC_MDMHPGRW, + .slice_id = 25, + .max_cap = 1024, + .priority = 5, + .bonus_ways = 0xffffffff, + }, { + .usecase_id = LLCC_MODHW, + .slice_id = 26, + .max_cap = 1024, + .priority = 1, + .fixed_size = true, + .bonus_ways = 0xffffffff, + }, { + .usecase_id = LLCC_CMPT, + .slice_id = 34, + .max_cap = 4096, + .priority = 1, + .fixed_size = true, + .bonus_ways = 0xffffffff, + }, { + .usecase_id = LLCC_GPUHTW, + .slice_id = 11, + .max_cap = 512, + .priority = 1, + .fixed_size = true, + .bonus_ways = 0xffffffff, + }, { + .usecase_id = LLCC_GPU, + .slice_id = 9, + .max_cap = 5632, + .priority = 1, + .fixed_size = true, + .bonus_ways = 0xffffffff, + .write_scid_en = true, + .write_scid_cacheable_en = true + }, { + .usecase_id = LLCC_MMUHWT, + .slice_id = 18, + .max_cap = 768, + .priority = 1, + .fixed_size = true, + .bonus_ways = 0xffffffff, + .activate_on_init = true, + }, { + .usecase_id = LLCC_DISP, + .slice_id = 16, + .max_cap = 7168, + .priority = 1, + .fixed_size = true, + .bonus_ways = 0xffffffff, + .cache_mode = 2, + .stale_en = true, + }, { + .usecase_id = LLCC_VIDFW, + .slice_id = 17, + .priority = 4, + .fixed_size = true, + .bonus_ways = 0xffffffff, + }, { + .usecase_id = LLCC_CAMFW, + .slice_id = 20, + .priority = 4, + .fixed_size = true, + .bonus_ways = 0xffffffff, + }, { + .usecase_id = LLCC_MDMPNG, + .slice_id = 27, + .max_cap = 256, + .priority = 5, + .fixed_size = true, + .bonus_ways = 0xf0000000, + }, { + .usecase_id = LLCC_AUDHW, + .slice_id = 22, + .max_cap = 512, + .priority = 1, + .fixed_size = true, + .bonus_ways = 0xffffffff, + }, { + .usecase_id = LLCC_CVP, + .slice_id = 8, + .max_cap = 800, + .priority = 5, + .fixed_size = true, + .bonus_ways = 0xffffffff, + .vict_prio = true, + }, { + .usecase_id = LLCC_MODPE, + .slice_id = 29, + .max_cap = 256, + .priority = 1, + .fixed_size = true, + .bonus_ways = 0xf0000000, + .alloc_oneway_en = true, + }, { + .usecase_id = LLCC_WRCACHE, + .slice_id = 31, + .max_cap = 512, + .priority = 1, + .fixed_size = true, + .bonus_ways = 0xffffffff, + .activate_on_init = true, + }, { + .usecase_id 
= LLCC_CVPFW, + .slice_id = 19, + .max_cap = 64, + .priority = 4, + .fixed_size = true, + .bonus_ways = 0xffffffff, + }, { + .usecase_id = LLCC_CMPTHCP, + .slice_id = 15, + .max_cap = 256, + .priority = 4, + .fixed_size = true, + .bonus_ways = 0xffffffff, + }, { + .usecase_id = LLCC_LCPDARE, + .slice_id = 30, + .max_cap = 128, + .priority = 5, + .fixed_size = true, + .bonus_ways = 0xffffffff, + .activate_on_init = true, + .alloc_oneway_en = true, + }, { + .usecase_id = LLCC_AENPU, + .slice_id = 3, + .max_cap = 3072, + .priority = 1, + .fixed_size = true, + .bonus_ways = 0xffffffff, + .cache_mode = 2, + }, { + .usecase_id = LLCC_ISLAND1, + .slice_id = 12, + .max_cap = 7936, + .priority = 7, + .fixed_size = true, + .bonus_ways = 0x7fffffff, + }, { + .usecase_id = LLCC_DISP_WB, + .slice_id = 23, + .max_cap = 512, + .priority = 4, + .fixed_size = true, + .bonus_ways = 0xffffffff, + }, { + .usecase_id = LLCC_VIDVSP, + .slice_id = 4, + .max_cap = 256, + .priority = 4, + .fixed_size = true, + .bonus_ways = 0xffffffff, + }, { + .usecase_id = LLCC_VIDDEC, + .slice_id = 5, + .max_cap = 6144, + .priority = 4, + .fixed_size = true, + .bonus_ways = 0xffffffff, + .cache_mode = 2, + .ovcap_prio = true, + .parent_slice_id = 33, + }, { + .usecase_id = LLCC_CAMOFE, + .slice_id = 33, + .max_cap = 6144, + .priority = 4, + .fixed_size = true, + .bonus_ways = 0xffffffff, + .stale_en = true, + .ovcap_prio = true, + .parent_slice_id = 33, + }, { + .usecase_id = LLCC_CAMRTIP, + .slice_id = 13, + .max_cap = 1024, + .priority = 4, + .fixed_size = true, + .bonus_ways = 0xffffffff, + .stale_en = true, + .ovcap_prio = true, + .parent_slice_id = 33, + }, { + .usecase_id = LLCC_CAMSRTIP, + .slice_id = 14, + .max_cap = 6144, + .priority = 4, + .fixed_size = true, + .bonus_ways = 0xffffffff, + .stale_en = true, + .ovcap_prio = true, + .parent_slice_id = 33, + }, { + .usecase_id = LLCC_CAMRTRF, + .slice_id = 7, + .max_cap = 3584, + .priority = 1, + .fixed_size = true, + .bonus_ways = 0xffffffff, + .stale_en = true, + .ovcap_prio = true, + .parent_slice_id = 33, + }, { + .usecase_id = LLCC_CAMSRTRF, + .slice_id = 21, + .max_cap = 6144, + .priority = 1, + .fixed_size = true, + .bonus_ways = 0xffffffff, + .stale_en = true, + .ovcap_prio = true, + .parent_slice_id = 33, + }, { + .usecase_id = LLCC_CPUSSMPAM, + .slice_id = 6, + .max_cap = 2048, + .priority = 1, + .fixed_size = true, + .bonus_ways = 0xffffffff, + .activate_on_init = true, + .write_scid_en = true, + }, +}; + static const struct llcc_slice_config qcs615_data[] = { { .usecase_id = LLCC_CPUSS, @@ -3161,6 +3445,33 @@ static const struct llcc_edac_reg_offset llcc_v2_1_edac_reg_offset = { .drp_ecc_db_err_syn0 = 0x52120, }; +static const struct llcc_edac_reg_offset llcc_v6_edac_reg_offset = { + .trp_ecc_error_status0 = 0x47448, + .trp_ecc_error_status1 = 0x47450, + .trp_ecc_sb_err_syn0 = 0x47490, + .trp_ecc_db_err_syn0 = 0x474d0, + .trp_ecc_error_cntr_clear = 0x47444, + .trp_interrupt_0_status = 0x47600, + .trp_interrupt_0_clear = 0x47604, + .trp_interrupt_0_enable = 0x47608, + + /* LLCC Common registers */ + .cmn_status0 = 0x6400c, + .cmn_interrupt_0_enable = 0x6401c, + .cmn_interrupt_2_enable = 0x6403c, + + /* LLCC DRP registers */ + .drp_ecc_error_cfg = 0x80000, + .drp_ecc_error_cntr_clear = 0x80004, + .drp_interrupt_status = 0x80020, + .drp_interrupt_clear = 0x80028, + .drp_interrupt_enable = 0x8002c, + .drp_ecc_error_status0 = 0x820f4, + .drp_ecc_error_status1 = 0x820f8, + .drp_ecc_sb_err_syn0 = 0x820fc, + .drp_ecc_db_err_syn0 = 0x82120, +}; + /* LLCC register 
offset starting from v1.0.0 */ static const u32 llcc_v1_reg_offset[] = { [LLCC_COMMON_HW_INFO] = 0x00030000, @@ -3173,6 +3484,27 @@ static const u32 llcc_v2_1_reg_offset[] = { [LLCC_COMMON_STATUS0] = 0x0003400c, }; +/* LLCC register offset starting from v6.0.0 */ +static const u32 llcc_v6_reg_offset[] = { + [LLCC_COMMON_HW_INFO] = 0x00064000, + [LLCC_COMMON_STATUS0] = 0x0006400c, + [LLCC_TRP_ATTR0_CFG] = 0x00041000, + [LLCC_TRP_ATTR1_CFG] = 0x00041008, + [LLCC_TRP_ATTR2_CFG] = 0x00041010, + [LLCC_TRP_ATTR3_CFG] = 0x00041014, + [LLCC_TRP_SID_DIS_CAP_ALLOC] = 0x00042000, + [LLCC_TRP_ALGO_STALE_EN] = 0x00042008, + [LLCC_TRP_ALGO_STALE_CAP_EN] = 0x00042010, + [LLCC_TRP_ALGO_MRU0] = 0x00042018, + [LLCC_TRP_ALGO_MRU1] = 0x00042020, + [LLCC_TRP_ALGO_ALLOC0] = 0x00042028, + [LLCC_TRP_ALGO_ALLOC1] = 0x00042030, + [LLCC_TRP_ALGO_ALLOC2] = 0x00042038, + [LLCC_TRP_ALGO_ALLOC3] = 0x00042040, + [LLCC_TRP_WRS_EN] = 0x00042080, + [LLCC_TRP_WRS_CACHEABLE_EN] = 0x00042088, +}; + static const struct qcom_llcc_config qcs615_cfg[] = { { .sct_data = qcs615_data, @@ -3379,6 +3711,16 @@ static const struct qcom_llcc_config sm8650_cfg[] = { }, }; +static const struct qcom_llcc_config sm8750_cfg[] = { + { + .sct_data = sm8750_data, + .size = ARRAY_SIZE(sm8750_data), + .skip_llcc_cfg = false, + .reg_offset = llcc_v6_reg_offset, + .edac_reg_offset = &llcc_v6_edac_reg_offset, + }, +}; + static const struct qcom_llcc_config x1e80100_cfg[] = { { .sct_data = x1e80100_data, @@ -3489,6 +3831,11 @@ static const struct qcom_sct_config sm8650_cfgs = { .num_config = ARRAY_SIZE(sm8650_cfg), }; +static const struct qcom_sct_config sm8750_cfgs = { + .llcc_config = sm8750_cfg, + .num_config = ARRAY_SIZE(sm8750_cfg), +}; + static const struct qcom_sct_config x1e80100_cfgs = { .llcc_config = x1e80100_cfg, .num_config = ARRAY_SIZE(x1e80100_cfg), @@ -3869,6 +4216,139 @@ static int _qcom_llcc_cfg_program(const struct llcc_slice_config *config, return ret; } +static int _qcom_llcc_cfg_program_v6(const struct llcc_slice_config *config, + const struct qcom_llcc_config *cfg) +{ + u32 stale_en, stale_cap_en, mru_uncap_en, mru_rollover; + u32 alloc_oneway_en, ovcap_en, ovcap_prio, vict_prio; + u32 attr0_cfg, attr1_cfg, attr2_cfg, attr3_cfg; + u32 attr0_val, attr1_val, attr2_val, attr3_val; + u32 slice_offset, reg_offset; + struct llcc_slice_desc *desc; + u32 wren, wr_cache_en; + int ret; + + attr0_cfg = LLCC_V6_TRP_ATTR0_CFGn(config->slice_id); + attr1_cfg = LLCC_V6_TRP_ATTR1_CFGn(config->slice_id); + attr2_cfg = LLCC_V6_TRP_ATTR2_CFGn(config->slice_id); + attr3_cfg = LLCC_V6_TRP_ATTR3_CFGn(config->slice_id); + + attr0_val = config->res_ways; + attr1_val = config->bonus_ways; + attr2_val = config->cache_mode; + attr2_val |= FIELD_PREP(ATTR2_PROBE_TARGET_WAYS_MASK, config->probe_target_ways); + attr2_val |= FIELD_PREP(ATTR2_FIXED_SIZE_MASK, config->fixed_size); + attr2_val |= FIELD_PREP(ATTR2_PRIORITY_MASK, config->priority); + + if (config->parent_slice_id && config->fixed_size) { + attr2_val |= FIELD_PREP(ATTR2_PARENT_SCID_MASK, config->parent_slice_id); + attr2_val |= ATTR2_IN_A_GROUP_MASK; + } + + attr3_val = MAX_CAP_TO_BYTES(config->max_cap); + attr3_val /= drv_data->num_banks; + attr3_val >>= CACHE_LINE_SIZE_SHIFT; + + ret = regmap_write(drv_data->bcast_regmap, attr0_cfg, attr0_val); + if (ret) + return ret; + + ret = regmap_write(drv_data->bcast_regmap, attr1_cfg, attr1_val); + if (ret) + return ret; + + ret = regmap_write(drv_data->bcast_regmap, attr2_cfg, attr2_val); + if (ret) + return ret; + + ret = 
regmap_write(drv_data->bcast_regmap, attr3_cfg, attr3_val); + if (ret) + return ret; + + slice_offset = config->slice_id % 32; + reg_offset = (config->slice_id / 32) * 4; + + wren = config->write_scid_en << slice_offset; + ret = regmap_update_bits(drv_data->bcast_regmap, + cfg->reg_offset[LLCC_TRP_WRS_EN] + reg_offset, + BIT(slice_offset), wren); + if (ret) + return ret; + + wr_cache_en = config->write_scid_cacheable_en << slice_offset; + ret = regmap_update_bits(drv_data->bcast_regmap, + cfg->reg_offset[LLCC_TRP_WRS_CACHEABLE_EN] + reg_offset, + BIT(slice_offset), wr_cache_en); + if (ret) + return ret; + + stale_en = config->stale_en << slice_offset; + ret = regmap_update_bits(drv_data->bcast_regmap, + cfg->reg_offset[LLCC_TRP_ALGO_STALE_EN] + reg_offset, + BIT(slice_offset), stale_en); + if (ret) + return ret; + + stale_cap_en = config->stale_cap_en << slice_offset; + ret = regmap_update_bits(drv_data->bcast_regmap, + cfg->reg_offset[LLCC_TRP_ALGO_STALE_CAP_EN] + reg_offset, + BIT(slice_offset), stale_cap_en); + if (ret) + return ret; + + mru_uncap_en = config->mru_uncap_en << slice_offset; + ret = regmap_update_bits(drv_data->bcast_regmap, + cfg->reg_offset[LLCC_TRP_ALGO_MRU0] + reg_offset, + BIT(slice_offset), mru_uncap_en); + if (ret) + return ret; + + mru_rollover = config->mru_rollover << slice_offset; + ret = regmap_update_bits(drv_data->bcast_regmap, + cfg->reg_offset[LLCC_TRP_ALGO_MRU1] + reg_offset, + BIT(slice_offset), mru_rollover); + if (ret) + return ret; + + alloc_oneway_en = config->alloc_oneway_en << slice_offset; + ret = regmap_update_bits(drv_data->bcast_regmap, + cfg->reg_offset[LLCC_TRP_ALGO_ALLOC0] + reg_offset, + BIT(slice_offset), alloc_oneway_en); + if (ret) + return ret; + + ovcap_en = config->ovcap_en << slice_offset; + ret = regmap_update_bits(drv_data->bcast_regmap, + cfg->reg_offset[LLCC_TRP_ALGO_ALLOC1] + reg_offset, + BIT(slice_offset), ovcap_en); + if (ret) + return ret; + + ovcap_prio = config->ovcap_prio << slice_offset; + ret = regmap_update_bits(drv_data->bcast_regmap, + cfg->reg_offset[LLCC_TRP_ALGO_ALLOC2] + reg_offset, + BIT(slice_offset), ovcap_prio); + if (ret) + return ret; + + vict_prio = config->vict_prio << slice_offset; + ret = regmap_update_bits(drv_data->bcast_regmap, + cfg->reg_offset[LLCC_TRP_ALGO_ALLOC3] + reg_offset, + BIT(slice_offset), vict_prio); + if (ret) + return ret; + + if (config->activate_on_init) { + desc = llcc_slice_getd(config->usecase_id); + if (PTR_ERR_OR_ZERO(desc)) + return -EINVAL; + + ret = llcc_slice_activate(desc); + } + + return ret; +} + static int qcom_llcc_cfg_program(struct platform_device *pdev, const struct qcom_llcc_config *cfg) { @@ -3880,10 +4360,18 @@ static int qcom_llcc_cfg_program(struct platform_device *pdev, sz = drv_data->cfg_size; llcc_table = drv_data->cfg; - for (i = 0; i < sz; i++) { - ret = _qcom_llcc_cfg_program(&llcc_table[i], cfg); - if (ret) - return ret; + if (drv_data->version >= LLCC_VERSION_6_0_0_0) { + for (i = 0; i < sz; i++) { + ret = _qcom_llcc_cfg_program_v6(&llcc_table[i], cfg); + if (ret) + return ret; + } + } else { + for (i = 0; i < sz; i++) { + ret = _qcom_llcc_cfg_program(&llcc_table[i], cfg); + if (ret) + return ret; + } } return ret; @@ -4102,6 +4590,7 @@ static const struct of_device_id qcom_llcc_of_match[] = { { .compatible = "qcom,sm8450-llcc", .data = &sm8450_cfgs }, { .compatible = "qcom,sm8550-llcc", .data = &sm8550_cfgs }, { .compatible = "qcom,sm8650-llcc", .data = &sm8650_cfgs }, + { .compatible = "qcom,sm8750-llcc", .data = &sm8750_cfgs }, { .compatible = 
"qcom,x1e80100-llcc", .data = &x1e80100_cfgs }, { } }; diff --git a/drivers/soc/qcom/pmic_glink.c b/drivers/soc/qcom/pmic_glink.c index cde19cdfd3c7..0a6d325b195c 100644 --- a/drivers/soc/qcom/pmic_glink.c +++ b/drivers/soc/qcom/pmic_glink.c @@ -371,15 +371,11 @@ static void pmic_glink_remove(struct platform_device *pdev) __pmic_glink = NULL; } -static const unsigned long pmic_glink_sc8280xp_client_mask = BIT(PMIC_GLINK_CLIENT_BATT) | - BIT(PMIC_GLINK_CLIENT_ALTMODE); - static const unsigned long pmic_glink_sm8450_client_mask = BIT(PMIC_GLINK_CLIENT_BATT) | BIT(PMIC_GLINK_CLIENT_ALTMODE) | BIT(PMIC_GLINK_CLIENT_UCSI); static const struct of_device_id pmic_glink_of_match[] = { - { .compatible = "qcom,sc8280xp-pmic-glink", .data = &pmic_glink_sc8280xp_client_mask }, { .compatible = "qcom,pmic-glink", .data = &pmic_glink_sm8450_client_mask }, {} }; diff --git a/drivers/soc/qcom/pmic_glink_altmode.c b/drivers/soc/qcom/pmic_glink_altmode.c index bd06ce161804..7f11acd33323 100644 --- a/drivers/soc/qcom/pmic_glink_altmode.c +++ b/drivers/soc/qcom/pmic_glink_altmode.c @@ -218,21 +218,29 @@ static void pmic_glink_altmode_worker(struct work_struct *work) { struct pmic_glink_altmode_port *alt_port = work_to_altmode_port(work); struct pmic_glink_altmode *altmode = alt_port->altmode; + enum drm_connector_status conn_status; typec_switch_set(alt_port->typec_switch, alt_port->orientation); - if (alt_port->svid == USB_TYPEC_DP_SID && alt_port->mode == 0xff) - pmic_glink_altmode_safe(altmode, alt_port); - else if (alt_port->svid == USB_TYPEC_DP_SID) - pmic_glink_altmode_enable_dp(altmode, alt_port, alt_port->mode, - alt_port->hpd_state, alt_port->hpd_irq); - else - pmic_glink_altmode_enable_usb(altmode, alt_port); + if (alt_port->svid == USB_TYPEC_DP_SID) { + if (alt_port->mode == 0xff) { + pmic_glink_altmode_safe(altmode, alt_port); + } else { + pmic_glink_altmode_enable_dp(altmode, alt_port, + alt_port->mode, + alt_port->hpd_state, + alt_port->hpd_irq); + } - drm_aux_hpd_bridge_notify(&alt_port->bridge->dev, - alt_port->hpd_state ? 
- connector_status_connected : - connector_status_disconnected); + if (alt_port->hpd_state) + conn_status = connector_status_connected; + else + conn_status = connector_status_disconnected; + + drm_aux_hpd_bridge_notify(&alt_port->bridge->dev, conn_status); + } else { + pmic_glink_altmode_enable_usb(altmode, alt_port); + } pmic_glink_altmode_request(altmode, ALTMODE_PAN_ACK, alt_port->index); } diff --git a/drivers/soc/qcom/qcom_pd_mapper.c b/drivers/soc/qcom/qcom_pd_mapper.c index 1d1c438be3e7..3abea241b1c4 100644 --- a/drivers/soc/qcom/qcom_pd_mapper.c +++ b/drivers/soc/qcom/qcom_pd_mapper.c @@ -488,6 +488,16 @@ static const struct qcom_pdm_domain_data *sm6350_domains[] = { NULL, }; +static const struct qcom_pdm_domain_data *sm7150_domains[] = { + &adsp_audio_pd, + &adsp_root_pd, + &adsp_sensor_pd, + &cdsp_root_pd, + &mpss_root_pd_gps, + &mpss_wlan_pd, + NULL, +}; + static const struct qcom_pdm_domain_data *sm8150_domains[] = { &adsp_audio_pd, &adsp_root_pd, @@ -565,6 +575,7 @@ static const struct of_device_id qcom_pdm_domains[] __maybe_unused = { { .compatible = "qcom,sm4250", .data = sm6115_domains, }, { .compatible = "qcom,sm6115", .data = sm6115_domains, }, { .compatible = "qcom,sm6350", .data = sm6350_domains, }, + { .compatible = "qcom,sm7150", .data = sm7150_domains, }, { .compatible = "qcom,sm7225", .data = sm6350_domains, }, { .compatible = "qcom,sm7325", .data = sc7280_domains, }, { .compatible = "qcom,sm8150", .data = sm8150_domains, }, diff --git a/drivers/soc/qcom/smem.c b/drivers/soc/qcom/smem.c index 592819701809..cf425930539e 100644 --- a/drivers/soc/qcom/smem.c +++ b/drivers/soc/qcom/smem.c @@ -86,7 +86,7 @@ #define SMEM_GLOBAL_HOST 0xfffe /* Max number of processors/hosts in a system */ -#define SMEM_HOST_COUNT 20 +#define SMEM_HOST_COUNT 25 /** * struct smem_proc_comm - proc_comm communication struct (legacy) diff --git a/drivers/soc/qcom/smp2p.c b/drivers/soc/qcom/smp2p.c index 8c8878bc87f5..cb515c2340c1 100644 --- a/drivers/soc/qcom/smp2p.c +++ b/drivers/soc/qcom/smp2p.c @@ -575,7 +575,7 @@ static int qcom_smp2p_probe(struct platform_device *pdev) smp2p->mbox_client.knows_txdone = true; smp2p->mbox_chan = mbox_request_channel(&smp2p->mbox_client, 0); if (IS_ERR(smp2p->mbox_chan)) { - if (PTR_ERR(smp2p->mbox_chan) != -ENODEV) + if (PTR_ERR(smp2p->mbox_chan) != -ENOENT) return PTR_ERR(smp2p->mbox_chan); smp2p->mbox_chan = NULL; diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c index 18d7f1be9093..8c4147737c35 100644 --- a/drivers/soc/qcom/socinfo.c +++ b/drivers/soc/qcom/socinfo.c @@ -444,6 +444,7 @@ static const struct soc_id soc_id[] = { { qcom_board_id(IPQ5302) }, { qcom_board_id(QCS8550) }, { qcom_board_id(QCM8550) }, + { qcom_board_id(SM8750) }, { qcom_board_id(IPQ5300) }, { qcom_board_id(IPQ5321) }, { qcom_board_id(IPQ5424) }, diff --git a/drivers/soc/renesas/Kconfig b/drivers/soc/renesas/Kconfig index 4990b85d7df7..fbc3b69d21a7 100644 --- a/drivers/soc/renesas/Kconfig +++ b/drivers/soc/renesas/Kconfig @@ -393,6 +393,13 @@ config ARCH_R9A09G047 help This enables support for the Renesas RZ/G3E SoC variants. +config ARCH_R9A09G056 + bool "ARM64 Platform support for RZ/V2N" + default y if ARCH_RENESAS + select SYS_R9A09G056 + help + This enables support for the Renesas RZ/V2N SoC variants. 
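The matching system controller driver added further below (r9a09g056-sys.c) reports the detected RZ/V2N device by adding the decoded feature bits to a base of 41, so a fully featured part is printed with suffix "n48" and one with no optional IP blocks with "n41". A small standalone check of that arithmetic (not from the patch), assuming a device with GPU, ISP and the cryptographic engine all present:

#include <assert.h>
#include <stdint.h>

/* Register bits and feature flags as defined in r9a09g056-sys.c below. */
#define SYS_LSI_MODE_SEC_EN     (1u << 16)
#define SYS_LSI_PRR_GPU_DIS     (1u << 0)
#define SYS_LSI_PRR_ISP_DIS     (1u << 4)

#define SYS_RZV2N_FEATURE_G31   (1u << 0)       /* GE3D (Mali-G31) */
#define SYS_RZV2N_FEATURE_C55   (1u << 1)       /* ISP (Mali-C55) */
#define SYS_RZV2N_FEATURE_SEC   (1u << 2)       /* Cryptographic engine */

int main(void)
{
        /* Fully featured device: no disable fuses set, security enabled. */
        uint32_t prr_val = 0;
        uint32_t mode_val = SYS_LSI_MODE_SEC_EN;
        uint8_t flags;

        flags  = !(prr_val & SYS_LSI_PRR_GPU_DIS) ? SYS_RZV2N_FEATURE_G31 : 0;
        flags |= !(prr_val & SYS_LSI_PRR_ISP_DIS) ? SYS_RZV2N_FEATURE_C55 : 0;
        flags |= (mode_val & SYS_LSI_MODE_SEC_EN) ? SYS_RZV2N_FEATURE_SEC : 0;

        assert(41 + flags == 48);       /* printed as "...n48" by the driver */
        return 0;
}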
+ config ARCH_R9A09G057 bool "ARM64 Platform support for RZ/V2H(P)" default y if ARCH_RENESAS @@ -439,6 +446,10 @@ config SYS_R9A09G047 bool "Renesas RZ/G3E System controller support" if COMPILE_TEST select SYSC_RZ +config SYS_R9A09G056 + bool "Renesas RZ/V2N System controller support" if COMPILE_TEST + select SYSC_RZ + config SYS_R9A09G057 bool "Renesas RZ/V2H System controller support" if COMPILE_TEST select SYSC_RZ diff --git a/drivers/soc/renesas/Makefile b/drivers/soc/renesas/Makefile index 81d4c5726e4c..3bdcc6a395d5 100644 --- a/drivers/soc/renesas/Makefile +++ b/drivers/soc/renesas/Makefile @@ -8,6 +8,7 @@ obj-$(CONFIG_ARCH_R9A06G032) += r9a06g032-smp.o endif obj-$(CONFIG_SYSC_R9A08G045) += r9a08g045-sysc.o obj-$(CONFIG_SYS_R9A09G047) += r9a09g047-sys.o +obj-$(CONFIG_SYS_R9A09G056) += r9a09g056-sys.o obj-$(CONFIG_SYS_R9A09G057) += r9a09g057-sys.o # Family diff --git a/drivers/soc/renesas/r9a09g056-sys.c b/drivers/soc/renesas/r9a09g056-sys.c new file mode 100644 index 000000000000..3ad1422eba36 --- /dev/null +++ b/drivers/soc/renesas/r9a09g056-sys.c @@ -0,0 +1,75 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * RZ/V2N System controller (SYS) driver + * + * Copyright (C) 2025 Renesas Electronics Corp. + */ + +#include <linux/bitfield.h> +#include <linux/bits.h> +#include <linux/device.h> +#include <linux/init.h> +#include <linux/io.h> + +#include "rz-sysc.h" + +/* Register Offsets */ +#define SYS_LSI_MODE 0x300 +#define SYS_LSI_MODE_SEC_EN BIT(16) +/* + * BOOTPLLCA[1:0] + * [0,0] => 1.1GHZ + * [0,1] => 1.5GHZ + * [1,0] => 1.6GHZ + * [1,1] => 1.7GHZ + */ +#define SYS_LSI_MODE_STAT_BOOTPLLCA55 GENMASK(12, 11) +#define SYS_LSI_MODE_CA55_1_7GHZ 0x3 + +#define SYS_LSI_PRR 0x308 +#define SYS_LSI_PRR_GPU_DIS BIT(0) +#define SYS_LSI_PRR_ISP_DIS BIT(4) + +#define SYS_RZV2N_FEATURE_G31 BIT(0) +#define SYS_RZV2N_FEATURE_C55 BIT(1) +#define SYS_RZV2N_FEATURE_SEC BIT(2) + +static void rzv2n_sys_print_id(struct device *dev, + void __iomem *sysc_base, + struct soc_device_attribute *soc_dev_attr) +{ + u32 prr_val, mode_val; + u8 feature_flags; + + prr_val = readl(sysc_base + SYS_LSI_PRR); + mode_val = readl(sysc_base + SYS_LSI_MODE); + + /* Check GPU, ISP and Cryptographic configuration */ + feature_flags = !(prr_val & SYS_LSI_PRR_GPU_DIS) ? SYS_RZV2N_FEATURE_G31 : 0; + feature_flags |= !(prr_val & SYS_LSI_PRR_ISP_DIS) ? SYS_RZV2N_FEATURE_C55 : 0; + feature_flags |= (mode_val & SYS_LSI_MODE_SEC_EN) ? SYS_RZV2N_FEATURE_SEC : 0; + + dev_info(dev, "Detected Renesas %s %sn%d Rev %s%s%s%s%s\n", soc_dev_attr->family, + soc_dev_attr->soc_id, 41 + feature_flags, soc_dev_attr->revision, + feature_flags ? " with" : "", + feature_flags & SYS_RZV2N_FEATURE_G31 ? " GE3D (Mali-G31)" : "", + feature_flags & SYS_RZV2N_FEATURE_SEC ? " Cryptographic engine" : "", + feature_flags & SYS_RZV2N_FEATURE_C55 ? 
" ISP (Mali-C55)" : ""); + + /* Check CA55 PLL configuration */ + if (FIELD_GET(SYS_LSI_MODE_STAT_BOOTPLLCA55, mode_val) != SYS_LSI_MODE_CA55_1_7GHZ) + dev_warn(dev, "CA55 PLL is not set to 1.7GHz\n"); +} + +static const struct rz_sysc_soc_id_init_data rzv2n_sys_soc_id_init_data __initconst = { + .family = "RZ/V2N", + .id = 0x867d447, + .devid_offset = 0x304, + .revision_mask = GENMASK(31, 28), + .specific_id_mask = GENMASK(27, 0), + .print_id = rzv2n_sys_print_id, +}; + +const struct rz_sysc_init_data rzv2n_sys_init_data = { + .soc_id_init_data = &rzv2n_sys_soc_id_init_data, +}; diff --git a/drivers/soc/renesas/rz-sysc.c b/drivers/soc/renesas/rz-sysc.c index 14db508f669f..ffa65fb4dade 100644 --- a/drivers/soc/renesas/rz-sysc.c +++ b/drivers/soc/renesas/rz-sysc.c @@ -88,6 +88,9 @@ static const struct of_device_id rz_sysc_match[] = { #ifdef CONFIG_SYS_R9A09G047 { .compatible = "renesas,r9a09g047-sys", .data = &rzg3e_sys_init_data }, #endif +#ifdef CONFIG_SYS_R9A09G056 + { .compatible = "renesas,r9a09g056-sys", .data = &rzv2n_sys_init_data }, +#endif #ifdef CONFIG_SYS_R9A09G057 { .compatible = "renesas,r9a09g057-sys", .data = &rzv2h_sys_init_data }, #endif diff --git a/drivers/soc/renesas/rz-sysc.h b/drivers/soc/renesas/rz-sysc.h index aa83948c5117..56bc047a1bff 100644 --- a/drivers/soc/renesas/rz-sysc.h +++ b/drivers/soc/renesas/rz-sysc.h @@ -42,5 +42,6 @@ struct rz_sysc_init_data { extern const struct rz_sysc_init_data rzg3e_sys_init_data; extern const struct rz_sysc_init_data rzg3s_sysc_init_data; extern const struct rz_sysc_init_data rzv2h_sys_init_data; +extern const struct rz_sysc_init_data rzv2n_sys_init_data; #endif /* __SOC_RENESAS_RZ_SYSC_H__ */ diff --git a/drivers/soc/samsung/exynos-pmu.c b/drivers/soc/samsung/exynos-pmu.c index c40313886a01..a77288f49d24 100644 --- a/drivers/soc/samsung/exynos-pmu.c +++ b/drivers/soc/samsung/exynos-pmu.c @@ -7,6 +7,7 @@ #include <linux/array_size.h> #include <linux/arm-smccc.h> +#include <linux/cpuhotplug.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/mfd/core.h> @@ -33,6 +34,7 @@ struct exynos_pmu_context { struct device *dev; const struct exynos_pmu_data *pmu_data; struct regmap *pmureg; + struct regmap *pmuintrgen; }; void __iomem *pmu_base_addr; @@ -222,7 +224,8 @@ static const struct regmap_config regmap_smccfg = { }; static const struct exynos_pmu_data gs101_pmu_data = { - .pmu_secure = true + .pmu_secure = true, + .pmu_cpuhp = true, }; /* @@ -326,6 +329,59 @@ struct regmap *exynos_get_pmu_regmap_by_phandle(struct device_node *np, } EXPORT_SYMBOL_GPL(exynos_get_pmu_regmap_by_phandle); +/* + * CPU_INFORM register hint values which are used by + * EL3 firmware (el3mon). 
+ */ +#define CPU_INFORM_CLEAR 0 +#define CPU_INFORM_C2 1 + +static int gs101_cpuhp_pmu_online(unsigned int cpu) +{ + unsigned int cpuhint = smp_processor_id(); + u32 reg, mask; + + /* clear cpu inform hint */ + regmap_write(pmu_context->pmureg, GS101_CPU_INFORM(cpuhint), + CPU_INFORM_CLEAR); + + mask = BIT(cpu); + + regmap_update_bits(pmu_context->pmuintrgen, GS101_GRP2_INTR_BID_ENABLE, + mask, (0 << cpu)); + + regmap_read(pmu_context->pmuintrgen, GS101_GRP2_INTR_BID_UPEND, ®); + + regmap_write(pmu_context->pmuintrgen, GS101_GRP2_INTR_BID_CLEAR, + reg & mask); + + return 0; +} + +static int gs101_cpuhp_pmu_offline(unsigned int cpu) +{ + u32 reg, mask; + unsigned int cpuhint = smp_processor_id(); + + /* set cpu inform hint */ + regmap_write(pmu_context->pmureg, GS101_CPU_INFORM(cpuhint), + CPU_INFORM_C2); + + mask = BIT(cpu); + regmap_update_bits(pmu_context->pmuintrgen, GS101_GRP2_INTR_BID_ENABLE, + mask, BIT(cpu)); + + regmap_read(pmu_context->pmuintrgen, GS101_GRP1_INTR_BID_UPEND, ®); + regmap_write(pmu_context->pmuintrgen, GS101_GRP1_INTR_BID_CLEAR, + reg & mask); + + mask = (BIT(cpu + 8)); + regmap_read(pmu_context->pmuintrgen, GS101_GRP1_INTR_BID_UPEND, ®); + regmap_write(pmu_context->pmuintrgen, GS101_GRP1_INTR_BID_CLEAR, + reg & mask); + return 0; +} + static int exynos_pmu_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -378,6 +434,26 @@ static int exynos_pmu_probe(struct platform_device *pdev) pmu_context->pmureg = regmap; pmu_context->dev = dev; + if (pmu_context->pmu_data && pmu_context->pmu_data->pmu_cpuhp) { + pmu_context->pmuintrgen = syscon_regmap_lookup_by_phandle(dev->of_node, + "google,pmu-intr-gen-syscon"); + if (IS_ERR(pmu_context->pmuintrgen)) { + /* + * To maintain support for older DTs that didn't specify syscon phandle + * just issue a warning rather than fail to probe. + */ + dev_warn(&pdev->dev, "pmu-intr-gen syscon unavailable\n"); + } else { + cpuhp_setup_state(CPUHP_BP_PREPARE_DYN, + "soc/exynos-pmu:prepare", + gs101_cpuhp_pmu_online, NULL); + + cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, + "soc/exynos-pmu:online", + NULL, gs101_cpuhp_pmu_offline); + } + } + if (pmu_context->pmu_data && pmu_context->pmu_data->pmu_init) pmu_context->pmu_data->pmu_init(); diff --git a/drivers/soc/samsung/exynos-pmu.h b/drivers/soc/samsung/exynos-pmu.h index 0a49a2c9a08e..0938bb4fe15f 100644 --- a/drivers/soc/samsung/exynos-pmu.h +++ b/drivers/soc/samsung/exynos-pmu.h @@ -22,6 +22,7 @@ struct exynos_pmu_data { const struct exynos_pmu_conf *pmu_config; const struct exynos_pmu_conf *pmu_config_extra; bool pmu_secure; + bool pmu_cpuhp; void (*pmu_init)(void); void (*powerdown_conf)(enum sys_powerdown); diff --git a/drivers/soc/sophgo/Kconfig b/drivers/soc/sophgo/Kconfig new file mode 100644 index 000000000000..45f78b270c91 --- /dev/null +++ b/drivers/soc/sophgo/Kconfig @@ -0,0 +1,34 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# Sophgo SoC drivers +# + +if ARCH_SOPHGO || COMPILE_TEST +menu "Sophgo SoC drivers" + +config SOPHGO_CV1800_RTCSYS + tristate "Sophgo CV1800 RTC MFD" + select MFD_CORE + help + If you say yes here you get support the RTC MFD driver for Sophgo + CV1800 series SoC. The RTC module comprises a 32kHz oscillator, + Power-on-Reset (PoR) sub-module, HW state machine to control chip + power-on, power-off and reset. Furthermore, the 8051 subsystem is + located within RTCSYS including associated SRAM block. + + This driver can also be built as a module. If so, the module will be + called cv1800-rtcsys. 
+ +config SOPHGO_SG2044_TOPSYS + tristate "Sophgo SG2044 TOP syscon driver" + select MFD_CORE + help + This is the core driver for the Sophgo SG2044 TOP system + controller device. This driver provide PLL clock device + for the SoC. + + This driver can also be built as a module. If so, the module + will be called sg2044-topsys. + +endmenu +endif diff --git a/drivers/soc/sophgo/Makefile b/drivers/soc/sophgo/Makefile new file mode 100644 index 000000000000..27f68df22c4d --- /dev/null +++ b/drivers/soc/sophgo/Makefile @@ -0,0 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0 + +obj-$(CONFIG_SOPHGO_CV1800_RTCSYS) += cv1800-rtcsys.o +obj-$(CONFIG_SOPHGO_SG2044_TOPSYS) += sg2044-topsys.o diff --git a/drivers/soc/sophgo/cv1800-rtcsys.c b/drivers/soc/sophgo/cv1800-rtcsys.c new file mode 100644 index 000000000000..fdae2e2a61c5 --- /dev/null +++ b/drivers/soc/sophgo/cv1800-rtcsys.c @@ -0,0 +1,63 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Driver for Sophgo CV1800 series SoC RTC subsystem + * + * The RTC module comprises a 32kHz oscillator, Power-on-Reset (PoR) sub-module, + * HW state machine to control chip power-on, power-off and reset. Furthermore, + * the 8051 subsystem is located within RTCSYS including associated SRAM block. + * + * Copyright (C) 2025 Alexander Sverdlin <alexander.sverdlin@gmail.com> + * + */ + +#include <linux/mfd/core.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/property.h> + +static struct resource cv1800_rtcsys_irq_resources[] = { + DEFINE_RES_IRQ_NAMED(0, "alarm"), +}; + +static const struct mfd_cell cv1800_rtcsys_subdev[] = { + { + .name = "cv1800b-rtc", + .num_resources = 1, + .resources = &cv1800_rtcsys_irq_resources[0], + }, +}; + +static int cv1800_rtcsys_probe(struct platform_device *pdev) +{ + int irq; + + irq = platform_get_irq_byname(pdev, "alarm"); + if (irq < 0) + return irq; + cv1800_rtcsys_irq_resources[0].start = irq; + cv1800_rtcsys_irq_resources[0].end = irq; + + return devm_mfd_add_devices(&pdev->dev, PLATFORM_DEVID_AUTO, + cv1800_rtcsys_subdev, + ARRAY_SIZE(cv1800_rtcsys_subdev), + NULL, 0, NULL); +} + +static const struct of_device_id cv1800_rtcsys_of_match[] = { + { .compatible = "sophgo,cv1800b-rtc" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, cv1800_rtcsys_of_match); + +static struct platform_driver cv1800_rtcsys_mfd = { + .probe = cv1800_rtcsys_probe, + .driver = { + .name = "cv1800_rtcsys", + .of_match_table = cv1800_rtcsys_of_match, + }, +}; +module_platform_driver(cv1800_rtcsys_mfd); + +MODULE_AUTHOR("Alexander Sverdlin <alexander.sverdlin@gmail.com>"); +MODULE_DESCRIPTION("Sophgo CV1800 series SoC RTC subsystem driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/soc/sophgo/sg2044-topsys.c b/drivers/soc/sophgo/sg2044-topsys.c new file mode 100644 index 000000000000..179f2620b2a9 --- /dev/null +++ b/drivers/soc/sophgo/sg2044-topsys.c @@ -0,0 +1,45 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Sophgo SG2044 multi-function system controller driver + * + * Copyright (C) 2025 Inochi Amaoto <inochiama@gmail.com> + */ + +#include <linux/mfd/core.h> +#include <linux/mod_devicetable.h> +#include <linux/module.h> +#include <linux/property.h> +#include <linux/resource.h> + +static const struct mfd_cell sg2044_topsys_subdev[] = { + { + .name = "sg2044-pll", + }, +}; + +static int sg2044_topsys_probe(struct platform_device *pdev) +{ + return devm_mfd_add_devices(&pdev->dev, PLATFORM_DEVID_AUTO, + sg2044_topsys_subdev, + ARRAY_SIZE(sg2044_topsys_subdev), + NULL, 0, NULL); +} + +static const struct of_device_id 
sg2044_topsys_of_match[] = { + { .compatible = "sophgo,sg2044-top-syscon" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, sg2044_topsys_of_match); + +static struct platform_driver sg2044_topsys_driver = { + .probe = sg2044_topsys_probe, + .driver = { + .name = "sg2044-topsys", + .of_match_table = sg2044_topsys_of_match, + }, +}; +module_platform_driver(sg2044_topsys_driver); + +MODULE_AUTHOR("Inochi Amaoto <inochiama@gmail.com>"); +MODULE_DESCRIPTION("Sophgo SG2044 multi-function system controller driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/soc/ti/k3-ringacc.c b/drivers/soc/ti/k3-ringacc.c index 82a15cad1c6c..7602b8a909b0 100644 --- a/drivers/soc/ti/k3-ringacc.c +++ b/drivers/soc/ti/k3-ringacc.c @@ -1291,7 +1291,7 @@ struct k3_ringacc *of_k3_ringacc_get_by_phandle(struct device_node *np, mutex_lock(&k3_ringacc_list_lock); list_for_each_entry(entry, &k3_ringacc_list, list) - if (entry->dev->of_node == ringacc_np) { + if (device_match_of_node(entry->dev, ringacc_np)) { ringacc = entry; break; } diff --git a/drivers/soc/ti/k3-socinfo.c b/drivers/soc/ti/k3-socinfo.c index 704039eb3c07..d716be113c84 100644 --- a/drivers/soc/ti/k3-socinfo.c +++ b/drivers/soc/ti/k3-socinfo.c @@ -43,6 +43,7 @@ #define JTAG_ID_PARTNO_AM62AX 0xBB8D #define JTAG_ID_PARTNO_AM62PX 0xBB9D #define JTAG_ID_PARTNO_J722S 0xBBA0 +#define JTAG_ID_PARTNO_AM62LX 0xBBA7 static const struct k3_soc_id { unsigned int id; @@ -58,6 +59,7 @@ static const struct k3_soc_id { { JTAG_ID_PARTNO_AM62AX, "AM62AX" }, { JTAG_ID_PARTNO_AM62PX, "AM62PX" }, { JTAG_ID_PARTNO_J722S, "J722S" }, + { JTAG_ID_PARTNO_AM62LX, "AM62LX" }, }; static const char * const j721e_rev_string_map[] = { diff --git a/drivers/soc/ti/knav_qmss_queue.c b/drivers/soc/ti/knav_qmss_queue.c index ea52425864a9..6e56e7609ccd 100644 --- a/drivers/soc/ti/knav_qmss_queue.c +++ b/drivers/soc/ti/knav_qmss_queue.c @@ -252,8 +252,7 @@ static struct knav_queue *__knav_queue_open(struct knav_queue_inst *inst, return qh; err: - if (qh->stats) - free_percpu(qh->stats); + free_percpu(qh->stats); devm_kfree(inst->kdev->dev, qh); return ERR_PTR(ret); } diff --git a/drivers/soc/ti/wkup_m3_ipc.c b/drivers/soc/ti/wkup_m3_ipc.c index 79dde9a7ec63..5845fc652adc 100644 --- a/drivers/soc/ti/wkup_m3_ipc.c +++ b/drivers/soc/ti/wkup_m3_ipc.c @@ -644,11 +644,9 @@ static int wkup_m3_ipc_probe(struct platform_device *pdev) m3_ipc->mbox = mbox_request_channel(&m3_ipc->mbox_client, 0); - if (IS_ERR(m3_ipc->mbox)) { - dev_err(dev, "IPC Request for A8->M3 Channel failed! %ld\n", - PTR_ERR(m3_ipc->mbox)); - return PTR_ERR(m3_ipc->mbox); - } + if (IS_ERR(m3_ipc->mbox)) + return dev_err_probe(dev, PTR_ERR(m3_ipc->mbox), + "IPC Request for A8->M3 Channel failed!\n"); if (of_property_read_u32(dev->of_node, "ti,rproc", &rproc_phandle)) { dev_err(&pdev->dev, "could not get rproc phandle\n"); diff --git a/drivers/soc/vt8500/Kconfig b/drivers/soc/vt8500/Kconfig new file mode 100644 index 000000000000..b4cc0ba1128b --- /dev/null +++ b/drivers/soc/vt8500/Kconfig @@ -0,0 +1,19 @@ +# SPDX-License-Identifier: GPL-2.0-only + +if ARCH_VT8500 || COMPILE_TEST + +menu "VIA/WonderMedia SoC drivers" + +config WMT_SOCINFO + bool "VIA/WonderMedia SoC Information driver" + default ARCH_VT8500 + select SOC_BUS + help + Say yes to support decoding of VIA/WonderMedia system configuration + register information. 
This currently includes just the chip ID register + which helps identify the exact hardware revision of the SoC the kernel + is running on (to know if any revision-specific quirks are required) + +endmenu + +endif diff --git a/drivers/soc/vt8500/Makefile b/drivers/soc/vt8500/Makefile new file mode 100644 index 000000000000..05964c5f2890 --- /dev/null +++ b/drivers/soc/vt8500/Makefile @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_WMT_SOCINFO) += wmt-socinfo.o diff --git a/drivers/soc/vt8500/wmt-socinfo.c b/drivers/soc/vt8500/wmt-socinfo.c new file mode 100644 index 000000000000..461f8c1ae56e --- /dev/null +++ b/drivers/soc/vt8500/wmt-socinfo.c @@ -0,0 +1,125 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright 2025 Alexey Charkov <alchark@gmail.com> + * Based on aspeed-socinfo.c + */ + +#include <linux/dev_printk.h> +#include <linux/device.h> +#include <linux/io.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/sys_soc.h> + +static const struct { + const char *name; + const u32 id; +} chip_id_table[] = { + /* VIA */ + { "VT8420", 0x3300 }, + { "VT8430", 0x3357 }, + { "VT8500", 0x3400 }, + + /* WonderMedia */ + { "WM8425", 0x3429 }, + { "WM8435", 0x3437 }, + { "WM8440", 0x3451 }, + { "WM8505", 0x3426 }, + { "WM8650", 0x3465 }, + { "WM8750", 0x3445 }, + { "WM8850", 0x3481 }, + { "WM8880", 0x3498 }, +}; + +static const char *sccid_to_name(u32 sccid) +{ + u32 id = sccid >> 16; + unsigned int i; + + for (i = 0 ; i < ARRAY_SIZE(chip_id_table) ; ++i) { + if (chip_id_table[i].id == id) + return chip_id_table[i].name; + } + + return "Unknown"; +} + +static int wmt_socinfo_probe(struct platform_device *pdev) +{ + struct device_node *np = pdev->dev.of_node; + struct soc_device_attribute *attrs; + struct soc_device *soc_dev; + char letter, digit; + void __iomem *reg; + u32 sccid; + + reg = devm_of_iomap(&pdev->dev, np, 0, NULL); + if (IS_ERR(reg)) + return PTR_ERR(reg); + + sccid = readl(reg); + + attrs = devm_kzalloc(&pdev->dev, sizeof(*attrs), GFP_KERNEL); + if (!attrs) + return -ENOMEM; + + /* + * Machine: VIA APC Rock + * Family: WM8850 + * Revision: A2 + * SoC ID: raw silicon revision id (34810103 in hexadecimal) + */ + + attrs->family = sccid_to_name(sccid); + + letter = (sccid >> 8) & 0xf; + letter = (letter - 1) + 'A'; + digit = sccid & 0xff; + digit = (digit - 1) + '0'; + attrs->revision = devm_kasprintf(&pdev->dev, GFP_KERNEL, + "%c%c", letter, digit); + + attrs->soc_id = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%08x", sccid); + + if (!attrs->revision || !attrs->soc_id) + return -ENOMEM; + + soc_dev = soc_device_register(attrs); + if (IS_ERR(soc_dev)) + return PTR_ERR(soc_dev); + + dev_info(&pdev->dev, + "VIA/WonderMedia %s rev %s (%s)\n", + attrs->family, + attrs->revision, + attrs->soc_id); + + platform_set_drvdata(pdev, soc_dev); + return 0; +} + +static void wmt_socinfo_remove(struct platform_device *pdev) +{ + struct soc_device *soc_dev = platform_get_drvdata(pdev); + + soc_device_unregister(soc_dev); +} + +static const struct of_device_id wmt_socinfo_ids[] = { + { .compatible = "via,vt8500-scc-id" }, + { /* Sentinel */ }, +}; + +static struct platform_driver wmt_socinfo = { + .probe = wmt_socinfo_probe, + .remove = wmt_socinfo_remove, + .driver = { + .name = "wmt-socinfo", + .of_match_table = wmt_socinfo_ids, + }, +}; +module_platform_driver(wmt_socinfo); + +MODULE_AUTHOR("Alexey Charkov <alchark@gmail.com>"); +MODULE_DESCRIPTION("VIA/WonderMedia socinfo driver"); +MODULE_LICENSE("GPL"); diff --git 
a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c index 66804bf1ee32..0904ecae253a 100644 --- a/drivers/target/target_core_configfs.c +++ b/drivers/target/target_core_configfs.c @@ -673,12 +673,10 @@ static ssize_t emulate_model_alias_store(struct config_item *item, return ret; BUILD_BUG_ON(sizeof(dev->t10_wwn.model) != INQUIRY_MODEL_LEN + 1); - if (flag) { + if (flag) dev_set_t10_wwn_model_alias(dev); - } else { - strscpy(dev->t10_wwn.model, dev->transport->inquiry_prod, - sizeof(dev->t10_wwn.model)); - } + else + strscpy(dev->t10_wwn.model, dev->transport->inquiry_prod); da->emulate_model_alias = flag; return count; } @@ -1433,7 +1431,7 @@ static ssize_t target_wwn_vendor_id_store(struct config_item *item, ssize_t len; ssize_t ret; - len = strscpy(buf, page, sizeof(buf)); + len = strscpy(buf, page); if (len > 0) { /* Strip any newline added from userspace. */ stripped = strstrip(buf); @@ -1464,7 +1462,7 @@ static ssize_t target_wwn_vendor_id_store(struct config_item *item, } BUILD_BUG_ON(sizeof(dev->t10_wwn.vendor) != INQUIRY_VENDOR_LEN + 1); - strscpy(dev->t10_wwn.vendor, stripped, sizeof(dev->t10_wwn.vendor)); + strscpy(dev->t10_wwn.vendor, stripped); pr_debug("Target_Core_ConfigFS: Set emulated T10 Vendor Identification:" " %s\n", dev->t10_wwn.vendor); @@ -1489,7 +1487,7 @@ static ssize_t target_wwn_product_id_store(struct config_item *item, ssize_t len; ssize_t ret; - len = strscpy(buf, page, sizeof(buf)); + len = strscpy(buf, page); if (len > 0) { /* Strip any newline added from userspace. */ stripped = strstrip(buf); @@ -1520,7 +1518,7 @@ static ssize_t target_wwn_product_id_store(struct config_item *item, } BUILD_BUG_ON(sizeof(dev->t10_wwn.model) != INQUIRY_MODEL_LEN + 1); - strscpy(dev->t10_wwn.model, stripped, sizeof(dev->t10_wwn.model)); + strscpy(dev->t10_wwn.model, stripped); pr_debug("Target_Core_ConfigFS: Set emulated T10 Model Identification: %s\n", dev->t10_wwn.model); @@ -1545,7 +1543,7 @@ static ssize_t target_wwn_revision_store(struct config_item *item, ssize_t len; ssize_t ret; - len = strscpy(buf, page, sizeof(buf)); + len = strscpy(buf, page); if (len > 0) { /* Strip any newline added from userspace. 
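[Editor's note] The configfs hunks above drop the explicit size argument from strscpy(); recent kernels provide a two-argument strscpy() that deduces the destination size at compile time when the destination is a fixed-size array. A minimal sketch of that idiom follows; the structure and field names here are illustrative, not the real target-core types.

#include <linux/string.h>

/*
 * Illustrative only: a stand-in for a structure with fixed-size ID fields.
 */
struct wwn_like {
	char vendor[8 + 1];
	char model[16 + 1];
};

static void copy_ids(struct wwn_like *wwn, const char *vendor,
		     const char *model)
{
	/*
	 * The destinations are real arrays, so the two-argument form can
	 * infer their sizes; the three-argument form is still required when
	 * the destination is only known through a pointer.
	 */
	strscpy(wwn->vendor, vendor);
	strscpy(wwn->model, model);
}
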
*/ stripped = strstrip(buf); @@ -1576,7 +1574,7 @@ static ssize_t target_wwn_revision_store(struct config_item *item, } BUILD_BUG_ON(sizeof(dev->t10_wwn.revision) != INQUIRY_REVISION_LEN + 1); - strscpy(dev->t10_wwn.revision, stripped, sizeof(dev->t10_wwn.revision)); + strscpy(dev->t10_wwn.revision, stripped); pr_debug("Target_Core_ConfigFS: Set emulated T10 Revision: %s\n", dev->t10_wwn.revision); diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index cc2da086f96e..7bb711b24c0d 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c @@ -55,14 +55,14 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd) rcu_read_lock(); deve = target_nacl_find_deve(nacl, se_cmd->orig_fe_lun); if (deve) { - atomic_long_inc(&deve->total_cmds); + this_cpu_inc(deve->stats->total_cmds); if (se_cmd->data_direction == DMA_TO_DEVICE) - atomic_long_add(se_cmd->data_length, - &deve->write_bytes); + this_cpu_add(deve->stats->write_bytes, + se_cmd->data_length); else if (se_cmd->data_direction == DMA_FROM_DEVICE) - atomic_long_add(se_cmd->data_length, - &deve->read_bytes); + this_cpu_add(deve->stats->read_bytes, + se_cmd->data_length); if ((se_cmd->data_direction == DMA_TO_DEVICE) && deve->lun_access_ro) { @@ -126,14 +126,14 @@ out_unlock: * target_core_fabric_configfs.c:target_fabric_port_release */ se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev); - atomic_long_inc(&se_cmd->se_dev->num_cmds); + this_cpu_inc(se_cmd->se_dev->stats->total_cmds); if (se_cmd->data_direction == DMA_TO_DEVICE) - atomic_long_add(se_cmd->data_length, - &se_cmd->se_dev->write_bytes); + this_cpu_add(se_cmd->se_dev->stats->write_bytes, + se_cmd->data_length); else if (se_cmd->data_direction == DMA_FROM_DEVICE) - atomic_long_add(se_cmd->data_length, - &se_cmd->se_dev->read_bytes); + this_cpu_add(se_cmd->se_dev->stats->read_bytes, + se_cmd->data_length); return ret; } @@ -322,6 +322,7 @@ int core_enable_device_list_for_node( struct se_portal_group *tpg) { struct se_dev_entry *orig, *new; + int ret = 0; new = kzalloc(sizeof(*new), GFP_KERNEL); if (!new) { @@ -329,6 +330,12 @@ int core_enable_device_list_for_node( return -ENOMEM; } + new->stats = alloc_percpu(struct se_dev_entry_io_stats); + if (!new->stats) { + ret = -ENOMEM; + goto free_deve; + } + spin_lock_init(&new->ua_lock); INIT_LIST_HEAD(&new->ua_list); INIT_LIST_HEAD(&new->lun_link); @@ -351,8 +358,8 @@ int core_enable_device_list_for_node( " for dynamic -> explicit NodeACL conversion:" " %s\n", nacl->initiatorname); mutex_unlock(&nacl->lun_entry_mutex); - kfree(new); - return -EINVAL; + ret = -EINVAL; + goto free_stats; } if (orig->se_lun_acl != NULL) { pr_warn_ratelimited("Detected existing explicit" @@ -360,8 +367,8 @@ int core_enable_device_list_for_node( " mapped_lun: %llu, failing\n", nacl->initiatorname, mapped_lun); mutex_unlock(&nacl->lun_entry_mutex); - kfree(new); - return -EINVAL; + ret = -EINVAL; + goto free_stats; } new->se_lun = lun; @@ -394,6 +401,20 @@ int core_enable_device_list_for_node( target_luns_data_has_changed(nacl, new, true); return 0; + +free_stats: + free_percpu(new->stats); +free_deve: + kfree(new); + return ret; +} + +static void target_free_dev_entry(struct rcu_head *head) +{ + struct se_dev_entry *deve = container_of(head, struct se_dev_entry, + rcu_head); + free_percpu(deve->stats); + kfree(deve); } void core_disable_device_list_for_node( @@ -443,7 +464,7 @@ void core_disable_device_list_for_node( kref_put(&orig->pr_kref, target_pr_kref_release); 
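[Editor's note] The target_core_device.c changes above swap atomic_long_t counters for per-CPU counters allocated next to each entry, sum them on the read side, and free them from an RCU callback instead of kfree_rcu() so the per-CPU region is released as well. Below is a minimal, self-contained sketch of that pattern; all names (my_io_stats, my_entry, ...) are hypothetical stand-ins for the real target-core structures.

#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/types.h>

struct my_io_stats {
	u32 total_cmds;
	u32 read_bytes;
	u32 write_bytes;
};

struct my_entry {
	struct my_io_stats __percpu *stats;
	struct rcu_head rcu_head;
};

static struct my_entry *my_entry_alloc(void)
{
	struct my_entry *e = kzalloc(sizeof(*e), GFP_KERNEL);

	if (!e)
		return NULL;
	e->stats = alloc_percpu(struct my_io_stats);
	if (!e->stats) {
		kfree(e);
		return NULL;
	}
	return e;
}

static void my_entry_account_write(struct my_entry *e, u32 bytes)
{
	/* Lockless fast path: touch only this CPU's counters. */
	this_cpu_inc(e->stats->total_cmds);
	this_cpu_add(e->stats->write_bytes, bytes);
}

static u32 my_entry_total_cmds(struct my_entry *e)
{
	unsigned int cpu;
	u32 cmds = 0;

	/* Readers sum the per-CPU contributions, as the stat files above do. */
	for_each_possible_cpu(cpu)
		cmds += per_cpu_ptr(e->stats, cpu)->total_cmds;
	return cmds;
}

static void my_entry_free_rcu(struct rcu_head *head)
{
	struct my_entry *e = container_of(head, struct my_entry, rcu_head);

	/* kfree_rcu() cannot release the per-CPU region, hence a callback. */
	free_percpu(e->stats);
	kfree(e);
}

static void my_entry_free(struct my_entry *e)
{
	call_rcu(&e->rcu_head, my_entry_free_rcu);
}
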
wait_for_completion(&orig->pr_comp); - kfree_rcu(orig, rcu_head); + call_rcu(&orig->rcu_head, target_free_dev_entry); core_scsi3_free_pr_reg_from_nacl(dev, nacl); target_luns_data_has_changed(nacl, NULL, false); @@ -679,6 +700,18 @@ static void scsi_dump_inquiry(struct se_device *dev) pr_debug(" Type: %s ", scsi_device_type(device_type)); } +static void target_non_ordered_release(struct percpu_ref *ref) +{ + struct se_device *dev = container_of(ref, struct se_device, + non_ordered); + unsigned long flags; + + spin_lock_irqsave(&dev->delayed_cmd_lock, flags); + if (!list_empty(&dev->delayed_cmd_list)) + schedule_work(&dev->delayed_cmd_work); + spin_unlock_irqrestore(&dev->delayed_cmd_lock, flags); +} + struct se_device *target_alloc_device(struct se_hba *hba, const char *name) { struct se_device *dev; @@ -689,11 +722,13 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name) if (!dev) return NULL; + dev->stats = alloc_percpu(struct se_dev_io_stats); + if (!dev->stats) + goto free_device; + dev->queues = kcalloc(nr_cpu_ids, sizeof(*dev->queues), GFP_KERNEL); - if (!dev->queues) { - hba->backend->ops->free_device(dev); - return NULL; - } + if (!dev->queues) + goto free_stats; dev->queue_cnt = nr_cpu_ids; for (i = 0; i < dev->queue_cnt; i++) { @@ -707,6 +742,10 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name) INIT_WORK(&q->sq.work, target_queued_submit_work); } + if (percpu_ref_init(&dev->non_ordered, target_non_ordered_release, + PERCPU_REF_ALLOW_REINIT, GFP_KERNEL)) + goto free_queues; + dev->se_hba = hba; dev->transport = hba->backend->ops; dev->transport_flags = dev->transport->transport_flags_default; @@ -791,6 +830,14 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name) sizeof(dev->t10_wwn.revision)); return dev; + +free_queues: + kfree(dev->queues); +free_stats: + free_percpu(dev->stats); +free_device: + hba->backend->ops->free_device(dev); + return NULL; } /* @@ -980,6 +1027,9 @@ void target_free_device(struct se_device *dev) WARN_ON(!list_empty(&dev->dev_sep_list)); + percpu_ref_exit(&dev->non_ordered); + cancel_work_sync(&dev->delayed_cmd_work); + if (target_dev_configured(dev)) { dev->transport->destroy_device(dev); @@ -1001,6 +1051,7 @@ void target_free_device(struct se_device *dev) dev->transport->free_prot(dev); kfree(dev->queues); + free_percpu(dev->stats); dev->transport->free_device(dev); } diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c index 0a02492bef70..aad0096afa21 100644 --- a/drivers/target/target_core_spc.c +++ b/drivers/target/target_core_spc.c @@ -1325,7 +1325,7 @@ static void set_dpofua_usage_bits32(u8 *usage_bits, struct se_device *dev) usage_bits[10] |= 0x18; } -static struct target_opcode_descriptor tcm_opcode_read6 = { +static const struct target_opcode_descriptor tcm_opcode_read6 = { .support = SCSI_SUPPORT_FULL, .opcode = READ_6, .cdb_size = 6, @@ -1333,7 +1333,7 @@ static struct target_opcode_descriptor tcm_opcode_read6 = { 0xff, SCSI_CONTROL_MASK}, }; -static struct target_opcode_descriptor tcm_opcode_read10 = { +static const struct target_opcode_descriptor tcm_opcode_read10 = { .support = SCSI_SUPPORT_FULL, .opcode = READ_10, .cdb_size = 10, @@ -1343,7 +1343,7 @@ static struct target_opcode_descriptor tcm_opcode_read10 = { .update_usage_bits = set_dpofua_usage_bits, }; -static struct target_opcode_descriptor tcm_opcode_read12 = { +static const struct target_opcode_descriptor tcm_opcode_read12 = { .support = SCSI_SUPPORT_FULL, .opcode = READ_12, 
.cdb_size = 12, @@ -1353,7 +1353,7 @@ static struct target_opcode_descriptor tcm_opcode_read12 = { .update_usage_bits = set_dpofua_usage_bits, }; -static struct target_opcode_descriptor tcm_opcode_read16 = { +static const struct target_opcode_descriptor tcm_opcode_read16 = { .support = SCSI_SUPPORT_FULL, .opcode = READ_16, .cdb_size = 16, @@ -1364,7 +1364,7 @@ static struct target_opcode_descriptor tcm_opcode_read16 = { .update_usage_bits = set_dpofua_usage_bits, }; -static struct target_opcode_descriptor tcm_opcode_write6 = { +static const struct target_opcode_descriptor tcm_opcode_write6 = { .support = SCSI_SUPPORT_FULL, .opcode = WRITE_6, .cdb_size = 6, @@ -1372,7 +1372,7 @@ static struct target_opcode_descriptor tcm_opcode_write6 = { 0xff, SCSI_CONTROL_MASK}, }; -static struct target_opcode_descriptor tcm_opcode_write10 = { +static const struct target_opcode_descriptor tcm_opcode_write10 = { .support = SCSI_SUPPORT_FULL, .opcode = WRITE_10, .cdb_size = 10, @@ -1382,7 +1382,7 @@ static struct target_opcode_descriptor tcm_opcode_write10 = { .update_usage_bits = set_dpofua_usage_bits, }; -static struct target_opcode_descriptor tcm_opcode_write_verify10 = { +static const struct target_opcode_descriptor tcm_opcode_write_verify10 = { .support = SCSI_SUPPORT_FULL, .opcode = WRITE_VERIFY, .cdb_size = 10, @@ -1392,7 +1392,7 @@ static struct target_opcode_descriptor tcm_opcode_write_verify10 = { .update_usage_bits = set_dpofua_usage_bits, }; -static struct target_opcode_descriptor tcm_opcode_write12 = { +static const struct target_opcode_descriptor tcm_opcode_write12 = { .support = SCSI_SUPPORT_FULL, .opcode = WRITE_12, .cdb_size = 12, @@ -1402,7 +1402,7 @@ static struct target_opcode_descriptor tcm_opcode_write12 = { .update_usage_bits = set_dpofua_usage_bits, }; -static struct target_opcode_descriptor tcm_opcode_write16 = { +static const struct target_opcode_descriptor tcm_opcode_write16 = { .support = SCSI_SUPPORT_FULL, .opcode = WRITE_16, .cdb_size = 16, @@ -1413,7 +1413,7 @@ static struct target_opcode_descriptor tcm_opcode_write16 = { .update_usage_bits = set_dpofua_usage_bits, }; -static struct target_opcode_descriptor tcm_opcode_write_verify16 = { +static const struct target_opcode_descriptor tcm_opcode_write_verify16 = { .support = SCSI_SUPPORT_FULL, .opcode = WRITE_VERIFY_16, .cdb_size = 16, @@ -1424,7 +1424,7 @@ static struct target_opcode_descriptor tcm_opcode_write_verify16 = { .update_usage_bits = set_dpofua_usage_bits, }; -static bool tcm_is_ws_enabled(struct target_opcode_descriptor *descr, +static bool tcm_is_ws_enabled(const struct target_opcode_descriptor *descr, struct se_cmd *cmd) { struct exec_cmd_ops *ops = cmd->protocol_data; @@ -1434,7 +1434,7 @@ static bool tcm_is_ws_enabled(struct target_opcode_descriptor *descr, !!ops->execute_write_same; } -static struct target_opcode_descriptor tcm_opcode_write_same32 = { +static const struct target_opcode_descriptor tcm_opcode_write_same32 = { .support = SCSI_SUPPORT_FULL, .serv_action_valid = 1, .opcode = VARIABLE_LENGTH_CMD, @@ -1452,7 +1452,7 @@ static struct target_opcode_descriptor tcm_opcode_write_same32 = { .update_usage_bits = set_dpofua_usage_bits32, }; -static bool tcm_is_caw_enabled(struct target_opcode_descriptor *descr, +static bool tcm_is_caw_enabled(const struct target_opcode_descriptor *descr, struct se_cmd *cmd) { struct se_device *dev = cmd->se_dev; @@ -1460,7 +1460,7 @@ static bool tcm_is_caw_enabled(struct target_opcode_descriptor *descr, return dev->dev_attrib.emulate_caw; } -static struct 
target_opcode_descriptor tcm_opcode_compare_write = { +static const struct target_opcode_descriptor tcm_opcode_compare_write = { .support = SCSI_SUPPORT_FULL, .opcode = COMPARE_AND_WRITE, .cdb_size = 16, @@ -1472,7 +1472,7 @@ static struct target_opcode_descriptor tcm_opcode_compare_write = { .update_usage_bits = set_dpofua_usage_bits, }; -static struct target_opcode_descriptor tcm_opcode_read_capacity = { +static const struct target_opcode_descriptor tcm_opcode_read_capacity = { .support = SCSI_SUPPORT_FULL, .opcode = READ_CAPACITY, .cdb_size = 10, @@ -1481,7 +1481,7 @@ static struct target_opcode_descriptor tcm_opcode_read_capacity = { 0x01, SCSI_CONTROL_MASK}, }; -static struct target_opcode_descriptor tcm_opcode_read_capacity16 = { +static const struct target_opcode_descriptor tcm_opcode_read_capacity16 = { .support = SCSI_SUPPORT_FULL, .serv_action_valid = 1, .opcode = SERVICE_ACTION_IN_16, @@ -1493,7 +1493,7 @@ static struct target_opcode_descriptor tcm_opcode_read_capacity16 = { 0xff, 0xff, 0x00, SCSI_CONTROL_MASK}, }; -static bool tcm_is_rep_ref_enabled(struct target_opcode_descriptor *descr, +static bool tcm_is_rep_ref_enabled(const struct target_opcode_descriptor *descr, struct se_cmd *cmd) { struct se_device *dev = cmd->se_dev; @@ -1507,7 +1507,7 @@ static bool tcm_is_rep_ref_enabled(struct target_opcode_descriptor *descr, return true; } -static struct target_opcode_descriptor tcm_opcode_read_report_refferals = { +static const struct target_opcode_descriptor tcm_opcode_read_report_refferals = { .support = SCSI_SUPPORT_FULL, .serv_action_valid = 1, .opcode = SERVICE_ACTION_IN_16, @@ -1520,7 +1520,7 @@ static struct target_opcode_descriptor tcm_opcode_read_report_refferals = { .enabled = tcm_is_rep_ref_enabled, }; -static struct target_opcode_descriptor tcm_opcode_sync_cache = { +static const struct target_opcode_descriptor tcm_opcode_sync_cache = { .support = SCSI_SUPPORT_FULL, .opcode = SYNCHRONIZE_CACHE, .cdb_size = 10, @@ -1529,7 +1529,7 @@ static struct target_opcode_descriptor tcm_opcode_sync_cache = { 0xff, SCSI_CONTROL_MASK}, }; -static struct target_opcode_descriptor tcm_opcode_sync_cache16 = { +static const struct target_opcode_descriptor tcm_opcode_sync_cache16 = { .support = SCSI_SUPPORT_FULL, .opcode = SYNCHRONIZE_CACHE_16, .cdb_size = 16, @@ -1539,7 +1539,7 @@ static struct target_opcode_descriptor tcm_opcode_sync_cache16 = { 0xff, 0xff, SCSI_GROUP_NUMBER_MASK, SCSI_CONTROL_MASK}, }; -static bool tcm_is_unmap_enabled(struct target_opcode_descriptor *descr, +static bool tcm_is_unmap_enabled(const struct target_opcode_descriptor *descr, struct se_cmd *cmd) { struct exec_cmd_ops *ops = cmd->protocol_data; @@ -1548,7 +1548,7 @@ static bool tcm_is_unmap_enabled(struct target_opcode_descriptor *descr, return ops->execute_unmap && dev->dev_attrib.emulate_tpu; } -static struct target_opcode_descriptor tcm_opcode_unmap = { +static const struct target_opcode_descriptor tcm_opcode_unmap = { .support = SCSI_SUPPORT_FULL, .opcode = UNMAP, .cdb_size = 10, @@ -1558,7 +1558,7 @@ static struct target_opcode_descriptor tcm_opcode_unmap = { .enabled = tcm_is_unmap_enabled, }; -static struct target_opcode_descriptor tcm_opcode_write_same = { +static const struct target_opcode_descriptor tcm_opcode_write_same = { .support = SCSI_SUPPORT_FULL, .opcode = WRITE_SAME, .cdb_size = 10, @@ -1568,7 +1568,7 @@ static struct target_opcode_descriptor tcm_opcode_write_same = { .enabled = tcm_is_ws_enabled, }; -static struct target_opcode_descriptor tcm_opcode_write_same16 = { +static const struct 
target_opcode_descriptor tcm_opcode_write_same16 = { .support = SCSI_SUPPORT_FULL, .opcode = WRITE_SAME_16, .cdb_size = 16, @@ -1579,7 +1579,7 @@ static struct target_opcode_descriptor tcm_opcode_write_same16 = { .enabled = tcm_is_ws_enabled, }; -static struct target_opcode_descriptor tcm_opcode_verify = { +static const struct target_opcode_descriptor tcm_opcode_verify = { .support = SCSI_SUPPORT_FULL, .opcode = VERIFY, .cdb_size = 10, @@ -1588,7 +1588,7 @@ static struct target_opcode_descriptor tcm_opcode_verify = { 0xff, SCSI_CONTROL_MASK}, }; -static struct target_opcode_descriptor tcm_opcode_verify16 = { +static const struct target_opcode_descriptor tcm_opcode_verify16 = { .support = SCSI_SUPPORT_FULL, .opcode = VERIFY_16, .cdb_size = 16, @@ -1598,7 +1598,7 @@ static struct target_opcode_descriptor tcm_opcode_verify16 = { 0xff, 0xff, SCSI_GROUP_NUMBER_MASK, SCSI_CONTROL_MASK}, }; -static struct target_opcode_descriptor tcm_opcode_start_stop = { +static const struct target_opcode_descriptor tcm_opcode_start_stop = { .support = SCSI_SUPPORT_FULL, .opcode = START_STOP, .cdb_size = 6, @@ -1606,7 +1606,7 @@ static struct target_opcode_descriptor tcm_opcode_start_stop = { 0x01, SCSI_CONTROL_MASK}, }; -static struct target_opcode_descriptor tcm_opcode_mode_select = { +static const struct target_opcode_descriptor tcm_opcode_mode_select = { .support = SCSI_SUPPORT_FULL, .opcode = MODE_SELECT, .cdb_size = 6, @@ -1614,7 +1614,7 @@ static struct target_opcode_descriptor tcm_opcode_mode_select = { 0xff, SCSI_CONTROL_MASK}, }; -static struct target_opcode_descriptor tcm_opcode_mode_select10 = { +static const struct target_opcode_descriptor tcm_opcode_mode_select10 = { .support = SCSI_SUPPORT_FULL, .opcode = MODE_SELECT_10, .cdb_size = 10, @@ -1623,7 +1623,7 @@ static struct target_opcode_descriptor tcm_opcode_mode_select10 = { 0xff, SCSI_CONTROL_MASK}, }; -static struct target_opcode_descriptor tcm_opcode_mode_sense = { +static const struct target_opcode_descriptor tcm_opcode_mode_sense = { .support = SCSI_SUPPORT_FULL, .opcode = MODE_SENSE, .cdb_size = 6, @@ -1631,7 +1631,7 @@ static struct target_opcode_descriptor tcm_opcode_mode_sense = { 0xff, SCSI_CONTROL_MASK}, }; -static struct target_opcode_descriptor tcm_opcode_mode_sense10 = { +static const struct target_opcode_descriptor tcm_opcode_mode_sense10 = { .support = SCSI_SUPPORT_FULL, .opcode = MODE_SENSE_10, .cdb_size = 10, @@ -1640,7 +1640,7 @@ static struct target_opcode_descriptor tcm_opcode_mode_sense10 = { 0xff, SCSI_CONTROL_MASK}, }; -static struct target_opcode_descriptor tcm_opcode_pri_read_keys = { +static const struct target_opcode_descriptor tcm_opcode_pri_read_keys = { .support = SCSI_SUPPORT_FULL, .serv_action_valid = 1, .opcode = PERSISTENT_RESERVE_IN, @@ -1651,7 +1651,7 @@ static struct target_opcode_descriptor tcm_opcode_pri_read_keys = { 0xff, SCSI_CONTROL_MASK}, }; -static struct target_opcode_descriptor tcm_opcode_pri_read_resrv = { +static const struct target_opcode_descriptor tcm_opcode_pri_read_resrv = { .support = SCSI_SUPPORT_FULL, .serv_action_valid = 1, .opcode = PERSISTENT_RESERVE_IN, @@ -1662,7 +1662,7 @@ static struct target_opcode_descriptor tcm_opcode_pri_read_resrv = { 0xff, SCSI_CONTROL_MASK}, }; -static bool tcm_is_pr_enabled(struct target_opcode_descriptor *descr, +static bool tcm_is_pr_enabled(const struct target_opcode_descriptor *descr, struct se_cmd *cmd) { struct se_device *dev = cmd->se_dev; @@ -1704,7 +1704,7 @@ static bool tcm_is_pr_enabled(struct target_opcode_descriptor *descr, return true; } -static 
struct target_opcode_descriptor tcm_opcode_pri_read_caps = { +static const struct target_opcode_descriptor tcm_opcode_pri_read_caps = { .support = SCSI_SUPPORT_FULL, .serv_action_valid = 1, .opcode = PERSISTENT_RESERVE_IN, @@ -1716,7 +1716,7 @@ static struct target_opcode_descriptor tcm_opcode_pri_read_caps = { .enabled = tcm_is_pr_enabled, }; -static struct target_opcode_descriptor tcm_opcode_pri_read_full_status = { +static const struct target_opcode_descriptor tcm_opcode_pri_read_full_status = { .support = SCSI_SUPPORT_FULL, .serv_action_valid = 1, .opcode = PERSISTENT_RESERVE_IN, @@ -1728,7 +1728,7 @@ static struct target_opcode_descriptor tcm_opcode_pri_read_full_status = { .enabled = tcm_is_pr_enabled, }; -static struct target_opcode_descriptor tcm_opcode_pro_register = { +static const struct target_opcode_descriptor tcm_opcode_pro_register = { .support = SCSI_SUPPORT_FULL, .serv_action_valid = 1, .opcode = PERSISTENT_RESERVE_OUT, @@ -1740,7 +1740,7 @@ static struct target_opcode_descriptor tcm_opcode_pro_register = { .enabled = tcm_is_pr_enabled, }; -static struct target_opcode_descriptor tcm_opcode_pro_reserve = { +static const struct target_opcode_descriptor tcm_opcode_pro_reserve = { .support = SCSI_SUPPORT_FULL, .serv_action_valid = 1, .opcode = PERSISTENT_RESERVE_OUT, @@ -1752,7 +1752,7 @@ static struct target_opcode_descriptor tcm_opcode_pro_reserve = { .enabled = tcm_is_pr_enabled, }; -static struct target_opcode_descriptor tcm_opcode_pro_release = { +static const struct target_opcode_descriptor tcm_opcode_pro_release = { .support = SCSI_SUPPORT_FULL, .serv_action_valid = 1, .opcode = PERSISTENT_RESERVE_OUT, @@ -1764,7 +1764,7 @@ static struct target_opcode_descriptor tcm_opcode_pro_release = { .enabled = tcm_is_pr_enabled, }; -static struct target_opcode_descriptor tcm_opcode_pro_clear = { +static const struct target_opcode_descriptor tcm_opcode_pro_clear = { .support = SCSI_SUPPORT_FULL, .serv_action_valid = 1, .opcode = PERSISTENT_RESERVE_OUT, @@ -1776,7 +1776,7 @@ static struct target_opcode_descriptor tcm_opcode_pro_clear = { .enabled = tcm_is_pr_enabled, }; -static struct target_opcode_descriptor tcm_opcode_pro_preempt = { +static const struct target_opcode_descriptor tcm_opcode_pro_preempt = { .support = SCSI_SUPPORT_FULL, .serv_action_valid = 1, .opcode = PERSISTENT_RESERVE_OUT, @@ -1788,7 +1788,7 @@ static struct target_opcode_descriptor tcm_opcode_pro_preempt = { .enabled = tcm_is_pr_enabled, }; -static struct target_opcode_descriptor tcm_opcode_pro_preempt_abort = { +static const struct target_opcode_descriptor tcm_opcode_pro_preempt_abort = { .support = SCSI_SUPPORT_FULL, .serv_action_valid = 1, .opcode = PERSISTENT_RESERVE_OUT, @@ -1800,7 +1800,7 @@ static struct target_opcode_descriptor tcm_opcode_pro_preempt_abort = { .enabled = tcm_is_pr_enabled, }; -static struct target_opcode_descriptor tcm_opcode_pro_reg_ign_exist = { +static const struct target_opcode_descriptor tcm_opcode_pro_reg_ign_exist = { .support = SCSI_SUPPORT_FULL, .serv_action_valid = 1, .opcode = PERSISTENT_RESERVE_OUT, @@ -1814,7 +1814,7 @@ static struct target_opcode_descriptor tcm_opcode_pro_reg_ign_exist = { .enabled = tcm_is_pr_enabled, }; -static struct target_opcode_descriptor tcm_opcode_pro_register_move = { +static const struct target_opcode_descriptor tcm_opcode_pro_register_move = { .support = SCSI_SUPPORT_FULL, .serv_action_valid = 1, .opcode = PERSISTENT_RESERVE_OUT, @@ -1826,7 +1826,7 @@ static struct target_opcode_descriptor tcm_opcode_pro_register_move = { .enabled = 
tcm_is_pr_enabled, }; -static struct target_opcode_descriptor tcm_opcode_release = { +static const struct target_opcode_descriptor tcm_opcode_release = { .support = SCSI_SUPPORT_FULL, .opcode = RELEASE_6, .cdb_size = 6, @@ -1835,7 +1835,7 @@ static struct target_opcode_descriptor tcm_opcode_release = { .enabled = tcm_is_pr_enabled, }; -static struct target_opcode_descriptor tcm_opcode_release10 = { +static const struct target_opcode_descriptor tcm_opcode_release10 = { .support = SCSI_SUPPORT_FULL, .opcode = RELEASE_10, .cdb_size = 10, @@ -1845,7 +1845,7 @@ static struct target_opcode_descriptor tcm_opcode_release10 = { .enabled = tcm_is_pr_enabled, }; -static struct target_opcode_descriptor tcm_opcode_reserve = { +static const struct target_opcode_descriptor tcm_opcode_reserve = { .support = SCSI_SUPPORT_FULL, .opcode = RESERVE_6, .cdb_size = 6, @@ -1854,7 +1854,7 @@ static struct target_opcode_descriptor tcm_opcode_reserve = { .enabled = tcm_is_pr_enabled, }; -static struct target_opcode_descriptor tcm_opcode_reserve10 = { +static const struct target_opcode_descriptor tcm_opcode_reserve10 = { .support = SCSI_SUPPORT_FULL, .opcode = RESERVE_10, .cdb_size = 10, @@ -1864,7 +1864,7 @@ static struct target_opcode_descriptor tcm_opcode_reserve10 = { .enabled = tcm_is_pr_enabled, }; -static struct target_opcode_descriptor tcm_opcode_request_sense = { +static const struct target_opcode_descriptor tcm_opcode_request_sense = { .support = SCSI_SUPPORT_FULL, .opcode = REQUEST_SENSE, .cdb_size = 6, @@ -1872,7 +1872,7 @@ static struct target_opcode_descriptor tcm_opcode_request_sense = { 0xff, SCSI_CONTROL_MASK}, }; -static struct target_opcode_descriptor tcm_opcode_inquiry = { +static const struct target_opcode_descriptor tcm_opcode_inquiry = { .support = SCSI_SUPPORT_FULL, .opcode = INQUIRY, .cdb_size = 6, @@ -1880,7 +1880,7 @@ static struct target_opcode_descriptor tcm_opcode_inquiry = { 0xff, SCSI_CONTROL_MASK}, }; -static bool tcm_is_3pc_enabled(struct target_opcode_descriptor *descr, +static bool tcm_is_3pc_enabled(const struct target_opcode_descriptor *descr, struct se_cmd *cmd) { struct se_device *dev = cmd->se_dev; @@ -1888,7 +1888,7 @@ static bool tcm_is_3pc_enabled(struct target_opcode_descriptor *descr, return dev->dev_attrib.emulate_3pc; } -static struct target_opcode_descriptor tcm_opcode_extended_copy_lid1 = { +static const struct target_opcode_descriptor tcm_opcode_extended_copy_lid1 = { .support = SCSI_SUPPORT_FULL, .serv_action_valid = 1, .opcode = EXTENDED_COPY, @@ -1900,7 +1900,7 @@ static struct target_opcode_descriptor tcm_opcode_extended_copy_lid1 = { .enabled = tcm_is_3pc_enabled, }; -static struct target_opcode_descriptor tcm_opcode_rcv_copy_res_op_params = { +static const struct target_opcode_descriptor tcm_opcode_rcv_copy_res_op_params = { .support = SCSI_SUPPORT_FULL, .serv_action_valid = 1, .opcode = RECEIVE_COPY_RESULTS, @@ -1914,7 +1914,7 @@ static struct target_opcode_descriptor tcm_opcode_rcv_copy_res_op_params = { .enabled = tcm_is_3pc_enabled, }; -static struct target_opcode_descriptor tcm_opcode_report_luns = { +static const struct target_opcode_descriptor tcm_opcode_report_luns = { .support = SCSI_SUPPORT_FULL, .opcode = REPORT_LUNS, .cdb_size = 12, @@ -1923,7 +1923,7 @@ static struct target_opcode_descriptor tcm_opcode_report_luns = { 0xff, 0xff, 0x00, SCSI_CONTROL_MASK}, }; -static struct target_opcode_descriptor tcm_opcode_test_unit_ready = { +static const struct target_opcode_descriptor tcm_opcode_test_unit_ready = { .support = SCSI_SUPPORT_FULL, .opcode = 
TEST_UNIT_READY, .cdb_size = 6, @@ -1931,7 +1931,7 @@ static struct target_opcode_descriptor tcm_opcode_test_unit_ready = { 0x00, SCSI_CONTROL_MASK}, }; -static struct target_opcode_descriptor tcm_opcode_report_target_pgs = { +static const struct target_opcode_descriptor tcm_opcode_report_target_pgs = { .support = SCSI_SUPPORT_FULL, .serv_action_valid = 1, .opcode = MAINTENANCE_IN, @@ -1942,7 +1942,7 @@ static struct target_opcode_descriptor tcm_opcode_report_target_pgs = { 0xff, 0xff, 0x00, SCSI_CONTROL_MASK}, }; -static bool spc_rsoc_enabled(struct target_opcode_descriptor *descr, +static bool spc_rsoc_enabled(const struct target_opcode_descriptor *descr, struct se_cmd *cmd) { struct se_device *dev = cmd->se_dev; @@ -1950,7 +1950,7 @@ static bool spc_rsoc_enabled(struct target_opcode_descriptor *descr, return dev->dev_attrib.emulate_rsoc; } -static struct target_opcode_descriptor tcm_opcode_report_supp_opcodes = { +static const struct target_opcode_descriptor tcm_opcode_report_supp_opcodes = { .support = SCSI_SUPPORT_FULL, .serv_action_valid = 1, .opcode = MAINTENANCE_IN, @@ -1963,7 +1963,7 @@ static struct target_opcode_descriptor tcm_opcode_report_supp_opcodes = { .enabled = spc_rsoc_enabled, }; -static bool tcm_is_set_tpg_enabled(struct target_opcode_descriptor *descr, +static bool tcm_is_set_tpg_enabled(const struct target_opcode_descriptor *descr, struct se_cmd *cmd) { struct t10_alua_tg_pt_gp *l_tg_pt_gp; @@ -1984,7 +1984,7 @@ static bool tcm_is_set_tpg_enabled(struct target_opcode_descriptor *descr, return true; } -static struct target_opcode_descriptor tcm_opcode_set_tpg = { +static const struct target_opcode_descriptor tcm_opcode_set_tpg = { .support = SCSI_SUPPORT_FULL, .serv_action_valid = 1, .opcode = MAINTENANCE_OUT, @@ -1996,7 +1996,7 @@ static struct target_opcode_descriptor tcm_opcode_set_tpg = { .enabled = tcm_is_set_tpg_enabled, }; -static struct target_opcode_descriptor *tcm_supported_opcodes[] = { +static const struct target_opcode_descriptor *tcm_supported_opcodes[] = { &tcm_opcode_read6, &tcm_opcode_read10, &tcm_opcode_read12, @@ -2053,7 +2053,7 @@ static struct target_opcode_descriptor *tcm_supported_opcodes[] = { static int spc_rsoc_encode_command_timeouts_descriptor(unsigned char *buf, u8 ctdp, - struct target_opcode_descriptor *descr) + const struct target_opcode_descriptor *descr) { if (!ctdp) return 0; @@ -2068,7 +2068,7 @@ spc_rsoc_encode_command_timeouts_descriptor(unsigned char *buf, u8 ctdp, static int spc_rsoc_encode_command_descriptor(unsigned char *buf, u8 ctdp, - struct target_opcode_descriptor *descr) + const struct target_opcode_descriptor *descr) { int td_size = 0; @@ -2087,7 +2087,7 @@ spc_rsoc_encode_command_descriptor(unsigned char *buf, u8 ctdp, static int spc_rsoc_encode_one_command_descriptor(unsigned char *buf, u8 ctdp, - struct target_opcode_descriptor *descr, + const struct target_opcode_descriptor *descr, struct se_device *dev) { int td_size = 0; @@ -2110,9 +2110,9 @@ spc_rsoc_encode_one_command_descriptor(unsigned char *buf, u8 ctdp, } static sense_reason_t -spc_rsoc_get_descr(struct se_cmd *cmd, struct target_opcode_descriptor **opcode) +spc_rsoc_get_descr(struct se_cmd *cmd, const struct target_opcode_descriptor **opcode) { - struct target_opcode_descriptor *descr; + const struct target_opcode_descriptor *descr; struct se_session *sess = cmd->se_sess; unsigned char *cdb = cmd->t_task_cdb; u8 opts = cdb[2] & 0x3; @@ -2199,7 +2199,7 @@ static sense_reason_t spc_emulate_report_supp_op_codes(struct se_cmd *cmd) { int descr_num = 
ARRAY_SIZE(tcm_supported_opcodes); - struct target_opcode_descriptor *descr = NULL; + const struct target_opcode_descriptor *descr = NULL; unsigned char *cdb = cmd->t_task_cdb; u8 rctd = (cdb[2] >> 7) & 0x1; unsigned char *buf = NULL; diff --git a/drivers/target/target_core_stat.c b/drivers/target/target_core_stat.c index 210648a0092e..6bdf2d8bd694 100644 --- a/drivers/target/target_core_stat.c +++ b/drivers/target/target_core_stat.c @@ -280,30 +280,51 @@ static ssize_t target_stat_lu_num_cmds_show(struct config_item *item, char *page) { struct se_device *dev = to_stat_lu_dev(item); + struct se_dev_io_stats *stats; + unsigned int cpu; + u32 cmds = 0; + + for_each_possible_cpu(cpu) { + stats = per_cpu_ptr(dev->stats, cpu); + cmds += stats->total_cmds; + } /* scsiLuNumCommands */ - return snprintf(page, PAGE_SIZE, "%lu\n", - atomic_long_read(&dev->num_cmds)); + return snprintf(page, PAGE_SIZE, "%u\n", cmds); } static ssize_t target_stat_lu_read_mbytes_show(struct config_item *item, char *page) { struct se_device *dev = to_stat_lu_dev(item); + struct se_dev_io_stats *stats; + unsigned int cpu; + u32 bytes = 0; + + for_each_possible_cpu(cpu) { + stats = per_cpu_ptr(dev->stats, cpu); + bytes += stats->read_bytes; + } /* scsiLuReadMegaBytes */ - return snprintf(page, PAGE_SIZE, "%lu\n", - atomic_long_read(&dev->read_bytes) >> 20); + return snprintf(page, PAGE_SIZE, "%u\n", bytes >> 20); } static ssize_t target_stat_lu_write_mbytes_show(struct config_item *item, char *page) { struct se_device *dev = to_stat_lu_dev(item); + struct se_dev_io_stats *stats; + unsigned int cpu; + u32 bytes = 0; + + for_each_possible_cpu(cpu) { + stats = per_cpu_ptr(dev->stats, cpu); + bytes += stats->write_bytes; + } /* scsiLuWrittenMegaBytes */ - return snprintf(page, PAGE_SIZE, "%lu\n", - atomic_long_read(&dev->write_bytes) >> 20); + return snprintf(page, PAGE_SIZE, "%u\n", bytes >> 20); } static ssize_t target_stat_lu_resets_show(struct config_item *item, char *page) @@ -1019,8 +1040,11 @@ static ssize_t target_stat_auth_num_cmds_show(struct config_item *item, { struct se_lun_acl *lacl = auth_to_lacl(item); struct se_node_acl *nacl = lacl->se_lun_nacl; + struct se_dev_entry_io_stats *stats; struct se_dev_entry *deve; + unsigned int cpu; ssize_t ret; + u32 cmds = 0; rcu_read_lock(); deve = target_nacl_find_deve(nacl, lacl->mapped_lun); @@ -1028,9 +1052,14 @@ static ssize_t target_stat_auth_num_cmds_show(struct config_item *item, rcu_read_unlock(); return -ENODEV; } + + for_each_possible_cpu(cpu) { + stats = per_cpu_ptr(deve->stats, cpu); + cmds += stats->total_cmds; + } + /* scsiAuthIntrOutCommands */ - ret = snprintf(page, PAGE_SIZE, "%lu\n", - atomic_long_read(&deve->total_cmds)); + ret = snprintf(page, PAGE_SIZE, "%u\n", cmds); rcu_read_unlock(); return ret; } @@ -1040,8 +1069,11 @@ static ssize_t target_stat_auth_read_mbytes_show(struct config_item *item, { struct se_lun_acl *lacl = auth_to_lacl(item); struct se_node_acl *nacl = lacl->se_lun_nacl; + struct se_dev_entry_io_stats *stats; struct se_dev_entry *deve; + unsigned int cpu; ssize_t ret; + u32 bytes = 0; rcu_read_lock(); deve = target_nacl_find_deve(nacl, lacl->mapped_lun); @@ -1049,9 +1081,14 @@ static ssize_t target_stat_auth_read_mbytes_show(struct config_item *item, rcu_read_unlock(); return -ENODEV; } + + for_each_possible_cpu(cpu) { + stats = per_cpu_ptr(deve->stats, cpu); + bytes += stats->read_bytes; + } + /* scsiAuthIntrReadMegaBytes */ - ret = snprintf(page, PAGE_SIZE, "%u\n", - (u32)(atomic_long_read(&deve->read_bytes) >> 20)); + ret = 
snprintf(page, PAGE_SIZE, "%u\n", bytes >> 20); rcu_read_unlock(); return ret; } @@ -1061,8 +1098,11 @@ static ssize_t target_stat_auth_write_mbytes_show(struct config_item *item, { struct se_lun_acl *lacl = auth_to_lacl(item); struct se_node_acl *nacl = lacl->se_lun_nacl; + struct se_dev_entry_io_stats *stats; struct se_dev_entry *deve; + unsigned int cpu; ssize_t ret; + u32 bytes = 0; rcu_read_lock(); deve = target_nacl_find_deve(nacl, lacl->mapped_lun); @@ -1070,9 +1110,14 @@ static ssize_t target_stat_auth_write_mbytes_show(struct config_item *item, rcu_read_unlock(); return -ENODEV; } + + for_each_possible_cpu(cpu) { + stats = per_cpu_ptr(deve->stats, cpu); + bytes += stats->write_bytes; + } + /* scsiAuthIntrWrittenMegaBytes */ - ret = snprintf(page, PAGE_SIZE, "%u\n", - (u32)(atomic_long_read(&deve->write_bytes) >> 20)); + ret = snprintf(page, PAGE_SIZE, "%u\n", bytes >> 20); rcu_read_unlock(); return ret; } diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 05d29201b730..0a76bdfe5528 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -2213,6 +2213,7 @@ static int target_write_prot_action(struct se_cmd *cmd) static bool target_handle_task_attr(struct se_cmd *cmd) { struct se_device *dev = cmd->se_dev; + unsigned long flags; if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) return false; @@ -2225,13 +2226,10 @@ static bool target_handle_task_attr(struct se_cmd *cmd) */ switch (cmd->sam_task_attr) { case TCM_HEAD_TAG: - atomic_inc_mb(&dev->non_ordered); pr_debug("Added HEAD_OF_QUEUE for CDB: 0x%02x\n", cmd->t_task_cdb[0]); return false; case TCM_ORDERED_TAG: - atomic_inc_mb(&dev->delayed_cmd_count); - pr_debug("Added ORDERED for CDB: 0x%02x to ordered list\n", cmd->t_task_cdb[0]); break; @@ -2239,29 +2237,29 @@ static bool target_handle_task_attr(struct se_cmd *cmd) /* * For SIMPLE and UNTAGGED Task Attribute commands */ - atomic_inc_mb(&dev->non_ordered); - - if (atomic_read(&dev->delayed_cmd_count) == 0) +retry: + if (percpu_ref_tryget_live(&dev->non_ordered)) return false; + break; } - if (cmd->sam_task_attr != TCM_ORDERED_TAG) { - atomic_inc_mb(&dev->delayed_cmd_count); - /* - * We will account for this when we dequeue from the delayed - * list. - */ - atomic_dec_mb(&dev->non_ordered); + spin_lock_irqsave(&dev->delayed_cmd_lock, flags); + if (cmd->sam_task_attr == TCM_SIMPLE_TAG && + !percpu_ref_is_dying(&dev->non_ordered)) { + spin_unlock_irqrestore(&dev->delayed_cmd_lock, flags); + /* We raced with the last ordered completion so retry. */ + goto retry; + } else if (!percpu_ref_is_dying(&dev->non_ordered)) { + percpu_ref_kill(&dev->non_ordered); } - spin_lock_irq(&cmd->t_state_lock); + spin_lock(&cmd->t_state_lock); cmd->transport_state &= ~CMD_T_SENT; - spin_unlock_irq(&cmd->t_state_lock); + spin_unlock(&cmd->t_state_lock); - spin_lock(&dev->delayed_cmd_lock); list_add_tail(&cmd->se_delayed_node, &dev->delayed_cmd_list); - spin_unlock(&dev->delayed_cmd_lock); + spin_unlock_irqrestore(&dev->delayed_cmd_lock, flags); pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to delayed CMD listn", cmd->t_task_cdb[0], cmd->sam_task_attr); @@ -2313,41 +2311,52 @@ void target_do_delayed_work(struct work_struct *work) while (!dev->ordered_sync_in_progress) { struct se_cmd *cmd; - if (list_empty(&dev->delayed_cmd_list)) + /* + * We can be woken up early/late due to races or the + * extra wake up we do when adding commands to the list. + * We check for both cases here. 
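[Editor's note] The transport changes above replace the non_ordered/delayed_cmd_count atomics with a percpu_ref: simple commands take a reference on the fast path with percpu_ref_tryget_live(), an ordered command kills the reference and runs from the delayed work once the count drops to zero (the release callback schedules that work), and the reference is resurrected when the ordered command completes. A minimal sketch of this percpu_ref "drain gate" pattern follows; the names (my_dev, drain_work, ...) are illustrative and the command plumbing is omitted.

#include <linux/gfp.h>
#include <linux/percpu-refcount.h>
#include <linux/workqueue.h>

struct my_dev {
	struct percpu_ref non_ordered;
	struct work_struct drain_work;
};

static void my_dev_ref_released(struct percpu_ref *ref)
{
	struct my_dev *d = container_of(ref, struct my_dev, non_ordered);

	/* Last in-flight simple op dropped its ref: run the ordered work. */
	schedule_work(&d->drain_work);
}

static void my_dev_drain_work(struct work_struct *work)
{
	struct my_dev *d = container_of(work, struct my_dev, drain_work);

	/* ... execute the ordered operation here ... */

	/* Re-open the fast path once the ordered operation has completed. */
	percpu_ref_resurrect(&d->non_ordered);
}

static int my_dev_init(struct my_dev *d)
{
	INIT_WORK(&d->drain_work, my_dev_drain_work);
	/* PERCPU_REF_ALLOW_REINIT permits the kill/resurrect cycle. */
	return percpu_ref_init(&d->non_ordered, my_dev_ref_released,
			       PERCPU_REF_ALLOW_REINIT, GFP_KERNEL);
}

static bool my_dev_start_simple(struct my_dev *d)
{
	/* Fast path: succeeds unless an ordered operation is draining. */
	return percpu_ref_tryget_live(&d->non_ordered);
}

static void my_dev_end_simple(struct my_dev *d)
{
	percpu_ref_put(&d->non_ordered);
}

static void my_dev_start_ordered(struct my_dev *d)
{
	/*
	 * Stop new simple ops; once the in-flight ones drop their refs the
	 * release callback fires and the drain work runs the ordered op.
	 */
	percpu_ref_kill(&d->non_ordered);
}
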
+ */ + if (list_empty(&dev->delayed_cmd_list) || + !percpu_ref_is_zero(&dev->non_ordered)) break; cmd = list_entry(dev->delayed_cmd_list.next, struct se_cmd, se_delayed_node); + cmd->se_cmd_flags |= SCF_TASK_ORDERED_SYNC; + cmd->transport_state |= CMD_T_SENT; - if (cmd->sam_task_attr == TCM_ORDERED_TAG) { - /* - * Check if we started with: - * [ordered] [simple] [ordered] - * and we are now at the last ordered so we have to wait - * for the simple cmd. - */ - if (atomic_read(&dev->non_ordered) > 0) - break; - - dev->ordered_sync_in_progress = true; - } + dev->ordered_sync_in_progress = true; list_del(&cmd->se_delayed_node); - atomic_dec_mb(&dev->delayed_cmd_count); spin_unlock(&dev->delayed_cmd_lock); - if (cmd->sam_task_attr != TCM_ORDERED_TAG) - atomic_inc_mb(&dev->non_ordered); - - cmd->transport_state |= CMD_T_SENT; - __target_execute_cmd(cmd, true); - spin_lock(&dev->delayed_cmd_lock); } spin_unlock(&dev->delayed_cmd_lock); } +static void transport_complete_ordered_sync(struct se_cmd *cmd) +{ + struct se_device *dev = cmd->se_dev; + unsigned long flags; + + spin_lock_irqsave(&dev->delayed_cmd_lock, flags); + dev->dev_cur_ordered_id++; + + pr_debug("Incremented dev_cur_ordered_id: %u for type %d\n", + dev->dev_cur_ordered_id, cmd->sam_task_attr); + + dev->ordered_sync_in_progress = false; + + if (list_empty(&dev->delayed_cmd_list)) + percpu_ref_resurrect(&dev->non_ordered); + else + schedule_work(&dev->delayed_cmd_work); + + spin_unlock_irqrestore(&dev->delayed_cmd_lock, flags); +} + /* * Called from I/O completion to determine which dormant/delayed * and ordered cmds need to have their tasks added to the execution queue. @@ -2360,30 +2369,24 @@ static void transport_complete_task_attr(struct se_cmd *cmd) return; if (!(cmd->se_cmd_flags & SCF_TASK_ATTR_SET)) - goto restart; - - if (cmd->sam_task_attr == TCM_SIMPLE_TAG) { - atomic_dec_mb(&dev->non_ordered); - dev->dev_cur_ordered_id++; - } else if (cmd->sam_task_attr == TCM_HEAD_TAG) { - atomic_dec_mb(&dev->non_ordered); - dev->dev_cur_ordered_id++; - pr_debug("Incremented dev_cur_ordered_id: %u for HEAD_OF_QUEUE\n", - dev->dev_cur_ordered_id); - } else if (cmd->sam_task_attr == TCM_ORDERED_TAG) { - spin_lock(&dev->delayed_cmd_lock); - dev->ordered_sync_in_progress = false; - spin_unlock(&dev->delayed_cmd_lock); + return; - dev->dev_cur_ordered_id++; - pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED\n", - dev->dev_cur_ordered_id); - } cmd->se_cmd_flags &= ~SCF_TASK_ATTR_SET; -restart: - if (atomic_read(&dev->delayed_cmd_count) > 0) - schedule_work(&dev->delayed_cmd_work); + if (cmd->se_cmd_flags & SCF_TASK_ORDERED_SYNC) { + transport_complete_ordered_sync(cmd); + return; + } + + switch (cmd->sam_task_attr) { + case TCM_SIMPLE_TAG: + percpu_ref_put(&dev->non_ordered); + break; + case TCM_ORDERED_TAG: + /* All ordered should have been executed as sync */ + WARN_ON(1); + break; + } } static void transport_complete_qf(struct se_cmd *cmd) diff --git a/drivers/tee/amdtee/core.c b/drivers/tee/amdtee/core.c index e487231d25dc..fb39d9a19c69 100644 --- a/drivers/tee/amdtee/core.c +++ b/drivers/tee/amdtee/core.c @@ -3,19 +3,22 @@ * Copyright 2019 Advanced Micro Devices, Inc. 
*/ + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/errno.h> +#include <linux/device.h> +#include <linux/firmware.h> #include <linux/io.h> +#include <linux/mm.h> #include <linux/module.h> +#include <linux/psp-tee.h> #include <linux/slab.h> #include <linux/string.h> -#include <linux/device.h> #include <linux/tee_core.h> #include <linux/types.h> -#include <linux/mm.h> #include <linux/uaccess.h> -#include <linux/firmware.h> + #include "amdtee_private.h" -#include <linux/psp-tee.h> static struct amdtee_driver_data *drv_data; static DEFINE_MUTEX(session_list_mutex); @@ -458,7 +461,7 @@ static int __init amdtee_driver_init(void) rc = psp_check_tee_status(); if (rc) { - pr_err("amd-tee driver: tee not present\n"); + pr_err("tee not present\n"); return rc; } @@ -494,7 +497,6 @@ static int __init amdtee_driver_init(void) drv_data->amdtee = amdtee; - pr_info("amd-tee driver initialization successful\n"); return 0; err_device_unregister: @@ -510,7 +512,7 @@ err_kfree_drv_data: kfree(drv_data); drv_data = NULL; - pr_err("amd-tee driver initialization failed\n"); + pr_err("initialization failed\n"); return rc; } module_init(amdtee_driver_init); diff --git a/drivers/tee/optee/smc_abi.c b/drivers/tee/optee/smc_abi.c index f0c3ac1103bb..26f8f7bbbe56 100644 --- a/drivers/tee/optee/smc_abi.c +++ b/drivers/tee/optee/smc_abi.c @@ -1551,8 +1551,7 @@ fw_load: data_pa_high, data_pa_low, 0, 0, 0, &res); if (!rc) rc = res.a0; - if (fw) - release_firmware(fw); + release_firmware(fw); kfree(data_buf); if (!rc) { diff --git a/drivers/tee/tee_core.c b/drivers/tee/tee_core.c index d113679b1e2d..acc7998758ad 100644 --- a/drivers/tee/tee_core.c +++ b/drivers/tee/tee_core.c @@ -10,6 +10,7 @@ #include <linux/fs.h> #include <linux/idr.h> #include <linux/module.h> +#include <linux/overflow.h> #include <linux/slab.h> #include <linux/tee_core.h> #include <linux/uaccess.h> @@ -19,7 +20,7 @@ #define TEE_NUM_DEVICES 32 -#define TEE_IOCTL_PARAM_SIZE(x) (sizeof(struct tee_param) * (x)) +#define TEE_IOCTL_PARAM_SIZE(x) (size_mul(sizeof(struct tee_param), (x))) #define TEE_UUID_NS_NAME_SIZE 128 @@ -487,7 +488,7 @@ static int tee_ioctl_open_session(struct tee_context *ctx, if (copy_from_user(&arg, uarg, sizeof(arg))) return -EFAULT; - if (sizeof(arg) + TEE_IOCTL_PARAM_SIZE(arg.num_params) != buf.buf_len) + if (size_add(sizeof(arg), TEE_IOCTL_PARAM_SIZE(arg.num_params)) != buf.buf_len) return -EINVAL; if (arg.num_params) { @@ -565,7 +566,7 @@ static int tee_ioctl_invoke(struct tee_context *ctx, if (copy_from_user(&arg, uarg, sizeof(arg))) return -EFAULT; - if (sizeof(arg) + TEE_IOCTL_PARAM_SIZE(arg.num_params) != buf.buf_len) + if (size_add(sizeof(arg), TEE_IOCTL_PARAM_SIZE(arg.num_params)) != buf.buf_len) return -EINVAL; if (arg.num_params) { @@ -699,7 +700,7 @@ static int tee_ioctl_supp_recv(struct tee_context *ctx, if (get_user(num_params, &uarg->num_params)) return -EFAULT; - if (sizeof(*uarg) + TEE_IOCTL_PARAM_SIZE(num_params) != buf.buf_len) + if (size_add(sizeof(*uarg), TEE_IOCTL_PARAM_SIZE(num_params)) != buf.buf_len) return -EINVAL; params = kcalloc(num_params, sizeof(struct tee_param), GFP_KERNEL); @@ -798,7 +799,7 @@ static int tee_ioctl_supp_send(struct tee_context *ctx, get_user(num_params, &uarg->num_params)) return -EFAULT; - if (sizeof(*uarg) + TEE_IOCTL_PARAM_SIZE(num_params) > buf.buf_len) + if (size_add(sizeof(*uarg), TEE_IOCTL_PARAM_SIZE(num_params)) > buf.buf_len) return -EINVAL; params = kcalloc(num_params, sizeof(struct tee_param), GFP_KERNEL); diff --git a/drivers/ufs/core/ufs-mcq.c 
b/drivers/ufs/core/ufs-mcq.c index f1294c29f484..1e50675772fe 100644 --- a/drivers/ufs/core/ufs-mcq.c +++ b/drivers/ufs/core/ufs-mcq.c @@ -674,7 +674,6 @@ int ufshcd_mcq_abort(struct scsi_cmnd *cmd) int tag = scsi_cmd_to_rq(cmd)->tag; struct ufshcd_lrb *lrbp = &hba->lrb[tag]; struct ufs_hw_queue *hwq; - unsigned long flags; int err; /* Skip task abort in case previous aborts failed and report failure */ @@ -713,10 +712,5 @@ int ufshcd_mcq_abort(struct scsi_cmnd *cmd) return FAILED; } - spin_lock_irqsave(&hwq->cq_lock, flags); - if (ufshcd_cmd_inflight(lrbp->cmd)) - ufshcd_release_scsi_cmd(hba, lrbp); - spin_unlock_irqrestore(&hwq->cq_lock, flags); - return SUCCESS; } diff --git a/drivers/ufs/core/ufs-sysfs.c b/drivers/ufs/core/ufs-sysfs.c index 634cf163f4cb..de8b6acd4058 100644 --- a/drivers/ufs/core/ufs-sysfs.c +++ b/drivers/ufs/core/ufs-sysfs.c @@ -57,6 +57,36 @@ static const char *ufs_hs_gear_to_string(enum ufs_hs_gear_tag gear) } } +static const char *ufs_wb_resize_hint_to_string(enum wb_resize_hint hint) +{ + switch (hint) { + case WB_RESIZE_HINT_KEEP: + return "keep"; + case WB_RESIZE_HINT_DECREASE: + return "decrease"; + case WB_RESIZE_HINT_INCREASE: + return "increase"; + default: + return "unknown"; + } +} + +static const char *ufs_wb_resize_status_to_string(enum wb_resize_status status) +{ + switch (status) { + case WB_RESIZE_STATUS_IDLE: + return "idle"; + case WB_RESIZE_STATUS_IN_PROGRESS: + return "in_progress"; + case WB_RESIZE_STATUS_COMPLETE_SUCCESS: + return "complete_success"; + case WB_RESIZE_STATUS_GENERAL_FAILURE: + return "general_failure"; + default: + return "unknown"; + } +} + static const char *ufshcd_uic_link_state_to_string( enum uic_link_state state) { @@ -411,6 +441,44 @@ static ssize_t wb_flush_threshold_store(struct device *dev, return count; } +static const char * const wb_resize_en_mode[] = { + [WB_RESIZE_EN_IDLE] = "idle", + [WB_RESIZE_EN_DECREASE] = "decrease", + [WB_RESIZE_EN_INCREASE] = "increase", +}; + +static ssize_t wb_resize_enable_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + int mode; + ssize_t res; + + if (!ufshcd_is_wb_allowed(hba) || !hba->dev_info.wb_enabled + || !hba->dev_info.b_presrv_uspc_en + || !(hba->dev_info.ext_wb_sup & UFS_DEV_WB_BUF_RESIZE)) + return -EOPNOTSUPP; + + mode = sysfs_match_string(wb_resize_en_mode, buf); + if (mode < 0) + return -EINVAL; + + down(&hba->host_sem); + if (!ufshcd_is_user_access_allowed(hba)) { + res = -EBUSY; + goto out; + } + + ufshcd_rpm_get_sync(hba); + res = ufshcd_wb_set_resize_en(hba, mode); + ufshcd_rpm_put_sync(hba); + +out: + up(&hba->host_sem); + return res < 0 ? 
res : count; +} + /** * pm_qos_enable_show - sysfs handler to show pm qos enable value * @dev: device associated with the UFS controller @@ -526,6 +594,7 @@ static DEVICE_ATTR_RW(auto_hibern8); static DEVICE_ATTR_RW(wb_on); static DEVICE_ATTR_RW(enable_wb_buf_flush); static DEVICE_ATTR_RW(wb_flush_threshold); +static DEVICE_ATTR_WO(wb_resize_enable); static DEVICE_ATTR_RW(rtc_update_ms); static DEVICE_ATTR_RW(pm_qos_enable); static DEVICE_ATTR_RO(critical_health); @@ -543,6 +612,7 @@ static struct attribute *ufs_sysfs_ufshcd_attrs[] = { &dev_attr_wb_on.attr, &dev_attr_enable_wb_buf_flush.attr, &dev_attr_wb_flush_threshold.attr, + &dev_attr_wb_resize_enable.attr, &dev_attr_rtc_update_ms.attr, &dev_attr_pm_qos_enable.attr, &dev_attr_critical_health.attr, @@ -1549,6 +1619,67 @@ static inline bool ufshcd_is_wb_attrs(enum attr_idn idn) idn <= QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE; } +static int wb_read_resize_attrs(struct ufs_hba *hba, + enum attr_idn idn, u32 *attr_val) +{ + u8 index = 0; + int ret; + + if (!ufshcd_is_wb_allowed(hba) || !hba->dev_info.wb_enabled + || !hba->dev_info.b_presrv_uspc_en + || !(hba->dev_info.ext_wb_sup & UFS_DEV_WB_BUF_RESIZE)) + return -EOPNOTSUPP; + + down(&hba->host_sem); + if (!ufshcd_is_user_access_allowed(hba)) { + up(&hba->host_sem); + return -EBUSY; + } + + index = ufshcd_wb_get_query_index(hba); + ufshcd_rpm_get_sync(hba); + ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR, + idn, index, 0, attr_val); + ufshcd_rpm_put_sync(hba); + + up(&hba->host_sem); + return ret; +} + +static ssize_t wb_resize_hint_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + int ret; + u32 value; + + ret = wb_read_resize_attrs(hba, + QUERY_ATTR_IDN_WB_BUF_RESIZE_HINT, &value); + if (ret) + return ret; + + return sysfs_emit(buf, "%s\n", ufs_wb_resize_hint_to_string(value)); +} + +static DEVICE_ATTR_RO(wb_resize_hint); + +static ssize_t wb_resize_status_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + int ret; + u32 value; + + ret = wb_read_resize_attrs(hba, + QUERY_ATTR_IDN_WB_BUF_RESIZE_STATUS, &value); + if (ret) + return ret; + + return sysfs_emit(buf, "%s\n", ufs_wb_resize_status_to_string(value)); +} + +static DEVICE_ATTR_RO(wb_resize_status); + #define UFS_ATTRIBUTE(_name, _uname) \ static ssize_t _name##_show(struct device *dev, \ struct device_attribute *attr, char *buf) \ @@ -1622,6 +1753,8 @@ static struct attribute *ufs_sysfs_attributes[] = { &dev_attr_wb_avail_buf.attr, &dev_attr_wb_life_time_est.attr, &dev_attr_wb_cur_buf.attr, + &dev_attr_wb_resize_hint.attr, + &dev_attr_wb_resize_status.attr, NULL, }; diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c index 7735421e3991..76cedd30c274 100644 --- a/drivers/ufs/core/ufshcd.c +++ b/drivers/ufs/core/ufshcd.c @@ -53,7 +53,7 @@ /* UIC command timeout, unit: ms */ enum { UIC_CMD_TIMEOUT_DEFAULT = 500, - UIC_CMD_TIMEOUT_MAX = 2000, + UIC_CMD_TIMEOUT_MAX = 5000, }; /* NOP OUT retries waiting for NOP IN response */ #define NOP_OUT_RETRIES 10 @@ -63,7 +63,11 @@ enum { /* Query request retries */ #define QUERY_REQ_RETRIES 3 /* Query request timeout */ -#define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */ +enum { + QUERY_REQ_TIMEOUT_MIN = 1, + QUERY_REQ_TIMEOUT_DEFAULT = 1500, + QUERY_REQ_TIMEOUT_MAX = 30000 +}; /* Advanced RPMB request timeout */ #define ADVANCED_RPMB_REQ_TIMEOUT 3000 /* 3 seconds */ @@ -133,7 +137,24 @@ static const struct kernel_param_ops uic_cmd_timeout_ops 
= { module_param_cb(uic_cmd_timeout, &uic_cmd_timeout_ops, &uic_cmd_timeout, 0644); MODULE_PARM_DESC(uic_cmd_timeout, - "UFS UIC command timeout in milliseconds. Defaults to 500ms. Supported values range from 500ms to 2 seconds inclusively"); + "UFS UIC command timeout in milliseconds. Defaults to 500ms. Supported values range from 500ms to 5 seconds inclusively"); + +static unsigned int dev_cmd_timeout = QUERY_REQ_TIMEOUT_DEFAULT; + +static int dev_cmd_timeout_set(const char *val, const struct kernel_param *kp) +{ + return param_set_uint_minmax(val, kp, QUERY_REQ_TIMEOUT_MIN, + QUERY_REQ_TIMEOUT_MAX); +} + +static const struct kernel_param_ops dev_cmd_timeout_ops = { + .set = dev_cmd_timeout_set, + .get = param_get_uint, +}; + +module_param_cb(dev_cmd_timeout, &dev_cmd_timeout_ops, &dev_cmd_timeout, 0644); +MODULE_PARM_DESC(dev_cmd_timeout, + "UFS Device command timeout in milliseconds. Defaults to 1.5s. Supported values range from 1ms to 30 seconds inclusively"); #define ufshcd_toggle_vreg(_dev, _vreg, _on) \ ({ \ @@ -432,7 +453,7 @@ static void ufshcd_add_command_trace(struct ufs_hba *hba, unsigned int tag, u8 opcode = 0, group_id = 0; u32 doorbell = 0; u32 intr; - int hwq_id = -1; + u32 hwq_id = 0; struct ufshcd_lrb *lrbp = &hba->lrb[tag]; struct scsi_cmnd *cmd = lrbp->cmd; struct request *rq = scsi_cmd_to_rq(cmd); @@ -644,9 +665,6 @@ static void ufshcd_print_host_state(struct ufs_hba *hba) "last_hibern8_exit_tstamp at %lld us, hibern8_exit_cnt=%d\n", div_u64(hba->ufs_stats.last_hibern8_exit_tstamp, 1000), hba->ufs_stats.hibern8_exit_cnt); - dev_err(hba->dev, "last intr at %lld us, last intr status=0x%x\n", - div_u64(hba->ufs_stats.last_intr_ts, 1000), - hba->ufs_stats.last_intr_status); dev_err(hba->dev, "error handling flags=0x%x, req. abort count=%d\n", hba->eh_flags, hba->req_abort_count); dev_err(hba->dev, "hba->ufs_version=0x%x, Host capabilities=0x%x, caps=0x%x\n", @@ -3365,7 +3383,7 @@ int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode, struct ufs_query_req *request = NULL; struct ufs_query_res *response = NULL; int err, selector = 0; - int timeout = QUERY_REQ_TIMEOUT; + int timeout = dev_cmd_timeout; BUG_ON(!hba); @@ -3462,7 +3480,7 @@ int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode, goto out_unlock; } - err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT); + err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, dev_cmd_timeout); if (err) { dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n", @@ -3558,7 +3576,7 @@ static int __ufshcd_query_descriptor(struct ufs_hba *hba, goto out_unlock; } - err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT); + err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, dev_cmd_timeout); if (err) { dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n", @@ -6020,7 +6038,7 @@ int ufshcd_read_device_lvl_exception_id(struct ufs_hba *hba, u64 *exception_id) request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST; - err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT); + err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, dev_cmd_timeout); if (err) { dev_err(hba->dev, "%s: failed to read device level exception %d\n", @@ -6107,6 +6125,21 @@ int ufshcd_wb_toggle_buf_flush(struct ufs_hba *hba, bool enable) return ret; } +int ufshcd_wb_set_resize_en(struct ufs_hba *hba, enum wb_resize_en en_mode) +{ + int ret; + u8 index; + + index = ufshcd_wb_get_query_index(hba); + ret = ufshcd_query_attr_retry(hba, 
UPIU_QUERY_OPCODE_WRITE_ATTR, + QUERY_ATTR_IDN_WB_BUF_RESIZE_EN, index, 0, &en_mode); + if (ret) + dev_err(hba->dev, "%s: Enable WB buf resize operation failed %d\n", + __func__, ret); + + return ret; +} + static bool ufshcd_wb_curr_buff_threshold_check(struct ufs_hba *hba, u32 avail_buf) { @@ -6572,7 +6605,7 @@ static void ufshcd_err_handler(struct work_struct *work) hba = container_of(work, struct ufs_hba, eh_work); dev_info(hba->dev, - "%s started; HBA state %s; powered %d; shutting down %d; saved_err = %d; saved_uic_err = %d; force_reset = %d%s\n", + "%s started; HBA state %s; powered %d; shutting down %d; saved_err = 0x%x; saved_uic_err = 0x%x; force_reset = %d%s\n", __func__, ufshcd_state_name[hba->ufshcd_state], hba->is_powered, hba->shutting_down, hba->saved_err, hba->saved_uic_err, hba->force_reset, @@ -7001,7 +7034,7 @@ static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status) } /** - * ufshcd_intr - Main interrupt service routine + * ufshcd_threaded_intr - Threaded interrupt service routine * @irq: irq number * @__hba: pointer to adapter instance * @@ -7009,16 +7042,14 @@ static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status) * IRQ_HANDLED - If interrupt is valid * IRQ_NONE - If invalid interrupt */ -static irqreturn_t ufshcd_intr(int irq, void *__hba) +static irqreturn_t ufshcd_threaded_intr(int irq, void *__hba) { - u32 intr_status, enabled_intr_status = 0; + u32 last_intr_status, intr_status, enabled_intr_status = 0; irqreturn_t retval = IRQ_NONE; struct ufs_hba *hba = __hba; int retries = hba->nutrs; - intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS); - hba->ufs_stats.last_intr_status = intr_status; - hba->ufs_stats.last_intr_ts = local_clock(); + last_intr_status = intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS); /* * There could be max of hba->nutrs reqs in flight and in worst case @@ -7042,7 +7073,7 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba) dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x (0x%08x, 0x%08x)\n", __func__, intr_status, - hba->ufs_stats.last_intr_status, + last_intr_status, enabled_intr_status); ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: "); } @@ -7050,6 +7081,29 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba) return retval; } +/** + * ufshcd_intr - Main interrupt service routine + * @irq: irq number + * @__hba: pointer to adapter instance + * + * Return: + * IRQ_HANDLED - If interrupt is valid + * IRQ_WAKE_THREAD - If handling is moved to threaded handled + * IRQ_NONE - If invalid interrupt + */ +static irqreturn_t ufshcd_intr(int irq, void *__hba) +{ + struct ufs_hba *hba = __hba; + + /* Move interrupt handling to thread when MCQ & ESI are not enabled */ + if (!hba->mcq_enabled || !hba->mcq_esi_enabled) + return IRQ_WAKE_THREAD; + + /* Directly handle interrupts since MCQ ESI handlers does the hard job */ + return ufshcd_sl_intr(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS) & + ufshcd_readl(hba, REG_INTERRUPT_ENABLE)); +} + static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag) { int err = 0; @@ -7245,7 +7299,7 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba, * bound to fail since dev_cmd.query and dev_cmd.type were left empty. * read the response directly ignoring all errors. 
*/ - ufshcd_issue_dev_cmd(hba, lrbp, tag, QUERY_REQ_TIMEOUT); + ufshcd_issue_dev_cmd(hba, lrbp, tag, dev_cmd_timeout); /* just copy the upiu response as it is */ memcpy(rsp_upiu, lrbp->ucd_rsp_ptr, sizeof(*rsp_upiu)); @@ -8107,6 +8161,9 @@ static void ufshcd_wb_probe(struct ufs_hba *hba, const u8 *desc_buf) */ dev_info->wb_buffer_type = desc_buf[DEVICE_DESC_PARAM_WB_TYPE]; + dev_info->ext_wb_sup = get_unaligned_be16(desc_buf + + DEVICE_DESC_PARAM_EXT_WB_SUP); + dev_info->b_presrv_uspc_en = desc_buf[DEVICE_DESC_PARAM_WB_PRESRV_USRSPC_EN]; @@ -8678,7 +8735,7 @@ static void ufshcd_set_timestamp_attr(struct ufs_hba *hba) put_unaligned_be64(ktime_get_real_ns(), &upiu_data->osf3); - err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT); + err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, dev_cmd_timeout); if (err) dev_err(hba->dev, "%s: failed to set timestamp %d\n", @@ -8793,6 +8850,7 @@ static void ufshcd_config_mcq(struct ufs_hba *hba) u32 intrs; ret = ufshcd_mcq_vops_config_esi(hba); + hba->mcq_esi_enabled = !ret; dev_info(hba->dev, "ESI %sconfigured\n", ret ? "is not " : ""); intrs = UFSHCD_ENABLE_MCQ_INTRS; @@ -10654,7 +10712,8 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) ufshcd_readl(hba, REG_INTERRUPT_ENABLE); /* IRQ registration */ - err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba); + err = devm_request_threaded_irq(dev, irq, ufshcd_intr, ufshcd_threaded_intr, + IRQF_ONESHOT | IRQF_SHARED, UFSHCD, hba); if (err) { dev_err(hba->dev, "request irq failed\n"); goto out_disable; diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c index 77f8ddb1f207..37887ec68412 100644 --- a/drivers/ufs/host/ufs-qcom.c +++ b/drivers/ufs/host/ufs-qcom.c @@ -5,6 +5,7 @@ #include <linux/acpi.h> #include <linux/clk.h> +#include <linux/cleanup.h> #include <linux/delay.h> #include <linux/devfreq.h> #include <linux/gpio/consumer.h> @@ -102,6 +103,24 @@ static const struct __ufs_qcom_bw_table { [MODE_MAX][0][0] = { 7643136, 819200 }, }; +static const struct { + int nminor; + char *prefix; +} testbus_info[TSTBUS_MAX] = { + [TSTBUS_UAWM] = {32, "TSTBUS_UAWM"}, + [TSTBUS_UARM] = {32, "TSTBUS_UARM"}, + [TSTBUS_TXUC] = {32, "TSTBUS_TXUC"}, + [TSTBUS_RXUC] = {32, "TSTBUS_RXUC"}, + [TSTBUS_DFC] = {32, "TSTBUS_DFC"}, + [TSTBUS_TRLUT] = {32, "TSTBUS_TRLUT"}, + [TSTBUS_TMRLUT] = {32, "TSTBUS_TMRLUT"}, + [TSTBUS_OCSC] = {32, "TSTBUS_OCSC"}, + [TSTBUS_UTP_HCI] = {32, "TSTBUS_UTP_HCI"}, + [TSTBUS_COMBINED] = {32, "TSTBUS_COMBINED"}, + [TSTBUS_WRAPPER] = {32, "TSTBUS_WRAPPER"}, + [TSTBUS_UNIPRO] = {256, "TSTBUS_UNIPRO"}, +}; + static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host); static int ufs_qcom_set_core_clk_ctrl(struct ufs_hba *hba, unsigned long freq); @@ -173,7 +192,7 @@ static int ufs_qcom_ice_init(struct ufs_qcom_host *host) profile->ll_ops = ufs_qcom_crypto_ops; profile->max_dun_bytes_supported = 8; - profile->key_types_supported = BLK_CRYPTO_KEY_TYPE_RAW; + profile->key_types_supported = qcom_ice_get_supported_key_type(ice); profile->dev = dev; /* @@ -221,17 +240,8 @@ static int ufs_qcom_ice_keyslot_program(struct blk_crypto_profile *profile, struct ufs_qcom_host *host = ufshcd_get_variant(hba); int err; - /* Only AES-256-XTS has been tested so far. 
*/ - if (key->crypto_cfg.crypto_mode != BLK_ENCRYPTION_MODE_AES_256_XTS) - return -EOPNOTSUPP; - ufshcd_hold(hba); - err = qcom_ice_program_key(host->ice, - QCOM_ICE_CRYPTO_ALG_AES_XTS, - QCOM_ICE_CRYPTO_KEY_SIZE_256, - key->bytes, - key->crypto_cfg.data_unit_size / 512, - slot); + err = qcom_ice_program_key(host->ice, slot, key); ufshcd_release(hba); return err; } @@ -250,9 +260,53 @@ static int ufs_qcom_ice_keyslot_evict(struct blk_crypto_profile *profile, return err; } +static int ufs_qcom_ice_derive_sw_secret(struct blk_crypto_profile *profile, + const u8 *eph_key, size_t eph_key_size, + u8 sw_secret[BLK_CRYPTO_SW_SECRET_SIZE]) +{ + struct ufs_hba *hba = ufs_hba_from_crypto_profile(profile); + struct ufs_qcom_host *host = ufshcd_get_variant(hba); + + return qcom_ice_derive_sw_secret(host->ice, eph_key, eph_key_size, + sw_secret); +} + +static int ufs_qcom_ice_import_key(struct blk_crypto_profile *profile, + const u8 *raw_key, size_t raw_key_size, + u8 lt_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE]) +{ + struct ufs_hba *hba = ufs_hba_from_crypto_profile(profile); + struct ufs_qcom_host *host = ufshcd_get_variant(hba); + + return qcom_ice_import_key(host->ice, raw_key, raw_key_size, lt_key); +} + +static int ufs_qcom_ice_generate_key(struct blk_crypto_profile *profile, + u8 lt_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE]) +{ + struct ufs_hba *hba = ufs_hba_from_crypto_profile(profile); + struct ufs_qcom_host *host = ufshcd_get_variant(hba); + + return qcom_ice_generate_key(host->ice, lt_key); +} + +static int ufs_qcom_ice_prepare_key(struct blk_crypto_profile *profile, + const u8 *lt_key, size_t lt_key_size, + u8 eph_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE]) +{ + struct ufs_hba *hba = ufs_hba_from_crypto_profile(profile); + struct ufs_qcom_host *host = ufshcd_get_variant(hba); + + return qcom_ice_prepare_key(host->ice, lt_key, lt_key_size, eph_key); +} + static const struct blk_crypto_ll_ops ufs_qcom_crypto_ops = { .keyslot_program = ufs_qcom_ice_keyslot_program, .keyslot_evict = ufs_qcom_ice_keyslot_evict, + .derive_sw_secret = ufs_qcom_ice_derive_sw_secret, + .import_key = ufs_qcom_ice_import_key, + .generate_key = ufs_qcom_ice_generate_key, + .prepare_key = ufs_qcom_ice_prepare_key, }; #else @@ -1609,6 +1663,85 @@ int ufs_qcom_testbus_config(struct ufs_qcom_host *host) return 0; } +static void ufs_qcom_dump_testbus(struct ufs_hba *hba) +{ + struct ufs_qcom_host *host = ufshcd_get_variant(hba); + int i, j, nminor = 0, testbus_len = 0; + u32 *testbus __free(kfree) = NULL; + char *prefix; + + testbus = kmalloc_array(256, sizeof(u32), GFP_KERNEL); + if (!testbus) + return; + + for (j = 0; j < TSTBUS_MAX; j++) { + nminor = testbus_info[j].nminor; + prefix = testbus_info[j].prefix; + host->testbus.select_major = j; + testbus_len = nminor * sizeof(u32); + for (i = 0; i < nminor; i++) { + host->testbus.select_minor = i; + ufs_qcom_testbus_config(host); + testbus[i] = ufshcd_readl(hba, UFS_TEST_BUS); + } + print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, + 16, 4, testbus, testbus_len, false); + } +} + +static int ufs_qcom_dump_regs(struct ufs_hba *hba, size_t offset, size_t len, + const char *prefix, enum ufshcd_res id) +{ + u32 *regs __free(kfree) = NULL; + size_t pos; + + if (offset % 4 != 0 || len % 4 != 0) + return -EINVAL; + + regs = kzalloc(len, GFP_ATOMIC); + if (!regs) + return -ENOMEM; + + for (pos = 0; pos < len; pos += 4) + regs[pos / 4] = readl(hba->res[id].base + offset + pos); + + print_hex_dump(KERN_ERR, prefix, + len > 4 ? 
DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE, + 16, 4, regs, len, false); + + return 0; +} + +static void ufs_qcom_dump_mcq_hci_regs(struct ufs_hba *hba) +{ + struct dump_info { + size_t offset; + size_t len; + const char *prefix; + enum ufshcd_res id; + }; + + struct dump_info mcq_dumps[] = { + {0x0, 256 * 4, "MCQ HCI-0 ", RES_MCQ}, + {0x400, 256 * 4, "MCQ HCI-1 ", RES_MCQ}, + {0x0, 5 * 4, "MCQ VS-0 ", RES_MCQ_VS}, + {0x0, 256 * 4, "MCQ SQD-0 ", RES_MCQ_SQD}, + {0x400, 256 * 4, "MCQ SQD-1 ", RES_MCQ_SQD}, + {0x800, 256 * 4, "MCQ SQD-2 ", RES_MCQ_SQD}, + {0xc00, 256 * 4, "MCQ SQD-3 ", RES_MCQ_SQD}, + {0x1000, 256 * 4, "MCQ SQD-4 ", RES_MCQ_SQD}, + {0x1400, 256 * 4, "MCQ SQD-5 ", RES_MCQ_SQD}, + {0x1800, 256 * 4, "MCQ SQD-6 ", RES_MCQ_SQD}, + {0x1c00, 256 * 4, "MCQ SQD-7 ", RES_MCQ_SQD}, + }; + + for (int i = 0; i < ARRAY_SIZE(mcq_dumps); i++) { + ufs_qcom_dump_regs(hba, mcq_dumps[i].offset, mcq_dumps[i].len, + mcq_dumps[i].prefix, mcq_dumps[i].id); + cond_resched(); + } +} + static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba) { u32 reg; @@ -1616,6 +1749,15 @@ static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba) host = ufshcd_get_variant(hba); + dev_err(hba->dev, "HW_H8_ENTER_CNT=%d\n", ufshcd_readl(hba, REG_UFS_HW_H8_ENTER_CNT)); + dev_err(hba->dev, "HW_H8_EXIT_CNT=%d\n", ufshcd_readl(hba, REG_UFS_HW_H8_EXIT_CNT)); + + dev_err(hba->dev, "SW_H8_ENTER_CNT=%d\n", ufshcd_readl(hba, REG_UFS_SW_H8_ENTER_CNT)); + dev_err(hba->dev, "SW_H8_EXIT_CNT=%d\n", ufshcd_readl(hba, REG_UFS_SW_H8_EXIT_CNT)); + + dev_err(hba->dev, "SW_AFTER_HW_H8_ENTER_CNT=%d\n", + ufshcd_readl(hba, REG_UFS_SW_AFTER_HW_H8_ENTER_CNT)); + ufshcd_dump_regs(hba, REG_UFS_SYS1CLK_1US, 16 * 4, "HCI Vendor Specific Registers "); @@ -1658,6 +1800,23 @@ static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba) reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TMRLUT); ufshcd_dump_regs(hba, reg, 9 * 4, "UFS_DBG_RD_REG_TMRLUT "); + + if (hba->mcq_enabled) { + reg = ufs_qcom_get_debug_reg_offset(host, UFS_RD_REG_MCQ); + ufshcd_dump_regs(hba, reg, 64 * 4, "HCI MCQ Debug Registers "); + } + + /* ensure below dumps occur only in task context due to blocking calls. 
*/ + if (in_task()) { + /* Dump MCQ Host Vendor Specific Registers */ + if (hba->mcq_enabled) + ufs_qcom_dump_mcq_hci_regs(hba); + + /* voluntarily yield the CPU as we are dumping too much data */ + ufshcd_dump_regs(hba, UFS_TEST_BUS, 4, "UFS_TEST_BUS "); + cond_resched(); + ufs_qcom_dump_testbus(hba); + } } /** diff --git a/drivers/ufs/host/ufs-qcom.h b/drivers/ufs/host/ufs-qcom.h index 05d4cb569c50..0a5cfc2dd4f7 100644 --- a/drivers/ufs/host/ufs-qcom.h +++ b/drivers/ufs/host/ufs-qcom.h @@ -50,6 +50,8 @@ enum { */ UFS_AH8_CFG = 0xFC, + UFS_RD_REG_MCQ = 0xD00, + REG_UFS_MEM_ICE_CONFIG = 0x260C, REG_UFS_MEM_ICE_NUM_CORE = 0x2664, @@ -75,6 +77,15 @@ enum { UFS_UFS_DBG_RD_EDTL_RAM = 0x1900, }; +/* QCOM UFS HC vendor specific Hibern8 count registers */ +enum { + REG_UFS_HW_H8_ENTER_CNT = 0x2700, + REG_UFS_SW_H8_ENTER_CNT = 0x2704, + REG_UFS_SW_AFTER_HW_H8_ENTER_CNT = 0x2708, + REG_UFS_HW_H8_EXIT_CNT = 0x270C, + REG_UFS_SW_H8_EXIT_CNT = 0x2710, +}; + enum { UFS_MEM_CQIS_VS = 0x8, }; diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c index 37bd18730fe0..f9cdbf8c53e3 100644 --- a/drivers/video/console/vgacon.c +++ b/drivers/video/console/vgacon.c @@ -1168,7 +1168,7 @@ static bool vgacon_scroll(struct vc_data *c, unsigned int t, unsigned int b, c->vc_screenbuf_size - delta); c->vc_origin = vga_vram_end - c->vc_screenbuf_size; vga_rolled_over = 0; - } else + } else if (oldo - delta >= (unsigned long)c->vc_screenbuf) c->vc_origin -= delta; c->vc_scr_end = c->vc_origin + c->vc_screenbuf_size; scr_memsetw((u16 *) (c->vc_origin), c->vc_video_erase_char, diff --git a/drivers/video/fbdev/arkfb.c b/drivers/video/fbdev/arkfb.c index 082501feceb9..ec084323115f 100644 --- a/drivers/video/fbdev/arkfb.c +++ b/drivers/video/fbdev/arkfb.c @@ -431,9 +431,10 @@ static struct dac_ops ics5342_ops = { static struct dac_info * ics5342_init(dac_read_regs_t drr, dac_write_regs_t dwr, void *data) { - struct dac_info *info = kzalloc(sizeof(struct ics5342_info), GFP_KERNEL); + struct ics5342_info *ics_info = kzalloc(sizeof(struct ics5342_info), GFP_KERNEL); + struct dac_info *info = &ics_info->dac; - if (! 
info) + if (!ics_info) return NULL; info->dacops = &ics5342_ops; diff --git a/drivers/video/fbdev/carminefb.c b/drivers/video/fbdev/carminefb.c index e56065cdba97..2bdd67595891 100644 --- a/drivers/video/fbdev/carminefb.c +++ b/drivers/video/fbdev/carminefb.c @@ -649,13 +649,13 @@ static int carminefb_probe(struct pci_dev *dev, const struct pci_device_id *ent) * is required for that largest resolution to avoid remaps at run * time */ - if (carminefb_fix.smem_len > CARMINE_TOTAL_DIPLAY_MEM) - carminefb_fix.smem_len = CARMINE_TOTAL_DIPLAY_MEM; + if (carminefb_fix.smem_len > CARMINE_TOTAL_DISPLAY_MEM) + carminefb_fix.smem_len = CARMINE_TOTAL_DISPLAY_MEM; - else if (carminefb_fix.smem_len < CARMINE_TOTAL_DIPLAY_MEM) { + else if (carminefb_fix.smem_len < CARMINE_TOTAL_DISPLAY_MEM) { printk(KERN_ERR "carminefb: Memory bar is only %d bytes, %d " "are required.", carminefb_fix.smem_len, - CARMINE_TOTAL_DIPLAY_MEM); + CARMINE_TOTAL_DISPLAY_MEM); goto err_unmap_vregs; } diff --git a/drivers/video/fbdev/carminefb.h b/drivers/video/fbdev/carminefb.h index 297688eba469..c9825481d96b 100644 --- a/drivers/video/fbdev/carminefb.h +++ b/drivers/video/fbdev/carminefb.h @@ -7,7 +7,7 @@ #define MAX_DISPLAY 2 #define CARMINE_DISPLAY_MEM (800 * 600 * 4) -#define CARMINE_TOTAL_DIPLAY_MEM (CARMINE_DISPLAY_MEM * MAX_DISPLAY) +#define CARMINE_TOTAL_DISPLAY_MEM (CARMINE_DISPLAY_MEM * MAX_DISPLAY) #define CARMINE_USE_DISPLAY0 (1 << 0) #define CARMINE_USE_DISPLAY1 (1 << 1) diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c index ac3c99ed92d1..2df48037688d 100644 --- a/drivers/video/fbdev/core/fbcon.c +++ b/drivers/video/fbdev/core/fbcon.c @@ -117,9 +117,14 @@ static signed char con2fb_map_boot[MAX_NR_CONSOLES]; static struct fb_info *fbcon_info_from_console(int console) { + signed char fb; WARN_CONSOLE_UNLOCKED(); - return fbcon_registered_fb[con2fb_map[console]]; + fb = con2fb_map[console]; + if (fb < 0 || fb >= ARRAY_SIZE(fbcon_registered_fb)) + return NULL; + + return fbcon_registered_fb[fb]; } static int logo_lines; diff --git a/drivers/video/fbdev/core/fbcvt.c b/drivers/video/fbdev/core/fbcvt.c index 64843464c661..cd3821bd82e5 100644 --- a/drivers/video/fbdev/core/fbcvt.c +++ b/drivers/video/fbdev/core/fbcvt.c @@ -312,7 +312,7 @@ int fb_find_mode_cvt(struct fb_videomode *mode, int margins, int rb) cvt.f_refresh = cvt.refresh; cvt.interlace = 1; - if (!cvt.xres || !cvt.yres || !cvt.refresh) { + if (!cvt.xres || !cvt.yres || !cvt.refresh || cvt.f_refresh > INT_MAX) { printk(KERN_INFO "fbcvt: Invalid input parameters\n"); return 1; } diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c index 3c568cff2913..eca2498f2436 100644 --- a/drivers/video/fbdev/core/fbmem.c +++ b/drivers/video/fbdev/core/fbmem.c @@ -328,8 +328,10 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var) !list_empty(&info->modelist)) ret = fb_add_videomode(&mode, &info->modelist); - if (ret) + if (ret) { + info->var = old_var; return ret; + } event.info = info; event.data = &mode; @@ -388,7 +390,7 @@ static int fb_check_foreignness(struct fb_info *fi) static int do_register_framebuffer(struct fb_info *fb_info) { - int i; + int i, err = 0; struct fb_videomode mode; if (fb_check_foreignness(fb_info)) @@ -397,10 +399,18 @@ static int do_register_framebuffer(struct fb_info *fb_info) if (num_registered_fb == FB_MAX) return -ENXIO; - num_registered_fb++; for (i = 0 ; i < FB_MAX; i++) if (!registered_fb[i]) break; + + if (!fb_info->modelist.prev || !fb_info->modelist.next) + 
INIT_LIST_HEAD(&fb_info->modelist); + + fb_var_to_videomode(&mode, &fb_info->var); + err = fb_add_videomode(&mode, &fb_info->modelist); + if (err < 0) + return err; + fb_info->node = i; refcount_set(&fb_info->count, 1); mutex_init(&fb_info->lock); @@ -426,16 +436,12 @@ static int do_register_framebuffer(struct fb_info *fb_info) if (bitmap_empty(fb_info->pixmap.blit_y, FB_MAX_BLIT_HEIGHT)) bitmap_fill(fb_info->pixmap.blit_y, FB_MAX_BLIT_HEIGHT); - if (!fb_info->modelist.prev || !fb_info->modelist.next) - INIT_LIST_HEAD(&fb_info->modelist); - if (fb_info->skip_vt_switch) pm_vt_switch_required(fb_info->device, false); else pm_vt_switch_required(fb_info->device, true); - fb_var_to_videomode(&mode, &fb_info->var); - fb_add_videomode(&mode, &fb_info->modelist); + num_registered_fb++; registered_fb[i] = fb_info; #ifdef CONFIG_GUMSTIX_AM200EPD diff --git a/drivers/video/fbdev/nvidia/nvidia.c b/drivers/video/fbdev/nvidia/nvidia.c index 8900f181f195..cfaf9454014d 100644 --- a/drivers/video/fbdev/nvidia/nvidia.c +++ b/drivers/video/fbdev/nvidia/nvidia.c @@ -1484,7 +1484,7 @@ static int nvidiafb_setup(char *options) flatpanel = 1; } else if (!strncmp(this_opt, "hwcur", 5)) { hwcur = 1; - } else if (!strncmp(this_opt, "noaccel", 6)) { + } else if (!strncmp(this_opt, "noaccel", 7)) { noaccel = 1; } else if (!strncmp(this_opt, "noscale", 7)) { noscale = 1; diff --git a/drivers/video/fbdev/via/via-gpio.c b/drivers/video/fbdev/via/via-gpio.c index 9577c2cd52c7..27226a8f3f42 100644 --- a/drivers/video/fbdev/via/via-gpio.c +++ b/drivers/video/fbdev/via/via-gpio.c @@ -81,8 +81,7 @@ struct viafb_gpio_cfg { /* * GPIO access functions */ -static void via_gpio_set(struct gpio_chip *chip, unsigned int nr, - int value) +static int via_gpio_set(struct gpio_chip *chip, unsigned int nr, int value) { struct viafb_gpio_cfg *cfg = gpiochip_get_data(chip); u8 reg; @@ -99,13 +98,14 @@ static void via_gpio_set(struct gpio_chip *chip, unsigned int nr, reg &= ~(0x10 << gpio->vg_mask_shift); via_write_reg(VIASR, gpio->vg_port_index, reg); spin_unlock_irqrestore(&cfg->vdev->reg_lock, flags); + + return 0; } static int via_gpio_dir_out(struct gpio_chip *chip, unsigned int nr, int value) { - via_gpio_set(chip, nr, value); - return 0; + return via_gpio_set(chip, nr, value); } /* @@ -146,7 +146,7 @@ static struct viafb_gpio_cfg viafb_gpio_config = { .label = "VIAFB onboard GPIO", .owner = THIS_MODULE, .direction_output = via_gpio_dir_out, - .set = via_gpio_set, + .set_rv = via_gpio_set, .direction_input = via_gpio_dir_input, .get = via_gpio_get, .base = -1, diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig index 0d8d37f712e8..0c25b2ed44eb 100644 --- a/drivers/watchdog/Kconfig +++ b/drivers/watchdog/Kconfig @@ -804,6 +804,15 @@ config IMX7ULP_WDT To compile this driver as a module, choose M here: the module will be called imx7ulp_wdt. +config S32G_WDT + tristate "S32G Watchdog" + depends on ARCH_S32 || COMPILE_TEST + select WATCHDOG_CORE + help + This is the driver for the hardware watchdog on the NXP + S32G platforms. If you wish to have watchdog support + enabled, say Y, otherwise say N. + config DB500_WATCHDOG tristate "ST-Ericsson DB800 watchdog" depends on MFD_DB8500_PRCMU @@ -1001,7 +1010,7 @@ config STM32_WATCHDOG tristate "STM32 Independent WatchDoG (IWDG) support" depends on ARCH_STM32 || COMPILE_TEST select WATCHDOG_CORE - default y + default ARCH_STM32 help Say Y here to include support for the watchdog timer in stm32 SoCs. 
@@ -1363,6 +1372,17 @@ config INTEL_MID_WATCHDOG To compile this driver as a module, choose M here. +config INTEL_OC_WATCHDOG + tristate "Intel OC Watchdog" + depends on (X86 || COMPILE_TEST) && ACPI && HAS_IOPORT + select WATCHDOG_CORE + help + Hardware driver for Intel Over-Clocking watchdog present in + Platform Controller Hub (PCH) chipsets. + + To compile this driver as a module, choose M here: the + module will be called intel_oc_wdt. + config ITCO_WDT tristate "Intel TCO Timer/Watchdog" depends on X86 && PCI @@ -1869,7 +1889,7 @@ config OCTEON_WDT config MARVELL_GTI_WDT tristate "Marvell GTI Watchdog driver" depends on ARCH_THUNDER || (COMPILE_TEST && 64BIT) - default y + default ARCH_THUNDER select WATCHDOG_CORE help Marvell GTI hardware supports watchdog timer. First timeout @@ -2035,7 +2055,7 @@ config 8xxx_WDT config PIKA_WDT tristate "PIKA FPGA Watchdog" depends on WARP || (PPC64 && COMPILE_TEST) - default y + default WARP help This enables the watchdog in the PIKA FPGA. Currently used on the Warp platform. diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile index c9482904bf87..bbd4d62d2cc3 100644 --- a/drivers/watchdog/Makefile +++ b/drivers/watchdog/Makefile @@ -69,6 +69,7 @@ obj-$(CONFIG_TS72XX_WATCHDOG) += ts72xx_wdt.o obj-$(CONFIG_IMX2_WDT) += imx2_wdt.o obj-$(CONFIG_IMX_SC_WDT) += imx_sc_wdt.o obj-$(CONFIG_IMX7ULP_WDT) += imx7ulp_wdt.o +obj-$(CONFIG_S32G_WDT) += s32g_wdt.o obj-$(CONFIG_DB500_WATCHDOG) += db8500_wdt.o obj-$(CONFIG_RETU_WATCHDOG) += retu_wdt.o obj-$(CONFIG_BCM2835_WDT) += bcm2835_wdt.o @@ -150,6 +151,7 @@ obj-$(CONFIG_W83977F_WDT) += w83977f_wdt.o obj-$(CONFIG_MACHZ_WDT) += machzwd.o obj-$(CONFIG_SBC_EPX_C3_WATCHDOG) += sbc_epx_c3.o obj-$(CONFIG_INTEL_MID_WATCHDOG) += intel-mid_wdt.o +obj-$(CONFIG_INTEL_OC_WATCHDOG) += intel_oc_wdt.o obj-$(CONFIG_INTEL_MEI_WDT) += mei_wdt.o obj-$(CONFIG_NI903X_WDT) += ni903x_wdt.o obj-$(CONFIG_NIC7018_WDT) += nic7018_wdt.o diff --git a/drivers/watchdog/apple_wdt.c b/drivers/watchdog/apple_wdt.c index 95d9e37df41c..66a158f67a71 100644 --- a/drivers/watchdog/apple_wdt.c +++ b/drivers/watchdog/apple_wdt.c @@ -95,9 +95,12 @@ static int apple_wdt_ping(struct watchdog_device *wdd) static int apple_wdt_set_timeout(struct watchdog_device *wdd, unsigned int s) { struct apple_wdt *wdt = to_apple_wdt(wdd); + u32 actual; writel_relaxed(0, wdt->regs + APPLE_WDT_WD1_CUR_TIME); - writel_relaxed(wdt->clk_rate * s, wdt->regs + APPLE_WDT_WD1_BITE_TIME); + + actual = min(s, wdd->max_hw_heartbeat_ms / 1000); + writel_relaxed(wdt->clk_rate * actual, wdt->regs + APPLE_WDT_WD1_BITE_TIME); wdd->timeout = s; @@ -177,7 +180,7 @@ static int apple_wdt_probe(struct platform_device *pdev) wdt->wdd.ops = &apple_wdt_ops; wdt->wdd.info = &apple_wdt_info; - wdt->wdd.max_timeout = U32_MAX / wdt->clk_rate; + wdt->wdd.max_hw_heartbeat_ms = U32_MAX / wdt->clk_rate * 1000; wdt->wdd.timeout = APPLE_WDT_TIMEOUT_DEFAULT; wdt_ctrl = readl_relaxed(wdt->regs + APPLE_WDT_WD1_CTRL); diff --git a/drivers/watchdog/arm_smc_wdt.c b/drivers/watchdog/arm_smc_wdt.c index 8f3d0c3a005f..bbba23ace7b8 100644 --- a/drivers/watchdog/arm_smc_wdt.c +++ b/drivers/watchdog/arm_smc_wdt.c @@ -46,6 +46,8 @@ static int smcwd_call(struct watchdog_device *wdd, enum smcwd_call call, return -ENODEV; if (res->a0 == PSCI_RET_INVALID_PARAMS) return -EINVAL; + if (res->a0 == PSCI_RET_DISABLED) + return -ENODATA; if (res->a0 != PSCI_RET_SUCCESS) return -EIO; return 0; @@ -131,10 +133,19 @@ static int smcwd_probe(struct platform_device *pdev) wdd->info = &smcwd_info; /* get_timeleft is 
optional */ - if (smcwd_call(wdd, SMCWD_GET_TIMELEFT, 0, NULL)) - wdd->ops = &smcwd_ops; - else + err = smcwd_call(wdd, SMCWD_GET_TIMELEFT, 0, NULL); + switch (err) { + case 0: + set_bit(WDOG_HW_RUNNING, &wdd->status); + fallthrough; + case -ENODATA: wdd->ops = &smcwd_timeleft_ops; + break; + default: + wdd->ops = &smcwd_ops; + break; + } + wdd->timeout = res.a2; wdd->max_timeout = res.a2; wdd->min_timeout = res.a1; diff --git a/drivers/watchdog/cros_ec_wdt.c b/drivers/watchdog/cros_ec_wdt.c index 716c23f4388c..9ffe7f505645 100644 --- a/drivers/watchdog/cros_ec_wdt.c +++ b/drivers/watchdog/cros_ec_wdt.c @@ -25,26 +25,22 @@ static int cros_ec_wdt_send_cmd(struct cros_ec_device *cros_ec, union cros_ec_wdt_data *arg) { int ret; - struct { - struct cros_ec_command msg; - union cros_ec_wdt_data data; - } __packed buf = { - .msg = { - .version = 0, - .command = EC_CMD_HANG_DETECT, - .insize = (arg->req.command == EC_HANG_DETECT_CMD_GET_STATUS) ? - sizeof(struct ec_response_hang_detect) : - 0, - .outsize = sizeof(struct ec_params_hang_detect), - }, - .data.req = arg->req - }; - - ret = cros_ec_cmd_xfer_status(cros_ec, &buf.msg); + DEFINE_RAW_FLEX(struct cros_ec_command, msg, data, + sizeof(union cros_ec_wdt_data)); + + msg->version = 0; + msg->command = EC_CMD_HANG_DETECT; + msg->insize = (arg->req.command == EC_HANG_DETECT_CMD_GET_STATUS) ? + sizeof(struct ec_response_hang_detect) : + 0; + msg->outsize = sizeof(struct ec_params_hang_detect); + *(struct ec_params_hang_detect *)msg->data = arg->req; + + ret = cros_ec_cmd_xfer_status(cros_ec, msg); if (ret < 0) return ret; - arg->resp = buf.data.resp; + arg->resp = *(struct ec_response_hang_detect *)msg->data; return 0; } diff --git a/drivers/watchdog/da9052_wdt.c b/drivers/watchdog/da9052_wdt.c index 77039f2f0be5..afb7887c3a1e 100644 --- a/drivers/watchdog/da9052_wdt.c +++ b/drivers/watchdog/da9052_wdt.c @@ -30,6 +30,18 @@ struct da9052_wdt_data { unsigned long jpast; }; +static bool nowayout = WATCHDOG_NOWAYOUT; +module_param(nowayout, bool, 0); +MODULE_PARM_DESC(nowayout, + "Watchdog cannot be stopped once started (default=" + __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); + +static int timeout; +module_param(timeout, int, 0); +MODULE_PARM_DESC(timeout, + "Watchdog timeout in seconds. 
(default = " + __MODULE_STRING(WDT_DEFAULT_TIMEOUT) ")"); + static const struct { u8 reg_val; int time; /* Seconds */ @@ -168,10 +180,13 @@ static int da9052_wdt_probe(struct platform_device *pdev) da9052_wdt = &driver_data->wdt; da9052_wdt->timeout = DA9052_DEF_TIMEOUT; + da9052_wdt->min_hw_heartbeat_ms = DA9052_TWDMIN; da9052_wdt->info = &da9052_wdt_info; da9052_wdt->ops = &da9052_wdt_ops; da9052_wdt->parent = dev; watchdog_set_drvdata(da9052_wdt, driver_data); + watchdog_init_timeout(da9052_wdt, timeout, dev); + watchdog_set_nowayout(da9052_wdt, nowayout); if (da9052->fault_log & DA9052_FAULTLOG_TWDERROR) da9052_wdt->bootstatus |= WDIOF_CARDRESET; @@ -180,11 +195,15 @@ static int da9052_wdt_probe(struct platform_device *pdev) if (da9052->fault_log & DA9052_FAULTLOG_VDDFAULT) da9052_wdt->bootstatus |= WDIOF_POWERUNDER; - ret = da9052_reg_update(da9052, DA9052_CONTROL_D_REG, - DA9052_CONTROLD_TWDSCALE, 0); - if (ret < 0) { - dev_err(dev, "Failed to disable watchdog bits, %d\n", ret); + ret = da9052_reg_read(da9052, DA9052_CONTROL_D_REG); + if (ret < 0) return ret; + + /* Check if FW enabled the watchdog */ + if (ret & DA9052_CONTROLD_TWDSCALE) { + /* Ensure proper initialization */ + da9052_wdt_start(da9052_wdt); + set_bit(WDOG_HW_RUNNING, &da9052_wdt->status); } return devm_watchdog_register_device(dev, &driver_data->wdt); diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c index 7672582fa407..9ab769aa0244 100644 --- a/drivers/watchdog/iTCO_wdt.c +++ b/drivers/watchdog/iTCO_wdt.c @@ -58,7 +58,6 @@ #include <linux/platform_device.h> /* For platform_driver framework */ #include <linux/pci.h> /* For pci functions */ #include <linux/ioport.h> /* For io-port access */ -#include <linux/spinlock.h> /* For spin_lock/spin_unlock/... */ #include <linux/uaccess.h> /* For copy_to_user/put_user/... */ #include <linux/io.h> /* For inb/outb/... */ #include <linux/platform_data/itco_wdt.h> @@ -102,8 +101,6 @@ struct iTCO_wdt_private { * or memory-mapped PMC register bit 4 (TCO version 3). 
*/ unsigned long __iomem *gcs_pmc; - /* the lock for io operations */ - spinlock_t io_lock; /* the PCI-device */ struct pci_dev *pci_dev; /* whether or not the watchdog has been suspended */ @@ -286,13 +283,10 @@ static int iTCO_wdt_start(struct watchdog_device *wd_dev) struct iTCO_wdt_private *p = watchdog_get_drvdata(wd_dev); unsigned int val; - spin_lock(&p->io_lock); - iTCO_vendor_pre_start(p->smi_res, wd_dev->timeout); /* disable chipset's NO_REBOOT bit */ if (p->update_no_reboot_bit(p->no_reboot_priv, false)) { - spin_unlock(&p->io_lock); dev_err(wd_dev->parent, "failed to reset NO_REBOOT flag, reboot disabled by hardware/BIOS\n"); return -EIO; } @@ -309,7 +303,6 @@ static int iTCO_wdt_start(struct watchdog_device *wd_dev) val &= 0xf7ff; outw(val, TCO1_CNT(p)); val = inw(TCO1_CNT(p)); - spin_unlock(&p->io_lock); if (val & 0x0800) return -1; @@ -321,8 +314,6 @@ static int iTCO_wdt_stop(struct watchdog_device *wd_dev) struct iTCO_wdt_private *p = watchdog_get_drvdata(wd_dev); unsigned int val; - spin_lock(&p->io_lock); - iTCO_vendor_pre_stop(p->smi_res); /* Bit 11: TCO Timer Halt -> 1 = The TCO timer is disabled */ @@ -334,8 +325,6 @@ static int iTCO_wdt_stop(struct watchdog_device *wd_dev) /* Set the NO_REBOOT bit to prevent later reboots, just for sure */ p->update_no_reboot_bit(p->no_reboot_priv, true); - spin_unlock(&p->io_lock); - if ((val & 0x0800) == 0) return -1; return 0; @@ -345,8 +334,6 @@ static int iTCO_wdt_ping(struct watchdog_device *wd_dev) { struct iTCO_wdt_private *p = watchdog_get_drvdata(wd_dev); - spin_lock(&p->io_lock); - /* Reload the timer by writing to the TCO Timer Counter register */ if (p->iTCO_version >= 2) { outw(0x01, TCO_RLD(p)); @@ -358,7 +345,6 @@ static int iTCO_wdt_ping(struct watchdog_device *wd_dev) outb(0x01, TCO_RLD(p)); } - spin_unlock(&p->io_lock); return 0; } @@ -385,24 +371,20 @@ static int iTCO_wdt_set_timeout(struct watchdog_device *wd_dev, unsigned int t) /* Write new heartbeat to watchdog */ if (p->iTCO_version >= 2) { - spin_lock(&p->io_lock); val16 = inw(TCOv2_TMR(p)); val16 &= 0xfc00; val16 |= tmrval; outw(val16, TCOv2_TMR(p)); val16 = inw(TCOv2_TMR(p)); - spin_unlock(&p->io_lock); if ((val16 & 0x3ff) != tmrval) return -EINVAL; } else if (p->iTCO_version == 1) { - spin_lock(&p->io_lock); val8 = inb(TCOv1_TMR(p)); val8 &= 0xc0; val8 |= (tmrval & 0xff); outb(val8, TCOv1_TMR(p)); val8 = inb(TCOv1_TMR(p)); - spin_unlock(&p->io_lock); if ((val8 & 0x3f) != tmrval) return -EINVAL; @@ -421,19 +403,15 @@ static unsigned int iTCO_wdt_get_timeleft(struct watchdog_device *wd_dev) /* read the TCO Timer */ if (p->iTCO_version >= 2) { - spin_lock(&p->io_lock); val16 = inw(TCO_RLD(p)); val16 &= 0x3ff; - spin_unlock(&p->io_lock); time_left = ticks_to_seconds(p, val16); } else if (p->iTCO_version == 1) { - spin_lock(&p->io_lock); val8 = inb(TCO_RLD(p)); val8 &= 0x3f; if (!(inw(TCO1_STS(p)) & 0x0008)) val8 += (inb(TCOv1_TMR(p)) & 0x3f); - spin_unlock(&p->io_lock); time_left = ticks_to_seconds(p, val8); } @@ -493,8 +471,6 @@ static int iTCO_wdt_probe(struct platform_device *pdev) if (!p) return -ENOMEM; - spin_lock_init(&p->io_lock); - p->tco_res = platform_get_resource(pdev, IORESOURCE_IO, ICH_RES_IO_TCO); if (!p->tco_res) return -ENODEV; @@ -604,6 +580,7 @@ static int iTCO_wdt_probe(struct platform_device *pdev) iTCO_wdt_set_timeout(&p->wddev, WATCHDOG_TIMEOUT); dev_info(dev, "timeout value out of range, using %d\n", WATCHDOG_TIMEOUT); + heartbeat = WATCHDOG_TIMEOUT; } watchdog_stop_on_reboot(&p->wddev); diff --git a/drivers/watchdog/intel_oc_wdt.c 
b/drivers/watchdog/intel_oc_wdt.c new file mode 100644 index 000000000000..7c0551106981 --- /dev/null +++ b/drivers/watchdog/intel_oc_wdt.c @@ -0,0 +1,233 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Intel OC Watchdog driver + * + * Copyright (C) 2025, Siemens + * Author: Diogo Ivo <diogo.ivo@siemens.com> + */ + +#define DRV_NAME "intel_oc_wdt" + +#include <linux/acpi.h> +#include <linux/bits.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/platform_device.h> +#include <linux/watchdog.h> + +#define INTEL_OC_WDT_TOV GENMASK(9, 0) +#define INTEL_OC_WDT_MIN_TOV 1 +#define INTEL_OC_WDT_MAX_TOV 1024 +#define INTEL_OC_WDT_DEF_TOV 60 + +/* + * One-time writable lock bit. If set forbids + * modification of itself, _TOV and _EN until + * next reboot. + */ +#define INTEL_OC_WDT_CTL_LCK BIT(12) + +#define INTEL_OC_WDT_EN BIT(14) +#define INTEL_OC_WDT_NO_ICCSURV_STS BIT(24) +#define INTEL_OC_WDT_ICCSURV_STS BIT(25) +#define INTEL_OC_WDT_RLD BIT(31) + +#define INTEL_OC_WDT_STS_BITS (INTEL_OC_WDT_NO_ICCSURV_STS | \ + INTEL_OC_WDT_ICCSURV_STS) + +#define INTEL_OC_WDT_CTRL_REG(wdt) ((wdt)->ctrl_res->start) + +struct intel_oc_wdt { + struct watchdog_device wdd; + struct resource *ctrl_res; + bool locked; +}; + +static int heartbeat; +module_param(heartbeat, uint, 0); +MODULE_PARM_DESC(heartbeat, "Watchdog heartbeats in seconds. (default=" + __MODULE_STRING(WDT_HEARTBEAT) ")"); + +static bool nowayout = WATCHDOG_NOWAYOUT; +module_param(nowayout, bool, 0); +MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" + __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); + +static int intel_oc_wdt_start(struct watchdog_device *wdd) +{ + struct intel_oc_wdt *oc_wdt = watchdog_get_drvdata(wdd); + + if (oc_wdt->locked) + return 0; + + outl(inl(INTEL_OC_WDT_CTRL_REG(oc_wdt)) | INTEL_OC_WDT_EN, + INTEL_OC_WDT_CTRL_REG(oc_wdt)); + + return 0; +} + +static int intel_oc_wdt_stop(struct watchdog_device *wdd) +{ + struct intel_oc_wdt *oc_wdt = watchdog_get_drvdata(wdd); + + outl(inl(INTEL_OC_WDT_CTRL_REG(oc_wdt)) & ~INTEL_OC_WDT_EN, + INTEL_OC_WDT_CTRL_REG(oc_wdt)); + + return 0; +} + +static int intel_oc_wdt_ping(struct watchdog_device *wdd) +{ + struct intel_oc_wdt *oc_wdt = watchdog_get_drvdata(wdd); + + outl(inl(INTEL_OC_WDT_CTRL_REG(oc_wdt)) | INTEL_OC_WDT_RLD, + INTEL_OC_WDT_CTRL_REG(oc_wdt)); + + return 0; +} + +static int intel_oc_wdt_set_timeout(struct watchdog_device *wdd, + unsigned int t) +{ + struct intel_oc_wdt *oc_wdt = watchdog_get_drvdata(wdd); + + outl((inl(INTEL_OC_WDT_CTRL_REG(oc_wdt)) & ~INTEL_OC_WDT_TOV) | (t - 1), + INTEL_OC_WDT_CTRL_REG(oc_wdt)); + + wdd->timeout = t; + + return 0; +} + +static const struct watchdog_info intel_oc_wdt_info = { + .options = WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING, + .identity = DRV_NAME, +}; + +static const struct watchdog_ops intel_oc_wdt_ops = { + .owner = THIS_MODULE, + .start = intel_oc_wdt_start, + .stop = intel_oc_wdt_stop, + .ping = intel_oc_wdt_ping, + .set_timeout = intel_oc_wdt_set_timeout, +}; + +static int intel_oc_wdt_setup(struct intel_oc_wdt *oc_wdt) +{ + struct watchdog_info *info; + unsigned long val; + + val = inl(INTEL_OC_WDT_CTRL_REG(oc_wdt)); + + if (val & INTEL_OC_WDT_STS_BITS) + oc_wdt->wdd.bootstatus |= WDIOF_CARDRESET; + + oc_wdt->locked = !!(val & INTEL_OC_WDT_CTL_LCK); + + if (val & INTEL_OC_WDT_EN) { + /* + * No need to issue a ping here to "commit" the new timeout + * value to hardware as the watchdog core schedules one + * immediately when registering the 
watchdog. + */ + set_bit(WDOG_HW_RUNNING, &oc_wdt->wdd.status); + + if (oc_wdt->locked) { + info = (struct watchdog_info *)&intel_oc_wdt_info; + /* + * Set nowayout unconditionally as we cannot stop + * the watchdog. + */ + nowayout = true; + /* + * If we are locked read the current timeout value + * and inform the core we can't change it. + */ + oc_wdt->wdd.timeout = (val & INTEL_OC_WDT_TOV) + 1; + info->options &= ~WDIOF_SETTIMEOUT; + + dev_info(oc_wdt->wdd.parent, + "Register access locked, heartbeat fixed at: %u s\n", + oc_wdt->wdd.timeout); + } + } else if (oc_wdt->locked) { + /* + * In case the watchdog is disabled and locked there + * is nothing we can do with it so just fail probing. + */ + return -EACCES; + } + + val &= ~INTEL_OC_WDT_TOV; + outl(val | (oc_wdt->wdd.timeout - 1), INTEL_OC_WDT_CTRL_REG(oc_wdt)); + + return 0; +} + +static int intel_oc_wdt_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct intel_oc_wdt *oc_wdt; + struct watchdog_device *wdd; + int ret; + + oc_wdt = devm_kzalloc(&pdev->dev, sizeof(*oc_wdt), GFP_KERNEL); + if (!oc_wdt) + return -ENOMEM; + + oc_wdt->ctrl_res = platform_get_resource(pdev, IORESOURCE_IO, 0); + if (!oc_wdt->ctrl_res) { + dev_err(&pdev->dev, "missing I/O resource\n"); + return -ENODEV; + } + + if (!devm_request_region(&pdev->dev, oc_wdt->ctrl_res->start, + resource_size(oc_wdt->ctrl_res), pdev->name)) { + dev_err(dev, "resource %pR already in use, device disabled\n", + oc_wdt->ctrl_res); + return -EBUSY; + } + + wdd = &oc_wdt->wdd; + wdd->min_timeout = INTEL_OC_WDT_MIN_TOV; + wdd->max_timeout = INTEL_OC_WDT_MAX_TOV; + wdd->timeout = INTEL_OC_WDT_DEF_TOV; + wdd->info = &intel_oc_wdt_info; + wdd->ops = &intel_oc_wdt_ops; + wdd->parent = dev; + + watchdog_init_timeout(wdd, heartbeat, dev); + + ret = intel_oc_wdt_setup(oc_wdt); + if (ret) + return ret; + + watchdog_set_drvdata(wdd, oc_wdt); + watchdog_set_nowayout(wdd, nowayout); + watchdog_stop_on_reboot(wdd); + watchdog_stop_on_unregister(wdd); + + return devm_watchdog_register_device(dev, wdd); +} + +static const struct acpi_device_id intel_oc_wdt_match[] = { + { "INT3F0D" }, + { "INTC1099" }, + { }, +}; +MODULE_DEVICE_TABLE(acpi, intel_oc_wdt_match); + +static struct platform_driver intel_oc_wdt_platform_driver = { + .driver = { + .name = DRV_NAME, + .acpi_match_table = intel_oc_wdt_match, + }, + .probe = intel_oc_wdt_probe, +}; + +module_platform_driver(intel_oc_wdt_platform_driver); + +MODULE_AUTHOR("Diogo Ivo <diogo.ivo@siemens.com>"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Intel OC Watchdog driver"); diff --git a/drivers/watchdog/lenovo_se30_wdt.c b/drivers/watchdog/lenovo_se30_wdt.c index 024b842499b3..1c73bb7eeeee 100644 --- a/drivers/watchdog/lenovo_se30_wdt.c +++ b/drivers/watchdog/lenovo_se30_wdt.c @@ -271,6 +271,8 @@ static int lenovo_se30_wdt_probe(struct platform_device *pdev) return -EBUSY; priv->shm_base_addr = devm_ioremap(dev, base_phys, SHM_WIN_SIZE); + if (!priv->shm_base_addr) + return -ENOMEM; priv->wdt_cfg.mod = WDT_MODULE; priv->wdt_cfg.idx = WDT_CFG_INDEX; diff --git a/drivers/watchdog/pcwd_usb.c b/drivers/watchdog/pcwd_usb.c index 132699e2f247..b636650b714b 100644 --- a/drivers/watchdog/pcwd_usb.c +++ b/drivers/watchdog/pcwd_usb.c @@ -579,7 +579,7 @@ static struct notifier_block usb_pcwd_notifier = { .notifier_call = usb_pcwd_notify_sys, }; -/** +/* * usb_pcwd_delete */ static inline void usb_pcwd_delete(struct usb_pcwd_private *usb_pcwd) @@ -590,7 +590,7 @@ static inline void usb_pcwd_delete(struct usb_pcwd_private *usb_pcwd) 
kfree(usb_pcwd); } -/** +/* * usb_pcwd_probe * * Called by the usb core when a new device is connected that it thinks @@ -758,7 +758,7 @@ error: } -/** +/* * usb_pcwd_disconnect * * Called by the usb core when the device is removed from the system. diff --git a/drivers/watchdog/pretimeout_noop.c b/drivers/watchdog/pretimeout_noop.c index 4799551dd784..74ec02b9ffca 100644 --- a/drivers/watchdog/pretimeout_noop.c +++ b/drivers/watchdog/pretimeout_noop.c @@ -11,7 +11,7 @@ /** * pretimeout_noop - No operation on watchdog pretimeout event - * @wdd - watchdog_device + * @wdd: watchdog_device * * This function prints a message about pretimeout to kernel log. */ diff --git a/drivers/watchdog/pretimeout_panic.c b/drivers/watchdog/pretimeout_panic.c index 2cc3c41d2be5..8c3ac674dc45 100644 --- a/drivers/watchdog/pretimeout_panic.c +++ b/drivers/watchdog/pretimeout_panic.c @@ -11,7 +11,7 @@ /** * pretimeout_panic - Panic on watchdog pretimeout event - * @wdd - watchdog_device + * @wdd: watchdog_device * * Panic, watchdog has not been fed till pretimeout event. */ diff --git a/drivers/watchdog/qcom-wdt.c b/drivers/watchdog/qcom-wdt.c index 006f9c61aa64..dfaac5995c84 100644 --- a/drivers/watchdog/qcom-wdt.c +++ b/drivers/watchdog/qcom-wdt.c @@ -181,6 +181,12 @@ static const struct qcom_wdt_match_data match_data_apcs_tmr = { .max_tick_count = 0x10000000U, }; +static const struct qcom_wdt_match_data match_data_ipq5424 = { + .offset = reg_offset_data_kpss, + .pretimeout = true, + .max_tick_count = 0xFFFFFU, +}; + static const struct qcom_wdt_match_data match_data_kpss = { .offset = reg_offset_data_kpss, .pretimeout = true, @@ -322,6 +328,7 @@ static const struct dev_pm_ops qcom_wdt_pm_ops = { }; static const struct of_device_id qcom_wdt_of_table[] = { + { .compatible = "qcom,apss-wdt-ipq5424", .data = &match_data_ipq5424 }, { .compatible = "qcom,kpss-timer", .data = &match_data_apcs_tmr }, { .compatible = "qcom,scss-timer", .data = &match_data_apcs_tmr }, { .compatible = "qcom,kpss-wdt", .data = &match_data_kpss }, diff --git a/drivers/watchdog/s32g_wdt.c b/drivers/watchdog/s32g_wdt.c new file mode 100644 index 000000000000..ad55063060af --- /dev/null +++ b/drivers/watchdog/s32g_wdt.c @@ -0,0 +1,315 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Watchdog driver for S32G SoC + * + * Copyright 2017-2019, 2021-2025 NXP. 
+ * + */ +#include <linux/clk.h> +#include <linux/debugfs.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/watchdog.h> + +#define DRIVER_NAME "s32g-swt" + +#define S32G_SWT_CR(__base) ((__base) + 0x00) /* Control Register offset */ +#define S32G_SWT_CR_SM (BIT(9) | BIT(10)) /* -> Service Mode */ +#define S32G_SWT_CR_STP BIT(2) /* -> Stop Mode Control */ +#define S32G_SWT_CR_FRZ BIT(1) /* -> Debug Mode Control */ +#define S32G_SWT_CR_WEN BIT(0) /* -> Watchdog Enable */ + +#define S32G_SWT_TO(__base) ((__base) + 0x08) /* Timeout Register offset */ + +#define S32G_SWT_SR(__base) ((__base) + 0x10) /* Service Register offset */ +#define S32G_WDT_SEQ1 0xA602 /* -> service sequence number 1 */ +#define S32G_WDT_SEQ2 0xB480 /* -> service sequence number 2 */ + +#define S32G_SWT_CO(__base) ((__base) + 0x14) /* Counter output register */ + +#define S32G_WDT_DEFAULT_TIMEOUT 30 + +struct s32g_wdt_device { + int rate; + void __iomem *base; + struct watchdog_device wdog; +}; + +static bool nowayout = WATCHDOG_NOWAYOUT; +module_param(nowayout, bool, 0); +MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" + __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); + +static unsigned int timeout_param = S32G_WDT_DEFAULT_TIMEOUT; +module_param(timeout_param, uint, 0); +MODULE_PARM_DESC(timeout_param, "Watchdog timeout in seconds (default=" + __MODULE_STRING(S32G_WDT_DEFAULT_TIMEOUT) ")"); + +static bool early_enable; +module_param(early_enable, bool, 0); +MODULE_PARM_DESC(early_enable, + "Watchdog is started on module insertion (default=false)"); + +static const struct watchdog_info s32g_wdt_info = { + .identity = "s32g watchdog", + .options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE | + WDIOC_GETTIMEOUT | WDIOC_GETTIMELEFT, +}; + +static struct s32g_wdt_device *wdd_to_s32g_wdt(struct watchdog_device *wdd) +{ + return container_of(wdd, struct s32g_wdt_device, wdog); +} + +static unsigned int wdog_sec_to_count(struct s32g_wdt_device *wdev, unsigned int timeout) +{ + return wdev->rate * timeout; +} + +static int s32g_wdt_ping(struct watchdog_device *wdog) +{ + struct s32g_wdt_device *wdev = wdd_to_s32g_wdt(wdog); + + writel(S32G_WDT_SEQ1, S32G_SWT_SR(wdev->base)); + writel(S32G_WDT_SEQ2, S32G_SWT_SR(wdev->base)); + + return 0; +} + +static int s32g_wdt_start(struct watchdog_device *wdog) +{ + struct s32g_wdt_device *wdev = wdd_to_s32g_wdt(wdog); + unsigned long val; + + val = readl(S32G_SWT_CR(wdev->base)); + + val |= S32G_SWT_CR_WEN; + + writel(val, S32G_SWT_CR(wdev->base)); + + return 0; +} + +static int s32g_wdt_stop(struct watchdog_device *wdog) +{ + struct s32g_wdt_device *wdev = wdd_to_s32g_wdt(wdog); + unsigned long val; + + val = readl(S32G_SWT_CR(wdev->base)); + + val &= ~S32G_SWT_CR_WEN; + + writel(val, S32G_SWT_CR(wdev->base)); + + return 0; +} + +static int s32g_wdt_set_timeout(struct watchdog_device *wdog, unsigned int timeout) +{ + struct s32g_wdt_device *wdev = wdd_to_s32g_wdt(wdog); + + writel(wdog_sec_to_count(wdev, timeout), S32G_SWT_TO(wdev->base)); + + wdog->timeout = timeout; + + /* + * Conforming to the documentation, the timeout counter is + * loaded when servicing is operated (aka ping) or when the + * counter is enabled. In case the watchdog is already started + * it must be stopped and started again to update the timeout + * register or a ping can be sent to refresh the counter. 
Here + * we choose to send a ping to the watchdog which is harmless + * if the watchdog is stopped. + */ + return s32g_wdt_ping(wdog); +} + +static unsigned int s32g_wdt_get_timeleft(struct watchdog_device *wdog) +{ + struct s32g_wdt_device *wdev = wdd_to_s32g_wdt(wdog); + unsigned long counter; + bool is_running; + + /* + * The counter output can be read only if the SWT is + * disabled. Given the latency between the internal counter + * and the counter output update, there can be very small + * difference. However, we can accept this matter of fact + * given the resolution is a second based unit for the output. + */ + is_running = watchdog_hw_running(wdog); + + if (is_running) + s32g_wdt_stop(wdog); + + counter = readl(S32G_SWT_CO(wdev->base)); + + if (is_running) + s32g_wdt_start(wdog); + + return counter / wdev->rate; +} + +static const struct watchdog_ops s32g_wdt_ops = { + .owner = THIS_MODULE, + .start = s32g_wdt_start, + .stop = s32g_wdt_stop, + .ping = s32g_wdt_ping, + .set_timeout = s32g_wdt_set_timeout, + .get_timeleft = s32g_wdt_get_timeleft, +}; + +static void s32g_wdt_init(struct s32g_wdt_device *wdev) +{ + unsigned long val; + + /* Set the watchdog's Time-Out value */ + val = wdog_sec_to_count(wdev, wdev->wdog.timeout); + + writel(val, S32G_SWT_TO(wdev->base)); + + /* + * Get the control register content. We are at init time, the + * watchdog should not be started. + */ + val = readl(S32G_SWT_CR(wdev->base)); + + /* + * We want to allow the watchdog timer to be stopped when + * device enters debug mode. + */ + val |= S32G_SWT_CR_FRZ; + + /* + * However, when the CPU is in WFI or suspend mode, the + * watchdog must continue. The documentation refers it as the + * stopped mode. + */ + val &= ~S32G_SWT_CR_STP; + + /* + * Use Fixed Service Sequence to ping the watchdog which is + * 0x00 configuration value for the service mode. It should be + * already set because it is the default value but we reset it + * in case. + */ + val &= ~S32G_SWT_CR_SM; + + writel(val, S32G_SWT_CR(wdev->base)); + + /* + * When the 'early_enable' option is set, we start the + * watchdog from the kernel. + */ + if (early_enable) { + s32g_wdt_start(&wdev->wdog); + set_bit(WDOG_HW_RUNNING, &wdev->wdog.status); + } +} + +static int s32g_wdt_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct resource *res; + struct clk *clk; + struct s32g_wdt_device *wdev; + struct watchdog_device *wdog; + int ret; + + wdev = devm_kzalloc(dev, sizeof(*wdev), GFP_KERNEL); + if (!wdev) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + wdev->base = devm_ioremap_resource(dev, res); + if (IS_ERR(wdev->base)) + return dev_err_probe(&pdev->dev, PTR_ERR(wdev->base), "Can not get resource\n"); + + clk = devm_clk_get_enabled(dev, "counter"); + if (IS_ERR(clk)) + return dev_err_probe(dev, PTR_ERR(clk), "Can't get Watchdog clock\n"); + + wdev->rate = clk_get_rate(clk); + if (!wdev->rate) { + dev_err(dev, "Input clock rate is not valid\n"); + return -EINVAL; + } + + wdog = &wdev->wdog; + wdog->info = &s32g_wdt_info; + wdog->ops = &s32g_wdt_ops; + + /* + * The code converts the timeout into a counter a value, if + * the value is less than 0x100, then it is clamped by the SWT + * module, so it is safe to specify a zero value as the + * minimum timeout. + */ + wdog->min_timeout = 0; + + /* + * The counter register is a 32 bits long, so the maximum + * counter value is UINT_MAX and the timeout in second is the + * value divided by the rate. 
+ * + * For instance, a rate of 51MHz lead to 84 seconds maximum + * timeout. + */ + wdog->max_timeout = UINT_MAX / wdev->rate; + + /* + * The module param and the DT 'timeout-sec' property will + * override the default value if they are specified. + */ + ret = watchdog_init_timeout(wdog, timeout_param, dev); + if (ret) + return ret; + + /* + * As soon as the watchdog is started, there is no way to stop + * it if the 'nowayout' option is set at boot time + */ + watchdog_set_nowayout(wdog, nowayout); + + /* + * The devm_ version of the watchdog_register_device() + * function will call watchdog_unregister_device() when the + * device is removed. + */ + watchdog_stop_on_unregister(wdog); + + s32g_wdt_init(wdev); + + ret = devm_watchdog_register_device(dev, wdog); + if (ret) + return dev_err_probe(dev, ret, "Cannot register watchdog device\n"); + + dev_info(dev, "S32G Watchdog Timer Registered, timeout=%ds, nowayout=%d, early_enable=%d\n", + wdog->timeout, nowayout, early_enable); + + return 0; +} + +static const struct of_device_id s32g_wdt_dt_ids[] = { + { .compatible = "nxp,s32g2-swt" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, s32g_wdt_dt_ids); + +static struct platform_driver s32g_wdt_driver = { + .probe = s32g_wdt_probe, + .driver = { + .name = DRIVER_NAME, + .of_match_table = s32g_wdt_dt_ids, + }, +}; + +module_platform_driver(s32g_wdt_driver); + +MODULE_AUTHOR("Daniel Lezcano <daniel.lezcano@linaro.org>"); +MODULE_DESCRIPTION("Watchdog driver for S32G SoC"); +MODULE_LICENSE("GPL"); diff --git a/drivers/watchdog/s3c2410_wdt.c b/drivers/watchdog/s3c2410_wdt.c index bdd81d8074b2..40901bdac426 100644 --- a/drivers/watchdog/s3c2410_wdt.c +++ b/drivers/watchdog/s3c2410_wdt.c @@ -82,6 +82,10 @@ #define GS_CLUSTER2_NONCPU_INT_EN 0x1644 #define GS_RST_STAT_REG_OFFSET 0x3B44 +#define EXYNOS990_CLUSTER2_NONCPU_OUT 0x1620 +#define EXYNOS990_CLUSTER2_NONCPU_INT_EN 0x1644 +#define EXYNOS990_CLUSTER2_WDTRESET_BIT 23 + /** * DOC: Quirk flags for different Samsung watchdog IP-cores * @@ -259,6 +263,32 @@ static const struct s3c2410_wdt_variant drv_data_exynos850_cl1 = { QUIRK_HAS_PMU_RST_STAT | QUIRK_HAS_PMU_CNT_EN, }; +static const struct s3c2410_wdt_variant drv_data_exynos990_cl0 = { + .mask_reset_reg = GS_CLUSTER0_NONCPU_INT_EN, + .mask_bit = 2, + .mask_reset_inv = true, + .rst_stat_reg = EXYNOS5_RST_STAT_REG_OFFSET, + .rst_stat_bit = EXYNOS850_CLUSTER0_WDTRESET_BIT, + .cnt_en_reg = EXYNOSAUTOV920_CLUSTER0_NONCPU_OUT, + .cnt_en_bit = 7, + .quirks = QUIRK_HAS_WTCLRINT_REG | QUIRK_HAS_PMU_MASK_RESET | + QUIRK_HAS_PMU_RST_STAT | QUIRK_HAS_PMU_CNT_EN | + QUIRK_HAS_DBGACK_BIT, +}; + +static const struct s3c2410_wdt_variant drv_data_exynos990_cl2 = { + .mask_reset_reg = EXYNOS990_CLUSTER2_NONCPU_INT_EN, + .mask_bit = 2, + .mask_reset_inv = true, + .rst_stat_reg = EXYNOS5_RST_STAT_REG_OFFSET, + .rst_stat_bit = EXYNOS990_CLUSTER2_WDTRESET_BIT, + .cnt_en_reg = EXYNOS990_CLUSTER2_NONCPU_OUT, + .cnt_en_bit = 7, + .quirks = QUIRK_HAS_WTCLRINT_REG | QUIRK_HAS_PMU_MASK_RESET | + QUIRK_HAS_PMU_RST_STAT | QUIRK_HAS_PMU_CNT_EN | + QUIRK_HAS_DBGACK_BIT, +}; + static const struct s3c2410_wdt_variant drv_data_exynosautov9_cl0 = { .mask_reset_reg = EXYNOS850_CLUSTER0_NONCPU_INT_EN, .mask_bit = 2, @@ -350,6 +380,8 @@ static const struct of_device_id s3c2410_wdt_match[] = { .data = &drv_data_exynos7 }, { .compatible = "samsung,exynos850-wdt", .data = &drv_data_exynos850_cl0 }, + { .compatible = "samsung,exynos990-wdt", + .data = &drv_data_exynos990_cl0 }, { .compatible = "samsung,exynosautov9-wdt", .data = 
&drv_data_exynosautov9_cl0 }, { .compatible = "samsung,exynosautov920-wdt", @@ -678,7 +710,8 @@ s3c2410_get_wdt_drv_data(struct platform_device *pdev, struct s3c2410_wdt *wdt) if (variant == &drv_data_exynos850_cl0 || variant == &drv_data_exynosautov9_cl0 || variant == &drv_data_gs101_cl0 || - variant == &drv_data_exynosautov920_cl0) { + variant == &drv_data_exynosautov920_cl0 || + variant == &drv_data_exynos990_cl0) { u32 index; int err; @@ -700,6 +733,10 @@ s3c2410_get_wdt_drv_data(struct platform_device *pdev, struct s3c2410_wdt *wdt) else if (variant == &drv_data_exynosautov920_cl0) variant = &drv_data_exynosautov920_cl1; break; + case 2: + if (variant == &drv_data_exynos990_cl0) + variant = &drv_data_exynos990_cl2; + break; default: return dev_err_probe(dev, -EINVAL, "wrong cluster index: %u\n", index); } diff --git a/drivers/watchdog/stm32_iwdg.c b/drivers/watchdog/stm32_iwdg.c index 8ad06b54c5ad..b356a272ff9a 100644 --- a/drivers/watchdog/stm32_iwdg.c +++ b/drivers/watchdog/stm32_iwdg.c @@ -291,7 +291,7 @@ static int stm32_iwdg_irq_init(struct platform_device *pdev, return 0; if (of_property_read_bool(np, "wakeup-source")) { - ret = device_init_wakeup(dev, true); + ret = devm_device_init_wakeup(dev); if (ret) return ret; diff --git a/drivers/watchdog/wdt_pci.c b/drivers/watchdog/wdt_pci.c index dc5f29560e9b..3918a600f2a0 100644 --- a/drivers/watchdog/wdt_pci.c +++ b/drivers/watchdog/wdt_pci.c @@ -264,7 +264,7 @@ static int wdtpci_get_status(int *status) return 0; } -/** +/* * wdtpci_get_temperature: * * Reports the temperature in degrees Fahrenheit. The API is in |