Diffstat (limited to 'drivers')
123 files changed, 4682 insertions, 3030 deletions
diff --git a/drivers/acpi/irq.c b/drivers/acpi/irq.c index 1687483ff319..76a856c32c4d 100644 --- a/drivers/acpi/irq.c +++ b/drivers/acpi/irq.c @@ -12,7 +12,7 @@ enum acpi_irq_model_id acpi_irq_model; -static struct fwnode_handle *(*acpi_get_gsi_domain_id)(u32 gsi); +static acpi_gsi_domain_disp_fn acpi_get_gsi_domain_id; static u32 (*acpi_gsi_to_irq_fallback)(u32 gsi); /** @@ -307,12 +307,24 @@ EXPORT_SYMBOL_GPL(acpi_irq_get); * for a given GSI */ void __init acpi_set_irq_model(enum acpi_irq_model_id model, - struct fwnode_handle *(*fn)(u32)) + acpi_gsi_domain_disp_fn fn) { acpi_irq_model = model; acpi_get_gsi_domain_id = fn; } +/* + * acpi_get_gsi_dispatcher() - Get the GSI dispatcher function + * + * Return the dispatcher function that computes the domain fwnode for + * a given GSI. + */ +acpi_gsi_domain_disp_fn acpi_get_gsi_dispatcher(void) +{ + return acpi_get_gsi_domain_id; +} +EXPORT_SYMBOL_GPL(acpi_get_gsi_dispatcher); + /** * acpi_set_gsi_to_irq_fallback - Register a GSI transfer * callback to fallback to arch specified implementation. diff --git a/drivers/acpi/numa/srat.c b/drivers/acpi/numa/srat.c index 0a725e46d017..53816dfab645 100644 --- a/drivers/acpi/numa/srat.c +++ b/drivers/acpi/numa/srat.c @@ -14,6 +14,7 @@ #include <linux/errno.h> #include <linux/acpi.h> #include <linux/memblock.h> +#include <linux/memory.h> #include <linux/numa.h> #include <linux/nodemask.h> #include <linux/topology.h> @@ -429,13 +430,23 @@ static int __init acpi_parse_cfmws(union acpi_subtable_headers *header, { struct acpi_cedt_cfmws *cfmws; int *fake_pxm = arg; - u64 start, end; + u64 start, end, align; int node; + int err; cfmws = (struct acpi_cedt_cfmws *)header; start = cfmws->base_hpa; end = cfmws->base_hpa + cfmws->window_size; + /* Align memblock size to CFMW regions if possible */ + align = 1UL << __ffs(start | end); + if (align >= SZ_256M) { + err = memory_block_advise_max_size(align); + if (err) + pr_warn("CFMWS: memblock size advise failed (%d)\n", err); + } else + pr_err("CFMWS: [BIOS BUG] base/size alignment violates spec\n"); + /* * The SRAT may have already described NUMA details for all, * or a portion of, this CFMWS HPA range. Extend the memblks @@ -453,7 +464,7 @@ static int __init acpi_parse_cfmws(union acpi_subtable_headers *header, return -EINVAL; } - if (numa_add_memblk(node, start, end) < 0) { + if (numa_add_reserved_memblk(node, start, end) < 0) { /* CXL driver must handle the NUMA_NO_NODE case */ pr_warn("ACPI NUMA: Failed to add memblk for CFMWS node %d [mem %#llx-%#llx]\n", node, start, end); diff --git a/drivers/base/memory.c b/drivers/base/memory.c index 19469e7f88c2..ed3e69dc785c 100644 --- a/drivers/base/memory.c +++ b/drivers/base/memory.c @@ -110,6 +110,57 @@ static void memory_block_release(struct device *dev) kfree(mem); } + +/* Max block size to be set by memory_block_advise_max_size */ +static unsigned long memory_block_advised_size; +static bool memory_block_advised_size_queried; + +/** + * memory_block_advise_max_size() - advise memory hotplug on the max suggested + * block size, usually for alignment. + * @size: suggestion for maximum block size. must be aligned on power of 2. + * + * Early boot software (pre-allocator init) may advise archs on the max block + * size. This value can only decrease after initialization, as the intent is + * to identify the largest supported alignment for all sources. + * + * Use of this value is arch-defined, as is min/max block size. 
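The CFMWS parsing above derives the largest power of two that divides both the window base and its end, via 1UL << __ffs(start | end), and passes that value to memory_block_advise_max_size() when it is at least 256 MiB. As a standalone illustration of the arithmetic (not a hunk of this patch; the 4 GiB base and 1 GiB window size are made-up values, and __builtin_ctzll() stands in for the kernel's __ffs()):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Illustrative CFMWS window: 4 GiB base, 1 GiB size. */
	uint64_t start = 0x100000000ULL;
	uint64_t end   = start + 0x40000000ULL;
	/* Largest power of two dividing both the base and the end of the window. */
	uint64_t align = 1ULL << __builtin_ctzll(start | end);

	/* Prints 0x40000000 (1 GiB); being >= 256 MiB, the CFMWS code above
	 * would advise it as the maximum memory block size. */
	printf("align = %#llx\n", (unsigned long long)align);
	return 0;
}

Per the kernel-doc that follows, the advised value can only shrink as more callers advise, and it is frozen once arch code reads it back.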
+ * + * Return: 0 on success + * -EINVAL if size is 0 or not pow2 aligned + * -EBUSY if value has already been probed + */ +int __init memory_block_advise_max_size(unsigned long size) +{ + if (!size || !is_power_of_2(size)) + return -EINVAL; + + if (memory_block_advised_size_queried) + return -EBUSY; + + if (memory_block_advised_size) + memory_block_advised_size = min(memory_block_advised_size, size); + else + memory_block_advised_size = size; + + return 0; +} + +/** + * memory_block_advised_max_size() - query advised max hotplug block size. + * + * After the first call, the value can never change. Callers looking for the + * actual block size should use memory_block_size_bytes. This interface is + * intended for use by arch-init when initializing the hotplug block size. + * + * Return: advised size in bytes, or 0 if never set. + */ +unsigned long memory_block_advised_max_size(void) +{ + memory_block_advised_size_queried = true; + return memory_block_advised_size; +} + unsigned long __weak memory_block_size_bytes(void) { return MIN_MEMORY_BLOCK_SIZE; diff --git a/drivers/base/node.c b/drivers/base/node.c index 618712071a1e..c19094481630 100644 --- a/drivers/base/node.c +++ b/drivers/base/node.c @@ -7,6 +7,7 @@ #include <linux/init.h> #include <linux/mm.h> #include <linux/memory.h> +#include <linux/mempolicy.h> #include <linux/vmstat.h> #include <linux/notifier.h> #include <linux/node.h> @@ -214,6 +215,14 @@ void node_set_perf_attrs(unsigned int nid, struct access_coordinate *coord, break; } } + + /* When setting CPU access coordinates, update mempolicy */ + if (access == ACCESS_COORDINATE_CPU) { + if (mempolicy_set_node_perf(nid, coord)) { + pr_info("failed to set mempolicy attrs for node %d\n", + nid); + } + } } EXPORT_SYMBOL_GPL(node_set_perf_attrs); diff --git a/drivers/block/zram/backend_deflate.c b/drivers/block/zram/backend_deflate.c index 0f7f252c12f4..b75016e0e654 100644 --- a/drivers/block/zram/backend_deflate.c +++ b/drivers/block/zram/backend_deflate.c @@ -8,7 +8,7 @@ #include "backend_deflate.h" /* Use the same value as crypto API */ -#define DEFLATE_DEF_WINBITS 11 +#define DEFLATE_DEF_WINBITS (-11) #define DEFLATE_DEF_MEMLEVEL MAX_MEM_LEVEL struct deflate_ctx { @@ -22,8 +22,10 @@ static void deflate_release_params(struct zcomp_params *params) static int deflate_setup_params(struct zcomp_params *params) { - if (params->level == ZCOMP_PARAM_NO_LEVEL) + if (params->level == ZCOMP_PARAM_NOT_SET) params->level = Z_DEFAULT_COMPRESSION; + if (params->deflate.winbits == ZCOMP_PARAM_NOT_SET) + params->deflate.winbits = DEFLATE_DEF_WINBITS; return 0; } @@ -57,13 +59,13 @@ static int deflate_create(struct zcomp_params *params, struct zcomp_ctx *ctx) return -ENOMEM; ctx->context = zctx; - sz = zlib_deflate_workspacesize(-DEFLATE_DEF_WINBITS, MAX_MEM_LEVEL); + sz = zlib_deflate_workspacesize(params->deflate.winbits, MAX_MEM_LEVEL); zctx->cctx.workspace = vzalloc(sz); if (!zctx->cctx.workspace) goto error; ret = zlib_deflateInit2(&zctx->cctx, params->level, Z_DEFLATED, - -DEFLATE_DEF_WINBITS, DEFLATE_DEF_MEMLEVEL, + params->deflate.winbits, DEFLATE_DEF_MEMLEVEL, Z_DEFAULT_STRATEGY); if (ret != Z_OK) goto error; @@ -73,7 +75,7 @@ static int deflate_create(struct zcomp_params *params, struct zcomp_ctx *ctx) if (!zctx->dctx.workspace) goto error; - ret = zlib_inflateInit2(&zctx->dctx, -DEFLATE_DEF_WINBITS); + ret = zlib_inflateInit2(&zctx->dctx, params->deflate.winbits); if (ret != Z_OK) goto error; diff --git a/drivers/block/zram/backend_lz4.c b/drivers/block/zram/backend_lz4.c index 
847f3334eb38..daccd60857eb 100644 --- a/drivers/block/zram/backend_lz4.c +++ b/drivers/block/zram/backend_lz4.c @@ -18,7 +18,7 @@ static void lz4_release_params(struct zcomp_params *params) static int lz4_setup_params(struct zcomp_params *params) { - if (params->level == ZCOMP_PARAM_NO_LEVEL) + if (params->level == ZCOMP_PARAM_NOT_SET) params->level = LZ4_ACCELERATION_DEFAULT; return 0; diff --git a/drivers/block/zram/backend_lz4hc.c b/drivers/block/zram/backend_lz4hc.c index 5f37d5abcaeb..9e8a35dfa56d 100644 --- a/drivers/block/zram/backend_lz4hc.c +++ b/drivers/block/zram/backend_lz4hc.c @@ -18,7 +18,7 @@ static void lz4hc_release_params(struct zcomp_params *params) static int lz4hc_setup_params(struct zcomp_params *params) { - if (params->level == ZCOMP_PARAM_NO_LEVEL) + if (params->level == ZCOMP_PARAM_NOT_SET) params->level = LZ4HC_DEFAULT_CLEVEL; return 0; diff --git a/drivers/block/zram/backend_zstd.c b/drivers/block/zram/backend_zstd.c index 22c8067536f3..81defb98ed09 100644 --- a/drivers/block/zram/backend_zstd.c +++ b/drivers/block/zram/backend_zstd.c @@ -58,7 +58,7 @@ static int zstd_setup_params(struct zcomp_params *params) return -ENOMEM; params->drv_data = zp; - if (params->level == ZCOMP_PARAM_NO_LEVEL) + if (params->level == ZCOMP_PARAM_NOT_SET) params->level = zstd_default_clevel(); zp->cprm = zstd_get_params(params->level, PAGE_SIZE); diff --git a/drivers/block/zram/zcomp.h b/drivers/block/zram/zcomp.h index 25339ed1e07e..4acffe671a5e 100644 --- a/drivers/block/zram/zcomp.h +++ b/drivers/block/zram/zcomp.h @@ -5,7 +5,11 @@ #include <linux/mutex.h> -#define ZCOMP_PARAM_NO_LEVEL INT_MIN +#define ZCOMP_PARAM_NOT_SET INT_MIN + +struct deflate_params { + s32 winbits; +}; /* * Immutable driver (backend) parameters. The driver may attach private @@ -17,6 +21,9 @@ struct zcomp_params { void *dict; size_t dict_sz; s32 level; + union { + struct deflate_params deflate; + }; void *drv_data; }; diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index fda7d8624889..54c57103715f 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -734,114 +734,19 @@ static void read_from_bdev_async(struct zram *zram, struct page *page, submit_bio(bio); } -#define PAGE_WB_SIG "page_index=" - -#define PAGE_WRITEBACK 0 -#define HUGE_WRITEBACK (1<<0) -#define IDLE_WRITEBACK (1<<1) -#define INCOMPRESSIBLE_WRITEBACK (1<<2) - -static int scan_slots_for_writeback(struct zram *zram, u32 mode, - unsigned long nr_pages, - unsigned long index, - struct zram_pp_ctl *ctl) -{ - for (; nr_pages != 0; index++, nr_pages--) { - bool ok = true; - - zram_slot_lock(zram, index); - if (!zram_allocated(zram, index)) - goto next; - - if (zram_test_flag(zram, index, ZRAM_WB) || - zram_test_flag(zram, index, ZRAM_SAME)) - goto next; - - if (mode & IDLE_WRITEBACK && - !zram_test_flag(zram, index, ZRAM_IDLE)) - goto next; - if (mode & HUGE_WRITEBACK && - !zram_test_flag(zram, index, ZRAM_HUGE)) - goto next; - if (mode & INCOMPRESSIBLE_WRITEBACK && - !zram_test_flag(zram, index, ZRAM_INCOMPRESSIBLE)) - goto next; - - ok = place_pp_slot(zram, ctl, index); -next: - zram_slot_unlock(zram, index); - if (!ok) - break; - } - - return 0; -} - -static ssize_t writeback_store(struct device *dev, - struct device_attribute *attr, const char *buf, size_t len) +static int zram_writeback_slots(struct zram *zram, struct zram_pp_ctl *ctl) { - struct zram *zram = dev_to_zram(dev); - unsigned long nr_pages = zram->disksize >> PAGE_SHIFT; - struct zram_pp_ctl *ctl = NULL; + unsigned long blk_idx = 
0; + struct page *page = NULL; struct zram_pp_slot *pps; - unsigned long index = 0; - struct bio bio; struct bio_vec bio_vec; - struct page *page = NULL; - ssize_t ret = len; - int mode, err; - unsigned long blk_idx = 0; - - if (sysfs_streq(buf, "idle")) - mode = IDLE_WRITEBACK; - else if (sysfs_streq(buf, "huge")) - mode = HUGE_WRITEBACK; - else if (sysfs_streq(buf, "huge_idle")) - mode = IDLE_WRITEBACK | HUGE_WRITEBACK; - else if (sysfs_streq(buf, "incompressible")) - mode = INCOMPRESSIBLE_WRITEBACK; - else { - if (strncmp(buf, PAGE_WB_SIG, sizeof(PAGE_WB_SIG) - 1)) - return -EINVAL; - - if (kstrtol(buf + sizeof(PAGE_WB_SIG) - 1, 10, &index) || - index >= nr_pages) - return -EINVAL; - - nr_pages = 1; - mode = PAGE_WRITEBACK; - } - - down_read(&zram->init_lock); - if (!init_done(zram)) { - ret = -EINVAL; - goto release_init_lock; - } - - /* Do not permit concurrent post-processing actions. */ - if (atomic_xchg(&zram->pp_in_progress, 1)) { - up_read(&zram->init_lock); - return -EAGAIN; - } - - if (!zram->backing_dev) { - ret = -ENODEV; - goto release_init_lock; - } + struct bio bio; + int ret = 0, err; + u32 index; page = alloc_page(GFP_KERNEL); - if (!page) { - ret = -ENOMEM; - goto release_init_lock; - } - - ctl = init_pp_ctl(); - if (!ctl) { - ret = -ENOMEM; - goto release_init_lock; - } - - scan_slots_for_writeback(zram, mode, nr_pages, index, ctl); + if (!page) + return -ENOMEM; while ((pps = select_pp_slot(ctl))) { spin_lock(&zram->wb_limit_lock); @@ -929,10 +834,215 @@ next: if (blk_idx) free_block_bdev(zram, blk_idx); - -release_init_lock: if (page) __free_page(page); + + return ret; +} + +#define PAGE_WRITEBACK 0 +#define HUGE_WRITEBACK (1 << 0) +#define IDLE_WRITEBACK (1 << 1) +#define INCOMPRESSIBLE_WRITEBACK (1 << 2) + +static int parse_page_index(char *val, unsigned long nr_pages, + unsigned long *lo, unsigned long *hi) +{ + int ret; + + ret = kstrtoul(val, 10, lo); + if (ret) + return ret; + if (*lo >= nr_pages) + return -ERANGE; + *hi = *lo + 1; + return 0; +} + +static int parse_page_indexes(char *val, unsigned long nr_pages, + unsigned long *lo, unsigned long *hi) +{ + char *delim; + int ret; + + delim = strchr(val, '-'); + if (!delim) + return -EINVAL; + + *delim = 0x00; + ret = kstrtoul(val, 10, lo); + if (ret) + return ret; + if (*lo >= nr_pages) + return -ERANGE; + + ret = kstrtoul(delim + 1, 10, hi); + if (ret) + return ret; + if (*hi >= nr_pages || *lo > *hi) + return -ERANGE; + *hi += 1; + return 0; +} + +static int parse_mode(char *val, u32 *mode) +{ + *mode = 0; + + if (!strcmp(val, "idle")) + *mode = IDLE_WRITEBACK; + if (!strcmp(val, "huge")) + *mode = HUGE_WRITEBACK; + if (!strcmp(val, "huge_idle")) + *mode = IDLE_WRITEBACK | HUGE_WRITEBACK; + if (!strcmp(val, "incompressible")) + *mode = INCOMPRESSIBLE_WRITEBACK; + + if (*mode == 0) + return -EINVAL; + return 0; +} + +static int scan_slots_for_writeback(struct zram *zram, u32 mode, + unsigned long lo, unsigned long hi, + struct zram_pp_ctl *ctl) +{ + u32 index = lo; + + while (index < hi) { + bool ok = true; + + zram_slot_lock(zram, index); + if (!zram_allocated(zram, index)) + goto next; + + if (zram_test_flag(zram, index, ZRAM_WB) || + zram_test_flag(zram, index, ZRAM_SAME)) + goto next; + + if (mode & IDLE_WRITEBACK && + !zram_test_flag(zram, index, ZRAM_IDLE)) + goto next; + if (mode & HUGE_WRITEBACK && + !zram_test_flag(zram, index, ZRAM_HUGE)) + goto next; + if (mode & INCOMPRESSIBLE_WRITEBACK && + !zram_test_flag(zram, index, ZRAM_INCOMPRESSIBLE)) + goto next; + + ok = place_pp_slot(zram, ctl, index); 
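The helpers above give writeback_store(), later in this hunk, a key=value syntax: page_index= selects a single slot, page_indexes= takes an inclusive lo-hi range that parse_page_indexes() turns into a half-open [lo, hi) interval, and type= (or a bare legacy token) selects idle, huge, huge_idle or incompressible slots. A minimal user-space sketch of driving it, assuming the usual /sys/block/zram0 node and, as the parsing loop suggests, putting the range parameters before type= so they are consumed before the scan starts:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* Hypothetical target: first zram device, slots 512..1023, idle+huge slots only. */
	const char cmd[] = "page_indexes=512-1023 type=huge_idle";
	int fd = open("/sys/block/zram0/writeback", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, cmd, strlen(cmd)) != (ssize_t)strlen(cmd))
		perror("write");
	close(fd);
	return 0;
}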
+next: + zram_slot_unlock(zram, index); + if (!ok) + break; + index++; + } + + return 0; +} + +static ssize_t writeback_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t len) +{ + struct zram *zram = dev_to_zram(dev); + u64 nr_pages = zram->disksize >> PAGE_SHIFT; + unsigned long lo = 0, hi = nr_pages; + struct zram_pp_ctl *ctl = NULL; + char *args, *param, *val; + ssize_t ret = len; + int err, mode = 0; + + down_read(&zram->init_lock); + if (!init_done(zram)) { + up_read(&zram->init_lock); + return -EINVAL; + } + + /* Do not permit concurrent post-processing actions. */ + if (atomic_xchg(&zram->pp_in_progress, 1)) { + up_read(&zram->init_lock); + return -EAGAIN; + } + + if (!zram->backing_dev) { + ret = -ENODEV; + goto release_init_lock; + } + + ctl = init_pp_ctl(); + if (!ctl) { + ret = -ENOMEM; + goto release_init_lock; + } + + args = skip_spaces(buf); + while (*args) { + args = next_arg(args, ¶m, &val); + + /* + * Workaround to support the old writeback interface. + * + * The old writeback interface has a minor inconsistency and + * requires key=value only for page_index parameter, while the + * writeback mode is a valueless parameter. + * + * This is not the case anymore and now all parameters are + * required to have values, however, we need to support the + * legacy writeback interface format so we check if we can + * recognize a valueless parameter as the (legacy) writeback + * mode. + */ + if (!val || !*val) { + err = parse_mode(param, &mode); + if (err) { + ret = err; + goto release_init_lock; + } + + scan_slots_for_writeback(zram, mode, lo, hi, ctl); + break; + } + + if (!strcmp(param, "type")) { + err = parse_mode(val, &mode); + if (err) { + ret = err; + goto release_init_lock; + } + + scan_slots_for_writeback(zram, mode, lo, hi, ctl); + break; + } + + if (!strcmp(param, "page_index")) { + err = parse_page_index(val, nr_pages, &lo, &hi); + if (err) { + ret = err; + goto release_init_lock; + } + + scan_slots_for_writeback(zram, mode, lo, hi, ctl); + continue; + } + + if (!strcmp(param, "page_indexes")) { + err = parse_page_indexes(val, nr_pages, &lo, &hi); + if (err) { + ret = err; + goto release_init_lock; + } + + scan_slots_for_writeback(zram, mode, lo, hi, ctl); + continue; + } + } + + err = zram_writeback_slots(zram, ctl); + if (err) + ret = err; + +release_init_lock: release_pp_ctl(zram, ctl); atomic_set(&zram->pp_in_progress, 0); up_read(&zram->init_lock); @@ -1166,13 +1276,15 @@ static void comp_params_reset(struct zram *zram, u32 prio) struct zcomp_params *params = &zram->params[prio]; vfree(params->dict); - params->level = ZCOMP_PARAM_NO_LEVEL; + params->level = ZCOMP_PARAM_NOT_SET; + params->deflate.winbits = ZCOMP_PARAM_NOT_SET; params->dict_sz = 0; params->dict = NULL; } static int comp_params_store(struct zram *zram, u32 prio, s32 level, - const char *dict_path) + const char *dict_path, + struct deflate_params *deflate_params) { ssize_t sz = 0; @@ -1190,6 +1302,7 @@ static int comp_params_store(struct zram *zram, u32 prio, s32 level, zram->params[prio].dict_sz = sz; zram->params[prio].level = level; + zram->params[prio].deflate.winbits = deflate_params->winbits; return 0; } @@ -1198,11 +1311,14 @@ static ssize_t algorithm_params_store(struct device *dev, const char *buf, size_t len) { - s32 prio = ZRAM_PRIMARY_COMP, level = ZCOMP_PARAM_NO_LEVEL; + s32 prio = ZRAM_PRIMARY_COMP, level = ZCOMP_PARAM_NOT_SET; char *args, *param, *val, *algo = NULL, *dict_path = NULL; + struct deflate_params deflate_params; struct zram *zram = 
dev_to_zram(dev); int ret; + deflate_params.winbits = ZCOMP_PARAM_NOT_SET; + args = skip_spaces(buf); while (*args) { args = next_arg(args, ¶m, &val); @@ -1233,6 +1349,13 @@ static ssize_t algorithm_params_store(struct device *dev, dict_path = val; continue; } + + if (!strcmp(param, "deflate.winbits")) { + ret = kstrtoint(val, 10, &deflate_params.winbits); + if (ret) + return ret; + continue; + } } /* Lookup priority by algorithm name */ @@ -1254,7 +1377,7 @@ static ssize_t algorithm_params_store(struct device *dev, if (prio < ZRAM_PRIMARY_COMP || prio >= ZRAM_MAX_COMPS) return -EINVAL; - ret = comp_params_store(zram, prio, level, dict_path); + ret = comp_params_store(zram, prio, level, dict_path, &deflate_params); return ret ? ret : len; } @@ -1694,7 +1817,7 @@ static int write_incompressible_page(struct zram *zram, struct page *page, */ handle = zs_malloc(zram->mem_pool, PAGE_SIZE, GFP_NOIO | __GFP_NOWARN | - __GFP_HIGHMEM | __GFP_MOVABLE); + __GFP_HIGHMEM | __GFP_MOVABLE, page_to_nid(page)); if (IS_ERR_VALUE(handle)) return PTR_ERR((void *)handle); @@ -1761,7 +1884,7 @@ static int zram_write_page(struct zram *zram, struct page *page, u32 index) handle = zs_malloc(zram->mem_pool, comp_len, GFP_NOIO | __GFP_NOWARN | - __GFP_HIGHMEM | __GFP_MOVABLE); + __GFP_HIGHMEM | __GFP_MOVABLE, page_to_nid(page)); if (IS_ERR_VALUE(handle)) { zcomp_stream_put(zstrm); return PTR_ERR((void *)handle); @@ -1981,10 +2104,15 @@ static int recompress_slot(struct zram *zram, u32 index, struct page *page, * We are holding per-CPU stream mutex and entry lock so better * avoid direct reclaim. Allocation error is not fatal since * we still have the old object in the mem_pool. + * + * XXX: technically, the node we really want here is the node that holds + * the original compressed data. But that would require us to modify + * zsmalloc API to return this information. For now, we will make do with + * the node of the page allocated for recompression. 
*/ handle_new = zs_malloc(zram->mem_pool, comp_len_new, GFP_NOIO | __GFP_NOWARN | - __GFP_HIGHMEM | __GFP_MOVABLE); + __GFP_HIGHMEM | __GFP_MOVABLE, page_to_nid(page)); if (IS_ERR_VALUE(handle_new)) { zcomp_stream_put(zstrm); return PTR_ERR((void *)handle_new); diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c index 4e3ba6e68c32..f7512b4e923e 100644 --- a/drivers/cpufreq/powernow-k8.c +++ b/drivers/cpufreq/powernow-k8.c @@ -482,7 +482,7 @@ static void check_supported_cpu(void *_rc) cpuid(CPUID_FREQ_VOLT_CAPABILITIES, &eax, &ebx, &ecx, &edx); if ((edx & P_STATE_TRANSITION_CAPABLE) != P_STATE_TRANSITION_CAPABLE) { - pr_info("Power state transitions not supported\n"); + pr_info_once("Power state transitions not supported\n"); return; } *rc = 0; diff --git a/drivers/dax/kmem.c b/drivers/dax/kmem.c index e97d47f42ee2..584c70a34b52 100644 --- a/drivers/dax/kmem.c +++ b/drivers/dax/kmem.c @@ -13,6 +13,7 @@ #include <linux/mman.h> #include <linux/memory-tiers.h> #include <linux/memory_hotplug.h> +#include <linux/string_helpers.h> #include "dax-private.h" #include "bus.h" @@ -68,7 +69,7 @@ static void kmem_put_memory_types(void) static int dev_dax_kmem_probe(struct dev_dax *dev_dax) { struct device *dev = &dev_dax->dev; - unsigned long total_len = 0; + unsigned long total_len = 0, orig_len = 0; struct dax_kmem_data *data; struct memory_dev_type *mtype; int i, rc, mapped = 0; @@ -97,6 +98,7 @@ static int dev_dax_kmem_probe(struct dev_dax *dev_dax) for (i = 0; i < dev_dax->nr_range; i++) { struct range range; + orig_len += range_len(&dev_dax->ranges[i].range); rc = dax_kmem_range(dev_dax, i, &range); if (rc) { dev_info(dev, "mapping%d: %#llx-%#llx too small after alignment\n", @@ -109,6 +111,12 @@ static int dev_dax_kmem_probe(struct dev_dax *dev_dax) if (!total_len) { dev_warn(dev, "rejecting DAX region without any memory after alignment\n"); return -EINVAL; + } else if (total_len != orig_len) { + char buf[16]; + + string_get_size(orig_len - total_len, 1, STRING_UNITS_2, + buf, sizeof(buf)); + dev_warn(dev, "DAX region truncated by %s due to alignment\n", buf); } init_node_memory_type(numa_node, mtype); diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c index 20333608b983..cae52c654a15 100644 --- a/drivers/edac/altera_edac.c +++ b/drivers/edac/altera_edac.c @@ -1746,9 +1746,9 @@ altr_edac_a10_device_trig(struct file *file, const char __user *user_buf, local_irq_save(flags); if (trig_type == ALTR_UE_TRIGGER_CHAR) - writel(priv->ue_set_mask, set_addr); + writew(priv->ue_set_mask, set_addr); else - writel(priv->ce_set_mask, set_addr); + writew(priv->ce_set_mask, set_addr); /* Ensure the interrupt test bits are set */ wmb(); @@ -1778,7 +1778,7 @@ altr_edac_a10_device_trig2(struct file *file, const char __user *user_buf, local_irq_save(flags); if (trig_type == ALTR_UE_TRIGGER_CHAR) { - writel(priv->ue_set_mask, set_addr); + writew(priv->ue_set_mask, set_addr); } else { /* Setup read/write of 4 bytes */ writel(ECC_WORD_WRITE, drvdata->base + ECC_BLK_DBYTECTRL_OFST); diff --git a/drivers/firmware/smccc/kvm_guest.c b/drivers/firmware/smccc/kvm_guest.c index a123c05cbc9e..49e1de83d2e8 100644 --- a/drivers/firmware/smccc/kvm_guest.c +++ b/drivers/firmware/smccc/kvm_guest.c @@ -17,17 +17,11 @@ static DECLARE_BITMAP(__kvm_arm_hyp_services, ARM_SMCCC_KVM_NUM_FUNCS) __ro_afte void __init kvm_init_hyp_services(void) { + uuid_t kvm_uuid = ARM_SMCCC_VENDOR_HYP_UID_KVM; struct arm_smccc_res res; u32 val[4]; - if (arm_smccc_1_1_get_conduit() != SMCCC_CONDUIT_HVC) - return; - 
- arm_smccc_1_1_invoke(ARM_SMCCC_VENDOR_HYP_CALL_UID_FUNC_ID, &res); - if (res.a0 != ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_0 || - res.a1 != ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_1 || - res.a2 != ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_2 || - res.a3 != ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_3) + if (!arm_smccc_hypervisor_has_uuid(&kvm_uuid)) return; memset(&res, 0, sizeof(res)); diff --git a/drivers/firmware/smccc/smccc.c b/drivers/firmware/smccc/smccc.c index a74600d9f2d7..cd65b434dc6e 100644 --- a/drivers/firmware/smccc/smccc.c +++ b/drivers/firmware/smccc/smccc.c @@ -67,6 +67,23 @@ s32 arm_smccc_get_soc_id_revision(void) } EXPORT_SYMBOL_GPL(arm_smccc_get_soc_id_revision); +bool arm_smccc_hypervisor_has_uuid(const uuid_t *hyp_uuid) +{ + struct arm_smccc_res res = {}; + uuid_t uuid; + + if (arm_smccc_1_1_get_conduit() != SMCCC_CONDUIT_HVC) + return false; + + arm_smccc_1_1_hvc(ARM_SMCCC_VENDOR_HYP_CALL_UID_FUNC_ID, &res); + if (res.a0 == SMCCC_RET_NOT_SUPPORTED) + return false; + + uuid = smccc_res_to_uuid(res.a0, res.a1, res.a2, res.a3); + return uuid_equal(&uuid, hyp_uuid); +} +EXPORT_SYMBOL_GPL(arm_smccc_hypervisor_has_uuid); + static int __init smccc_devices_init(void) { struct platform_device *pdev; diff --git a/drivers/gpu/drm/i915/i915_mm.c b/drivers/gpu/drm/i915/i915_mm.c index 76e2801619f0..c33bd3d83069 100644 --- a/drivers/gpu/drm/i915/i915_mm.c +++ b/drivers/gpu/drm/i915/i915_mm.c @@ -100,7 +100,7 @@ int remap_io_mapping(struct vm_area_struct *vma, GEM_BUG_ON((vma->vm_flags & EXPECTED_FLAGS) != EXPECTED_FLAGS); - /* We rely on prevalidation of the io-mapping to skip track_pfn(). */ + /* We rely on prevalidation of the io-mapping to skip pfnmap tracking. */ r.mm = vma->vm_mm; r.pfn = pfn; r.prot = __pgprot((pgprot_val(iomap->prot) & _PAGE_CACHE_MASK) | @@ -140,7 +140,7 @@ int remap_io_sg(struct vm_area_struct *vma, }; int err; - /* We rely on prevalidation of the io-mapping to skip track_pfn(). */ + /* We rely on prevalidation of the io-mapping to skip pfnmap tracking. */ GEM_BUG_ON((vma->vm_flags & EXPECTED_FLAGS) != EXPECTED_FLAGS); while (offset >= r.sgt.max >> PAGE_SHIFT) { diff --git a/drivers/gpu/drm/i915/i915_reg_defs.h b/drivers/gpu/drm/i915/i915_reg_defs.h index 94a8f902689e..bfe98cb9a038 100644 --- a/drivers/gpu/drm/i915/i915_reg_defs.h +++ b/drivers/gpu/drm/i915/i915_reg_defs.h @@ -9,76 +9,19 @@ #include <linux/bitfield.h> #include <linux/bits.h> -/** - * REG_BIT() - Prepare a u32 bit value - * @__n: 0-based bit number - * - * Local wrapper for BIT() to force u32, with compile time checks. - * - * @return: Value with bit @__n set. - */ -#define REG_BIT(__n) \ - ((u32)(BIT(__n) + \ - BUILD_BUG_ON_ZERO(__is_constexpr(__n) && \ - ((__n) < 0 || (__n) > 31)))) - -/** - * REG_BIT8() - Prepare a u8 bit value - * @__n: 0-based bit number - * - * Local wrapper for BIT() to force u8, with compile time checks. - * - * @return: Value with bit @__n set. - */ -#define REG_BIT8(__n) \ - ((u8)(BIT(__n) + \ - BUILD_BUG_ON_ZERO(__is_constexpr(__n) && \ - ((__n) < 0 || (__n) > 7)))) - -/** - * REG_GENMASK() - Prepare a continuous u32 bitmask - * @__high: 0-based high bit - * @__low: 0-based low bit - * - * Local wrapper for GENMASK() to force u32, with compile time checks. - * - * @return: Continuous bitmask from @__high to @__low, inclusive. 
- */ -#define REG_GENMASK(__high, __low) \ - ((u32)(GENMASK(__high, __low) + \ - BUILD_BUG_ON_ZERO(__is_constexpr(__high) && \ - __is_constexpr(__low) && \ - ((__low) < 0 || (__high) > 31 || (__low) > (__high))))) - -/** - * REG_GENMASK64() - Prepare a continuous u64 bitmask - * @__high: 0-based high bit - * @__low: 0-based low bit - * - * Local wrapper for GENMASK_ULL() to force u64, with compile time checks. - * - * @return: Continuous bitmask from @__high to @__low, inclusive. +/* + * Wrappers over the generic fixed width BIT_U*() and GENMASK_U*() + * implementations, for compatibility reasons with previous implementation. */ -#define REG_GENMASK64(__high, __low) \ - ((u64)(GENMASK_ULL(__high, __low) + \ - BUILD_BUG_ON_ZERO(__is_constexpr(__high) && \ - __is_constexpr(__low) && \ - ((__low) < 0 || (__high) > 63 || (__low) > (__high))))) +#define REG_GENMASK(high, low) GENMASK_U32(high, low) +#define REG_GENMASK64(high, low) GENMASK_U64(high, low) +#define REG_GENMASK16(high, low) GENMASK_U16(high, low) +#define REG_GENMASK8(high, low) GENMASK_U8(high, low) -/** - * REG_GENMASK8() - Prepare a continuous u8 bitmask - * @__high: 0-based high bit - * @__low: 0-based low bit - * - * Local wrapper for GENMASK() to force u8, with compile time checks. - * - * @return: Continuous bitmask from @__high to @__low, inclusive. - */ -#define REG_GENMASK8(__high, __low) \ - ((u8)(GENMASK(__high, __low) + \ - BUILD_BUG_ON_ZERO(__is_constexpr(__high) && \ - __is_constexpr(__low) && \ - ((__low) < 0 || (__high) > 7 || (__low) > (__high))))) +#define REG_BIT(n) BIT_U32(n) +#define REG_BIT64(n) BIT_U64(n) +#define REG_BIT16(n) BIT_U16(n) +#define REG_BIT8(n) BIT_U8(n) /* * Local integer constant expression version of is_power_of_2(). @@ -143,35 +86,6 @@ */ #define REG_FIELD_GET64(__mask, __val) ((u64)FIELD_GET(__mask, __val)) -/** - * REG_BIT16() - Prepare a u16 bit value - * @__n: 0-based bit number - * - * Local wrapper for BIT() to force u16, with compile time - * checks. - * - * @return: Value with bit @__n set. - */ -#define REG_BIT16(__n) \ - ((u16)(BIT(__n) + \ - BUILD_BUG_ON_ZERO(__is_constexpr(__n) && \ - ((__n) < 0 || (__n) > 15)))) - -/** - * REG_GENMASK16() - Prepare a continuous u8 bitmask - * @__high: 0-based high bit - * @__low: 0-based low bit - * - * Local wrapper for GENMASK() to force u16, with compile time - * checks. - * - * @return: Continuous bitmask from @__high to @__low, inclusive. - */ -#define REG_GENMASK16(__high, __low) \ - ((u16)(GENMASK(__high, __low) + \ - BUILD_BUG_ON_ZERO(__is_constexpr(__high) && \ - __is_constexpr(__low) && \ - ((__low) < 0 || (__high) > 15 || (__low) > (__high))))) /** * REG_FIELD_PREP16() - Prepare a u16 bitfield value diff --git a/drivers/gpu/drm/panel/panel-samsung-sofef00.c b/drivers/gpu/drm/panel/panel-samsung-sofef00.c index 210a25afe82b..d92ae6b6100f 100644 --- a/drivers/gpu/drm/panel/panel-samsung-sofef00.c +++ b/drivers/gpu/drm/panel/panel-samsung-sofef00.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only -/* Copyright (c) 2020 Caleb Connolly <caleb@connolly.tech> +/* Copyright (c) 2020 Casey Connolly <casey.connolly@linaro.org> * Generated with linux-mdss-dsi-panel-driver-generator from vendor device tree: * Copyright (c) 2020, The Linux Foundation. All rights reserved. 
*/ @@ -260,6 +260,6 @@ static struct mipi_dsi_driver sofef00_panel_driver = { module_mipi_dsi_driver(sofef00_panel_driver); -MODULE_AUTHOR("Caleb Connolly <caleb@connolly.tech>"); +MODULE_AUTHOR("Casey Connolly <casey.connolly@linaro.org>"); MODULE_DESCRIPTION("DRM driver for Samsung AMOLED DSI panels found in OnePlus 6/6T phones"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/hv/Kconfig b/drivers/hv/Kconfig index 6c1416167bd2..1cd188b73b74 100644 --- a/drivers/hv/Kconfig +++ b/drivers/hv/Kconfig @@ -5,17 +5,18 @@ menu "Microsoft Hyper-V guest support" config HYPERV tristate "Microsoft Hyper-V client drivers" depends on (X86 && X86_LOCAL_APIC && HYPERVISOR_GUEST) \ - || (ACPI && ARM64 && !CPU_BIG_ENDIAN) + || (ARM64 && !CPU_BIG_ENDIAN) select PARAVIRT select X86_HV_CALLBACK_VECTOR if X86 select OF_EARLY_FLATTREE if OF + select SYSFB if !HYPERV_VTL_MODE help Select this option to run Linux as a Hyper-V client operating system. config HYPERV_VTL_MODE bool "Enable Linux to boot in VTL context" - depends on X86_64 && HYPERV + depends on (X86_64 || ARM64) && HYPERV depends on SMP default n help @@ -31,7 +32,7 @@ config HYPERV_VTL_MODE Select this option to build a Linux kernel to run at a VTL other than the normal VTL0, which currently is only VTL2. This option - initializes the x86 platform for VTL2, and adds the ability to boot + initializes the kernel to run in VTL2, and adds the ability to boot secondary CPUs directly into 64-bit context as required for VTLs other than 0. A kernel built with this option must run at VTL2, and will not run as a normal guest. diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c index 8351360bba16..be490c598785 100644 --- a/drivers/hv/connection.c +++ b/drivers/hv/connection.c @@ -207,10 +207,19 @@ int vmbus_connect(void) mutex_init(&vmbus_connection.channel_mutex); /* + * The following Hyper-V interrupt and monitor pages can be used by + * UIO for mapping to user-space, so they should always be allocated on + * system page boundaries. The system page size must be >= the Hyper-V + * page size. + */ + BUILD_BUG_ON(PAGE_SIZE < HV_HYP_PAGE_SIZE); + + /* * Setup the vmbus event connection for channel interrupt * abstraction stuff */ - vmbus_connection.int_page = hv_alloc_hyperv_zeroed_page(); + vmbus_connection.int_page = + (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO); if (vmbus_connection.int_page == NULL) { ret = -ENOMEM; goto cleanup; @@ -225,8 +234,8 @@ int vmbus_connect(void) * Setup the monitor notification facility. 
The 1st page for * parent->child and the 2nd page for child->parent */ - vmbus_connection.monitor_pages[0] = hv_alloc_hyperv_page(); - vmbus_connection.monitor_pages[1] = hv_alloc_hyperv_page(); + vmbus_connection.monitor_pages[0] = (void *)__get_free_page(GFP_KERNEL); + vmbus_connection.monitor_pages[1] = (void *)__get_free_page(GFP_KERNEL); if ((vmbus_connection.monitor_pages[0] == NULL) || (vmbus_connection.monitor_pages[1] == NULL)) { ret = -ENOMEM; @@ -342,21 +351,23 @@ void vmbus_disconnect(void) destroy_workqueue(vmbus_connection.work_queue); if (vmbus_connection.int_page) { - hv_free_hyperv_page(vmbus_connection.int_page); + free_page((unsigned long)vmbus_connection.int_page); vmbus_connection.int_page = NULL; } if (vmbus_connection.monitor_pages[0]) { if (!set_memory_encrypted( (unsigned long)vmbus_connection.monitor_pages[0], 1)) - hv_free_hyperv_page(vmbus_connection.monitor_pages[0]); + free_page((unsigned long) + vmbus_connection.monitor_pages[0]); vmbus_connection.monitor_pages[0] = NULL; } if (vmbus_connection.monitor_pages[1]) { if (!set_memory_encrypted( (unsigned long)vmbus_connection.monitor_pages[1], 1)) - hv_free_hyperv_page(vmbus_connection.monitor_pages[1]); + free_page((unsigned long) + vmbus_connection.monitor_pages[1]); vmbus_connection.monitor_pages[1] = NULL; } } diff --git a/drivers/hv/hv_common.c b/drivers/hv/hv_common.c index 59792e00cecf..49898d10faff 100644 --- a/drivers/hv/hv_common.c +++ b/drivers/hv/hv_common.c @@ -105,45 +105,6 @@ void __init hv_common_free(void) hv_synic_eventring_tail = NULL; } -/* - * Functions for allocating and freeing memory with size and - * alignment HV_HYP_PAGE_SIZE. These functions are needed because - * the guest page size may not be the same as the Hyper-V page - * size. We depend upon kmalloc() aligning power-of-two size - * allocations to the allocation size boundary, so that the - * allocated memory appears to Hyper-V as a page of the size - * it expects. 
- */ - -void *hv_alloc_hyperv_page(void) -{ - BUILD_BUG_ON(PAGE_SIZE < HV_HYP_PAGE_SIZE); - - if (PAGE_SIZE == HV_HYP_PAGE_SIZE) - return (void *)__get_free_page(GFP_KERNEL); - else - return kmalloc(HV_HYP_PAGE_SIZE, GFP_KERNEL); -} -EXPORT_SYMBOL_GPL(hv_alloc_hyperv_page); - -void *hv_alloc_hyperv_zeroed_page(void) -{ - if (PAGE_SIZE == HV_HYP_PAGE_SIZE) - return (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO); - else - return kzalloc(HV_HYP_PAGE_SIZE, GFP_KERNEL); -} -EXPORT_SYMBOL_GPL(hv_alloc_hyperv_zeroed_page); - -void hv_free_hyperv_page(void *addr) -{ - if (PAGE_SIZE == HV_HYP_PAGE_SIZE) - free_page((unsigned long)addr); - else - kfree(addr); -} -EXPORT_SYMBOL_GPL(hv_free_hyperv_page); - static void *hv_panic_page; /* @@ -272,7 +233,7 @@ static void hv_kmsg_dump_unregister(void) atomic_notifier_chain_unregister(&panic_notifier_list, &hyperv_panic_report_block); - hv_free_hyperv_page(hv_panic_page); + kfree(hv_panic_page); hv_panic_page = NULL; } @@ -280,7 +241,7 @@ static void hv_kmsg_dump_register(void) { int ret; - hv_panic_page = hv_alloc_hyperv_zeroed_page(); + hv_panic_page = kzalloc(HV_HYP_PAGE_SIZE, GFP_KERNEL); if (!hv_panic_page) { pr_err("Hyper-V: panic message page memory allocation failed\n"); return; @@ -289,7 +250,7 @@ static void hv_kmsg_dump_register(void) ret = kmsg_dump_register(&hv_kmsg_dumper); if (ret) { pr_err("Hyper-V: kmsg dump register error 0x%x\n", ret); - hv_free_hyperv_page(hv_panic_page); + kfree(hv_panic_page); hv_panic_page = NULL; } } @@ -317,6 +278,37 @@ void __init hv_get_partition_id(void) pr_err("Hyper-V: failed to get partition ID: %#x\n", hv_result(status)); } +#if IS_ENABLED(CONFIG_HYPERV_VTL_MODE) +u8 __init get_vtl(void) +{ + u64 control = HV_HYPERCALL_REP_COMP_1 | HVCALL_GET_VP_REGISTERS; + struct hv_input_get_vp_registers *input; + struct hv_output_get_vp_registers *output; + unsigned long flags; + u64 ret; + + local_irq_save(flags); + input = *this_cpu_ptr(hyperv_pcpu_input_arg); + output = *this_cpu_ptr(hyperv_pcpu_output_arg); + + memset(input, 0, struct_size(input, names, 1)); + input->partition_id = HV_PARTITION_ID_SELF; + input->vp_index = HV_VP_INDEX_SELF; + input->input_vtl.as_uint8 = 0; + input->names[0] = HV_REGISTER_VSM_VP_STATUS; + + ret = hv_do_hypercall(control, input, output); + if (hv_result_success(ret)) { + ret = output->values[0].reg8 & HV_VTL_MASK; + } else { + pr_err("Failed to get VTL(error: %lld) exiting...\n", ret); + BUG(); + } + + local_irq_restore(flags); + return ret; +} +#endif int __init hv_common_init(void) { diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c index d74adb5bba44..33b524b4eb5e 100644 --- a/drivers/hv/vmbus_drv.c +++ b/drivers/hv/vmbus_drv.c @@ -45,7 +45,8 @@ struct vmbus_dynid { struct hv_vmbus_device_id id; }; -static struct device *hv_dev; +/* VMBus Root Device */ +static struct device *vmbus_root_device; static int hyperv_cpuhp_online; @@ -80,9 +81,15 @@ static struct resource *fb_mmio; static struct resource *hyperv_mmio; static DEFINE_MUTEX(hyperv_mmio_lock); +struct device *hv_get_vmbus_root_device(void) +{ + return vmbus_root_device; +} +EXPORT_SYMBOL_GPL(hv_get_vmbus_root_device); + static int vmbus_exists(void) { - if (hv_dev == NULL) + if (vmbus_root_device == NULL) return -ENODEV; return 0; @@ -707,7 +714,30 @@ static const struct hv_vmbus_device_id *hv_vmbus_get_id(const struct hv_driver * return id; } -/* vmbus_add_dynid - add a new device ID to this driver and re-probe devices */ +/* vmbus_add_dynid - add a new device ID to this driver and re-probe devices + * + * This 
function can race with vmbus_device_register(). This function is + * typically running on a user thread in response to writing to the "new_id" + * sysfs entry for a driver. vmbus_device_register() is running on a + * workqueue thread in response to the Hyper-V host offering a device to the + * guest. This function calls driver_attach(), which looks for an existing + * device matching the new id, and attaches the driver to which the new id + * has been assigned. vmbus_device_register() calls device_register(), which + * looks for a driver that matches the device being registered. If both + * operations are running simultaneously, the device driver probe function runs + * on whichever thread establishes the linkage between the driver and device. + * + * In most cases, it doesn't matter which thread runs the driver probe + * function. But if vmbus_device_register() does not find a matching driver, + * it proceeds to create the "channels" subdirectory and numbered per-channel + * subdirectory in sysfs. While that multi-step creation is in progress, this + * function could run the driver probe function. If the probe function checks + * for, or operates on, entries in the "channels" subdirectory, including by + * calling hv_create_ring_sysfs(), the operation may or may not succeed + * depending on the race. The race can't create a kernel failure in VMBus + * or device subsystem code, but probe functions in VMBus drivers doing such + * operations must be prepared for the failure case. + */ static int vmbus_add_dynid(struct hv_driver *drv, guid_t *guid) { struct vmbus_dynid *dynid; @@ -861,7 +891,7 @@ static int vmbus_dma_configure(struct device *child_device) * On x86/x64 coherence is assumed and these calls have no effect. */ hv_setup_dma_ops(child_device, - device_get_dma_attr(hv_dev) == DEV_DMA_COHERENT); + device_get_dma_attr(vmbus_root_device) == DEV_DMA_COHERENT); return 0; } @@ -1921,7 +1951,8 @@ static const struct kobj_type vmbus_chan_ktype = { * ring for userspace to use. * Note: Race conditions can happen with userspace and it is not encouraged to create new * use-cases for this. This was added to maintain backward compatibility, while solving - * one of the race conditions in uio_hv_generic while creating sysfs. + * one of the race conditions in uio_hv_generic while creating sysfs. See comments with + * vmbus_add_dynid() and vmbus_device_register(). * * Returns 0 on success or error code on failure. */ @@ -2037,7 +2068,7 @@ int vmbus_device_register(struct hv_device *child_device_obj) &child_device_obj->channel->offermsg.offer.if_instance); child_device_obj->device.bus = &hv_bus; - child_device_obj->device.parent = hv_dev; + child_device_obj->device.parent = vmbus_root_device; child_device_obj->device.release = vmbus_device_release; child_device_obj->device.dma_parms = &child_device_obj->dma_parms; @@ -2055,6 +2086,20 @@ int vmbus_device_register(struct hv_device *child_device_obj) return ret; } + /* + * If device_register() found a driver to assign to the device, the + * driver's probe function has already run at this point. If that + * probe function accesses or operates on the "channels" subdirectory + * in sysfs, those operations will have failed because the "channels" + * subdirectory doesn't exist until the code below runs. Or if the + * probe function creates a /dev entry, a user space program could + * find and open the /dev entry, and then create a race by accessing + * the "channels" subdirectory while the creation steps are in progress + * here. 
The race can't result in a kernel failure, but the user space + * program may get an error in accessing "channels" or its + * subdirectories. See also comments with vmbus_add_dynid() about a + * related race condition. + */ child_device_obj->channels_kset = kset_create_and_add("channels", NULL, kobj); if (!child_device_obj->channels_kset) { @@ -2412,7 +2457,7 @@ static int vmbus_acpi_add(struct platform_device *pdev) struct acpi_device *ancestor; struct acpi_device *device = ACPI_COMPANION(&pdev->dev); - hv_dev = &device->dev; + vmbus_root_device = &device->dev; /* * Older versions of Hyper-V for ARM64 fail to include the _CCA @@ -2465,6 +2510,31 @@ static int vmbus_acpi_add(struct platform_device *pdev) } #endif +static int vmbus_set_irq(struct platform_device *pdev) +{ + struct irq_data *data; + int irq; + irq_hw_number_t hwirq; + + irq = platform_get_irq(pdev, 0); + /* platform_get_irq() may not return 0. */ + if (irq < 0) + return irq; + + data = irq_get_irq_data(irq); + if (!data) { + pr_err("No interrupt data for VMBus virq %d\n", irq); + return -ENODEV; + } + hwirq = irqd_to_hwirq(data); + + vmbus_irq = irq; + vmbus_interrupt = hwirq; + pr_debug("VMBus virq %d, hwirq %d\n", vmbus_irq, vmbus_interrupt); + + return 0; +} + static int vmbus_device_add(struct platform_device *pdev) { struct resource **cur_res = &hyperv_mmio; @@ -2473,12 +2543,17 @@ static int vmbus_device_add(struct platform_device *pdev) struct device_node *np = pdev->dev.of_node; int ret; - hv_dev = &pdev->dev; + vmbus_root_device = &pdev->dev; ret = of_range_parser_init(&parser, np); if (ret) return ret; + if (!__is_defined(HYPERVISOR_CALLBACK_VECTOR)) + ret = vmbus_set_irq(pdev); + if (ret) + return ret; + for_each_of_range(&parser, &range) { struct resource *res; @@ -2786,7 +2861,7 @@ static int __init hv_acpi_init(void) if (ret) return ret; - if (!hv_dev) { + if (!vmbus_root_device) { ret = -ENODEV; goto cleanup; } @@ -2817,7 +2892,7 @@ static int __init hv_acpi_init(void) cleanup: platform_driver_unregister(&vmbus_platform_driver); - hv_dev = NULL; + vmbus_root_device = NULL; return ret; } diff --git a/drivers/i3c/master/Kconfig b/drivers/i3c/master/Kconfig index 77da199c7413..7b30db3253af 100644 --- a/drivers/i3c/master/Kconfig +++ b/drivers/i3c/master/Kconfig @@ -1,7 +1,6 @@ # SPDX-License-Identifier: GPL-2.0-only config CDNS_I3C_MASTER tristate "Cadence I3C master driver" - depends on I3C depends on HAS_IOMEM depends on !(ALPHA || PARISC) help @@ -9,7 +8,6 @@ config CDNS_I3C_MASTER config DW_I3C_MASTER tristate "Synospsys DesignWare I3C master driver" - depends on I3C depends on HAS_IOMEM depends on !(ALPHA || PARISC) # ALPHA and PARISC needs {read,write}sl() @@ -38,7 +36,6 @@ config AST2600_I3C_MASTER config SVC_I3C_MASTER tristate "Silvaco I3C Dual-Role Master driver" - depends on I3C depends on HAS_IOMEM depends on !(ALPHA || PARISC) help @@ -46,7 +43,6 @@ config SVC_I3C_MASTER config MIPI_I3C_HCI tristate "MIPI I3C Host Controller Interface driver (EXPERIMENTAL)" - depends on I3C depends on HAS_IOMEM help Support for hardware following the MIPI Aliance's I3C Host Controller diff --git a/drivers/i3c/master/mipi-i3c-hci/core.c b/drivers/i3c/master/mipi-i3c-hci/core.c index a71226d7ca59..bc4538694540 100644 --- a/drivers/i3c/master/mipi-i3c-hci/core.c +++ b/drivers/i3c/master/mipi-i3c-hci/core.c @@ -78,7 +78,7 @@ #define INTR_SIGNAL_ENABLE 0x28 #define INTR_FORCE 0x2c #define INTR_HC_CMD_SEQ_UFLOW_STAT BIT(12) /* Cmd Sequence Underflow */ -#define INTR_HC_RESET_CANCEL BIT(11) /* HC Cancelled Reset */ +#define 
INTR_HC_SEQ_CANCEL BIT(11) /* HC Cancelled Transaction Sequence */ #define INTR_HC_INTERNAL_ERR BIT(10) /* HC Internal Error */ #define DAT_SECTION 0x30 /* Device Address Table */ @@ -590,26 +590,27 @@ static irqreturn_t i3c_hci_irq_handler(int irq, void *dev_id) u32 val; val = reg_read(INTR_STATUS); + reg_write(INTR_STATUS, val); DBG("INTR_STATUS = %#x", val); - if (val) { - reg_write(INTR_STATUS, val); - } + if (val) + result = IRQ_HANDLED; - if (val & INTR_HC_RESET_CANCEL) { - DBG("cancelled reset"); - val &= ~INTR_HC_RESET_CANCEL; + if (val & INTR_HC_SEQ_CANCEL) { + dev_dbg(&hci->master.dev, + "Host Controller Cancelled Transaction Sequence\n"); + val &= ~INTR_HC_SEQ_CANCEL; } if (val & INTR_HC_INTERNAL_ERR) { dev_err(&hci->master.dev, "Host Controller Internal Error\n"); val &= ~INTR_HC_INTERNAL_ERR; } - hci->io->irq_handler(hci); - if (val) - dev_err(&hci->master.dev, "unexpected INTR_STATUS %#x\n", val); - else + dev_warn_once(&hci->master.dev, + "unexpected INTR_STATUS %#x\n", val); + + if (hci->io->irq_handler(hci)) result = IRQ_HANDLED; return result; @@ -699,9 +700,14 @@ static int i3c_hci_init(struct i3c_hci *hci) if (ret) return -ENXIO; - /* Disable all interrupts and allow all signal updates */ + /* Disable all interrupts */ reg_write(INTR_SIGNAL_ENABLE, 0x0); - reg_write(INTR_STATUS_ENABLE, 0xffffffff); + /* + * Only allow bit 31:10 signal updates because + * Bit 0:9 are reserved in IP version >= 0.8 + * Bit 0:5 are defined in IP version < 0.8 but not handled by PIO code + */ + reg_write(INTR_STATUS_ENABLE, GENMASK(31, 10)); /* Make sure our data ordering fits the host's */ regval = reg_read(HC_CONTROL); diff --git a/drivers/i3c/master/svc-i3c-master.c b/drivers/i3c/master/svc-i3c-master.c index 85e16de208d3..7e1a7cb94b43 100644 --- a/drivers/i3c/master/svc-i3c-master.c +++ b/drivers/i3c/master/svc-i3c-master.c @@ -201,11 +201,10 @@ struct svc_i3c_drvdata { * @addrs: Array containing the dynamic addresses of each attached device * @descs: Array of descriptors, one per attached device * @hj_work: Hot-join work - * @ibi_work: IBI work * @irq: Main interrupt - * @pclk: System clock + * @num_clks: I3C clock number * @fclk: Fast clock (bus) - * @sclk: Slow clock (other events) + * @clks: I3C clock array * @xferqueue: Transfer queue structure * @xferqueue.list: List member * @xferqueue.cur: Current ongoing transfer @@ -229,11 +228,10 @@ struct svc_i3c_master { u8 addrs[SVC_I3C_MAX_DEVS]; struct i3c_dev_desc *descs[SVC_I3C_MAX_DEVS]; struct work_struct hj_work; - struct work_struct ibi_work; int irq; - struct clk *pclk; + int num_clks; struct clk *fclk; - struct clk *sclk; + struct clk_bulk_data *clks; struct { struct list_head list; struct svc_i3c_xfer *cur; @@ -487,9 +485,8 @@ static int svc_i3c_master_handle_ibi_won(struct svc_i3c_master *master, u32 msta return ret; } -static void svc_i3c_master_ibi_work(struct work_struct *work) +static void svc_i3c_master_ibi_isr(struct svc_i3c_master *master) { - struct svc_i3c_master *master = container_of(work, struct svc_i3c_master, ibi_work); struct svc_i3c_i2c_dev_data *data; unsigned int ibitype, ibiaddr; struct i3c_dev_desc *dev; @@ -504,7 +501,7 @@ static void svc_i3c_master_ibi_work(struct work_struct *work) * schedule during the whole I3C transaction, otherwise, the I3C bus timeout may happen if * any irq or schedule happen during transaction. 
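The svc-i3c rework above leans on the guard() helpers from <linux/cleanup.h>: guard(spinlock)(&master->xferqueue.lock) takes the lock immediately and drops it automatically on every return path, which is why svc_i3c_master_ibi_isr() can bail out early without explicit unlocks. A small kernel-style sketch of the pattern (the example_dev type and function are made up for illustration):

#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/* Made-up device type, for illustration only. */
struct example_dev {
	spinlock_t lock;
	bool ready;
	unsigned long events;
};

static int example_consume_event(struct example_dev *edev)
{
	/* spin_lock() here; spin_unlock() runs automatically at every return. */
	guard(spinlock)(&edev->lock);

	if (!edev->ready)
		return -EAGAIN;	/* no explicit unlock needed */

	edev->events++;
	return 0;
}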
*/ - guard(spinlock_irqsave)(&master->xferqueue.lock); + guard(spinlock)(&master->xferqueue.lock); /* * IBIWON may be set before SVC_I3C_MCTRL_REQUEST_AUTO_IBI, causing @@ -530,7 +527,7 @@ static void svc_i3c_master_ibi_work(struct work_struct *work) if (ret) { dev_err(master->dev, "Timeout when polling for IBIWON\n"); svc_i3c_master_emit_stop(master); - goto reenable_ibis; + return; } status = readl(master->regs + SVC_I3C_MSTATUS); @@ -574,17 +571,17 @@ static void svc_i3c_master_ibi_work(struct work_struct *work) svc_i3c_master_emit_stop(master); - goto reenable_ibis; + return; } /* Handle the non critical tasks */ switch (ibitype) { case SVC_I3C_MSTATUS_IBITYPE_IBI: + svc_i3c_master_emit_stop(master); if (dev) { i3c_master_queue_ibi(dev, master->ibi.tbq_slot); master->ibi.tbq_slot = NULL; } - svc_i3c_master_emit_stop(master); break; case SVC_I3C_MSTATUS_IBITYPE_HOT_JOIN: svc_i3c_master_emit_stop(master); @@ -597,9 +594,6 @@ static void svc_i3c_master_ibi_work(struct work_struct *work) default: break; } - -reenable_ibis: - svc_i3c_master_enable_interrupts(master, SVC_I3C_MINT_SLVSTART); } static irqreturn_t svc_i3c_master_irq_handler(int irq, void *dev_id) @@ -618,10 +612,12 @@ static irqreturn_t svc_i3c_master_irq_handler(int irq, void *dev_id) !SVC_I3C_MSTATUS_STATE_SLVREQ(active)) return IRQ_HANDLED; - svc_i3c_master_disable_interrupts(master); - - /* Handle the interrupt in a non atomic context */ - queue_work(master->base.wq, &master->ibi_work); + /* + * The SDA line remains low until the request is processed. + * Receive the request in the interrupt context to respond promptly + * and restore the bus to idle state. + */ + svc_i3c_master_ibi_isr(master); return IRQ_HANDLED; } @@ -1281,9 +1277,9 @@ static int svc_i3c_master_write(struct svc_i3c_master *master, static int svc_i3c_master_xfer(struct svc_i3c_master *master, bool rnw, unsigned int xfer_type, u8 addr, u8 *in, const u8 *out, unsigned int xfer_len, - unsigned int *actual_len, bool continued) + unsigned int *actual_len, bool continued, bool repeat_start) { - int retry = 2; + int retry = repeat_start ? 
1 : 2; u32 reg; int ret; @@ -1468,7 +1464,7 @@ static void svc_i3c_master_start_xfer_locked(struct svc_i3c_master *master) ret = svc_i3c_master_xfer(master, cmd->rnw, xfer->type, cmd->addr, cmd->in, cmd->out, cmd->len, &cmd->actual_len, - cmd->continued); + cmd->continued, i > 0); /* cmd->xfer is NULL if I2C or CCC transfer */ if (cmd->xfer) cmd->xfer->actual_len = cmd->actual_len; @@ -1875,42 +1871,11 @@ static const struct i3c_master_controller_ops svc_i3c_master_ops = { .set_speed = svc_i3c_master_set_speed, }; -static int svc_i3c_master_prepare_clks(struct svc_i3c_master *master) -{ - int ret = 0; - - ret = clk_prepare_enable(master->pclk); - if (ret) - return ret; - - ret = clk_prepare_enable(master->fclk); - if (ret) { - clk_disable_unprepare(master->pclk); - return ret; - } - - ret = clk_prepare_enable(master->sclk); - if (ret) { - clk_disable_unprepare(master->pclk); - clk_disable_unprepare(master->fclk); - return ret; - } - - return 0; -} - -static void svc_i3c_master_unprepare_clks(struct svc_i3c_master *master) -{ - clk_disable_unprepare(master->pclk); - clk_disable_unprepare(master->fclk); - clk_disable_unprepare(master->sclk); -} - static int svc_i3c_master_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct svc_i3c_master *master; - int ret; + int ret, i; master = devm_kzalloc(dev, sizeof(*master), GFP_KERNEL); if (!master) @@ -1924,30 +1889,33 @@ static int svc_i3c_master_probe(struct platform_device *pdev) if (IS_ERR(master->regs)) return PTR_ERR(master->regs); - master->pclk = devm_clk_get(dev, "pclk"); - if (IS_ERR(master->pclk)) - return PTR_ERR(master->pclk); + master->num_clks = devm_clk_bulk_get_all(dev, &master->clks); + if (master->num_clks < 0) + return dev_err_probe(dev, -EINVAL, "can't get I3C clocks\n"); - master->fclk = devm_clk_get(dev, "fast_clk"); + for (i = 0; i < master->num_clks; i++) { + if (!strcmp(master->clks[i].id, "fast_clk")) + break; + } + + if (i == master->num_clks) + return dev_err_probe(dev, -EINVAL, + "can't get I3C peripheral clock\n"); + + master->fclk = master->clks[i].clk; if (IS_ERR(master->fclk)) return PTR_ERR(master->fclk); - master->sclk = devm_clk_get(dev, "slow_clk"); - if (IS_ERR(master->sclk)) - return PTR_ERR(master->sclk); - master->irq = platform_get_irq(pdev, 0); if (master->irq < 0) return master->irq; master->dev = dev; - - ret = svc_i3c_master_prepare_clks(master); + ret = clk_bulk_prepare_enable(master->num_clks, master->clks); if (ret) - return ret; + return dev_err_probe(dev, ret, "can't enable I3C clocks\n"); INIT_WORK(&master->hj_work, svc_i3c_master_hj_work); - INIT_WORK(&master->ibi_work, svc_i3c_master_ibi_work); mutex_init(&master->lock); ret = devm_request_irq(dev, master->irq, svc_i3c_master_irq_handler, @@ -1998,7 +1966,7 @@ rpm_disable: pm_runtime_set_suspended(&pdev->dev); err_disable_clks: - svc_i3c_master_unprepare_clks(master); + clk_bulk_disable_unprepare(master->num_clks, master->clks); return ret; } @@ -2036,7 +2004,7 @@ static int __maybe_unused svc_i3c_runtime_suspend(struct device *dev) struct svc_i3c_master *master = dev_get_drvdata(dev); svc_i3c_save_regs(master); - svc_i3c_master_unprepare_clks(master); + clk_bulk_disable_unprepare(master->num_clks, master->clks); pinctrl_pm_select_sleep_state(dev); return 0; @@ -2045,9 +2013,12 @@ static int __maybe_unused svc_i3c_runtime_suspend(struct device *dev) static int __maybe_unused svc_i3c_runtime_resume(struct device *dev) { struct svc_i3c_master *master = dev_get_drvdata(dev); + int ret; pinctrl_pm_select_default_state(dev); 
- svc_i3c_master_prepare_clks(master); + ret = clk_bulk_prepare_enable(master->num_clks, master->clks); + if (ret) + return ret; svc_i3c_restore_regs(master); diff --git a/drivers/iio/adc/qcom-spmi-rradc.c b/drivers/iio/adc/qcom-spmi-rradc.c index 63ebaf13ef19..f61ad0510f04 100644 --- a/drivers/iio/adc/qcom-spmi-rradc.c +++ b/drivers/iio/adc/qcom-spmi-rradc.c @@ -2,7 +2,7 @@ /* * Copyright (c) 2016-2017, 2019, The Linux Foundation. All rights reserved. * Copyright (c) 2022 Linaro Limited. - * Author: Caleb Connolly <caleb.connolly@linaro.org> + * Author: Casey Connolly <casey.connolly@linaro.org> * * This driver is for the Round Robin ADC found in the pmi8998 and pm660 PMICs. */ @@ -1016,5 +1016,5 @@ static struct platform_driver rradc_driver = { module_platform_driver(rradc_driver); MODULE_DESCRIPTION("QCOM SPMI PMIC RR ADC driver"); -MODULE_AUTHOR("Caleb Connolly <caleb.connolly@linaro.org>"); +MODULE_AUTHOR("Casey Connolly <casey.connolly@linaro.org>"); MODULE_LICENSE("GPL"); diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c index 1008858f78e2..c066a4da7c14 100644 --- a/drivers/input/joystick/xpad.c +++ b/drivers/input/joystick/xpad.c @@ -105,6 +105,8 @@ #define PKT_XBE2_FW_5_EARLY 3 #define PKT_XBE2_FW_5_11 4 +#define FLAG_DELAY_INIT BIT(0) + static bool dpad_to_buttons; module_param(dpad_to_buttons, bool, S_IRUGO); MODULE_PARM_DESC(dpad_to_buttons, "Map D-PAD to buttons rather than axes for unknown pads"); @@ -127,6 +129,7 @@ static const struct xpad_device { char *name; u8 mapping; u8 xtype; + u8 flags; } xpad_device[] = { /* Please keep this list sorted by vendor and product ID. */ { 0x0079, 0x18d4, "GPD Win 2 X-Box Controller", 0, XTYPE_XBOX360 }, @@ -416,6 +419,7 @@ static const struct xpad_device { { 0x3285, 0x0663, "Nacon Evol-X", 0, XTYPE_XBOXONE }, { 0x3537, 0x1004, "GameSir T4 Kaleid", 0, XTYPE_XBOX360 }, { 0x3537, 0x1010, "GameSir G7 SE", 0, XTYPE_XBOXONE }, + { 0x366c, 0x0005, "ByoWave Proteus Controller", MAP_SHARE_BUTTON, XTYPE_XBOXONE, FLAG_DELAY_INIT }, { 0x3767, 0x0101, "Fanatec Speedster 3 Forceshock Wheel", 0, XTYPE_XBOX }, { 0x413d, 0x2104, "Black Shark Green Ghost Gamepad", 0, XTYPE_XBOX360 }, { 0xffff, 0xffff, "Chinese-made Xbox Controller", 0, XTYPE_XBOX }, @@ -571,6 +575,7 @@ static const struct usb_device_id xpad_table[] = { XPAD_XBOXONE_VENDOR(0x3285), /* Nacon Evol-X */ XPAD_XBOX360_VENDOR(0x3537), /* GameSir Controllers */ XPAD_XBOXONE_VENDOR(0x3537), /* GameSir Controllers */ + XPAD_XBOXONE_VENDOR(0x366c), /* ByoWave controllers */ XPAD_XBOX360_VENDOR(0x413d), /* Black Shark Green Ghost Controller */ { } }; @@ -599,6 +604,7 @@ struct xboxone_init_packet { * - https://github.com/medusalix/xone/blob/master/bus/protocol.c */ #define GIP_CMD_ACK 0x01 +#define GIP_CMD_ANNOUNCE 0x02 #define GIP_CMD_IDENTIFY 0x04 #define GIP_CMD_POWER 0x05 #define GIP_CMD_AUTHENTICATE 0x06 @@ -673,20 +679,19 @@ static const u8 xboxone_hori_ack_id[] = { }; /* - * This packet is required for most (all?) of the PDP pads to start - * sending input reports. These pads include: (0x0e6f:0x02ab), - * (0x0e6f:0x02a4), (0x0e6f:0x02a6). + * This packet is sent by default on Windows, and is required for some pads to + * start sending input reports, including most (all?) of the PDP. These pads + * include: (0x0e6f:0x02ab), (0x0e6f:0x02a4), (0x0e6f:0x02a6). 
*/ -static const u8 xboxone_pdp_led_on[] = { - GIP_CMD_LED, GIP_OPT_INTERNAL, GIP_SEQ0, GIP_PL_LEN(3), 0x00, GIP_LED_ON, 0x14 -}; +static const u8 xboxone_led_on[] = { GIP_CMD_LED, GIP_OPT_INTERNAL, GIP_SEQ0, +GIP_PL_LEN(3), 0x00, GIP_LED_ON, 0x14 }; /* * This packet is required for most (all?) of the PDP pads to start * sending input reports. These pads include: (0x0e6f:0x02ab), * (0x0e6f:0x02a4), (0x0e6f:0x02a6). */ -static const u8 xboxone_pdp_auth[] = { +static const u8 xboxone_auth_done[] = { GIP_CMD_AUTHENTICATE, GIP_OPT_INTERNAL, GIP_SEQ0, GIP_PL_LEN(2), 0x01, 0x00 }; @@ -723,12 +728,8 @@ static const struct xboxone_init_packet xboxone_init_packets[] = { XBOXONE_INIT_PKT(0x045e, 0x02ea, xboxone_s_init), XBOXONE_INIT_PKT(0x045e, 0x0b00, xboxone_s_init), XBOXONE_INIT_PKT(0x045e, 0x0b00, extra_input_packet_init), - XBOXONE_INIT_PKT(0x0e6f, 0x0000, xboxone_pdp_led_on), - XBOXONE_INIT_PKT(0x0f0d, 0x01b2, xboxone_pdp_led_on), - XBOXONE_INIT_PKT(0x20d6, 0xa01a, xboxone_pdp_led_on), - XBOXONE_INIT_PKT(0x0e6f, 0x0000, xboxone_pdp_auth), - XBOXONE_INIT_PKT(0x0f0d, 0x01b2, xboxone_pdp_auth), - XBOXONE_INIT_PKT(0x20d6, 0xa01a, xboxone_pdp_auth), + XBOXONE_INIT_PKT(0x0000, 0x0000, xboxone_led_on), + XBOXONE_INIT_PKT(0x0000, 0x0000, xboxone_auth_done), XBOXONE_INIT_PKT(0x24c6, 0x541a, xboxone_rumblebegin_init), XBOXONE_INIT_PKT(0x24c6, 0x542a, xboxone_rumblebegin_init), XBOXONE_INIT_PKT(0x24c6, 0x543a, xboxone_rumblebegin_init), @@ -788,10 +789,13 @@ struct usb_xpad { const char *name; /* name of the device */ struct work_struct work; /* init/remove device from callback */ time64_t mode_btn_down_ts; + bool delay_init; /* init packets should be delayed */ + bool delayed_init_done; }; static int xpad_init_input(struct usb_xpad *xpad); static void xpad_deinit_input(struct usb_xpad *xpad); +static int xpad_start_input(struct usb_xpad *xpad); static void xpadone_ack_mode_report(struct usb_xpad *xpad, u8 seq_num); static void xpad360w_poweroff_controller(struct usb_xpad *xpad); @@ -1076,6 +1080,17 @@ static void xpadone_process_packet(struct usb_xpad *xpad, u16 cmd, unsigned char do_sync = true; } + } else if (data[0] == GIP_CMD_ANNOUNCE) { + int error; + + if (xpad->delay_init && !xpad->delayed_init_done) { + xpad->delayed_init_done = true; + error = xpad_start_input(xpad); + if (error) + dev_warn(&xpad->dev->dev, + "unable to start delayed input: %d\n", + error); + } } else if (data[0] == GIP_CMD_INPUT) { /* The main valid packet type for inputs */ /* menu/view buttons */ input_report_key(dev, BTN_START, data[4] & BIT(2)); @@ -1254,6 +1269,14 @@ static bool xpad_prepare_next_init_packet(struct usb_xpad *xpad) if (xpad->xtype != XTYPE_XBOXONE) return false; + /* + * Some dongles will discard init packets if they're sent before the + * controller connects. In these cases, we need to wait until we get + * an announce packet from them to send the init packet sequence. 
+ */ + if (xpad->delay_init && !xpad->delayed_init_done) + return false; + /* Perform initialization sequence for Xbox One pads that require it */ while (xpad->init_seq < ARRAY_SIZE(xboxone_init_packets)) { init_packet = &xboxone_init_packets[xpad->init_seq++]; @@ -2069,6 +2092,9 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id xpad->mapping = xpad_device[i].mapping; xpad->xtype = xpad_device[i].xtype; xpad->name = xpad_device[i].name; + if (xpad_device[i].flags & FLAG_DELAY_INIT) + xpad->delay_init = true; + xpad->packet_type = PKT_XB; INIT_WORK(&xpad->work, xpad_presence_work); @@ -2268,6 +2294,7 @@ static int xpad_resume(struct usb_interface *intf) struct usb_xpad *xpad = usb_get_intfdata(intf); struct input_dev *input = xpad->dev; + xpad->delayed_init_done = false; if (xpad->xtype == XTYPE_XBOX360W) return xpad360w_start_input(xpad); diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c index adf0f311996c..3ff2fcf05ad5 100644 --- a/drivers/input/keyboard/atkbd.c +++ b/drivers/input/keyboard/atkbd.c @@ -37,7 +37,7 @@ static int atkbd_set = 2; module_param_named(set, atkbd_set, int, 0); MODULE_PARM_DESC(set, "Select keyboard code set (2 = default, 3 = PS/2 native)"); -#if defined(__i386__) || defined(__x86_64__) || defined(__hppa__) +#if defined(__i386__) || defined(__x86_64__) || defined(__hppa__) || defined(__loongarch__) static bool atkbd_reset; #else static bool atkbd_reset = true; diff --git a/drivers/input/keyboard/gpio_keys.c b/drivers/input/keyboard/gpio_keys.c index 5c39a217b94c..f9db86da0818 100644 --- a/drivers/input/keyboard/gpio_keys.c +++ b/drivers/input/keyboard/gpio_keys.c @@ -449,6 +449,8 @@ static enum hrtimer_restart gpio_keys_irq_timer(struct hrtimer *t) release_timer); struct input_dev *input = bdata->input; + guard(spinlock_irqsave)(&bdata->lock); + if (bdata->key_pressed) { input_report_key(input, *bdata->code, 0); input_sync(input); @@ -486,7 +488,7 @@ static irqreturn_t gpio_keys_irq_isr(int irq, void *dev_id) if (bdata->release_delay) hrtimer_start(&bdata->release_timer, ms_to_ktime(bdata->release_delay), - HRTIMER_MODE_REL_HARD); + HRTIMER_MODE_REL); out: return IRQ_HANDLED; } @@ -628,7 +630,7 @@ static int gpio_keys_setup_key(struct platform_device *pdev, bdata->release_delay = button->debounce_interval; hrtimer_setup(&bdata->release_timer, gpio_keys_irq_timer, - CLOCK_REALTIME, HRTIMER_MODE_REL_HARD); + CLOCK_REALTIME, HRTIMER_MODE_REL); isr = gpio_keys_irq_isr; irqflags = 0; diff --git a/drivers/input/keyboard/matrix_keypad.c b/drivers/input/keyboard/matrix_keypad.c index e46473cb817c..e50a6fea9a60 100644 --- a/drivers/input/keyboard/matrix_keypad.c +++ b/drivers/input/keyboard/matrix_keypad.c @@ -104,6 +104,16 @@ static void disable_row_irqs(struct matrix_keypad *keypad) disable_irq_nosync(keypad->row_irqs[i]); } +static uint32_t read_row_state(struct matrix_keypad *keypad) +{ + int row; + u32 row_state = 0; + + for (row = 0; row < keypad->num_row_gpios; row++) + row_state |= row_asserted(keypad, row) ? 
BIT(row) : 0; + return row_state; +} + /* * This gets the keys from keyboard and reports it to input subsystem */ @@ -115,6 +125,10 @@ static void matrix_keypad_scan(struct work_struct *work) const unsigned short *keycodes = input_dev->keycode; uint32_t new_state[MATRIX_MAX_COLS]; int row, col, code; + u32 init_row_state, new_row_state; + + /* read initial row state to detect changes between scan */ + init_row_state = read_row_state(keypad); /* de-activate all columns for scanning */ activate_all_cols(keypad, false); @@ -129,9 +143,7 @@ static void matrix_keypad_scan(struct work_struct *work) activate_col(keypad, col, true); - for (row = 0; row < keypad->num_row_gpios; row++) - new_state[col] |= - row_asserted(keypad, row) ? BIT(row) : 0; + new_state[col] = read_row_state(keypad); activate_col(keypad, col, false); } @@ -165,6 +177,18 @@ static void matrix_keypad_scan(struct work_struct *work) keypad->scan_pending = false; enable_row_irqs(keypad); } + + /* read new row state and detect if value has changed */ + new_row_state = read_row_state(keypad); + if (init_row_state != new_row_state) { + guard(spinlock_irq)(&keypad->lock); + if (unlikely(keypad->scan_pending || keypad->stopped)) + return; + disable_row_irqs(keypad); + keypad->scan_pending = true; + schedule_delayed_work(&keypad->work, + msecs_to_jiffies(keypad->debounce_ms)); + } } static irqreturn_t matrix_keypad_interrupt(int irq, void *id) diff --git a/drivers/input/keyboard/snvs_pwrkey.c b/drivers/input/keyboard/snvs_pwrkey.c index bbf409dda89f..fe7398eeb828 100644 --- a/drivers/input/keyboard/snvs_pwrkey.c +++ b/drivers/input/keyboard/snvs_pwrkey.c @@ -27,6 +27,8 @@ #define SNVS_HPSR_BTN BIT(6) #define SNVS_LPSR_SPO BIT(18) #define SNVS_LPCR_DEP_EN BIT(5) +#define SNVS_LPCR_BPT_SHIFT 16 +#define SNVS_LPCR_BPT_MASK (3 << SNVS_LPCR_BPT_SHIFT) #define DEBOUNCE_TIME 30 #define REPEAT_INTERVAL 60 @@ -114,6 +116,8 @@ static int imx_snvs_pwrkey_probe(struct platform_device *pdev) struct device_node *np; struct clk *clk; int error; + unsigned int val; + unsigned int bpt; u32 vid; /* Get SNVS register Page */ @@ -148,6 +152,27 @@ static int imx_snvs_pwrkey_probe(struct platform_device *pdev) if (pdata->irq < 0) return -EINVAL; + error = of_property_read_u32(np, "power-off-time-sec", &val); + if (!error) { + switch (val) { + case 0: + bpt = 0x3; + break; + case 5: + case 10: + case 15: + bpt = (val / 5) - 1; + break; + default: + dev_err(&pdev->dev, + "power-off-time-sec %d out of range\n", val); + return -EINVAL; + } + + regmap_update_bits(pdata->snvs, SNVS_LPCR_REG, SNVS_LPCR_BPT_MASK, + bpt << SNVS_LPCR_BPT_SHIFT); + } + regmap_read(pdata->snvs, SNVS_HPVIDR1_REG, &vid); pdata->minor_rev = vid & 0xff; diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c index d9ee14b1f451..4581f1c53644 100644 --- a/drivers/input/misc/ims-pcu.c +++ b/drivers/input/misc/ims-pcu.c @@ -844,6 +844,12 @@ static int ims_pcu_flash_firmware(struct ims_pcu *pcu, addr = be32_to_cpu(rec->addr) / 2; len = be16_to_cpu(rec->len); + if (len > sizeof(pcu->cmd_buf) - 1 - sizeof(*fragment)) { + dev_err(pcu->dev, + "Invalid record length in firmware: %d\n", len); + return -EINVAL; + } + fragment = (void *)&pcu->cmd_buf[1]; put_unaligned_le32(addr, &fragment->addr); fragment->len = len; diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig index ed52db272f4d..68eeed660a4a 100644 --- a/drivers/mailbox/Kconfig +++ b/drivers/mailbox/Kconfig @@ -36,6 +36,16 @@ config ARM_MHU_V3 that provides different means of transports: supported extensions will be 
discovered and possibly managed at probe-time. +config CV1800_MBOX + tristate "cv1800 mailbox" + depends on ARCH_SOPHGO || COMPILE_TEST + help + Mailbox driver implementation for Sophgo CV18XX SoCs. This driver + can be used to send message between different processors in SoC. Any + processer can write data in a channel, and set co-responding register + to raise interrupt to notice another processor, and it is allowed to + send data to itself. + config EXYNOS_MBOX tristate "Exynos Mailbox" depends on ARCH_EXYNOS || COMPILE_TEST @@ -191,8 +201,8 @@ config POLARFIRE_SOC_MAILBOX config MCHP_SBI_IPC_MBOX tristate "Microchip Inter-processor Communication (IPC) SBI driver" - depends on RISCV_SBI || COMPILE_TEST - depends on ARCH_MICROCHIP + depends on RISCV_SBI + depends on ARCH_MICROCHIP || COMPILE_TEST help Mailbox implementation for Microchip devices with an Inter-process communication (IPC) controller. diff --git a/drivers/mailbox/Makefile b/drivers/mailbox/Makefile index 9a1542b55539..13a3448b3271 100644 --- a/drivers/mailbox/Makefile +++ b/drivers/mailbox/Makefile @@ -11,6 +11,8 @@ obj-$(CONFIG_ARM_MHU_V2) += arm_mhuv2.o obj-$(CONFIG_ARM_MHU_V3) += arm_mhuv3.o +obj-$(CONFIG_CV1800_MBOX) += cv1800-mailbox.o + obj-$(CONFIG_EXYNOS_MBOX) += exynos-mailbox.o obj-$(CONFIG_IMX_MBOX) += imx-mailbox.o diff --git a/drivers/mailbox/cv1800-mailbox.c b/drivers/mailbox/cv1800-mailbox.c new file mode 100644 index 000000000000..4761191acf78 --- /dev/null +++ b/drivers/mailbox/cv1800-mailbox.c @@ -0,0 +1,220 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright (C) 2024 Sophgo Technology Inc. + * Copyright (C) 2024 Yuntao Dai <d1581209858@live.com> + * Copyright (C) 2025 Junhui Liu <junhui.liu@pigmoral.tech> + */ + +#include <linux/bits.h> +#include <linux/device.h> +#include <linux/err.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/kfifo.h> +#include <linux/mailbox_client.h> +#include <linux/mailbox_controller.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/slab.h> + +#define RECV_CPU 1 + +#define MAILBOX_MAX_CHAN 8 +#define MAILBOX_MSG_LEN 8 + +#define MBOX_EN_REG(cpu) (cpu << 2) +#define MBOX_DONE_REG(cpu) ((cpu << 2) + 2) +#define MBOX_SET_CLR_REG(cpu) (0x10 + (cpu << 4)) +#define MBOX_SET_INT_REG(cpu) (0x18 + (cpu << 4)) +#define MBOX_SET_REG 0x60 + +#define MAILBOX_CONTEXT_OFFSET 0x0400 +#define MAILBOX_CONTEXT_SIZE 0x0040 + +#define MBOX_CONTEXT_BASE_INDEX(base, index) \ + ((u64 __iomem *)(base + MAILBOX_CONTEXT_OFFSET) + index) + +/** + * struct cv1800_mbox_chan_priv - cv1800 mailbox channel private data + * @idx: index of channel + * @cpu: send to which processor + */ +struct cv1800_mbox_chan_priv { + int idx; + int cpu; +}; + +struct cv1800_mbox { + struct mbox_controller mbox; + struct cv1800_mbox_chan_priv priv[MAILBOX_MAX_CHAN]; + struct mbox_chan chans[MAILBOX_MAX_CHAN]; + u64 __iomem *content[MAILBOX_MAX_CHAN]; + void __iomem *mbox_base; + int recvid; +}; + +static irqreturn_t cv1800_mbox_isr(int irq, void *dev_id) +{ + struct cv1800_mbox *mbox = (struct cv1800_mbox *)dev_id; + size_t i; + u64 msg; + int ret = IRQ_NONE; + + for (i = 0; i < MAILBOX_MAX_CHAN; i++) { + if (mbox->content[i] && mbox->chans[i].cl) { + memcpy_fromio(&msg, mbox->content[i], MAILBOX_MSG_LEN); + mbox->content[i] = NULL; + mbox_chan_received_data(&mbox->chans[i], (void *)&msg); + ret = IRQ_HANDLED; + } + } + + return ret; +} + +static irqreturn_t cv1800_mbox_irq(int irq, void *dev_id) +{ + struct cv1800_mbox *mbox = (struct cv1800_mbox *)dev_id; 
+ u8 set, valid; + size_t i; + int ret = IRQ_NONE; + + set = readb(mbox->mbox_base + MBOX_SET_INT_REG(RECV_CPU)); + + if (!set) + return ret; + + for (i = 0; i < MAILBOX_MAX_CHAN; i++) { + valid = set & BIT(i); + if (valid) { + mbox->content[i] = + MBOX_CONTEXT_BASE_INDEX(mbox->mbox_base, i); + writeb(valid, mbox->mbox_base + + MBOX_SET_CLR_REG(RECV_CPU)); + writeb(~valid, mbox->mbox_base + MBOX_EN_REG(RECV_CPU)); + ret = IRQ_WAKE_THREAD; + } + } + + return ret; +} + +static int cv1800_mbox_send_data(struct mbox_chan *chan, void *data) +{ + struct cv1800_mbox_chan_priv *priv = + (struct cv1800_mbox_chan_priv *)chan->con_priv; + struct cv1800_mbox *mbox = dev_get_drvdata(chan->mbox->dev); + int idx = priv->idx; + int cpu = priv->cpu; + u8 en, valid; + + memcpy_toio(MBOX_CONTEXT_BASE_INDEX(mbox->mbox_base, idx), + data, MAILBOX_MSG_LEN); + + valid = BIT(idx); + writeb(valid, mbox->mbox_base + MBOX_SET_CLR_REG(cpu)); + en = readb(mbox->mbox_base + MBOX_EN_REG(cpu)); + writeb(en | valid, mbox->mbox_base + MBOX_EN_REG(cpu)); + writeb(valid, mbox->mbox_base + MBOX_SET_REG); + + return 0; +} + +static bool cv1800_last_tx_done(struct mbox_chan *chan) +{ + struct cv1800_mbox_chan_priv *priv = + (struct cv1800_mbox_chan_priv *)chan->con_priv; + struct cv1800_mbox *mbox = dev_get_drvdata(chan->mbox->dev); + u8 en; + + en = readb(mbox->mbox_base + MBOX_EN_REG(priv->cpu)); + + return !(en & BIT(priv->idx)); +} + +static const struct mbox_chan_ops cv1800_mbox_chan_ops = { + .send_data = cv1800_mbox_send_data, + .last_tx_done = cv1800_last_tx_done, +}; + +static struct mbox_chan *cv1800_mbox_xlate(struct mbox_controller *mbox, + const struct of_phandle_args *spec) +{ + struct cv1800_mbox_chan_priv *priv; + + int idx = spec->args[0]; + int cpu = spec->args[1]; + + if (idx >= mbox->num_chans) + return ERR_PTR(-EINVAL); + + priv = mbox->chans[idx].con_priv; + priv->cpu = cpu; + + return &mbox->chans[idx]; +} + +static const struct of_device_id cv1800_mbox_of_match[] = { + { .compatible = "sophgo,cv1800b-mailbox", }, + {}, +}; +MODULE_DEVICE_TABLE(of, cv1800_mbox_of_match); + +static int cv1800_mbox_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct cv1800_mbox *mb; + int irq, idx, err; + + mb = devm_kzalloc(dev, sizeof(*mb), GFP_KERNEL); + if (!mb) + return -ENOMEM; + + mb->mbox_base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(mb->mbox_base)) + return dev_err_probe(dev, PTR_ERR(mb->mbox_base), + "Failed to map resource\n"); + + mb->mbox.dev = dev; + mb->mbox.chans = mb->chans; + mb->mbox.txdone_poll = true; + mb->mbox.ops = &cv1800_mbox_chan_ops; + mb->mbox.num_chans = MAILBOX_MAX_CHAN; + mb->mbox.of_xlate = cv1800_mbox_xlate; + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + + err = devm_request_threaded_irq(dev, irq, cv1800_mbox_irq, + cv1800_mbox_isr, IRQF_ONESHOT, + dev_name(&pdev->dev), mb); + if (err < 0) + return dev_err_probe(dev, err, "Failed to register irq\n"); + + for (idx = 0; idx < MAILBOX_MAX_CHAN; idx++) { + mb->priv[idx].idx = idx; + mb->mbox.chans[idx].con_priv = &mb->priv[idx]; + } + + platform_set_drvdata(pdev, mb); + + err = devm_mbox_controller_register(dev, &mb->mbox); + if (err) + return dev_err_probe(dev, err, "Failed to register mailbox\n"); + + return 0; +} + +static struct platform_driver cv1800_mbox_driver = { + .driver = { + .name = "cv1800-mbox", + .of_match_table = cv1800_mbox_of_match, + }, + .probe = cv1800_mbox_probe, +}; + +module_platform_driver(cv1800_mbox_driver); + +MODULE_DESCRIPTION("cv1800 mailbox 
driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/mailbox/imx-mailbox.c b/drivers/mailbox/imx-mailbox.c index 6ef8338add0d..6778afc64a04 100644 --- a/drivers/mailbox/imx-mailbox.c +++ b/drivers/mailbox/imx-mailbox.c @@ -226,7 +226,7 @@ static int imx_mu_generic_tx(struct imx_mu_priv *priv, { u32 *arg = data; u32 val; - int ret; + int ret, count; switch (cp->type) { case IMX_MU_TYPE_TX: @@ -240,11 +240,20 @@ static int imx_mu_generic_tx(struct imx_mu_priv *priv, case IMX_MU_TYPE_TXDB_V2: imx_mu_write(priv, IMX_MU_xCR_GIRn(priv->dcfg->type, cp->idx), priv->dcfg->xCR[IMX_MU_GCR]); - ret = readl_poll_timeout(priv->base + priv->dcfg->xCR[IMX_MU_GCR], val, - !(val & IMX_MU_xCR_GIRn(priv->dcfg->type, cp->idx)), - 0, 1000); - if (ret) - dev_warn_ratelimited(priv->dev, "channel type: %d failure\n", cp->type); + ret = -ETIMEDOUT; + count = 0; + while (ret && (count < 10)) { + ret = + readl_poll_timeout(priv->base + priv->dcfg->xCR[IMX_MU_GCR], val, + !(val & IMX_MU_xCR_GIRn(priv->dcfg->type, cp->idx)), + 0, 10000); + + if (ret) { + dev_warn_ratelimited(priv->dev, + "channel type: %d timeout, %d times, retry\n", + cp->type, ++count); + } + } break; default: dev_warn_ratelimited(priv->dev, "Send data on wrong channel type: %d\n", cp->type); diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c index 0593b4d03685..5cd8ae222073 100644 --- a/drivers/mailbox/mailbox.c +++ b/drivers/mailbox/mailbox.c @@ -6,6 +6,7 @@ * Author: Jassi Brar <jassisinghbrar@gmail.com> */ +#include <linux/cleanup.h> #include <linux/delay.h> #include <linux/device.h> #include <linux/err.h> @@ -24,15 +25,12 @@ static DEFINE_MUTEX(con_mutex); static int add_to_rbuf(struct mbox_chan *chan, void *mssg) { int idx; - unsigned long flags; - spin_lock_irqsave(&chan->lock, flags); + guard(spinlock_irqsave)(&chan->lock); /* See if there is any space left */ - if (chan->msg_count == MBOX_TX_QUEUE_LEN) { - spin_unlock_irqrestore(&chan->lock, flags); + if (chan->msg_count == MBOX_TX_QUEUE_LEN) return -ENOBUFS; - } idx = chan->msg_free; chan->msg_data[idx] = mssg; @@ -43,60 +41,53 @@ static int add_to_rbuf(struct mbox_chan *chan, void *mssg) else chan->msg_free++; - spin_unlock_irqrestore(&chan->lock, flags); - return idx; } static void msg_submit(struct mbox_chan *chan) { unsigned count, idx; - unsigned long flags; void *data; int err = -EBUSY; - spin_lock_irqsave(&chan->lock, flags); - - if (!chan->msg_count || chan->active_req) - goto exit; + scoped_guard(spinlock_irqsave, &chan->lock) { + if (!chan->msg_count || chan->active_req) + break; - count = chan->msg_count; - idx = chan->msg_free; - if (idx >= count) - idx -= count; - else - idx += MBOX_TX_QUEUE_LEN - count; + count = chan->msg_count; + idx = chan->msg_free; + if (idx >= count) + idx -= count; + else + idx += MBOX_TX_QUEUE_LEN - count; - data = chan->msg_data[idx]; + data = chan->msg_data[idx]; - if (chan->cl->tx_prepare) - chan->cl->tx_prepare(chan->cl, data); - /* Try to submit a message to the MBOX controller */ - err = chan->mbox->ops->send_data(chan, data); - if (!err) { - chan->active_req = data; - chan->msg_count--; + if (chan->cl->tx_prepare) + chan->cl->tx_prepare(chan->cl, data); + /* Try to submit a message to the MBOX controller */ + err = chan->mbox->ops->send_data(chan, data); + if (!err) { + chan->active_req = data; + chan->msg_count--; + } } -exit: - spin_unlock_irqrestore(&chan->lock, flags); if (!err && (chan->txdone_method & TXDONE_BY_POLL)) { /* kick start the timer immediately to avoid delays */ - spin_lock_irqsave(&chan->mbox->poll_hrt_lock, 
flags); - hrtimer_start(&chan->mbox->poll_hrt, 0, HRTIMER_MODE_REL); - spin_unlock_irqrestore(&chan->mbox->poll_hrt_lock, flags); + scoped_guard(spinlock_irqsave, &chan->mbox->poll_hrt_lock) + hrtimer_start(&chan->mbox->poll_hrt, 0, HRTIMER_MODE_REL); } } static void tx_tick(struct mbox_chan *chan, int r) { - unsigned long flags; void *mssg; - spin_lock_irqsave(&chan->lock, flags); - mssg = chan->active_req; - chan->active_req = NULL; - spin_unlock_irqrestore(&chan->lock, flags); + scoped_guard(spinlock_irqsave, &chan->lock) { + mssg = chan->active_req; + chan->active_req = NULL; + } /* Submit next message */ msg_submit(chan); @@ -118,7 +109,6 @@ static enum hrtimer_restart txdone_hrtimer(struct hrtimer *hrtimer) container_of(hrtimer, struct mbox_controller, poll_hrt); bool txdone, resched = false; int i; - unsigned long flags; for (i = 0; i < mbox->num_chans; i++) { struct mbox_chan *chan = &mbox->chans[i]; @@ -133,10 +123,10 @@ static enum hrtimer_restart txdone_hrtimer(struct hrtimer *hrtimer) } if (resched) { - spin_lock_irqsave(&mbox->poll_hrt_lock, flags); - if (!hrtimer_is_queued(hrtimer)) - hrtimer_forward_now(hrtimer, ms_to_ktime(mbox->txpoll_period)); - spin_unlock_irqrestore(&mbox->poll_hrt_lock, flags); + scoped_guard(spinlock_irqsave, &mbox->poll_hrt_lock) { + if (!hrtimer_is_queued(hrtimer)) + hrtimer_forward_now(hrtimer, ms_to_ktime(mbox->txpoll_period)); + } return HRTIMER_RESTART; } @@ -318,25 +308,23 @@ EXPORT_SYMBOL_GPL(mbox_flush); static int __mbox_bind_client(struct mbox_chan *chan, struct mbox_client *cl) { struct device *dev = cl->dev; - unsigned long flags; int ret; if (chan->cl || !try_module_get(chan->mbox->dev->driver->owner)) { - dev_dbg(dev, "%s: mailbox not free\n", __func__); + dev_err(dev, "%s: mailbox not free\n", __func__); return -EBUSY; } - spin_lock_irqsave(&chan->lock, flags); - chan->msg_free = 0; - chan->msg_count = 0; - chan->active_req = NULL; - chan->cl = cl; - init_completion(&chan->tx_complete); - - if (chan->txdone_method == TXDONE_BY_POLL && cl->knows_txdone) - chan->txdone_method = TXDONE_BY_ACK; + scoped_guard(spinlock_irqsave, &chan->lock) { + chan->msg_free = 0; + chan->msg_count = 0; + chan->active_req = NULL; + chan->cl = cl; + init_completion(&chan->tx_complete); - spin_unlock_irqrestore(&chan->lock, flags); + if (chan->txdone_method == TXDONE_BY_POLL && cl->knows_txdone) + chan->txdone_method = TXDONE_BY_ACK; + } if (chan->mbox->ops->startup) { ret = chan->mbox->ops->startup(chan); @@ -370,13 +358,9 @@ static int __mbox_bind_client(struct mbox_chan *chan, struct mbox_client *cl) */ int mbox_bind_client(struct mbox_chan *chan, struct mbox_client *cl) { - int ret; - - mutex_lock(&con_mutex); - ret = __mbox_bind_client(chan, cl); - mutex_unlock(&con_mutex); + guard(mutex)(&con_mutex); - return ret; + return __mbox_bind_client(chan, cl); } EXPORT_SYMBOL_GPL(mbox_bind_client); @@ -413,32 +397,29 @@ struct mbox_chan *mbox_request_channel(struct mbox_client *cl, int index) ret = of_parse_phandle_with_args(dev->of_node, "mboxes", "#mbox-cells", index, &spec); if (ret) { - dev_dbg(dev, "%s: can't parse \"mboxes\" property\n", __func__); + dev_err(dev, "%s: can't parse \"mboxes\" property\n", __func__); return ERR_PTR(ret); } - mutex_lock(&con_mutex); + scoped_guard(mutex, &con_mutex) { + chan = ERR_PTR(-EPROBE_DEFER); + list_for_each_entry(mbox, &mbox_cons, node) + if (mbox->dev->of_node == spec.np) { + chan = mbox->of_xlate(mbox, &spec); + if (!IS_ERR(chan)) + break; + } - chan = ERR_PTR(-EPROBE_DEFER); - list_for_each_entry(mbox, &mbox_cons, 
node) - if (mbox->dev->of_node == spec.np) { - chan = mbox->of_xlate(mbox, &spec); - if (!IS_ERR(chan)) - break; - } + of_node_put(spec.np); - of_node_put(spec.np); + if (IS_ERR(chan)) + return chan; - if (IS_ERR(chan)) { - mutex_unlock(&con_mutex); - return chan; + ret = __mbox_bind_client(chan, cl); + if (ret) + chan = ERR_PTR(ret); } - ret = __mbox_bind_client(chan, cl); - if (ret) - chan = ERR_PTR(ret); - - mutex_unlock(&con_mutex); return chan; } EXPORT_SYMBOL_GPL(mbox_request_channel); @@ -458,7 +439,7 @@ struct mbox_chan *mbox_request_channel_byname(struct mbox_client *cl, if (index < 0) { dev_err(cl->dev, "%s() could not locate channel named \"%s\"\n", __func__, name); - return ERR_PTR(-EINVAL); + return ERR_PTR(index); } return mbox_request_channel(cl, index); } @@ -471,8 +452,6 @@ EXPORT_SYMBOL_GPL(mbox_request_channel_byname); */ void mbox_free_channel(struct mbox_chan *chan) { - unsigned long flags; - if (!chan || !chan->cl) return; @@ -480,14 +459,14 @@ void mbox_free_channel(struct mbox_chan *chan) chan->mbox->ops->shutdown(chan); /* The queued TX requests are simply aborted, no callbacks are made */ - spin_lock_irqsave(&chan->lock, flags); - chan->cl = NULL; - chan->active_req = NULL; - if (chan->txdone_method == TXDONE_BY_ACK) - chan->txdone_method = TXDONE_BY_POLL; + scoped_guard(spinlock_irqsave, &chan->lock) { + chan->cl = NULL; + chan->active_req = NULL; + if (chan->txdone_method == TXDONE_BY_ACK) + chan->txdone_method = TXDONE_BY_POLL; + } module_put(chan->mbox->dev->driver->owner); - spin_unlock_irqrestore(&chan->lock, flags); } EXPORT_SYMBOL_GPL(mbox_free_channel); @@ -547,9 +526,8 @@ int mbox_controller_register(struct mbox_controller *mbox) if (!mbox->of_xlate) mbox->of_xlate = of_mbox_index_xlate; - mutex_lock(&con_mutex); - list_add_tail(&mbox->node, &mbox_cons); - mutex_unlock(&con_mutex); + scoped_guard(mutex, &con_mutex) + list_add_tail(&mbox->node, &mbox_cons); return 0; } @@ -566,17 +544,15 @@ void mbox_controller_unregister(struct mbox_controller *mbox) if (!mbox) return; - mutex_lock(&con_mutex); - - list_del(&mbox->node); - - for (i = 0; i < mbox->num_chans; i++) - mbox_free_channel(&mbox->chans[i]); + scoped_guard(mutex, &con_mutex) { + list_del(&mbox->node); - if (mbox->txdone_poll) - hrtimer_cancel(&mbox->poll_hrt); + for (i = 0; i < mbox->num_chans; i++) + mbox_free_channel(&mbox->chans[i]); - mutex_unlock(&con_mutex); + if (mbox->txdone_poll) + hrtimer_cancel(&mbox->poll_hrt); + } } EXPORT_SYMBOL_GPL(mbox_controller_unregister); @@ -587,16 +563,6 @@ static void __devm_mbox_controller_unregister(struct device *dev, void *res) mbox_controller_unregister(*mbox); } -static int devm_mbox_controller_match(struct device *dev, void *res, void *data) -{ - struct mbox_controller **mbox = res; - - if (WARN_ON(!mbox || !*mbox)) - return 0; - - return *mbox == data; -} - /** * devm_mbox_controller_register() - managed mbox_controller_register() * @dev: device owning the mailbox controller being registered @@ -632,20 +598,3 @@ int devm_mbox_controller_register(struct device *dev, return 0; } EXPORT_SYMBOL_GPL(devm_mbox_controller_register); - -/** - * devm_mbox_controller_unregister() - managed mbox_controller_unregister() - * @dev: device owning the mailbox controller being unregistered - * @mbox: mailbox controller being unregistered - * - * This function unregisters the mailbox controller and removes the device- - * managed resource that was set up to automatically unregister the mailbox - * controller on driver probe failure or driver removal. 
It's typically not - * necessary to call this function. - */ -void devm_mbox_controller_unregister(struct device *dev, struct mbox_controller *mbox) -{ - WARN_ON(devres_release(dev, __devm_mbox_controller_unregister, - devm_mbox_controller_match, mbox)); -} -EXPORT_SYMBOL_GPL(devm_mbox_controller_unregister); diff --git a/drivers/mailbox/mtk-cmdq-mailbox.c b/drivers/mailbox/mtk-cmdq-mailbox.c index d186865b8dce..ab4e8d1954a1 100644 --- a/drivers/mailbox/mtk-cmdq-mailbox.c +++ b/drivers/mailbox/mtk-cmdq-mailbox.c @@ -92,18 +92,6 @@ struct gce_plat { u32 gce_num; }; -static void cmdq_sw_ddr_enable(struct cmdq *cmdq, bool enable) -{ - WARN_ON(clk_bulk_enable(cmdq->pdata->gce_num, cmdq->clocks)); - - if (enable) - writel(GCE_DDR_EN | GCE_CTRL_BY_SW, cmdq->base + GCE_GCTL_VALUE); - else - writel(GCE_CTRL_BY_SW, cmdq->base + GCE_GCTL_VALUE); - - clk_bulk_disable(cmdq->pdata->gce_num, cmdq->clocks); -} - u8 cmdq_get_shift_pa(struct mbox_chan *chan) { struct cmdq *cmdq = container_of(chan->mbox, struct cmdq, mbox); @@ -112,6 +100,19 @@ u8 cmdq_get_shift_pa(struct mbox_chan *chan) } EXPORT_SYMBOL(cmdq_get_shift_pa); +static void cmdq_gctl_value_toggle(struct cmdq *cmdq, bool ddr_enable) +{ + u32 val = cmdq->pdata->control_by_sw ? GCE_CTRL_BY_SW : 0; + + if (!cmdq->pdata->control_by_sw && !cmdq->pdata->sw_ddr_en) + return; + + if (cmdq->pdata->sw_ddr_en && ddr_enable) + val |= GCE_DDR_EN; + + writel(val, cmdq->base + GCE_GCTL_VALUE); +} + static int cmdq_thread_suspend(struct cmdq *cmdq, struct cmdq_thread *thread) { u32 status; @@ -140,16 +141,10 @@ static void cmdq_thread_resume(struct cmdq_thread *thread) static void cmdq_init(struct cmdq *cmdq) { int i; - u32 gctl_regval = 0; WARN_ON(clk_bulk_enable(cmdq->pdata->gce_num, cmdq->clocks)); - if (cmdq->pdata->control_by_sw) - gctl_regval = GCE_CTRL_BY_SW; - if (cmdq->pdata->sw_ddr_en) - gctl_regval |= GCE_DDR_EN; - if (gctl_regval) - writel(gctl_regval, cmdq->base + GCE_GCTL_VALUE); + cmdq_gctl_value_toggle(cmdq, true); writel(CMDQ_THR_ACTIVE_SLOT_CYCLES, cmdq->base + CMDQ_THR_SLOT_CYCLES); for (i = 0; i <= CMDQ_MAX_EVENT; i++) @@ -315,14 +310,21 @@ static irqreturn_t cmdq_irq_handler(int irq, void *dev) static int cmdq_runtime_resume(struct device *dev) { struct cmdq *cmdq = dev_get_drvdata(dev); + int ret; - return clk_bulk_enable(cmdq->pdata->gce_num, cmdq->clocks); + ret = clk_bulk_enable(cmdq->pdata->gce_num, cmdq->clocks); + if (ret) + return ret; + + cmdq_gctl_value_toggle(cmdq, true); + return 0; } static int cmdq_runtime_suspend(struct device *dev) { struct cmdq *cmdq = dev_get_drvdata(dev); + cmdq_gctl_value_toggle(cmdq, false); clk_bulk_disable(cmdq->pdata->gce_num, cmdq->clocks); return 0; } @@ -347,9 +349,6 @@ static int cmdq_suspend(struct device *dev) if (task_running) dev_warn(dev, "exist running task(s) in suspend\n"); - if (cmdq->pdata->sw_ddr_en) - cmdq_sw_ddr_enable(cmdq, false); - return pm_runtime_force_suspend(dev); } @@ -360,9 +359,6 @@ static int cmdq_resume(struct device *dev) WARN_ON(pm_runtime_force_resume(dev)); cmdq->suspended = false; - if (cmdq->pdata->sw_ddr_en) - cmdq_sw_ddr_enable(cmdq, true); - return 0; } @@ -370,9 +366,6 @@ static void cmdq_remove(struct platform_device *pdev) { struct cmdq *cmdq = platform_get_drvdata(pdev); - if (cmdq->pdata->sw_ddr_en) - cmdq_sw_ddr_enable(cmdq, false); - if (!IS_ENABLED(CONFIG_PM)) cmdq_runtime_suspend(&pdev->dev); diff --git a/drivers/mailbox/qcom-apcs-ipc-mailbox.c b/drivers/mailbox/qcom-apcs-ipc-mailbox.c index 11c41e935a36..8b24ec0fa191 100644 --- 
a/drivers/mailbox/qcom-apcs-ipc-mailbox.c +++ b/drivers/mailbox/qcom-apcs-ipc-mailbox.c @@ -116,10 +116,18 @@ static int qcom_apcs_ipc_probe(struct platform_device *pdev) } if (apcs_data->clk_name) { - apcs->clk = platform_device_register_data(&pdev->dev, - apcs_data->clk_name, - PLATFORM_DEVID_AUTO, - NULL, 0); + struct device_node *np = of_get_child_by_name(pdev->dev.of_node, + "clock-controller"); + struct platform_device_info pdevinfo = { + .parent = &pdev->dev, + .name = apcs_data->clk_name, + .id = PLATFORM_DEVID_AUTO, + .fwnode = of_fwnode_handle(np) ?: pdev->dev.fwnode, + .of_node_reused = !np, + }; + + apcs->clk = platform_device_register_full(&pdevinfo); + of_node_put(np); if (IS_ERR(apcs->clk)) dev_err(&pdev->dev, "failed to register APCS clk\n"); } diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c index ed40d8600656..2cc2eb24dc8a 100644 --- a/drivers/md/bcache/btree.c +++ b/drivers/md/bcache/btree.c @@ -36,6 +36,7 @@ #include <linux/sched/clock.h> #include <linux/rculist.h> #include <linux/delay.h> +#include <linux/sort.h> #include <trace/events/bcache.h> /* @@ -559,8 +560,6 @@ static void mca_data_alloc(struct btree *b, struct bkey *k, gfp_t gfp) } } -#define cmp_int(l, r) ((l > r) - (l < r)) - #ifdef CONFIG_PROVE_LOCKING static int btree_lock_cmp_fn(const struct lockdep_map *_a, const struct lockdep_map *_b) diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig index ff2f9e55ef28..aed653ce8fa2 100644 --- a/drivers/mtd/devices/Kconfig +++ b/drivers/mtd/devices/Kconfig @@ -98,7 +98,7 @@ config MTD_MCHP48L640 config MTD_SPEAR_SMI tristate "SPEAR MTD NOR Support through SMI controller" depends on PLAT_SPEAR || COMPILE_TEST - default y + default PLAT_SPEAR help This enable SNOR support on SPEAR platforms using SMI controller diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c index 8dc4f5c493fc..391d81ad960c 100644 --- a/drivers/mtd/mtdchar.c +++ b/drivers/mtd/mtdchar.c @@ -559,7 +559,7 @@ static int mtdchar_blkpg_ioctl(struct mtd_info *mtd, /* Sanitize user input */ p.devname[BLKPG_DEVNAMELTH - 1] = '\0'; - return mtd_add_partition(mtd, p.devname, p.start, p.length); + return mtd_add_partition(mtd, p.devname, p.start, p.length, NULL); case BLKPG_DEL_PARTITION: diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c index 5ba9a741f5ac..429d8c16baf0 100644 --- a/drivers/mtd/mtdcore.c +++ b/drivers/mtd/mtdcore.c @@ -68,7 +68,13 @@ static struct class mtd_class = { .pm = MTD_CLS_PM_OPS, }; +static struct class mtd_master_class = { + .name = "mtd_master", + .pm = MTD_CLS_PM_OPS, +}; + static DEFINE_IDR(mtd_idr); +static DEFINE_IDR(mtd_master_idr); /* These are exported solely for the purpose of mtd_blkdevs.c. You should not use them for _anything_ else */ @@ -83,8 +89,9 @@ EXPORT_SYMBOL_GPL(__mtd_next_device); static LIST_HEAD(mtd_notifiers); - +#define MTD_MASTER_DEVS 255 #define MTD_DEVT(index) MKDEV(MTD_CHAR_MAJOR, (index)*2) +static dev_t mtd_master_devt; /* REVISIT once MTD uses the driver model better, whoever allocates * the mtd_info will probably want to use the release() hook... 
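/*
 * The mtdcore.c hunks below add a second device class ("mtd_master") with its
 * own IDR and character-device region. A minimal sketch of that
 * register/unregister pattern, using hypothetical names rather than the
 * actual mtdcore symbols:
 */
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/module.h>

#define EXAMPLE_MASTER_DEVS	255

static struct class example_master_class = {
	.name = "example_master",
};
static DEFINE_IDR(example_master_idr);
static dev_t example_master_devt;

static int __init example_master_init(void)
{
	int ret;

	ret = class_register(&example_master_class);
	if (ret)
		return ret;

	/* Reserve a dynamic major with EXAMPLE_MASTER_DEVS minor numbers. */
	ret = alloc_chrdev_region(&example_master_devt, 0,
				  EXAMPLE_MASTER_DEVS, "example_master");
	if (ret < 0) {
		class_unregister(&example_master_class);
		return ret;
	}

	return 0;
}

static void __exit example_master_exit(void)
{
	unregister_chrdev_region(example_master_devt, EXAMPLE_MASTER_DEVS);
	idr_destroy(&example_master_idr);
	class_unregister(&example_master_class);
}

module_init(example_master_init);
module_exit(example_master_exit);
MODULE_LICENSE("GPL");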
@@ -104,6 +111,17 @@ static void mtd_release(struct device *dev) device_destroy(&mtd_class, index + 1); } +static void mtd_master_release(struct device *dev) +{ + struct mtd_info *mtd = dev_get_drvdata(dev); + + idr_remove(&mtd_master_idr, mtd->index); + of_node_put(mtd_get_of_node(mtd)); + + if (mtd_is_partition(mtd)) + release_mtd_partition(mtd); +} + static void mtd_device_release(struct kref *kref) { struct mtd_info *mtd = container_of(kref, struct mtd_info, refcnt); @@ -367,6 +385,11 @@ static const struct device_type mtd_devtype = { .release = mtd_release, }; +static const struct device_type mtd_master_devtype = { + .name = "mtd_master", + .release = mtd_master_release, +}; + static bool mtd_expert_analysis_mode; #ifdef CONFIG_DEBUG_FS @@ -634,13 +657,13 @@ exit_parent: /** * add_mtd_device - register an MTD device * @mtd: pointer to new MTD device info structure + * @partitioned: create partitioned device * * Add a device to the list of MTD devices present in the system, and * notify each currently active MTD 'user' of its arrival. Returns * zero on success or non-zero on failure. */ - -int add_mtd_device(struct mtd_info *mtd) +int add_mtd_device(struct mtd_info *mtd, bool partitioned) { struct device_node *np = mtd_get_of_node(mtd); struct mtd_info *master = mtd_get_master(mtd); @@ -687,10 +710,17 @@ int add_mtd_device(struct mtd_info *mtd) ofidx = -1; if (np) ofidx = of_alias_get_id(np, "mtd"); - if (ofidx >= 0) - i = idr_alloc(&mtd_idr, mtd, ofidx, ofidx + 1, GFP_KERNEL); - else - i = idr_alloc(&mtd_idr, mtd, 0, 0, GFP_KERNEL); + if (partitioned) { + if (ofidx >= 0) + i = idr_alloc(&mtd_idr, mtd, ofidx, ofidx + 1, GFP_KERNEL); + else + i = idr_alloc(&mtd_idr, mtd, 0, 0, GFP_KERNEL); + } else { + if (ofidx >= 0) + i = idr_alloc(&mtd_master_idr, mtd, ofidx, ofidx + 1, GFP_KERNEL); + else + i = idr_alloc(&mtd_master_idr, mtd, 0, 0, GFP_KERNEL); + } if (i < 0) { error = i; goto fail_locked; @@ -738,10 +768,18 @@ int add_mtd_device(struct mtd_info *mtd) /* Caller should have set dev.parent to match the * physical device, if appropriate. */ - mtd->dev.type = &mtd_devtype; - mtd->dev.class = &mtd_class; - mtd->dev.devt = MTD_DEVT(i); - error = dev_set_name(&mtd->dev, "mtd%d", i); + if (partitioned) { + mtd->dev.type = &mtd_devtype; + mtd->dev.class = &mtd_class; + mtd->dev.devt = MTD_DEVT(i); + dev_set_name(&mtd->dev, "mtd%d", i); + error = dev_set_name(&mtd->dev, "mtd%d", i); + } else { + mtd->dev.type = &mtd_master_devtype; + mtd->dev.class = &mtd_master_class; + mtd->dev.devt = MKDEV(MAJOR(mtd_master_devt), i); + error = dev_set_name(&mtd->dev, "mtd_master%d", i); + } if (error) goto fail_devname; dev_set_drvdata(&mtd->dev, mtd); @@ -749,6 +787,7 @@ int add_mtd_device(struct mtd_info *mtd) of_node_get(mtd_get_of_node(mtd)); error = device_register(&mtd->dev); if (error) { + pr_err("mtd: %s device_register fail %d\n", mtd->name, error); put_device(&mtd->dev); goto fail_added; } @@ -760,10 +799,13 @@ int add_mtd_device(struct mtd_info *mtd) mtd_debugfs_populate(mtd); - device_create(&mtd_class, mtd->dev.parent, MTD_DEVT(i) + 1, NULL, - "mtd%dro", i); + if (partitioned) { + device_create(&mtd_class, mtd->dev.parent, MTD_DEVT(i) + 1, NULL, + "mtd%dro", i); + } - pr_debug("mtd: Giving out device %d to %s\n", i, mtd->name); + pr_debug("mtd: Giving out %spartitioned device %d to %s\n", + partitioned ? 
"" : "un-", i, mtd->name); /* No need to get a refcount on the module containing the notifier, since we hold the mtd_table_mutex */ list_for_each_entry(not, &mtd_notifiers, list) @@ -771,13 +813,16 @@ int add_mtd_device(struct mtd_info *mtd) mutex_unlock(&mtd_table_mutex); - if (of_property_read_bool(mtd_get_of_node(mtd), "linux,rootfs")) { - if (IS_BUILTIN(CONFIG_MTD)) { - pr_info("mtd: setting mtd%d (%s) as root device\n", mtd->index, mtd->name); - ROOT_DEV = MKDEV(MTD_BLOCK_MAJOR, mtd->index); - } else { - pr_warn("mtd: can't set mtd%d (%s) as root device - mtd must be builtin\n", - mtd->index, mtd->name); + if (partitioned) { + if (of_property_read_bool(mtd_get_of_node(mtd), "linux,rootfs")) { + if (IS_BUILTIN(CONFIG_MTD)) { + pr_info("mtd: setting mtd%d (%s) as root device\n", + mtd->index, mtd->name); + ROOT_DEV = MKDEV(MTD_BLOCK_MAJOR, mtd->index); + } else { + pr_warn("mtd: can't set mtd%d (%s) as root device - mtd must be builtin\n", + mtd->index, mtd->name); + } } } @@ -793,7 +838,10 @@ fail_nvmem_add: fail_added: of_node_put(mtd_get_of_node(mtd)); fail_devname: - idr_remove(&mtd_idr, i); + if (partitioned) + idr_remove(&mtd_idr, i); + else + idr_remove(&mtd_master_idr, i); fail_locked: mutex_unlock(&mtd_table_mutex); return error; @@ -811,12 +859,14 @@ fail_locked: int del_mtd_device(struct mtd_info *mtd) { - int ret; struct mtd_notifier *not; + struct idr *idr; + int ret; mutex_lock(&mtd_table_mutex); - if (idr_find(&mtd_idr, mtd->index) != mtd) { + idr = mtd->dev.class == &mtd_class ? &mtd_idr : &mtd_master_idr; + if (idr_find(idr, mtd->index) != mtd) { ret = -ENODEV; goto out_error; } @@ -1056,6 +1106,7 @@ int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types, const struct mtd_partition *parts, int nr_parts) { + struct mtd_info *parent; int ret, err; mtd_set_dev_defaults(mtd); @@ -1064,25 +1115,30 @@ int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types, if (ret) goto out; + ret = add_mtd_device(mtd, false); + if (ret) + goto out; + if (IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER)) { - ret = add_mtd_device(mtd); + ret = mtd_add_partition(mtd, mtd->name, 0, MTDPART_SIZ_FULL, &parent); if (ret) goto out; + + } else { + parent = mtd; } /* Prefer parsed partitions over driver-provided fallback */ - ret = parse_mtd_partitions(mtd, types, parser_data); + ret = parse_mtd_partitions(parent, types, parser_data); if (ret == -EPROBE_DEFER) goto out; if (ret > 0) ret = 0; else if (nr_parts) - ret = add_mtd_partitions(mtd, parts, nr_parts); - else if (!device_is_registered(&mtd->dev)) - ret = add_mtd_device(mtd); - else - ret = 0; + ret = add_mtd_partitions(parent, parts, nr_parts); + else if (!IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER)) + ret = mtd_add_partition(parent, mtd->name, 0, MTDPART_SIZ_FULL, NULL); if (ret) goto out; @@ -1102,13 +1158,14 @@ int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types, register_reboot_notifier(&mtd->reboot_notifier); } + return 0; out: - if (ret) { - nvmem_unregister(mtd->otp_user_nvmem); - nvmem_unregister(mtd->otp_factory_nvmem); - } + nvmem_unregister(mtd->otp_user_nvmem); + nvmem_unregister(mtd->otp_factory_nvmem); - if (ret && device_is_registered(&mtd->dev)) { + del_mtd_partitions(mtd); + + if (device_is_registered(&mtd->dev)) { err = del_mtd_device(mtd); if (err) pr_err("Error when deleting MTD device (%d)\n", err); @@ -1267,8 +1324,7 @@ int __get_mtd_device(struct mtd_info *mtd) mtd = mtd->parent; } - if (IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER)) - kref_get(&master->refcnt); + 
kref_get(&master->refcnt); return 0; } @@ -1362,8 +1418,7 @@ void __put_mtd_device(struct mtd_info *mtd) mtd = parent; } - if (IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER)) - kref_put(&master->refcnt, mtd_device_release); + kref_put(&master->refcnt, mtd_device_release); module_put(master->owner); @@ -2530,6 +2585,16 @@ static int __init init_mtd(void) if (ret) goto err_reg; + ret = class_register(&mtd_master_class); + if (ret) + goto err_reg2; + + ret = alloc_chrdev_region(&mtd_master_devt, 0, MTD_MASTER_DEVS, "mtd_master"); + if (ret < 0) { + pr_err("unable to allocate char dev region\n"); + goto err_chrdev; + } + mtd_bdi = mtd_bdi_init("mtd"); if (IS_ERR(mtd_bdi)) { ret = PTR_ERR(mtd_bdi); @@ -2554,6 +2619,10 @@ out_procfs: bdi_unregister(mtd_bdi); bdi_put(mtd_bdi); err_bdi: + unregister_chrdev_region(mtd_master_devt, MTD_MASTER_DEVS); +err_chrdev: + class_unregister(&mtd_master_class); +err_reg2: class_unregister(&mtd_class); err_reg: pr_err("Error registering mtd class or bdi: %d\n", ret); @@ -2567,9 +2636,12 @@ static void __exit cleanup_mtd(void) if (proc_mtd) remove_proc_entry("mtd", NULL); class_unregister(&mtd_class); + class_unregister(&mtd_master_class); + unregister_chrdev_region(mtd_master_devt, MTD_MASTER_DEVS); bdi_unregister(mtd_bdi); bdi_put(mtd_bdi); idr_destroy(&mtd_idr); + idr_destroy(&mtd_master_idr); } module_init(init_mtd); diff --git a/drivers/mtd/mtdcore.h b/drivers/mtd/mtdcore.h index b014861a06a6..2258d31c5aa6 100644 --- a/drivers/mtd/mtdcore.h +++ b/drivers/mtd/mtdcore.h @@ -8,7 +8,7 @@ extern struct mutex mtd_table_mutex; extern struct backing_dev_info *mtd_bdi; struct mtd_info *__mtd_next_device(int i); -int __must_check add_mtd_device(struct mtd_info *mtd); +int __must_check add_mtd_device(struct mtd_info *mtd, bool partitioned); int del_mtd_device(struct mtd_info *mtd); int add_mtd_partitions(struct mtd_info *, const struct mtd_partition *, int); int del_mtd_partitions(struct mtd_info *); diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c index 994e8c51e674..5a3db36d734e 100644 --- a/drivers/mtd/mtdpart.c +++ b/drivers/mtd/mtdpart.c @@ -86,8 +86,7 @@ static struct mtd_info *allocate_partition(struct mtd_info *parent, * parent conditional on that option. Note, this is a way to * distinguish between the parent and its partitions in sysfs. */ - child->dev.parent = IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER) || mtd_is_partition(parent) ? - &parent->dev : parent->dev.parent; + child->dev.parent = &parent->dev; child->dev.of_node = part->of_node; child->parent = parent; child->part.offset = part->offset; @@ -243,7 +242,7 @@ static int mtd_add_partition_attrs(struct mtd_info *new) } int mtd_add_partition(struct mtd_info *parent, const char *name, - long long offset, long long length) + long long offset, long long length, struct mtd_info **out) { struct mtd_info *master = mtd_get_master(parent); u64 parent_size = mtd_is_partition(parent) ? 
@@ -276,12 +275,15 @@ int mtd_add_partition(struct mtd_info *parent, const char *name, list_add_tail(&child->part.node, &parent->partitions); mutex_unlock(&master->master.partitions_lock); - ret = add_mtd_device(child); + ret = add_mtd_device(child, true); if (ret) goto err_remove_part; mtd_add_partition_attrs(child); + if (out) + *out = child; + return 0; err_remove_part: @@ -413,7 +415,7 @@ int add_mtd_partitions(struct mtd_info *parent, list_add_tail(&child->part.node, &parent->partitions); mutex_unlock(&master->master.partitions_lock); - ret = add_mtd_device(child); + ret = add_mtd_device(child, true); if (ret) { mutex_lock(&master->master.partitions_lock); list_del(&child->part.node); @@ -590,9 +592,6 @@ static int mtd_part_of_parse(struct mtd_info *master, int ret, err = 0; dev = &master->dev; - /* Use parent device (controller) if the top level MTD is not registered */ - if (!IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER) && !mtd_is_partition(master)) - dev = master->dev.parent; np = mtd_get_of_node(master); if (mtd_is_partition(master)) @@ -711,6 +710,7 @@ int parse_mtd_partitions(struct mtd_info *master, const char *const *types, if (ret < 0 && !err) err = ret; } + return err; } diff --git a/drivers/mtd/nand/ecc-mxic.c b/drivers/mtd/nand/ecc-mxic.c index 56b56f726b99..1bf9a5a64b87 100644 --- a/drivers/mtd/nand/ecc-mxic.c +++ b/drivers/mtd/nand/ecc-mxic.c @@ -614,7 +614,7 @@ static int mxic_ecc_finish_io_req_external(struct nand_device *nand, { struct mxic_ecc_engine *mxic = nand_to_mxic(nand); struct mxic_ecc_ctx *ctx = nand_to_ecc_ctx(nand); - int nents, step, ret; + int nents, step, ret = 0; if (req->mode == MTD_OPS_RAW) return 0; diff --git a/drivers/mtd/nand/qpic_common.c b/drivers/mtd/nand/qpic_common.c index e0ed25b5afea..4dc4d65e7d32 100644 --- a/drivers/mtd/nand/qpic_common.c +++ b/drivers/mtd/nand/qpic_common.c @@ -236,21 +236,21 @@ int qcom_prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read, int i, ret; struct bam_cmd_element *bam_ce_buffer; struct bam_transaction *bam_txn = nandc->bam_txn; + u32 offset; bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_pos]; /* fill the command desc */ for (i = 0; i < size; i++) { + offset = nandc->props->bam_offset + reg_off + 4 * i; if (read) bam_prep_ce(&bam_ce_buffer[i], - nandc_reg_phys(nandc, reg_off + 4 * i), - BAM_READ_COMMAND, + offset, BAM_READ_COMMAND, reg_buf_dma_addr(nandc, (__le32 *)vaddr + i)); else bam_prep_ce_le32(&bam_ce_buffer[i], - nandc_reg_phys(nandc, reg_off + 4 * i), - BAM_WRITE_COMMAND, + offset, BAM_WRITE_COMMAND, *((__le32 *)vaddr + i)); } diff --git a/drivers/mtd/nand/raw/Kconfig b/drivers/mtd/nand/raw/Kconfig index b8035df8f732..4b99d9c422c3 100644 --- a/drivers/mtd/nand/raw/Kconfig +++ b/drivers/mtd/nand/raw/Kconfig @@ -34,7 +34,7 @@ config MTD_NAND_DENALI_DT config MTD_NAND_AMS_DELTA tristate "Amstrad E3 NAND controller" depends on MACH_AMS_DELTA || COMPILE_TEST - default y + default MACH_AMS_DELTA help Support for NAND flash on Amstrad E3 (Delta). @@ -462,6 +462,13 @@ config MTD_NAND_NUVOTON_MA35 Enables support for the NAND controller found on the Nuvoton MA35 series SoCs. +config MTD_NAND_LOONGSON1 + tristate "Loongson1 NAND controller" + depends on LOONGSON1_APB_DMA || COMPILE_TEST + select REGMAP_MMIO + help + Enables support for NAND controller on Loongson1 SoCs. 
+ comment "Misc" config MTD_SM_COMMON diff --git a/drivers/mtd/nand/raw/Makefile b/drivers/mtd/nand/raw/Makefile index 99e79c448847..711d043ad4f8 100644 --- a/drivers/mtd/nand/raw/Makefile +++ b/drivers/mtd/nand/raw/Makefile @@ -59,6 +59,7 @@ obj-$(CONFIG_MTD_NAND_ROCKCHIP) += rockchip-nand-controller.o obj-$(CONFIG_MTD_NAND_PL35X) += pl35x-nand-controller.o obj-$(CONFIG_MTD_NAND_RENESAS) += renesas-nand-controller.o obj-$(CONFIG_MTD_NAND_NUVOTON_MA35) += nuvoton-ma35d1-nand-controller.o +obj-$(CONFIG_MTD_NAND_LOONGSON1) += loongson1-nand-controller.o nand-objs := nand_base.o nand_legacy.o nand_bbt.o nand_timings.o nand_ids.o nand-objs += nand_onfi.o diff --git a/drivers/mtd/nand/raw/bcm47xxnflash/ops_bcm4706.c b/drivers/mtd/nand/raw/bcm47xxnflash/ops_bcm4706.c index 6487dfc64258..e532c3535b16 100644 --- a/drivers/mtd/nand/raw/bcm47xxnflash/ops_bcm4706.c +++ b/drivers/mtd/nand/raw/bcm47xxnflash/ops_bcm4706.c @@ -171,6 +171,7 @@ static void bcm47xxnflash_ops_bcm4706_cmd_ctrl(struct nand_chip *nand_chip, { struct bcm47xxnflash *b47n = nand_get_controller_data(nand_chip); u32 code = 0; + int rc; if (cmd == NAND_CMD_NONE) return; @@ -182,7 +183,9 @@ static void bcm47xxnflash_ops_bcm4706_cmd_ctrl(struct nand_chip *nand_chip, if (cmd != NAND_CMD_RESET) code |= NCTL_CSA; - bcm47xxnflash_ops_bcm4706_ctl_cmd(b47n->cc, code); + rc = bcm47xxnflash_ops_bcm4706_ctl_cmd(b47n->cc, code); + if (rc) + pr_err("ctl_cmd didn't work with error %d\n", rc); } /* Default nand_select_chip calls cmd_ctrl, which is not used in BCM4706 */ diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c index 17f6d9723df9..62bdda3be92f 100644 --- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c +++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c @@ -65,6 +65,7 @@ module_param(wp_on, int, 0444); #define CMD_PARAMETER_READ 0x0e #define CMD_PARAMETER_CHANGE_COL 0x0f #define CMD_LOW_LEVEL_OP 0x10 +#define CMD_NOT_SUPPORTED 0xff struct brcm_nand_dma_desc { u32 next_desc; @@ -101,7 +102,7 @@ struct brcm_nand_dma_desc { #define BRCMNAND_MIN_DEVSIZE (4ULL * 1024 * 1024) #define NAND_CTRL_RDY (INTFC_CTLR_READY | INTFC_FLASH_READY) -#define NAND_POLL_STATUS_TIMEOUT_MS 100 +#define NAND_POLL_STATUS_TIMEOUT_MS 500 #define EDU_CMD_WRITE 0x00 #define EDU_CMD_READ 0x01 @@ -199,6 +200,30 @@ static const u16 flash_dma_regs_v4[] = { [FLASH_DMA_CURRENT_DESC_EXT] = 0x34, }; +/* Native command conversion for legacy controllers (< v5.0) */ +static const u8 native_cmd_conv[] = { + [NAND_CMD_READ0] = CMD_NOT_SUPPORTED, + [NAND_CMD_READ1] = CMD_NOT_SUPPORTED, + [NAND_CMD_RNDOUT] = CMD_PARAMETER_CHANGE_COL, + [NAND_CMD_PAGEPROG] = CMD_NOT_SUPPORTED, + [NAND_CMD_READOOB] = CMD_NOT_SUPPORTED, + [NAND_CMD_ERASE1] = CMD_BLOCK_ERASE, + [NAND_CMD_STATUS] = CMD_NOT_SUPPORTED, + [NAND_CMD_SEQIN] = CMD_NOT_SUPPORTED, + [NAND_CMD_RNDIN] = CMD_NOT_SUPPORTED, + [NAND_CMD_READID] = CMD_DEVICE_ID_READ, + [NAND_CMD_ERASE2] = CMD_NULL, + [NAND_CMD_PARAM] = CMD_PARAMETER_READ, + [NAND_CMD_GET_FEATURES] = CMD_NOT_SUPPORTED, + [NAND_CMD_SET_FEATURES] = CMD_NOT_SUPPORTED, + [NAND_CMD_RESET] = CMD_NOT_SUPPORTED, + [NAND_CMD_READSTART] = CMD_NOT_SUPPORTED, + [NAND_CMD_READCACHESEQ] = CMD_NOT_SUPPORTED, + [NAND_CMD_READCACHEEND] = CMD_NOT_SUPPORTED, + [NAND_CMD_RNDOUTSTART] = CMD_NULL, + [NAND_CMD_CACHEDPROG] = CMD_NOT_SUPPORTED, +}; + /* Controller feature flags */ enum { BRCMNAND_HAS_1K_SECTORS = BIT(0), @@ -237,6 +262,12 @@ struct brcmnand_controller { /* List of NAND hosts (one for each chip-select) */ struct list_head host_list; + /* 
Functions to be called from exec_op */ + int (*check_instr)(struct nand_chip *chip, + const struct nand_operation *op); + int (*exec_instr)(struct nand_chip *chip, + const struct nand_operation *op); + /* EDU info, per-transaction */ const u16 *edu_offsets; void __iomem *edu_base; @@ -310,9 +341,6 @@ struct brcmnand_host { struct platform_device *pdev; int cs; - unsigned int last_cmd; - unsigned int last_byte; - u64 last_addr; struct brcmnand_cfg hwcfg; struct brcmnand_controller *ctrl; }; @@ -2233,14 +2261,11 @@ static int brcmnand_read_page(struct nand_chip *chip, uint8_t *buf, int oob_required, int page) { struct mtd_info *mtd = nand_to_mtd(chip); - struct brcmnand_host *host = nand_get_controller_data(chip); u8 *oob = oob_required ? (u8 *)chip->oob_poi : NULL; u64 addr = (u64)page << chip->page_shift; - host->last_addr = addr; - - return brcmnand_read(mtd, chip, host->last_addr, - mtd->writesize >> FC_SHIFT, (u32 *)buf, oob); + return brcmnand_read(mtd, chip, addr, mtd->writesize >> FC_SHIFT, + (u32 *)buf, oob); } static int brcmnand_read_page_raw(struct nand_chip *chip, uint8_t *buf, @@ -2252,11 +2277,9 @@ static int brcmnand_read_page_raw(struct nand_chip *chip, uint8_t *buf, int ret; u64 addr = (u64)page << chip->page_shift; - host->last_addr = addr; - brcmnand_set_ecc_enabled(host, 0); - ret = brcmnand_read(mtd, chip, host->last_addr, - mtd->writesize >> FC_SHIFT, (u32 *)buf, oob); + ret = brcmnand_read(mtd, chip, addr, mtd->writesize >> FC_SHIFT, + (u32 *)buf, oob); brcmnand_set_ecc_enabled(host, 1); return ret; } @@ -2363,13 +2386,10 @@ static int brcmnand_write_page(struct nand_chip *chip, const uint8_t *buf, int oob_required, int page) { struct mtd_info *mtd = nand_to_mtd(chip); - struct brcmnand_host *host = nand_get_controller_data(chip); void *oob = oob_required ? 
chip->oob_poi : NULL; u64 addr = (u64)page << chip->page_shift; - host->last_addr = addr; - - return brcmnand_write(mtd, chip, host->last_addr, (const u32 *)buf, oob); + return brcmnand_write(mtd, chip, addr, (const u32 *)buf, oob); } static int brcmnand_write_page_raw(struct nand_chip *chip, const uint8_t *buf, @@ -2381,9 +2401,8 @@ static int brcmnand_write_page_raw(struct nand_chip *chip, const uint8_t *buf, u64 addr = (u64)page << chip->page_shift; int ret = 0; - host->last_addr = addr; brcmnand_set_ecc_enabled(host, 0); - ret = brcmnand_write(mtd, chip, host->last_addr, (const u32 *)buf, oob); + ret = brcmnand_write(mtd, chip, addr, (const u32 *)buf, oob); brcmnand_set_ecc_enabled(host, 1); return ret; @@ -2490,18 +2509,190 @@ static int brcmnand_op_is_reset(const struct nand_operation *op) return 0; } +static int brcmnand_check_instructions(struct nand_chip *chip, + const struct nand_operation *op) +{ + return 0; +} + +static int brcmnand_exec_instructions(struct nand_chip *chip, + const struct nand_operation *op) +{ + struct brcmnand_host *host = nand_get_controller_data(chip); + unsigned int i; + int ret = 0; + + for (i = 0; i < op->ninstrs; i++) { + ret = brcmnand_exec_instr(host, i, op); + if (ret) + break; + } + + return ret; +} + +static int brcmnand_check_instructions_legacy(struct nand_chip *chip, + const struct nand_operation *op) +{ + const struct nand_op_instr *instr; + unsigned int i; + u8 cmd; + + for (i = 0; i < op->ninstrs; i++) { + instr = &op->instrs[i]; + + switch (instr->type) { + case NAND_OP_CMD_INSTR: + cmd = native_cmd_conv[instr->ctx.cmd.opcode]; + if (cmd == CMD_NOT_SUPPORTED) + return -EOPNOTSUPP; + break; + case NAND_OP_ADDR_INSTR: + case NAND_OP_DATA_IN_INSTR: + case NAND_OP_WAITRDY_INSTR: + break; + default: + return -EOPNOTSUPP; + } + } + + return 0; +} + +static int brcmnand_exec_instructions_legacy(struct nand_chip *chip, + const struct nand_operation *op) +{ + struct mtd_info *mtd = nand_to_mtd(chip); + struct brcmnand_host *host = nand_get_controller_data(chip); + struct brcmnand_controller *ctrl = host->ctrl; + const struct nand_op_instr *instr; + unsigned int i, j; + u8 cmd = CMD_NULL, last_cmd = CMD_NULL; + int ret = 0; + u64 last_addr; + + for (i = 0; i < op->ninstrs; i++) { + instr = &op->instrs[i]; + + if (instr->type == NAND_OP_CMD_INSTR) { + cmd = native_cmd_conv[instr->ctx.cmd.opcode]; + if (cmd == CMD_NOT_SUPPORTED) { + dev_err(ctrl->dev, "unsupported cmd=%d\n", + instr->ctx.cmd.opcode); + ret = -EOPNOTSUPP; + break; + } + } else if (instr->type == NAND_OP_ADDR_INSTR) { + u64 addr = 0; + + if (cmd == CMD_NULL) + continue; + + if (instr->ctx.addr.naddrs > 8) { + dev_err(ctrl->dev, "unsupported naddrs=%u\n", + instr->ctx.addr.naddrs); + ret = -EOPNOTSUPP; + break; + } + + for (j = 0; j < instr->ctx.addr.naddrs; j++) + addr |= (instr->ctx.addr.addrs[j]) << (j << 3); + + if (cmd == CMD_BLOCK_ERASE) + addr <<= chip->page_shift; + else if (cmd == CMD_PARAMETER_CHANGE_COL) + addr &= ~((u64)(FC_BYTES - 1)); + + brcmnand_set_cmd_addr(mtd, addr); + brcmnand_send_cmd(host, cmd); + last_addr = addr; + last_cmd = cmd; + cmd = CMD_NULL; + brcmnand_waitfunc(chip); + + if (last_cmd == CMD_PARAMETER_READ || + last_cmd == CMD_PARAMETER_CHANGE_COL) { + /* Copy flash cache word-wise */ + u32 *flash_cache = (u32 *)ctrl->flash_cache; + + brcmnand_soc_data_bus_prepare(ctrl->soc, true); + + /* + * Must cache the FLASH_CACHE now, since changes in + * SECTOR_SIZE_1K may invalidate it + */ + for (j = 0; j < FC_WORDS; j++) + /* + * Flash cache is big endian for 
parameter pages, at + * least on STB SoCs + */ + flash_cache[j] = be32_to_cpu(brcmnand_read_fc(ctrl, j)); + + brcmnand_soc_data_bus_unprepare(ctrl->soc, true); + } + } else if (instr->type == NAND_OP_DATA_IN_INSTR) { + u8 *in = instr->ctx.data.buf.in; + + if (last_cmd == CMD_DEVICE_ID_READ) { + u32 val; + + if (instr->ctx.data.len > 8) { + dev_err(ctrl->dev, "unsupported len=%u\n", + instr->ctx.data.len); + ret = -EOPNOTSUPP; + break; + } + + for (j = 0; j < instr->ctx.data.len; j++) { + if (j == 0) + val = brcmnand_read_reg(ctrl, BRCMNAND_ID); + else if (j == 4) + val = brcmnand_read_reg(ctrl, BRCMNAND_ID_EXT); + + in[j] = (val >> (24 - ((j % 4) << 3))) & 0xff; + } + } else if (last_cmd == CMD_PARAMETER_READ || + last_cmd == CMD_PARAMETER_CHANGE_COL) { + u64 addr; + u32 offs; + + for (j = 0; j < instr->ctx.data.len; j++) { + addr = last_addr + j; + offs = addr & (FC_BYTES - 1); + + if (j > 0 && offs == 0) + nand_change_read_column_op(chip, addr, NULL, 0, + false); + + in[j] = ctrl->flash_cache[offs]; + } + } + } else if (instr->type == NAND_OP_WAITRDY_INSTR) { + ret = bcmnand_ctrl_poll_status(host, NAND_CTRL_RDY, NAND_CTRL_RDY, 0); + if (ret) + break; + } else { + dev_err(ctrl->dev, "unsupported instruction type: %d\n", instr->type); + ret = -EOPNOTSUPP; + break; + } + } + + return ret; +} + static int brcmnand_exec_op(struct nand_chip *chip, const struct nand_operation *op, bool check_only) { struct brcmnand_host *host = nand_get_controller_data(chip); + struct brcmnand_controller *ctrl = host->ctrl; struct mtd_info *mtd = nand_to_mtd(chip); u8 *status; - unsigned int i; int ret = 0; if (check_only) - return 0; + return ctrl->check_instr(chip, op); if (brcmnand_op_is_status(op)) { status = op->instrs[1].ctx.data.buf.in; @@ -2525,11 +2716,7 @@ static int brcmnand_exec_op(struct nand_chip *chip, if (op->deassert_wp) brcmnand_wp(mtd, 0); - for (i = 0; i < op->ninstrs; i++) { - ret = brcmnand_exec_instr(host, i, op); - if (ret) - break; - } + ret = ctrl->exec_instr(chip, op); if (op->deassert_wp) brcmnand_wp(mtd, 1); @@ -3142,6 +3329,15 @@ int brcmnand_probe(struct platform_device *pdev, struct brcmnand_soc *soc) if (ret) goto err; + /* Only v5.0+ controllers have low level ops support */ + if (ctrl->nand_version >= 0x0500) { + ctrl->check_instr = brcmnand_check_instructions; + ctrl->exec_instr = brcmnand_exec_instructions; + } else { + ctrl->check_instr = brcmnand_check_instructions_legacy; + ctrl->exec_instr = brcmnand_exec_instructions_legacy; + } + /* * Most chips have this cache at a fixed offset within 'nand' block. * Some must specify this region separately. 
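/*
 * The denali_pci.c diff that follows converts the probe path from
 * pci_request_regions()/pci_release_regions() to the device-managed
 * pcim_request_all_regions(), so the error and remove paths no longer need
 * explicit release calls. A minimal sketch of that devres pattern, with a
 * hypothetical foo_pci_probe() (not the Denali driver itself):
 */
#include <linux/pci.h>

static int foo_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	void __iomem *regs;
	int ret;

	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;

	/* Managed request: regions are released automatically on unbind. */
	ret = pcim_request_all_regions(pdev, "foo");
	if (ret)
		return ret;

	regs = devm_ioremap(&pdev->dev, pci_resource_start(pdev, 0),
			    pci_resource_len(pdev, 0));
	if (!regs)
		return -ENOMEM;

	/* No goto/release ladder needed: devres unwinds on failure. */
	return 0;
}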
diff --git a/drivers/mtd/nand/raw/denali_pci.c b/drivers/mtd/nand/raw/denali_pci.c index e22094e39546..97fa32d73441 100644 --- a/drivers/mtd/nand/raw/denali_pci.c +++ b/drivers/mtd/nand/raw/denali_pci.c @@ -68,7 +68,7 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) denali->clk_rate = 50000000; /* 50 MHz */ denali->clk_x_rate = 200000000; /* 200 MHz */ - ret = pci_request_regions(dev, DENALI_NAND_NAME); + ret = pcim_request_all_regions(dev, DENALI_NAND_NAME); if (ret) { dev_err(&dev->dev, "Spectra: Unable to request memory regions\n"); return ret; @@ -77,20 +77,18 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) denali->reg = devm_ioremap(denali->dev, csr_base, csr_len); if (!denali->reg) { dev_err(&dev->dev, "Spectra: Unable to remap memory region\n"); - ret = -ENOMEM; - goto regions_release; + return -ENOMEM; } denali->host = devm_ioremap(denali->dev, mem_base, mem_len); if (!denali->host) { dev_err(&dev->dev, "Spectra: ioremap failed!"); - ret = -ENOMEM; - goto regions_release; + return -ENOMEM; } ret = denali_init(denali); if (ret) - goto regions_release; + return ret; nsels = denali->nbanks; @@ -118,8 +116,6 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) out_remove_denali: denali_remove(denali); -regions_release: - pci_release_regions(dev); return ret; } @@ -127,7 +123,6 @@ static void denali_pci_remove(struct pci_dev *dev) { struct denali_controller *denali = pci_get_drvdata(dev); - pci_release_regions(dev); denali_remove(denali); } diff --git a/drivers/mtd/nand/raw/loongson1-nand-controller.c b/drivers/mtd/nand/raw/loongson1-nand-controller.c new file mode 100644 index 000000000000..ef8e4f9ce287 --- /dev/null +++ b/drivers/mtd/nand/raw/loongson1-nand-controller.c @@ -0,0 +1,836 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * NAND Controller Driver for Loongson-1 SoC + * + * Copyright (C) 2015-2025 Keguang Zhang <keguang.zhang@gmail.com> + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/dmaengine.h> +#include <linux/dma-mapping.h> +#include <linux/iopoll.h> +#include <linux/mtd/mtd.h> +#include <linux/mtd/rawnand.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> +#include <linux/sizes.h> + +/* Loongson-1 NAND Controller Registers */ +#define LS1X_NAND_CMD 0x0 +#define LS1X_NAND_ADDR1 0x4 +#define LS1X_NAND_ADDR2 0x8 +#define LS1X_NAND_TIMING 0xc +#define LS1X_NAND_IDL 0x10 +#define LS1X_NAND_IDH_STATUS 0x14 +#define LS1X_NAND_PARAM 0x18 +#define LS1X_NAND_OP_NUM 0x1c + +/* NAND Command Register Bits */ +#define LS1X_NAND_CMD_OP_DONE BIT(10) +#define LS1X_NAND_CMD_OP_SPARE BIT(9) +#define LS1X_NAND_CMD_OP_MAIN BIT(8) +#define LS1X_NAND_CMD_STATUS BIT(7) +#define LS1X_NAND_CMD_RESET BIT(6) +#define LS1X_NAND_CMD_READID BIT(5) +#define LS1X_NAND_CMD_BLOCKS_ERASE BIT(4) +#define LS1X_NAND_CMD_ERASE BIT(3) +#define LS1X_NAND_CMD_WRITE BIT(2) +#define LS1X_NAND_CMD_READ BIT(1) +#define LS1X_NAND_CMD_VALID BIT(0) + +#define LS1X_NAND_WAIT_CYCLE_MASK GENMASK(7, 0) +#define LS1X_NAND_HOLD_CYCLE_MASK GENMASK(15, 8) +#define LS1X_NAND_CELL_SIZE_MASK GENMASK(11, 8) + +#define LS1X_NAND_COL_ADDR_CYC 2U +#define LS1X_NAND_MAX_ADDR_CYC 5U + +#define BITS_PER_WORD (4 * BITS_PER_BYTE) + +struct ls1x_nand_host; + +struct ls1x_nand_op { + char addrs[LS1X_NAND_MAX_ADDR_CYC]; + unsigned int naddrs; + unsigned int addrs_offset; + unsigned int aligned_offset; + unsigned int cmd_reg; + unsigned int row_start; + unsigned int 
rdy_timeout_ms; + unsigned int orig_len; + bool is_readid; + bool is_erase; + bool is_write; + bool is_read; + bool is_change_column; + size_t len; + char *buf; +}; + +struct ls1x_nand_data { + unsigned int status_field; + unsigned int op_scope_field; + unsigned int hold_cycle; + unsigned int wait_cycle; + void (*set_addr)(struct ls1x_nand_host *host, struct ls1x_nand_op *op); +}; + +struct ls1x_nand_host { + struct device *dev; + struct nand_chip chip; + struct nand_controller controller; + const struct ls1x_nand_data *data; + void __iomem *reg_base; + struct regmap *regmap; + /* DMA Engine stuff */ + dma_addr_t dma_base; + struct dma_chan *dma_chan; + dma_cookie_t dma_cookie; + struct completion dma_complete; +}; + +static const struct regmap_config ls1x_nand_regmap_config = { + .reg_bits = 32, + .val_bits = 32, + .reg_stride = 4, +}; + +static int ls1x_nand_op_cmd_mapping(struct nand_chip *chip, struct ls1x_nand_op *op, u8 opcode) +{ + struct ls1x_nand_host *host = nand_get_controller_data(chip); + + op->row_start = chip->page_shift + 1; + + /* The controller abstracts the following NAND operations. */ + switch (opcode) { + case NAND_CMD_STATUS: + op->cmd_reg = LS1X_NAND_CMD_STATUS; + break; + case NAND_CMD_RESET: + op->cmd_reg = LS1X_NAND_CMD_RESET; + break; + case NAND_CMD_READID: + op->is_readid = true; + op->cmd_reg = LS1X_NAND_CMD_READID; + break; + case NAND_CMD_ERASE1: + op->is_erase = true; + op->addrs_offset = LS1X_NAND_COL_ADDR_CYC; + break; + case NAND_CMD_ERASE2: + if (!op->is_erase) + return -EOPNOTSUPP; + /* During erasing, row_start differs from the default value. */ + op->row_start = chip->page_shift; + op->cmd_reg = LS1X_NAND_CMD_ERASE; + break; + case NAND_CMD_SEQIN: + op->is_write = true; + break; + case NAND_CMD_PAGEPROG: + if (!op->is_write) + return -EOPNOTSUPP; + op->cmd_reg = LS1X_NAND_CMD_WRITE; + break; + case NAND_CMD_READ0: + op->is_read = true; + break; + case NAND_CMD_READSTART: + if (!op->is_read) + return -EOPNOTSUPP; + op->cmd_reg = LS1X_NAND_CMD_READ; + break; + case NAND_CMD_RNDOUT: + op->is_change_column = true; + break; + case NAND_CMD_RNDOUTSTART: + if (!op->is_change_column) + return -EOPNOTSUPP; + op->cmd_reg = LS1X_NAND_CMD_READ; + break; + default: + dev_dbg(host->dev, "unsupported opcode: %u\n", opcode); + return -EOPNOTSUPP; + } + + return 0; +} + +static int ls1x_nand_parse_instructions(struct nand_chip *chip, + const struct nand_subop *subop, struct ls1x_nand_op *op) +{ + unsigned int op_id; + int ret; + + for (op_id = 0; op_id < subop->ninstrs; op_id++) { + const struct nand_op_instr *instr = &subop->instrs[op_id]; + unsigned int offset, naddrs; + const u8 *addrs; + + switch (instr->type) { + case NAND_OP_CMD_INSTR: + ret = ls1x_nand_op_cmd_mapping(chip, op, instr->ctx.cmd.opcode); + if (ret < 0) + return ret; + + break; + case NAND_OP_ADDR_INSTR: + naddrs = nand_subop_get_num_addr_cyc(subop, op_id); + if (naddrs > LS1X_NAND_MAX_ADDR_CYC) + return -EOPNOTSUPP; + op->naddrs = naddrs; + offset = nand_subop_get_addr_start_off(subop, op_id); + addrs = &instr->ctx.addr.addrs[offset]; + memcpy(op->addrs + op->addrs_offset, addrs, naddrs); + break; + case NAND_OP_DATA_IN_INSTR: + case NAND_OP_DATA_OUT_INSTR: + offset = nand_subop_get_data_start_off(subop, op_id); + op->orig_len = nand_subop_get_data_len(subop, op_id); + if (instr->type == NAND_OP_DATA_IN_INSTR) + op->buf = instr->ctx.data.buf.in + offset; + else if (instr->type == NAND_OP_DATA_OUT_INSTR) + op->buf = (void *)instr->ctx.data.buf.out + offset; + + break; + case NAND_OP_WAITRDY_INSTR: 
+ op->rdy_timeout_ms = instr->ctx.waitrdy.timeout_ms; + break; + default: + break; + } + } + + return 0; +} + +static void ls1b_nand_set_addr(struct ls1x_nand_host *host, struct ls1x_nand_op *op) +{ + struct nand_chip *chip = &host->chip; + int i; + + for (i = 0; i < LS1X_NAND_MAX_ADDR_CYC; i++) { + int shift, mask, val; + + if (i < LS1X_NAND_COL_ADDR_CYC) { + shift = i * BITS_PER_BYTE; + mask = (u32)0xff << shift; + mask &= GENMASK(chip->page_shift, 0); + val = (u32)op->addrs[i] << shift; + regmap_update_bits(host->regmap, LS1X_NAND_ADDR1, mask, val); + } else if (!op->is_change_column) { + shift = op->row_start + (i - LS1X_NAND_COL_ADDR_CYC) * BITS_PER_BYTE; + mask = (u32)0xff << shift; + val = (u32)op->addrs[i] << shift; + regmap_update_bits(host->regmap, LS1X_NAND_ADDR1, mask, val); + + if (i == 4) { + mask = (u32)0xff >> (BITS_PER_WORD - shift); + val = (u32)op->addrs[i] >> (BITS_PER_WORD - shift); + regmap_update_bits(host->regmap, LS1X_NAND_ADDR2, mask, val); + } + } + } +} + +static void ls1c_nand_set_addr(struct ls1x_nand_host *host, struct ls1x_nand_op *op) +{ + int i; + + for (i = 0; i < LS1X_NAND_MAX_ADDR_CYC; i++) { + int shift, mask, val; + + if (i < LS1X_NAND_COL_ADDR_CYC) { + shift = i * BITS_PER_BYTE; + mask = (u32)0xff << shift; + val = (u32)op->addrs[i] << shift; + regmap_update_bits(host->regmap, LS1X_NAND_ADDR1, mask, val); + } else if (!op->is_change_column) { + shift = (i - LS1X_NAND_COL_ADDR_CYC) * BITS_PER_BYTE; + mask = (u32)0xff << shift; + val = (u32)op->addrs[i] << shift; + regmap_update_bits(host->regmap, LS1X_NAND_ADDR2, mask, val); + } + } +} + +static void ls1x_nand_trigger_op(struct ls1x_nand_host *host, struct ls1x_nand_op *op) +{ + struct nand_chip *chip = &host->chip; + struct mtd_info *mtd = nand_to_mtd(chip); + int col0 = op->addrs[0]; + short col; + + if (!IS_ALIGNED(col0, chip->buf_align)) { + col0 = ALIGN_DOWN(op->addrs[0], chip->buf_align); + op->aligned_offset = op->addrs[0] - col0; + op->addrs[0] = col0; + } + + if (host->data->set_addr) + host->data->set_addr(host, op); + + /* set operation length */ + if (op->is_write || op->is_read || op->is_change_column) + op->len = ALIGN(op->orig_len + op->aligned_offset, chip->buf_align); + else if (op->is_erase) + op->len = 1; + else + op->len = op->orig_len; + + writel(op->len, host->reg_base + LS1X_NAND_OP_NUM); + + /* set operation area and scope */ + col = op->addrs[1] << BITS_PER_BYTE | op->addrs[0]; + if (op->orig_len && !op->is_readid) { + unsigned int op_scope = 0; + + if (col < mtd->writesize) { + op->cmd_reg |= LS1X_NAND_CMD_OP_MAIN; + op_scope = mtd->writesize; + } + + op->cmd_reg |= LS1X_NAND_CMD_OP_SPARE; + op_scope += mtd->oobsize; + + op_scope <<= __ffs(host->data->op_scope_field); + regmap_update_bits(host->regmap, LS1X_NAND_PARAM, + host->data->op_scope_field, op_scope); + } + + /* set command */ + writel(op->cmd_reg, host->reg_base + LS1X_NAND_CMD); + + /* trigger operation */ + regmap_write_bits(host->regmap, LS1X_NAND_CMD, LS1X_NAND_CMD_VALID, LS1X_NAND_CMD_VALID); +} + +static int ls1x_nand_wait_for_op_done(struct ls1x_nand_host *host, struct ls1x_nand_op *op) +{ + unsigned int val; + int ret = 0; + + if (op->rdy_timeout_ms) { + ret = regmap_read_poll_timeout(host->regmap, LS1X_NAND_CMD, + val, val & LS1X_NAND_CMD_OP_DONE, + 0, op->rdy_timeout_ms * MSEC_PER_SEC); + if (ret) + dev_err(host->dev, "operation failed\n"); + } + + return ret; +} + +static void ls1x_nand_dma_callback(void *data) +{ + struct ls1x_nand_host *host = (struct ls1x_nand_host *)data; + struct dma_chan *chan = 
host->dma_chan; + struct device *dev = chan->device->dev; + enum dma_status status; + + status = dmaengine_tx_status(chan, host->dma_cookie, NULL); + if (likely(status == DMA_COMPLETE)) { + dev_dbg(dev, "DMA complete with cookie=%d\n", host->dma_cookie); + complete(&host->dma_complete); + } else { + dev_err(dev, "DMA error with cookie=%d\n", host->dma_cookie); + } +} + +static int ls1x_nand_dma_transfer(struct ls1x_nand_host *host, struct ls1x_nand_op *op) +{ + struct nand_chip *chip = &host->chip; + struct dma_chan *chan = host->dma_chan; + struct device *dev = chan->device->dev; + struct dma_async_tx_descriptor *desc; + enum dma_data_direction data_dir = op->is_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE; + enum dma_transfer_direction xfer_dir = op->is_write ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM; + void *buf = op->buf; + char *dma_buf = NULL; + dma_addr_t dma_addr; + int ret; + + if (IS_ALIGNED((uintptr_t)buf, chip->buf_align) && + IS_ALIGNED(op->orig_len, chip->buf_align)) { + dma_addr = dma_map_single(dev, buf, op->orig_len, data_dir); + if (dma_mapping_error(dev, dma_addr)) { + dev_err(dev, "failed to map DMA buffer\n"); + return -ENXIO; + } + } else if (!op->is_write) { + dma_buf = dma_alloc_coherent(dev, op->len, &dma_addr, GFP_KERNEL); + if (!dma_buf) + return -ENOMEM; + } else { + dev_err(dev, "subpage writing not supported\n"); + return -EOPNOTSUPP; + } + + desc = dmaengine_prep_slave_single(chan, dma_addr, op->len, xfer_dir, DMA_PREP_INTERRUPT); + if (!desc) { + dev_err(dev, "failed to prepare DMA descriptor\n"); + ret = -ENOMEM; + goto err; + } + desc->callback = ls1x_nand_dma_callback; + desc->callback_param = host; + + host->dma_cookie = dmaengine_submit(desc); + ret = dma_submit_error(host->dma_cookie); + if (ret) { + dev_err(dev, "failed to submit DMA descriptor\n"); + goto err; + } + + dev_dbg(dev, "issue DMA with cookie=%d\n", host->dma_cookie); + dma_async_issue_pending(chan); + + if (!wait_for_completion_timeout(&host->dma_complete, msecs_to_jiffies(1000))) { + dmaengine_terminate_sync(chan); + reinit_completion(&host->dma_complete); + ret = -ETIMEDOUT; + goto err; + } + + if (dma_buf) + memcpy(buf, dma_buf + op->aligned_offset, op->orig_len); +err: + if (dma_buf) + dma_free_coherent(dev, op->len, dma_buf, dma_addr); + else + dma_unmap_single(dev, dma_addr, op->orig_len, data_dir); + + return ret; +} + +static int ls1x_nand_data_type_exec(struct nand_chip *chip, const struct nand_subop *subop) +{ + struct ls1x_nand_host *host = nand_get_controller_data(chip); + struct ls1x_nand_op op = {}; + int ret; + + ret = ls1x_nand_parse_instructions(chip, subop, &op); + if (ret) + return ret; + + ls1x_nand_trigger_op(host, &op); + + ret = ls1x_nand_dma_transfer(host, &op); + if (ret) + return ret; + + return ls1x_nand_wait_for_op_done(host, &op); +} + +static int ls1x_nand_misc_type_exec(struct nand_chip *chip, + const struct nand_subop *subop, struct ls1x_nand_op *op) +{ + struct ls1x_nand_host *host = nand_get_controller_data(chip); + int ret; + + ret = ls1x_nand_parse_instructions(chip, subop, op); + if (ret) + return ret; + + ls1x_nand_trigger_op(host, op); + + return ls1x_nand_wait_for_op_done(host, op); +} + +static int ls1x_nand_zerolen_type_exec(struct nand_chip *chip, const struct nand_subop *subop) +{ + struct ls1x_nand_op op = {}; + + return ls1x_nand_misc_type_exec(chip, subop, &op); +} + +static int ls1x_nand_read_id_type_exec(struct nand_chip *chip, const struct nand_subop *subop) +{ + struct ls1x_nand_host *host = nand_get_controller_data(chip); + struct ls1x_nand_op op = 
{}; + int i, ret; + union { + char ids[5]; + struct { + int idl; + char idh; + }; + } nand_id; + + ret = ls1x_nand_misc_type_exec(chip, subop, &op); + if (ret) + return ret; + + nand_id.idl = readl(host->reg_base + LS1X_NAND_IDL); + nand_id.idh = readb(host->reg_base + LS1X_NAND_IDH_STATUS); + + for (i = 0; i < min(sizeof(nand_id.ids), op.orig_len); i++) + op.buf[i] = nand_id.ids[sizeof(nand_id.ids) - 1 - i]; + + return ret; +} + +static int ls1x_nand_read_status_type_exec(struct nand_chip *chip, const struct nand_subop *subop) +{ + struct ls1x_nand_host *host = nand_get_controller_data(chip); + struct ls1x_nand_op op = {}; + int val, ret; + + ret = ls1x_nand_misc_type_exec(chip, subop, &op); + if (ret) + return ret; + + val = readl(host->reg_base + LS1X_NAND_IDH_STATUS); + val &= ~host->data->status_field; + op.buf[0] = val << ffs(host->data->status_field); + + return ret; +} + +static const struct nand_op_parser ls1x_nand_op_parser = NAND_OP_PARSER( + NAND_OP_PARSER_PATTERN( + ls1x_nand_read_id_type_exec, + NAND_OP_PARSER_PAT_CMD_ELEM(false), + NAND_OP_PARSER_PAT_ADDR_ELEM(false, LS1X_NAND_MAX_ADDR_CYC), + NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 8)), + NAND_OP_PARSER_PATTERN( + ls1x_nand_read_status_type_exec, + NAND_OP_PARSER_PAT_CMD_ELEM(false), + NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 1)), + NAND_OP_PARSER_PATTERN( + ls1x_nand_zerolen_type_exec, + NAND_OP_PARSER_PAT_CMD_ELEM(false), + NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)), + NAND_OP_PARSER_PATTERN( + ls1x_nand_zerolen_type_exec, + NAND_OP_PARSER_PAT_CMD_ELEM(false), + NAND_OP_PARSER_PAT_ADDR_ELEM(false, LS1X_NAND_MAX_ADDR_CYC), + NAND_OP_PARSER_PAT_CMD_ELEM(false), + NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)), + NAND_OP_PARSER_PATTERN( + ls1x_nand_data_type_exec, + NAND_OP_PARSER_PAT_CMD_ELEM(false), + NAND_OP_PARSER_PAT_ADDR_ELEM(false, LS1X_NAND_MAX_ADDR_CYC), + NAND_OP_PARSER_PAT_CMD_ELEM(false), + NAND_OP_PARSER_PAT_WAITRDY_ELEM(true), + NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 0)), + NAND_OP_PARSER_PATTERN( + ls1x_nand_data_type_exec, + NAND_OP_PARSER_PAT_CMD_ELEM(false), + NAND_OP_PARSER_PAT_ADDR_ELEM(false, LS1X_NAND_MAX_ADDR_CYC), + NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, 0), + NAND_OP_PARSER_PAT_CMD_ELEM(false), + NAND_OP_PARSER_PAT_WAITRDY_ELEM(true)), + ); + +static int ls1x_nand_is_valid_cmd(u8 opcode) +{ + if (opcode == NAND_CMD_STATUS || opcode == NAND_CMD_RESET || opcode == NAND_CMD_READID) + return 0; + + return -EOPNOTSUPP; +} + +static int ls1x_nand_is_valid_cmd_seq(u8 opcode1, u8 opcode2) +{ + if (opcode1 == NAND_CMD_RNDOUT && opcode2 == NAND_CMD_RNDOUTSTART) + return 0; + + if (opcode1 == NAND_CMD_READ0 && opcode2 == NAND_CMD_READSTART) + return 0; + + if (opcode1 == NAND_CMD_ERASE1 && opcode2 == NAND_CMD_ERASE2) + return 0; + + if (opcode1 == NAND_CMD_SEQIN && opcode2 == NAND_CMD_PAGEPROG) + return 0; + + return -EOPNOTSUPP; +} + +static int ls1x_nand_check_op(struct nand_chip *chip, const struct nand_operation *op) +{ + const struct nand_op_instr *instr1 = NULL, *instr2 = NULL; + int op_id; + + for (op_id = 0; op_id < op->ninstrs; op_id++) { + const struct nand_op_instr *instr = &op->instrs[op_id]; + + if (instr->type == NAND_OP_CMD_INSTR) { + if (!instr1) + instr1 = instr; + else if (!instr2) + instr2 = instr; + else + break; + } + } + + if (!instr1) + return -EOPNOTSUPP; + + if (!instr2) + return ls1x_nand_is_valid_cmd(instr1->ctx.cmd.opcode); + + return ls1x_nand_is_valid_cmd_seq(instr1->ctx.cmd.opcode, instr2->ctx.cmd.opcode); +} + +static int ls1x_nand_exec_op(struct nand_chip *chip, + const struct 
nand_operation *op, bool check_only) +{ + if (check_only) + return ls1x_nand_check_op(chip, op); + + return nand_op_parser_exec_op(chip, &ls1x_nand_op_parser, op, check_only); +} + +static int ls1x_nand_attach_chip(struct nand_chip *chip) +{ + struct ls1x_nand_host *host = nand_get_controller_data(chip); + u64 chipsize = nanddev_target_size(&chip->base); + int cell_size = 0; + + switch (chipsize) { + case SZ_128M: + cell_size = 0x0; + break; + case SZ_256M: + cell_size = 0x1; + break; + case SZ_512M: + cell_size = 0x2; + break; + case SZ_1G: + cell_size = 0x3; + break; + case SZ_2G: + cell_size = 0x4; + break; + case SZ_4G: + cell_size = 0x5; + break; + case SZ_8G: + cell_size = 0x6; + break; + case SZ_16G: + cell_size = 0x7; + break; + default: + dev_err(host->dev, "unsupported chip size: %llu MB\n", chipsize); + return -EINVAL; + } + + switch (chip->ecc.engine_type) { + case NAND_ECC_ENGINE_TYPE_NONE: + break; + case NAND_ECC_ENGINE_TYPE_SOFT: + break; + default: + return -EINVAL; + } + + /* set cell size */ + regmap_update_bits(host->regmap, LS1X_NAND_PARAM, LS1X_NAND_CELL_SIZE_MASK, + FIELD_PREP(LS1X_NAND_CELL_SIZE_MASK, cell_size)); + + regmap_update_bits(host->regmap, LS1X_NAND_TIMING, LS1X_NAND_HOLD_CYCLE_MASK, + FIELD_PREP(LS1X_NAND_HOLD_CYCLE_MASK, host->data->hold_cycle)); + + regmap_update_bits(host->regmap, LS1X_NAND_TIMING, LS1X_NAND_WAIT_CYCLE_MASK, + FIELD_PREP(LS1X_NAND_WAIT_CYCLE_MASK, host->data->wait_cycle)); + + chip->ecc.read_page_raw = nand_monolithic_read_page_raw; + chip->ecc.write_page_raw = nand_monolithic_write_page_raw; + + return 0; +} + +static const struct nand_controller_ops ls1x_nand_controller_ops = { + .exec_op = ls1x_nand_exec_op, + .attach_chip = ls1x_nand_attach_chip, +}; + +static void ls1x_nand_controller_cleanup(struct ls1x_nand_host *host) +{ + if (host->dma_chan) + dma_release_channel(host->dma_chan); +} + +static int ls1x_nand_controller_init(struct ls1x_nand_host *host) +{ + struct device *dev = host->dev; + struct dma_chan *chan; + struct dma_slave_config cfg = {}; + int ret; + + host->regmap = devm_regmap_init_mmio(dev, host->reg_base, &ls1x_nand_regmap_config); + if (IS_ERR(host->regmap)) + return dev_err_probe(dev, PTR_ERR(host->regmap), "failed to init regmap\n"); + + chan = dma_request_chan(dev, "rxtx"); + if (IS_ERR(chan)) + return dev_err_probe(dev, PTR_ERR(chan), "failed to request DMA channel\n"); + host->dma_chan = chan; + + cfg.src_addr = host->dma_base; + cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + cfg.dst_addr = host->dma_base; + cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + ret = dmaengine_slave_config(host->dma_chan, &cfg); + if (ret) + return dev_err_probe(dev, ret, "failed to config DMA channel\n"); + + init_completion(&host->dma_complete); + + return 0; +} + +static int ls1x_nand_chip_init(struct ls1x_nand_host *host) +{ + struct device *dev = host->dev; + int nchips = of_get_child_count(dev->of_node); + struct device_node *chip_np; + struct nand_chip *chip = &host->chip; + struct mtd_info *mtd = nand_to_mtd(chip); + int ret; + + if (nchips != 1) + return dev_err_probe(dev, -EINVAL, "Currently one NAND chip supported\n"); + + chip_np = of_get_next_child(dev->of_node, NULL); + if (!chip_np) + return dev_err_probe(dev, -ENODEV, "failed to get child node for NAND chip\n"); + + nand_set_flash_node(chip, chip_np); + of_node_put(chip_np); + if (!mtd->name) + return dev_err_probe(dev, -EINVAL, "Missing MTD label\n"); + + nand_set_controller_data(chip, host); + chip->controller = &host->controller; + chip->options = 
NAND_NO_SUBPAGE_WRITE | NAND_USES_DMA | NAND_BROKEN_XD; + chip->buf_align = 16; + mtd->dev.parent = dev; + mtd->owner = THIS_MODULE; + + ret = nand_scan(chip, 1); + if (ret) + return dev_err_probe(dev, ret, "failed to scan NAND chip\n"); + + ret = mtd_device_register(mtd, NULL, 0); + if (ret) { + nand_cleanup(chip); + return dev_err_probe(dev, ret, "failed to register MTD device\n"); + } + + return 0; +} + +static int ls1x_nand_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + const struct ls1x_nand_data *data; + struct ls1x_nand_host *host; + struct resource *res; + int ret; + + data = of_device_get_match_data(dev); + if (!data) + return -ENODEV; + + host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL); + if (!host) + return -ENOMEM; + + host->reg_base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(host->reg_base)) + return PTR_ERR(host->reg_base); + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand-dma"); + if (!res) + return dev_err_probe(dev, -EINVAL, "Missing 'nand-dma' in reg-names property\n"); + + host->dma_base = dma_map_resource(dev, res->start, resource_size(res), + DMA_BIDIRECTIONAL, 0); + if (dma_mapping_error(dev, host->dma_base)) + return -ENXIO; + + host->dev = dev; + host->data = data; + host->controller.ops = &ls1x_nand_controller_ops; + + nand_controller_init(&host->controller); + + ret = ls1x_nand_controller_init(host); + if (ret) + goto err; + + ret = ls1x_nand_chip_init(host); + if (ret) + goto err; + + platform_set_drvdata(pdev, host); + + return 0; +err: + ls1x_nand_controller_cleanup(host); + + return ret; +} + +static void ls1x_nand_remove(struct platform_device *pdev) +{ + struct ls1x_nand_host *host = platform_get_drvdata(pdev); + struct nand_chip *chip = &host->chip; + int ret; + + ret = mtd_device_unregister(nand_to_mtd(chip)); + WARN_ON(ret); + nand_cleanup(chip); + ls1x_nand_controller_cleanup(host); +} + +static const struct ls1x_nand_data ls1b_nand_data = { + .status_field = GENMASK(15, 8), + .hold_cycle = 0x2, + .wait_cycle = 0xc, + .set_addr = ls1b_nand_set_addr, +}; + +static const struct ls1x_nand_data ls1c_nand_data = { + .status_field = GENMASK(23, 16), + .op_scope_field = GENMASK(29, 16), + .hold_cycle = 0x2, + .wait_cycle = 0xc, + .set_addr = ls1c_nand_set_addr, +}; + +static const struct of_device_id ls1x_nand_match[] = { + { + .compatible = "loongson,ls1b-nand-controller", + .data = &ls1b_nand_data, + }, + { + .compatible = "loongson,ls1c-nand-controller", + .data = &ls1c_nand_data, + }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, ls1x_nand_match); + +static struct platform_driver ls1x_nand_driver = { + .probe = ls1x_nand_probe, + .remove = ls1x_nand_remove, + .driver = { + .name = KBUILD_MODNAME, + .of_match_table = ls1x_nand_match, + }, +}; + +module_platform_driver(ls1x_nand_driver); + +MODULE_AUTHOR("Keguang Zhang <keguang.zhang@gmail.com>"); +MODULE_DESCRIPTION("Loongson-1 NAND Controller Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c index 5eaa0be367cd..1003cf118c01 100644 --- a/drivers/mtd/nand/raw/qcom_nandc.c +++ b/drivers/mtd/nand/raw/qcom_nandc.c @@ -1863,7 +1863,12 @@ static int qcom_param_page_type_exec(struct nand_chip *chip, const struct nand_ const struct nand_op_instr *instr = NULL; unsigned int op_id = 0; unsigned int len = 0; - int ret; + int ret, reg_base; + + reg_base = NAND_READ_LOCATION_0; + + if (nandc->props->qpic_version2) + reg_base = NAND_READ_LOCATION_LAST_CW_0; ret = 
qcom_parse_instructions(chip, subop, &q_op); if (ret) @@ -1915,14 +1920,17 @@ static int qcom_param_page_type_exec(struct nand_chip *chip, const struct nand_ op_id = q_op.data_instr_idx; len = nand_subop_get_data_len(subop, op_id); - nandc_set_read_loc(chip, 0, 0, 0, len, 1); + if (nandc->props->qpic_version2) + nandc_set_read_loc_last(chip, reg_base, 0, len, 1); + else + nandc_set_read_loc_first(chip, reg_base, 0, len, 1); if (!nandc->props->qpic_version2) { qcom_write_reg_dma(nandc, &nandc->regs->vld, NAND_DEV_CMD_VLD, 1, 0); qcom_write_reg_dma(nandc, &nandc->regs->cmd1, NAND_DEV_CMD1, 1, NAND_BAM_NEXT_SGL); } - nandc->buf_count = len; + nandc->buf_count = 512; memset(nandc->data_buffer, 0xff, nandc->buf_count); config_nand_single_cw_page_read(chip, false, 0); @@ -2360,6 +2368,7 @@ static const struct qcom_nandc_props ipq806x_nandc_props = { .supports_bam = false, .use_codeword_fixup = true, .dev_cmd_reg_start = 0x0, + .bam_offset = 0x30000, }; static const struct qcom_nandc_props ipq4019_nandc_props = { @@ -2367,6 +2376,7 @@ static const struct qcom_nandc_props ipq4019_nandc_props = { .supports_bam = true, .nandc_part_of_qpic = true, .dev_cmd_reg_start = 0x0, + .bam_offset = 0x30000, }; static const struct qcom_nandc_props ipq8074_nandc_props = { @@ -2374,6 +2384,7 @@ static const struct qcom_nandc_props ipq8074_nandc_props = { .supports_bam = true, .nandc_part_of_qpic = true, .dev_cmd_reg_start = 0x7000, + .bam_offset = 0x30000, }; static const struct qcom_nandc_props sdx55_nandc_props = { @@ -2382,6 +2393,7 @@ static const struct qcom_nandc_props sdx55_nandc_props = { .nandc_part_of_qpic = true, .qpic_version2 = true, .dev_cmd_reg_start = 0x7000, + .bam_offset = 0x30000, }; /* diff --git a/drivers/mtd/nand/raw/sunxi_nand.c b/drivers/mtd/nand/raw/sunxi_nand.c index fab371e3e9b7..162cd5f4f234 100644 --- a/drivers/mtd/nand/raw/sunxi_nand.c +++ b/drivers/mtd/nand/raw/sunxi_nand.c @@ -817,6 +817,7 @@ static int sunxi_nfc_hw_ecc_read_chunk(struct nand_chip *nand, if (ret) return ret; + sunxi_nfc_randomizer_config(nand, page, false); sunxi_nfc_randomizer_enable(nand); writel(NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD | NFC_ECC_OP, nfc->regs + NFC_REG_CMD); @@ -1049,6 +1050,7 @@ static int sunxi_nfc_hw_ecc_write_chunk(struct nand_chip *nand, if (ret) return ret; + sunxi_nfc_randomizer_config(nand, page, false); sunxi_nfc_randomizer_enable(nand); sunxi_nfc_hw_ecc_set_prot_oob_bytes(nand, oob, 0, bbm, page); diff --git a/drivers/mtd/nand/spi/alliancememory.c b/drivers/mtd/nand/spi/alliancememory.c index 6046c73f8424..2ee498230ec1 100644 --- a/drivers/mtd/nand/spi/alliancememory.c +++ b/drivers/mtd/nand/spi/alliancememory.c @@ -17,20 +17,20 @@ #define AM_STATUS_ECC_MAX_CORRECTED (3 << 4) static SPINAND_OP_VARIANTS(read_cache_variants, - SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0)); + SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0)); static SPINAND_OP_VARIANTS(write_cache_variants, - SPINAND_PROG_LOAD_X4(true, 0, NULL, 0), - 
SPINAND_PROG_LOAD(true, 0, NULL, 0)); + SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0), + SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0)); static SPINAND_OP_VARIANTS(update_cache_variants, - SPINAND_PROG_LOAD_X4(false, 0, NULL, 0), - SPINAND_PROG_LOAD(false, 0, NULL, 0)); + SPINAND_PROG_LOAD_1S_1S_4S_OP(false, 0, NULL, 0), + SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0)); static int am_get_eccsize(struct mtd_info *mtd) { diff --git a/drivers/mtd/nand/spi/ato.c b/drivers/mtd/nand/spi/ato.c index bb5298911137..2b4df1d917ac 100644 --- a/drivers/mtd/nand/spi/ato.c +++ b/drivers/mtd/nand/spi/ato.c @@ -14,17 +14,17 @@ static SPINAND_OP_VARIANTS(read_cache_variants, - SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0)); + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0)); static SPINAND_OP_VARIANTS(write_cache_variants, - SPINAND_PROG_LOAD_X4(true, 0, NULL, 0), - SPINAND_PROG_LOAD(true, 0, NULL, 0)); + SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0), + SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0)); static SPINAND_OP_VARIANTS(update_cache_variants, - SPINAND_PROG_LOAD_X4(false, 0, NULL, 0), - SPINAND_PROG_LOAD(false, 0, NULL, 0)); + SPINAND_PROG_LOAD_1S_1S_4S_OP(false, 0, NULL, 0), + SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0)); static int ato25d1ga_ooblayout_ecc(struct mtd_info *mtd, int section, diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c index d16e42cf8fae..7099db7a62be 100644 --- a/drivers/mtd/nand/spi/core.c +++ b/drivers/mtd/nand/spi/core.c @@ -22,7 +22,7 @@ static int spinand_read_reg_op(struct spinand_device *spinand, u8 reg, u8 *val) { - struct spi_mem_op op = SPINAND_GET_FEATURE_OP(reg, + struct spi_mem_op op = SPINAND_GET_FEATURE_1S_1S_1S_OP(reg, spinand->scratchbuf); int ret; @@ -36,7 +36,7 @@ static int spinand_read_reg_op(struct spinand_device *spinand, u8 reg, u8 *val) int spinand_write_reg_op(struct spinand_device *spinand, u8 reg, u8 val) { - struct spi_mem_op op = SPINAND_SET_FEATURE_OP(reg, + struct spi_mem_op op = SPINAND_SET_FEATURE_1S_1S_1S_OP(reg, spinand->scratchbuf); *spinand->scratchbuf = val; @@ -362,7 +362,7 @@ static void spinand_ondie_ecc_save_status(struct nand_device *nand, u8 status) static int spinand_write_enable_op(struct spinand_device *spinand) { - struct spi_mem_op op = SPINAND_WR_EN_DIS_OP(true); + struct spi_mem_op op = SPINAND_WR_EN_DIS_1S_0_0_OP(true); return spi_mem_exec_op(spinand->spimem, &op); } @@ -372,7 +372,7 @@ static int spinand_load_page_op(struct spinand_device *spinand, { struct nand_device *nand = spinand_to_nand(spinand); unsigned int row = nanddev_pos_to_row(nand, &req->pos); - struct spi_mem_op op = SPINAND_PAGE_READ_OP(row); + struct spi_mem_op op = SPINAND_PAGE_READ_1S_1S_0_OP(row); return spi_mem_exec_op(spinand->spimem, &op); } @@ -519,7 +519,7 @@ static int spinand_program_op(struct spinand_device *spinand, { struct nand_device *nand = spinand_to_nand(spinand); unsigned int row = nanddev_pos_to_row(nand, &req->pos); - struct spi_mem_op op = SPINAND_PROG_EXEC_OP(row); + struct spi_mem_op op = SPINAND_PROG_EXEC_1S_1S_0_OP(row); return spi_mem_exec_op(spinand->spimem, &op); } @@ -529,7 +529,7 @@ static int spinand_erase_op(struct spinand_device *spinand, { struct nand_device *nand = spinand_to_nand(spinand); unsigned int row = nanddev_pos_to_row(nand, pos); - struct 
spi_mem_op op = SPINAND_BLK_ERASE_OP(row); + struct spi_mem_op op = SPINAND_BLK_ERASE_1S_1S_0_OP(row); return spi_mem_exec_op(spinand->spimem, &op); } @@ -549,8 +549,8 @@ static int spinand_erase_op(struct spinand_device *spinand, int spinand_wait(struct spinand_device *spinand, unsigned long initial_delay_us, unsigned long poll_delay_us, u8 *s) { - struct spi_mem_op op = SPINAND_GET_FEATURE_OP(REG_STATUS, - spinand->scratchbuf); + struct spi_mem_op op = SPINAND_GET_FEATURE_1S_1S_1S_OP(REG_STATUS, + spinand->scratchbuf); u8 status; int ret; @@ -583,7 +583,7 @@ out: static int spinand_read_id_op(struct spinand_device *spinand, u8 naddr, u8 ndummy, u8 *buf) { - struct spi_mem_op op = SPINAND_READID_OP( + struct spi_mem_op op = SPINAND_READID_1S_1S_1S_OP( naddr, ndummy, spinand->scratchbuf, SPINAND_MAX_ID_LEN); int ret; @@ -596,7 +596,7 @@ static int spinand_read_id_op(struct spinand_device *spinand, u8 naddr, static int spinand_reset_op(struct spinand_device *spinand) { - struct spi_mem_op op = SPINAND_RESET_OP; + struct spi_mem_op op = SPINAND_RESET_1S_0_0_OP; int ret; ret = spi_mem_exec_op(spinand->spimem, &op); diff --git a/drivers/mtd/nand/spi/esmt.c b/drivers/mtd/nand/spi/esmt.c index a164d821464d..9e286612a296 100644 --- a/drivers/mtd/nand/spi/esmt.c +++ b/drivers/mtd/nand/spi/esmt.c @@ -18,18 +18,18 @@ (CFG_OTP_ENABLE | ESMT_F50L1G41LB_CFG_OTP_PROTECT) static SPINAND_OP_VARIANTS(read_cache_variants, - SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0)); + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0)); static SPINAND_OP_VARIANTS(write_cache_variants, - SPINAND_PROG_LOAD_X4(true, 0, NULL, 0), - SPINAND_PROG_LOAD(true, 0, NULL, 0)); + SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0), + SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0)); static SPINAND_OP_VARIANTS(update_cache_variants, - SPINAND_PROG_LOAD_X4(false, 0, NULL, 0), - SPINAND_PROG_LOAD(false, 0, NULL, 0)); + SPINAND_PROG_LOAD_1S_1S_4S_OP(false, 0, NULL, 0), + SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0)); /* * OOB spare area map (64 bytes) @@ -137,8 +137,8 @@ static int f50l1g41lb_user_otp_info(struct spinand_device *spinand, size_t len, static int f50l1g41lb_otp_lock(struct spinand_device *spinand, loff_t from, size_t len) { - struct spi_mem_op write_op = SPINAND_WR_EN_DIS_OP(true); - struct spi_mem_op exec_op = SPINAND_PROG_EXEC_OP(0); + struct spi_mem_op write_op = SPINAND_WR_EN_DIS_1S_0_0_OP(true); + struct spi_mem_op exec_op = SPINAND_PROG_EXEC_1S_1S_0_OP(0); u8 status; int ret; @@ -199,7 +199,7 @@ static const struct spinand_info esmt_c8_spinand_table[] = { SPINAND_FACT_OTP_INFO(2, 0, &f50l1g41lb_fact_otp_ops)), SPINAND_INFO("F50D1G41LB", SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x11, 0x7f, - 0x7f, 0x7f), + 0x7f), NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1), NAND_ECCREQ(1, 512), SPINAND_INFO_OP_VARIANTS(&read_cache_variants, diff --git a/drivers/mtd/nand/spi/foresee.c b/drivers/mtd/nand/spi/foresee.c index ecd5f6bffa33..7c61644bfb10 100644 --- a/drivers/mtd/nand/spi/foresee.c +++ b/drivers/mtd/nand/spi/foresee.c @@ -12,18 +12,18 @@ #define SPINAND_MFR_FORESEE 0xCD static SPINAND_OP_VARIANTS(read_cache_variants, - SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0), - 
SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0)); + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0)); static SPINAND_OP_VARIANTS(write_cache_variants, - SPINAND_PROG_LOAD_X4(true, 0, NULL, 0), - SPINAND_PROG_LOAD(true, 0, NULL, 0)); + SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0), + SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0)); static SPINAND_OP_VARIANTS(update_cache_variants, - SPINAND_PROG_LOAD_X4(false, 0, NULL, 0), - SPINAND_PROG_LOAD(false, 0, NULL, 0)); + SPINAND_PROG_LOAD_1S_1S_4S_OP(false, 0, NULL, 0), + SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0)); static int f35sqa002g_ooblayout_ecc(struct mtd_info *mtd, int section, struct mtd_oob_region *region) diff --git a/drivers/mtd/nand/spi/gigadevice.c b/drivers/mtd/nand/spi/gigadevice.c index d620bb02a20a..cb1d316fc4d8 100644 --- a/drivers/mtd/nand/spi/gigadevice.c +++ b/drivers/mtd/nand/spi/gigadevice.c @@ -24,44 +24,44 @@ #define GD5FXGQ4UXFXXG_STATUS_ECC_UNCOR_ERROR (7 << 4) static SPINAND_OP_VARIANTS(read_cache_variants, - SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0)); + SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0)); static SPINAND_OP_VARIANTS(read_cache_variants_f, - SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_X4_OP_3A(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_X2_OP_3A(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_FAST_OP_3A(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_OP_3A(0, 0, NULL, 0)); + SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_3A_1S_1S_4S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_3A_1S_1S_2S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_FAST_3A_1S_1S_1S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_3A_1S_1S_1S_OP(0, 0, NULL, 0)); static SPINAND_OP_VARIANTS(read_cache_variants_1gq5, - SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0)); + SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 2, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0)); static SPINAND_OP_VARIANTS(read_cache_variants_2gq5, - 
SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 4, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 2, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0)); + SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 4, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 2, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0)); static SPINAND_OP_VARIANTS(write_cache_variants, - SPINAND_PROG_LOAD_X4(true, 0, NULL, 0), - SPINAND_PROG_LOAD(true, 0, NULL, 0)); + SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0), + SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0)); static SPINAND_OP_VARIANTS(update_cache_variants, - SPINAND_PROG_LOAD_X4(false, 0, NULL, 0), - SPINAND_PROG_LOAD(false, 0, NULL, 0)); + SPINAND_PROG_LOAD_1S_1S_4S_OP(false, 0, NULL, 0), + SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0)); static int gd5fxgq4xa_ooblayout_ecc(struct mtd_info *mtd, int section, struct mtd_oob_region *region) @@ -185,7 +185,7 @@ static int gd5fxgq4uexxg_ecc_get_status(struct spinand_device *spinand, u8 status) { u8 status2; - struct spi_mem_op op = SPINAND_GET_FEATURE_OP(GD5FXGQXXEXXG_REG_STATUS2, + struct spi_mem_op op = SPINAND_GET_FEATURE_1S_1S_1S_OP(GD5FXGQXXEXXG_REG_STATUS2, spinand->scratchbuf); int ret; @@ -228,7 +228,7 @@ static int gd5fxgq5xexxg_ecc_get_status(struct spinand_device *spinand, u8 status) { u8 status2; - struct spi_mem_op op = SPINAND_GET_FEATURE_OP(GD5FXGQXXEXXG_REG_STATUS2, + struct spi_mem_op op = SPINAND_GET_FEATURE_1S_1S_1S_OP(GD5FXGQXXEXXG_REG_STATUS2, spinand->scratchbuf); int ret; diff --git a/drivers/mtd/nand/spi/macronix.c b/drivers/mtd/nand/spi/macronix.c index 1ef08ad850a2..eeaf5bf9f082 100644 --- a/drivers/mtd/nand/spi/macronix.c +++ b/drivers/mtd/nand/spi/macronix.c @@ -28,18 +28,18 @@ struct macronix_priv { }; static SPINAND_OP_VARIANTS(read_cache_variants, - SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0)); + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0)); static SPINAND_OP_VARIANTS(write_cache_variants, - SPINAND_PROG_LOAD_X4(true, 0, NULL, 0), - SPINAND_PROG_LOAD(false, 0, NULL, 0)); + SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0), + SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0)); static SPINAND_OP_VARIANTS(update_cache_variants, - SPINAND_PROG_LOAD_X4(false, 0, NULL, 0), - SPINAND_PROG_LOAD(false, 0, NULL, 0)); + SPINAND_PROG_LOAD_1S_1S_4S_OP(false, 0, NULL, 0), + SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0)); static int mx35lfxge4ab_ooblayout_ecc(struct mtd_info *mtd, int section, struct mtd_oob_region *region) @@ -148,8 +148,8 @@ static int macronix_set_cont_read(struct spinand_device *spinand, bool enable) static int macronix_set_read_retry(struct spinand_device *spinand, unsigned int retry_mode) { - struct spi_mem_op op = SPINAND_SET_FEATURE_OP(MACRONIX_FEATURE_ADDR_READ_RETRY, - spinand->scratchbuf); + struct spi_mem_op op = 
SPINAND_SET_FEATURE_1S_1S_1S_OP(MACRONIX_FEATURE_ADDR_READ_RETRY, + spinand->scratchbuf); *spinand->scratchbuf = retry_mode; return spi_mem_exec_op(spinand->spimem, &op); diff --git a/drivers/mtd/nand/spi/micron.c b/drivers/mtd/nand/spi/micron.c index 691f8a2e0791..8281c9d3f4f7 100644 --- a/drivers/mtd/nand/spi/micron.c +++ b/drivers/mtd/nand/spi/micron.c @@ -35,33 +35,33 @@ (CFG_OTP_ENABLE | MICRON_MT29F2G01ABAGD_CFG_OTP_STATE) static SPINAND_OP_VARIANTS(quadio_read_cache_variants, - SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0)); + SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 2, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0)); static SPINAND_OP_VARIANTS(x4_write_cache_variants, - SPINAND_PROG_LOAD_X4(true, 0, NULL, 0), - SPINAND_PROG_LOAD(true, 0, NULL, 0)); + SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0), + SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0)); static SPINAND_OP_VARIANTS(x4_update_cache_variants, - SPINAND_PROG_LOAD_X4(false, 0, NULL, 0), - SPINAND_PROG_LOAD(false, 0, NULL, 0)); + SPINAND_PROG_LOAD_1S_1S_4S_OP(false, 0, NULL, 0), + SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0)); /* Micron MT29F2G01AAAED Device */ static SPINAND_OP_VARIANTS(x4_read_cache_variants, - SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0)); + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0)); static SPINAND_OP_VARIANTS(x1_write_cache_variants, - SPINAND_PROG_LOAD(true, 0, NULL, 0)); + SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0)); static SPINAND_OP_VARIANTS(x1_update_cache_variants, - SPINAND_PROG_LOAD(false, 0, NULL, 0)); + SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0)); static int micron_8_ooblayout_ecc(struct mtd_info *mtd, int section, struct mtd_oob_region *region) @@ -137,7 +137,7 @@ static const struct mtd_ooblayout_ops micron_4_ooblayout = { static int micron_select_target(struct spinand_device *spinand, unsigned int target) { - struct spi_mem_op op = SPINAND_SET_FEATURE_OP(MICRON_DIE_SELECT_REG, + struct spi_mem_op op = SPINAND_SET_FEATURE_1S_1S_1S_OP(MICRON_DIE_SELECT_REG, spinand->scratchbuf); if (target > 1) @@ -251,8 +251,8 @@ static int mt29f2g01abagd_user_otp_info(struct spinand_device *spinand, static int mt29f2g01abagd_otp_lock(struct spinand_device *spinand, loff_t from, size_t len) { - struct spi_mem_op write_op = SPINAND_WR_EN_DIS_OP(true); - struct spi_mem_op exec_op = SPINAND_PROG_EXEC_OP(0); + struct spi_mem_op write_op = SPINAND_WR_EN_DIS_1S_0_0_OP(true); + struct spi_mem_op exec_op = SPINAND_PROG_EXEC_1S_1S_0_OP(0); u8 status; int ret; diff --git a/drivers/mtd/nand/spi/paragon.c b/drivers/mtd/nand/spi/paragon.c index 6e7cc6995380..4670bac41245 100644 --- a/drivers/mtd/nand/spi/paragon.c +++ 
b/drivers/mtd/nand/spi/paragon.c @@ -22,20 +22,20 @@ static SPINAND_OP_VARIANTS(read_cache_variants, - SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0)); + SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 2, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0)); static SPINAND_OP_VARIANTS(write_cache_variants, - SPINAND_PROG_LOAD_X4(true, 0, NULL, 0), - SPINAND_PROG_LOAD(true, 0, NULL, 0)); + SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0), + SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0)); static SPINAND_OP_VARIANTS(update_cache_variants, - SPINAND_PROG_LOAD_X4(false, 0, NULL, 0), - SPINAND_PROG_LOAD(false, 0, NULL, 0)); + SPINAND_PROG_LOAD_1S_1S_4S_OP(false, 0, NULL, 0), + SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0)); static int pn26g0xa_ooblayout_ecc(struct mtd_info *mtd, int section, diff --git a/drivers/mtd/nand/spi/skyhigh.c b/drivers/mtd/nand/spi/skyhigh.c index 961df0d74984..51d61785df61 100644 --- a/drivers/mtd/nand/spi/skyhigh.c +++ b/drivers/mtd/nand/spi/skyhigh.c @@ -17,20 +17,20 @@ #define SKYHIGH_CONFIG_PROTECT_EN BIT(1) static SPINAND_OP_VARIANTS(read_cache_variants, - SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 4, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 2, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0)); + SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 4, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 2, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0)); static SPINAND_OP_VARIANTS(write_cache_variants, - SPINAND_PROG_LOAD_X4(true, 0, NULL, 0), - SPINAND_PROG_LOAD(true, 0, NULL, 0)); + SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0), + SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0)); static SPINAND_OP_VARIANTS(update_cache_variants, - SPINAND_PROG_LOAD_X4(false, 0, NULL, 0), - SPINAND_PROG_LOAD(false, 0, NULL, 0)); + SPINAND_PROG_LOAD_1S_1S_4S_OP(false, 0, NULL, 0), + SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0)); static int skyhigh_spinand_ooblayout_ecc(struct mtd_info *mtd, int section, struct mtd_oob_region *region) diff --git a/drivers/mtd/nand/spi/toshiba.c b/drivers/mtd/nand/spi/toshiba.c index 2e2106b2705f..4c6923047aeb 100644 --- a/drivers/mtd/nand/spi/toshiba.c +++ b/drivers/mtd/nand/spi/toshiba.c @@ -15,28 +15,28 @@ #define TOSH_STATUS_ECC_HAS_BITFLIPS_T (3 << 4) static SPINAND_OP_VARIANTS(read_cache_variants, - SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0)); + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0), + 
SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0)); static SPINAND_OP_VARIANTS(write_cache_x4_variants, - SPINAND_PROG_LOAD_X4(true, 0, NULL, 0), - SPINAND_PROG_LOAD(true, 0, NULL, 0)); + SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0), + SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0)); static SPINAND_OP_VARIANTS(update_cache_x4_variants, - SPINAND_PROG_LOAD_X4(false, 0, NULL, 0), - SPINAND_PROG_LOAD(false, 0, NULL, 0)); + SPINAND_PROG_LOAD_1S_1S_4S_OP(false, 0, NULL, 0), + SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0)); /* * Backward compatibility for 1st generation Serial NAND devices * which don't support Quad Program Load operation. */ static SPINAND_OP_VARIANTS(write_cache_variants, - SPINAND_PROG_LOAD(true, 0, NULL, 0)); + SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0)); static SPINAND_OP_VARIANTS(update_cache_variants, - SPINAND_PROG_LOAD(false, 0, NULL, 0)); + SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0)); static int tx58cxgxsxraix_ooblayout_ecc(struct mtd_info *mtd, int section, struct mtd_oob_region *region) @@ -73,7 +73,7 @@ static int tx58cxgxsxraix_ecc_get_status(struct spinand_device *spinand, { struct nand_device *nand = spinand_to_nand(spinand); u8 mbf = 0; - struct spi_mem_op op = SPINAND_GET_FEATURE_OP(0x30, spinand->scratchbuf); + struct spi_mem_op op = SPINAND_GET_FEATURE_1S_1S_1S_OP(0x30, spinand->scratchbuf); switch (status & STATUS_ECC_MASK) { case STATUS_ECC_NO_BITFLIPS: diff --git a/drivers/mtd/nand/spi/winbond.c b/drivers/mtd/nand/spi/winbond.c index 8394a1b1fb0c..19f8dd4a6370 100644 --- a/drivers/mtd/nand/spi/winbond.c +++ b/drivers/mtd/nand/spi/winbond.c @@ -23,34 +23,50 @@ * "X4" in the core is equivalent to "quad output" in the datasheets. */ -static SPINAND_OP_VARIANTS(read_cache_dtr_variants, - SPINAND_PAGE_READ_FROM_CACHE_QUADIO_DTR_OP(0, 8, NULL, 0, 80 * HZ_PER_MHZ), - SPINAND_PAGE_READ_FROM_CACHE_X4_DTR_OP(0, 2, NULL, 0, 80 * HZ_PER_MHZ), - SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_DUALIO_DTR_OP(0, 4, NULL, 0, 80 * HZ_PER_MHZ), - SPINAND_PAGE_READ_FROM_CACHE_X2_DTR_OP(0, 2, NULL, 0, 80 * HZ_PER_MHZ), - SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_DTR_OP(0, 2, NULL, 0, 80 * HZ_PER_MHZ), - SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0, 54 * HZ_PER_MHZ)); +static SPINAND_OP_VARIANTS(read_cache_octal_variants, + SPINAND_PAGE_READ_FROM_CACHE_1S_1D_8D_OP(0, 2, NULL, 0, 105 * HZ_PER_MHZ), + SPINAND_PAGE_READ_FROM_CACHE_1S_8S_8S_OP(0, 16, NULL, 0, 86 * HZ_PER_MHZ), + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_8S_OP(0, 1, NULL, 0, 133 * HZ_PER_MHZ), + SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0)); + +static SPINAND_OP_VARIANTS(write_cache_octal_variants, + SPINAND_PROG_LOAD_1S_8S_8S_OP(true, 0, NULL, 0), + SPINAND_PROG_LOAD_1S_1S_8S_OP(0, NULL, 0), + SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0)); + +static SPINAND_OP_VARIANTS(update_cache_octal_variants, + SPINAND_PROG_LOAD_1S_8S_8S_OP(false, 0, NULL, 0), + SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0)); + +static SPINAND_OP_VARIANTS(read_cache_dual_quad_dtr_variants, + SPINAND_PAGE_READ_FROM_CACHE_1S_4D_4D_OP(0, 8, NULL, 0, 80 * HZ_PER_MHZ), + SPINAND_PAGE_READ_FROM_CACHE_1S_1D_4D_OP(0, 2, NULL, 0, 80 * HZ_PER_MHZ), + 
SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 2, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_2D_2D_OP(0, 4, NULL, 0, 80 * HZ_PER_MHZ), + SPINAND_PAGE_READ_FROM_CACHE_1S_1D_2D_OP(0, 2, NULL, 0, 80 * HZ_PER_MHZ), + SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_1D_1D_OP(0, 2, NULL, 0, 80 * HZ_PER_MHZ), + SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0, 54 * HZ_PER_MHZ)); static SPINAND_OP_VARIANTS(read_cache_variants, - SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0)); + SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 2, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0)); static SPINAND_OP_VARIANTS(write_cache_variants, - SPINAND_PROG_LOAD_X4(true, 0, NULL, 0), - SPINAND_PROG_LOAD(true, 0, NULL, 0)); + SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0), + SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0)); static SPINAND_OP_VARIANTS(update_cache_variants, - SPINAND_PROG_LOAD_X4(false, 0, NULL, 0), - SPINAND_PROG_LOAD(false, 0, NULL, 0)); + SPINAND_PROG_LOAD_1S_1S_4S_OP(false, 0, NULL, 0), + SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0)); static int w25m02gv_ooblayout_ecc(struct mtd_info *mtd, int section, struct mtd_oob_region *region) @@ -141,12 +157,47 @@ static const struct mtd_ooblayout_ops w25n02kv_ooblayout = { .free = w25n02kv_ooblayout_free, }; +static int w35n01jw_ooblayout_ecc(struct mtd_info *mtd, int section, + struct mtd_oob_region *region) +{ + if (section > 7) + return -ERANGE; + + region->offset = (16 * section) + 12; + region->length = 4; + + return 0; +} + +static int w35n01jw_ooblayout_free(struct mtd_info *mtd, int section, + struct mtd_oob_region *region) +{ + if (section > 7) + return -ERANGE; + + region->offset = 16 * section; + region->length = 12; + + /* Extract BBM */ + if (!section) { + region->offset += 2; + region->length -= 2; + } + + return 0; +} + +static const struct mtd_ooblayout_ops w35n01jw_ooblayout = { + .ecc = w35n01jw_ooblayout_ecc, + .free = w35n01jw_ooblayout_free, +}; + static int w25n02kv_ecc_get_status(struct spinand_device *spinand, u8 status) { struct nand_device *nand = spinand_to_nand(spinand); u8 mbf = 0; - struct spi_mem_op op = SPINAND_GET_FEATURE_OP(0x30, spinand->scratchbuf); + struct spi_mem_op op = SPINAND_GET_FEATURE_1S_1S_1S_OP(0x30, spinand->scratchbuf); switch (status & STATUS_ECC_MASK) { case STATUS_ECC_NO_BITFLIPS: @@ -213,7 +264,7 @@ static const struct spinand_info winbond_spinand_table[] = { SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xbc, 0x21), NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1), NAND_ECCREQ(1, 512), - SPINAND_INFO_OP_VARIANTS(&read_cache_dtr_variants, + SPINAND_INFO_OP_VARIANTS(&read_cache_dual_quad_dtr_variants, &write_cache_variants, &update_cache_variants), 0, @@ -227,6 +278,33 @@ static const struct spinand_info winbond_spinand_table[] = { 
&update_cache_variants), 0, SPINAND_ECCINFO(&w25n01kv_ooblayout, w25n02kv_ecc_get_status)), + SPINAND_INFO("W35N01JW", /* 1.8V */ + SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xdc, 0x21), + NAND_MEMORG(1, 4096, 128, 64, 512, 10, 1, 1, 1), + NAND_ECCREQ(1, 512), + SPINAND_INFO_OP_VARIANTS(&read_cache_octal_variants, + &write_cache_octal_variants, + &update_cache_octal_variants), + 0, + SPINAND_ECCINFO(&w35n01jw_ooblayout, NULL)), + SPINAND_INFO("W35N02JW", /* 1.8V */ + SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xdf, 0x22), + NAND_MEMORG(1, 4096, 128, 64, 512, 10, 2, 1, 1), + NAND_ECCREQ(1, 512), + SPINAND_INFO_OP_VARIANTS(&read_cache_octal_variants, + &write_cache_octal_variants, + &update_cache_octal_variants), + 0, + SPINAND_ECCINFO(&w35n01jw_ooblayout, NULL)), + SPINAND_INFO("W35N04JW", /* 1.8V */ + SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xdf, 0x23), + NAND_MEMORG(1, 4096, 128, 64, 512, 10, 4, 1, 1), + NAND_ECCREQ(1, 512), + SPINAND_INFO_OP_VARIANTS(&read_cache_octal_variants, + &write_cache_octal_variants, + &update_cache_octal_variants), + 0, + SPINAND_ECCINFO(&w35n01jw_ooblayout, NULL)), /* 2G-bit densities */ SPINAND_INFO("W25M02GV", /* 2x1G-bit 3.3V */ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xab, 0x21), @@ -242,7 +320,7 @@ static const struct spinand_info winbond_spinand_table[] = { SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xbf, 0x22), NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 2, 1), NAND_ECCREQ(1, 512), - SPINAND_INFO_OP_VARIANTS(&read_cache_dtr_variants, + SPINAND_INFO_OP_VARIANTS(&read_cache_dual_quad_dtr_variants, &write_cache_variants, &update_cache_variants), 0, diff --git a/drivers/mtd/nand/spi/xtx.c b/drivers/mtd/nand/spi/xtx.c index 3f539ca0de86..37336d5958a9 100644 --- a/drivers/mtd/nand/spi/xtx.c +++ b/drivers/mtd/nand/spi/xtx.c @@ -23,20 +23,20 @@ #define XT26XXXD_STATUS_ECC_UNCOR_ERROR (2) static SPINAND_OP_VARIANTS(read_cache_variants, - SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0)); + SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0)); static SPINAND_OP_VARIANTS(write_cache_variants, - SPINAND_PROG_LOAD_X4(true, 0, NULL, 0), - SPINAND_PROG_LOAD(true, 0, NULL, 0)); + SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0), + SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0)); static SPINAND_OP_VARIANTS(update_cache_variants, - SPINAND_PROG_LOAD_X4(false, 0, NULL, 0), - SPINAND_PROG_LOAD(false, 0, NULL, 0)); + SPINAND_PROG_LOAD_1S_1S_4S_OP(false, 0, NULL, 0), + SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0)); static int xt26g0xa_ooblayout_ecc(struct mtd_info *mtd, int section, struct mtd_oob_region *region) diff --git a/drivers/mtd/spi-nor/macronix.c b/drivers/mtd/spi-nor/macronix.c index 55644a3cd88c..e97f5cbd9aad 100644 --- a/drivers/mtd/spi-nor/macronix.c +++ b/drivers/mtd/spi-nor/macronix.c @@ -58,6 +58,31 @@ macronix_qpp4b_post_sfdp_fixups(struct spi_nor *nor) return 0; } +static int +mx25l3255e_late_init_fixups(struct spi_nor *nor) +{ + struct spi_nor_flash_parameter 
*params = nor->params; + + /* + * SFDP of MX25L3255E is JESD216, which does not include the Quad + * Enable bit Requirement in BFPT. As a result, during BFPT parsing, + * the quad_enable method is not set to spi_nor_sr1_bit6_quad_enable. + * Therefore, it is necessary to correct this setting by late_init. + */ + params->quad_enable = spi_nor_sr1_bit6_quad_enable; + + /* + * In addition, MX25L3255E also supports 1-4-4 page program in 3-byte + * address mode. However, since the 3-byte address 1-4-4 page program + * is not defined in SFDP, it needs to be configured in late_init. + */ + params->hwcaps.mask |= SNOR_HWCAPS_PP_1_4_4; + spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP_1_4_4], + SPINOR_OP_PP_1_4_4, SNOR_PROTO_1_4_4); + + return 0; +} + static const struct spi_nor_fixups mx25l25635_fixups = { .post_bfpt = mx25l25635_post_bfpt_fixups, .post_sfdp = macronix_qpp4b_post_sfdp_fixups, @@ -67,6 +92,10 @@ static const struct spi_nor_fixups macronix_qpp4b_fixups = { .post_sfdp = macronix_qpp4b_post_sfdp_fixups, }; +static const struct spi_nor_fixups mx25l3255e_fixups = { + .late_init = mx25l3255e_late_init_fixups, +}; + static const struct flash_info macronix_nor_parts[] = { { .id = SNOR_ID(0xc2, 0x20, 0x10), @@ -88,10 +117,8 @@ static const struct flash_info macronix_nor_parts[] = { .name = "mx25l8005", .size = SZ_1M, }, { + /* MX25L1606E */ .id = SNOR_ID(0xc2, 0x20, 0x15), - .name = "mx25l1606e", - .size = SZ_2M, - .no_sfdp_flags = SECT_4K, }, { .id = SNOR_ID(0xc2, 0x20, 0x16), .name = "mx25l3205d", @@ -103,29 +130,21 @@ static const struct flash_info macronix_nor_parts[] = { .size = SZ_8M, .no_sfdp_flags = SECT_4K, }, { + /* MX25L12805D */ .id = SNOR_ID(0xc2, 0x20, 0x18), - .name = "mx25l12805d", - .size = SZ_16M, .flags = SPI_NOR_HAS_LOCK | SPI_NOR_4BIT_BP, - .no_sfdp_flags = SECT_4K, }, { + /* MX25L25635E, MX25L25645G */ .id = SNOR_ID(0xc2, 0x20, 0x19), - .name = "mx25l25635e", - .size = SZ_32M, - .no_sfdp_flags = SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ, .fixups = &mx25l25635_fixups }, { + /* MX66L51235F */ .id = SNOR_ID(0xc2, 0x20, 0x1a), - .name = "mx66l51235f", - .size = SZ_64M, - .no_sfdp_flags = SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ, .fixup_flags = SPI_NOR_4B_OPCODES, .fixups = &macronix_qpp4b_fixups, }, { + /* MX66L1G45G */ .id = SNOR_ID(0xc2, 0x20, 0x1b), - .name = "mx66l1g45g", - .size = SZ_128M, - .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ, .fixups = &macronix_qpp4b_fixups, }, { /* MX66L2G45G */ @@ -167,29 +186,16 @@ static const struct flash_info macronix_nor_parts[] = { .size = SZ_16M, .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ, }, { + /* MX25U51245G */ .id = SNOR_ID(0xc2, 0x25, 0x3a), - .name = "mx25u51245g", - .size = SZ_64M, - .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ, - .fixup_flags = SPI_NOR_4B_OPCODES, - .fixups = &macronix_qpp4b_fixups, - }, { - .id = SNOR_ID(0xc2, 0x25, 0x3a), - .name = "mx66u51235f", - .size = SZ_64M, - .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ, - .fixup_flags = SPI_NOR_4B_OPCODES, .fixups = &macronix_qpp4b_fixups, }, { /* MX66U1G45G */ .id = SNOR_ID(0xc2, 0x25, 0x3b), .fixups = &macronix_qpp4b_fixups, }, { + /* MX66U2G45G */ .id = SNOR_ID(0xc2, 0x25, 0x3c), - .name = "mx66u2g45g", - .size = SZ_256M, - .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ, - .fixup_flags = SPI_NOR_4B_OPCODES, .fixups = &macronix_qpp4b_fixups, }, { .id = SNOR_ID(0xc2, 0x26, 0x18), @@ -215,15 +221,14 @@ static const struct flash_info macronix_nor_parts[] = { .size = SZ_4M, .no_sfdp_flags = 
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ, }, { + /* MX25UW51245G */ .id = SNOR_ID(0xc2, 0x81, 0x3a), - .name = "mx25uw51245g", .n_banks = 4, .flags = SPI_NOR_RWW, }, { + /* MX25L3255E */ .id = SNOR_ID(0xc2, 0x9e, 0x16), - .name = "mx25l3255e", - .size = SZ_4M, - .no_sfdp_flags = SECT_4K, + .fixups = &mx25l3255e_fixups, }, /* * This spares us of adding new flash entries for flashes that can be diff --git a/drivers/net/wireless/intel/iwlwifi/mld/d3.c b/drivers/net/wireless/intel/iwlwifi/mld/d3.c index 339b148d6793..c776543cbba5 100644 --- a/drivers/net/wireless/intel/iwlwifi/mld/d3.c +++ b/drivers/net/wireless/intel/iwlwifi/mld/d3.c @@ -1757,7 +1757,7 @@ iwl_mld_send_proto_offload(struct iwl_mld *mld, addrconf_addr_solict_mult(&wowlan_data->target_ipv6_addrs[i], &solicited_addr); - for (j = 0; j < c; j++) + for (j = 0; j < n_nsc && j < c; j++) if (ipv6_addr_cmp(&nsc[j].dest_ipv6_addr, &solicited_addr) == 0) break; diff --git a/drivers/nvme/target/Kconfig b/drivers/nvme/target/Kconfig index 4c253b433bf7..4904097dfd49 100644 --- a/drivers/nvme/target/Kconfig +++ b/drivers/nvme/target/Kconfig @@ -3,7 +3,7 @@ config NVME_TARGET tristate "NVMe Target support" depends on BLOCK - depends on CONFIGFS_FS + select CONFIGFS_FS select NVME_KEYRING if NVME_TARGET_TCP_TLS select KEYS if NVME_TARGET_TCP_TLS select SGL_ALLOC diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c index aedd0e2dcd89..0edd639898a6 100644 --- a/drivers/of/fdt.c +++ b/drivers/of/fdt.c @@ -25,6 +25,7 @@ #include <linux/serial_core.h> #include <linux/sysfs.h> #include <linux/random.h> +#include <linux/kexec_handover.h> #include <asm/setup.h> /* for COMMAND_LINE_SIZE */ #include <asm/page.h> @@ -875,6 +876,36 @@ void __init early_init_dt_check_for_usable_mem_range(void) memblock_add(rgn[i].base, rgn[i].size); } +/** + * early_init_dt_check_kho - Decode info required for kexec handover from DT + */ +static void __init early_init_dt_check_kho(void) +{ + unsigned long node = chosen_node_offset; + u64 fdt_start, fdt_size, scratch_start, scratch_size; + const __be32 *p; + int l; + + if (!IS_ENABLED(CONFIG_KEXEC_HANDOVER) || (long)node < 0) + return; + + p = of_get_flat_dt_prop(node, "linux,kho-fdt", &l); + if (l != (dt_root_addr_cells + dt_root_size_cells) * sizeof(__be32)) + return; + + fdt_start = dt_mem_next_cell(dt_root_addr_cells, &p); + fdt_size = dt_mem_next_cell(dt_root_addr_cells, &p); + + p = of_get_flat_dt_prop(node, "linux,kho-scratch", &l); + if (l != (dt_root_addr_cells + dt_root_size_cells) * sizeof(__be32)) + return; + + scratch_start = dt_mem_next_cell(dt_root_addr_cells, &p); + scratch_size = dt_mem_next_cell(dt_root_addr_cells, &p); + + kho_populate(fdt_start, fdt_size, scratch_start, scratch_size); +} + #ifdef CONFIG_SERIAL_EARLYCON int __init early_init_dt_scan_chosen_stdout(void) @@ -1169,6 +1200,9 @@ void __init early_init_dt_scan_nodes(void) /* Handle linux,usable-memory-range property */ early_init_dt_check_for_usable_mem_range(); + + /* Handle kexec handover */ + early_init_dt_check_kho(); } bool __init early_init_dt_scan(void *dt_virt, phys_addr_t dt_phys) diff --git a/drivers/of/kexec.c b/drivers/of/kexec.c index 5b924597a4de..1ee2d31816ae 100644 --- a/drivers/of/kexec.c +++ b/drivers/of/kexec.c @@ -264,6 +264,43 @@ static inline int setup_ima_buffer(const struct kimage *image, void *fdt, } #endif /* CONFIG_IMA_KEXEC */ +static int kho_add_chosen(const struct kimage *image, void *fdt, int chosen_node) +{ + int ret = 0; +#ifdef CONFIG_KEXEC_HANDOVER + phys_addr_t fdt_mem = 0; + phys_addr_t fdt_len = 0; + 
phys_addr_t scratch_mem = 0; + phys_addr_t scratch_len = 0; + + ret = fdt_delprop(fdt, chosen_node, "linux,kho-fdt"); + if (ret && ret != -FDT_ERR_NOTFOUND) + return ret; + ret = fdt_delprop(fdt, chosen_node, "linux,kho-scratch"); + if (ret && ret != -FDT_ERR_NOTFOUND) + return ret; + + if (!image->kho.fdt || !image->kho.scratch) + return 0; + + fdt_mem = image->kho.fdt; + fdt_len = PAGE_SIZE; + scratch_mem = image->kho.scratch->mem; + scratch_len = image->kho.scratch->bufsz; + + pr_debug("Adding kho metadata to DT"); + + ret = fdt_appendprop_addrrange(fdt, 0, chosen_node, "linux,kho-fdt", + fdt_mem, fdt_len); + if (ret) + return ret; + ret = fdt_appendprop_addrrange(fdt, 0, chosen_node, "linux,kho-scratch", + scratch_mem, scratch_len); + +#endif /* CONFIG_KEXEC_HANDOVER */ + return ret; +} + /* * of_kexec_alloc_and_setup_fdt - Alloc and setup a new Flattened Device Tree * @@ -414,6 +451,11 @@ void *of_kexec_alloc_and_setup_fdt(const struct kimage *image, #endif } + /* Add kho metadata if this is a KHO image */ + ret = kho_add_chosen(image, fdt, chosen_node); + if (ret) + goto out; + /* add bootargs */ if (cmdline) { ret = fdt_setprop_string(fdt, chosen_node, "bootargs", cmdline); diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c index e1eaa24559a2..ef5d655a0052 100644 --- a/drivers/pci/controller/pci-hyperv.c +++ b/drivers/pci/controller/pci-hyperv.c @@ -50,6 +50,7 @@ #include <linux/irqdomain.h> #include <linux/acpi.h> #include <linux/sizes.h> +#include <linux/of_irq.h> #include <asm/mshyperv.h> /* @@ -309,8 +310,6 @@ struct pci_packet { void (*completion_func)(void *context, struct pci_response *resp, int resp_packet_size); void *compl_ctxt; - - struct pci_message message[]; }; /* @@ -817,9 +816,17 @@ static int hv_pci_vec_irq_gic_domain_alloc(struct irq_domain *domain, int ret; fwspec.fwnode = domain->parent->fwnode; - fwspec.param_count = 2; - fwspec.param[0] = hwirq; - fwspec.param[1] = IRQ_TYPE_EDGE_RISING; + if (is_of_node(fwspec.fwnode)) { + /* SPI lines for OF translations start at offset 32 */ + fwspec.param_count = 3; + fwspec.param[0] = 0; + fwspec.param[1] = hwirq - 32; + fwspec.param[2] = IRQ_TYPE_EDGE_RISING; + } else { + fwspec.param_count = 2; + fwspec.param[0] = hwirq; + fwspec.param[1] = IRQ_TYPE_EDGE_RISING; + } ret = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec); if (ret) @@ -887,10 +894,44 @@ static const struct irq_domain_ops hv_pci_domain_ops = { .activate = hv_pci_vec_irq_domain_activate, }; +#ifdef CONFIG_OF + +static struct irq_domain *hv_pci_of_irq_domain_parent(void) +{ + struct device_node *parent; + struct irq_domain *domain; + + parent = of_irq_find_parent(hv_get_vmbus_root_device()->of_node); + if (!parent) + return NULL; + domain = irq_find_host(parent); + of_node_put(parent); + + return domain; +} + +#endif + +#ifdef CONFIG_ACPI + +static struct irq_domain *hv_pci_acpi_irq_domain_parent(void) +{ + acpi_gsi_domain_disp_fn gsi_domain_disp_fn; + + gsi_domain_disp_fn = acpi_get_gsi_dispatcher(); + if (!gsi_domain_disp_fn) + return NULL; + return irq_find_matching_fwnode(gsi_domain_disp_fn(0), + DOMAIN_BUS_ANY); +} + +#endif + static int hv_pci_irqchip_init(void) { static struct hv_pci_chip_data *chip_data; struct fwnode_handle *fn = NULL; + struct irq_domain *irq_domain_parent = NULL; int ret = -ENOMEM; chip_data = kzalloc(sizeof(*chip_data), GFP_KERNEL); @@ -907,9 +948,24 @@ static int hv_pci_irqchip_init(void) * way to ensure that all the corresponding devices are also gone and * no interrupts will be 
generated. */ - hv_msi_gic_irq_domain = acpi_irq_create_hierarchy(0, HV_PCI_MSI_SPI_NR, - fn, &hv_pci_domain_ops, - chip_data); +#ifdef CONFIG_ACPI + if (!acpi_disabled) + irq_domain_parent = hv_pci_acpi_irq_domain_parent(); +#endif +#ifdef CONFIG_OF + if (!irq_domain_parent) + irq_domain_parent = hv_pci_of_irq_domain_parent(); +#endif + if (!irq_domain_parent) { + WARN_ONCE(1, "Invalid firmware configuration for VMBus interrupts\n"); + ret = -EINVAL; + goto free_chip; + } + + hv_msi_gic_irq_domain = irq_domain_create_hierarchy(irq_domain_parent, 0, + HV_PCI_MSI_SPI_NR, + fn, &hv_pci_domain_ops, + chip_data); if (!hv_msi_gic_irq_domain) { pr_err("Failed to create Hyper-V arm64 vPCI MSI IRQ domain\n"); @@ -1438,7 +1494,7 @@ static int hv_read_config_block(struct pci_dev *pdev, void *buf, memset(&pkt, 0, sizeof(pkt)); pkt.pkt.completion_func = hv_pci_read_config_compl; pkt.pkt.compl_ctxt = &comp_pkt; - read_blk = (struct pci_read_block *)&pkt.pkt.message; + read_blk = (struct pci_read_block *)pkt.buf; read_blk->message_type.type = PCI_READ_BLOCK; read_blk->wslot.slot = devfn_to_wslot(pdev->devfn); read_blk->block_id = block_id; @@ -1518,7 +1574,7 @@ static int hv_write_config_block(struct pci_dev *pdev, void *buf, memset(&pkt, 0, sizeof(pkt)); pkt.pkt.completion_func = hv_pci_write_config_compl; pkt.pkt.compl_ctxt = &comp_pkt; - write_blk = (struct pci_write_block *)&pkt.pkt.message; + write_blk = (struct pci_write_block *)pkt.buf; write_blk->message_type.type = PCI_WRITE_BLOCK; write_blk->wslot.slot = devfn_to_wslot(pdev->devfn); write_blk->block_id = block_id; @@ -1599,7 +1655,7 @@ static void hv_int_desc_free(struct hv_pci_dev *hpdev, return; } memset(&ctxt, 0, sizeof(ctxt)); - int_pkt = (struct pci_delete_interrupt *)&ctxt.pkt.message; + int_pkt = (struct pci_delete_interrupt *)ctxt.buffer; int_pkt->message_type.type = PCI_DELETE_INTERRUPT_MESSAGE; int_pkt->wslot.slot = hpdev->desc.win_slot.slot; @@ -2482,7 +2538,7 @@ static struct hv_pci_dev *new_pcichild_device(struct hv_pcibus_device *hbus, comp_pkt.hpdev = hpdev; pkt.init_packet.compl_ctxt = &comp_pkt; pkt.init_packet.completion_func = q_resource_requirements; - res_req = (struct pci_child_message *)&pkt.init_packet.message; + res_req = (struct pci_child_message *)pkt.buffer; res_req->message_type.type = PCI_QUERY_RESOURCE_REQUIREMENTS; res_req->wslot.slot = desc->win_slot.slot; @@ -2860,7 +2916,7 @@ static void hv_eject_device_work(struct work_struct *work) pci_destroy_slot(hpdev->pci_slot); memset(&ctxt, 0, sizeof(ctxt)); - ejct_pkt = (struct pci_eject_response *)&ctxt.pkt.message; + ejct_pkt = (struct pci_eject_response *)ctxt.buffer; ejct_pkt->message_type.type = PCI_EJECTION_COMPLETE; ejct_pkt->wslot.slot = hpdev->desc.win_slot.slot; vmbus_sendpacket(hbus->hdev->channel, ejct_pkt, @@ -3118,7 +3174,7 @@ static int hv_pci_protocol_negotiation(struct hv_device *hdev, init_completion(&comp_pkt.host_event); pkt->completion_func = hv_pci_generic_compl; pkt->compl_ctxt = &comp_pkt; - version_req = (struct pci_version_request *)&pkt->message; + version_req = (struct pci_version_request *)(pkt + 1); version_req->message_type.type = PCI_QUERY_PROTOCOL_VERSION; for (i = 0; i < num_version; i++) { @@ -3340,7 +3396,7 @@ enter_d0_retry: init_completion(&comp_pkt.host_event); pkt->completion_func = hv_pci_generic_compl; pkt->compl_ctxt = &comp_pkt; - d0_entry = (struct pci_bus_d0_entry *)&pkt->message; + d0_entry = (struct pci_bus_d0_entry *)(pkt + 1); d0_entry->message_type.type = PCI_BUS_D0ENTRY; d0_entry->mmio_base = 
hbus->mem_config->start; @@ -3498,20 +3554,20 @@ static int hv_send_resources_allocated(struct hv_device *hdev) if (hbus->protocol_version < PCI_PROTOCOL_VERSION_1_2) { res_assigned = - (struct pci_resources_assigned *)&pkt->message; + (struct pci_resources_assigned *)(pkt + 1); res_assigned->message_type.type = PCI_RESOURCES_ASSIGNED; res_assigned->wslot.slot = hpdev->desc.win_slot.slot; } else { res_assigned2 = - (struct pci_resources_assigned2 *)&pkt->message; + (struct pci_resources_assigned2 *)(pkt + 1); res_assigned2->message_type.type = PCI_RESOURCES_ASSIGNED2; res_assigned2->wslot.slot = hpdev->desc.win_slot.slot; } put_pcichild(hpdev); - ret = vmbus_sendpacket(hdev->channel, &pkt->message, + ret = vmbus_sendpacket(hdev->channel, pkt + 1, size_res, (unsigned long)pkt, VM_PKT_DATA_INBAND, VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); @@ -3809,6 +3865,7 @@ static int hv_pci_bus_exit(struct hv_device *hdev, bool keep_devs) struct pci_packet teardown_packet; u8 buffer[sizeof(struct pci_message)]; } pkt; + struct pci_message *msg; struct hv_pci_compl comp_pkt; struct hv_pci_dev *hpdev, *tmp; unsigned long flags; @@ -3854,10 +3911,10 @@ static int hv_pci_bus_exit(struct hv_device *hdev, bool keep_devs) init_completion(&comp_pkt.host_event); pkt.teardown_packet.completion_func = hv_pci_generic_compl; pkt.teardown_packet.compl_ctxt = &comp_pkt; - pkt.teardown_packet.message[0].type = PCI_BUS_D0EXIT; + msg = (struct pci_message *)pkt.buffer; + msg->type = PCI_BUS_D0EXIT; - ret = vmbus_sendpacket_getid(chan, &pkt.teardown_packet.message, - sizeof(struct pci_message), + ret = vmbus_sendpacket_getid(chan, msg, sizeof(*msg), (unsigned long)&pkt.teardown_packet, &trans_id, VM_PKT_DATA_INBAND, VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); diff --git a/drivers/power/supply/qcom_pmi8998_charger.c b/drivers/power/supply/qcom_pmi8998_charger.c index 74a8d8ed8d9f..c2f8f2e24398 100644 --- a/drivers/power/supply/qcom_pmi8998_charger.c +++ b/drivers/power/supply/qcom_pmi8998_charger.c @@ -2,7 +2,7 @@ /* * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved. * Copyright (c) 2023, Linaro Ltd. - * Author: Caleb Connolly <caleb.connolly@linaro.org> + * Author: Casey Connolly <casey.connolly@linaro.org> * * This driver is for the switch-mode battery charger and boost * hardware found in pmi8998 and related PMICs. 
@@ -1045,6 +1045,6 @@ static struct platform_driver qcom_spmi_smb2 = { module_platform_driver(qcom_spmi_smb2); -MODULE_AUTHOR("Caleb Connolly <caleb.connolly@linaro.org>"); +MODULE_AUTHOR("Casey Connolly <casey.connolly@linaro.org>"); MODULE_DESCRIPTION("Qualcomm SMB2 Charger Driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c index cbf531d0ba68..995cfeca972b 100644 --- a/drivers/rapidio/devices/rio_mport_cdev.c +++ b/drivers/rapidio/devices/rio_mport_cdev.c @@ -98,18 +98,6 @@ MODULE_PARM_DESC(dbg_level, "Debugging output level (default 0 = none)"); #endif /* - * An internal DMA coherent buffer - */ -struct mport_dma_buf { - void *ib_base; - dma_addr_t ib_phys; - u32 ib_size; - u64 ib_rio_base; - bool ib_map; - struct file *filp; -}; - -/* * Internal memory mapping structure */ enum rio_mport_map_dir { @@ -131,14 +119,6 @@ struct rio_mport_mapping { struct file *filp; }; -struct rio_mport_dma_map { - int valid; - u64 length; - void *vaddr; - dma_addr_t paddr; -}; - -#define MPORT_MAX_DMA_BUFS 16 #define MPORT_EVENT_DEPTH 10 /* diff --git a/drivers/rapidio/rio.c b/drivers/rapidio/rio.c index 9544b8ee0c96..46daf32ea13b 100644 --- a/drivers/rapidio/rio.c +++ b/drivers/rapidio/rio.c @@ -1775,19 +1775,6 @@ struct dma_chan *rio_request_mport_dma(struct rio_mport *mport) EXPORT_SYMBOL_GPL(rio_request_mport_dma); /** - * rio_request_dma - request RapidIO capable DMA channel that supports - * specified target RapidIO device. - * @rdev: RIO device associated with DMA transfer - * - * Returns pointer to allocated DMA channel or NULL if failed. - */ -struct dma_chan *rio_request_dma(struct rio_dev *rdev) -{ - return rio_request_mport_dma(rdev->net->hport); -} -EXPORT_SYMBOL_GPL(rio_request_dma); - -/** * rio_release_dma - release specified DMA channel * @dchan: DMA channel to release */ @@ -1834,57 +1821,9 @@ struct dma_async_tx_descriptor *rio_dma_prep_xfer(struct dma_chan *dchan, } EXPORT_SYMBOL_GPL(rio_dma_prep_xfer); -/** - * rio_dma_prep_slave_sg - RapidIO specific wrapper - * for device_prep_slave_sg callback defined by DMAENGINE. - * @rdev: RIO device control structure - * @dchan: DMA channel to configure - * @data: RIO specific data descriptor - * @direction: DMA data transfer direction (TO or FROM the device) - * @flags: dmaengine defined flags - * - * Initializes RapidIO capable DMA channel for the specified data transfer. - * Uses DMA channel private extension to pass information related to remote - * target RIO device. - * - * Returns: pointer to DMA transaction descriptor if successful, - * error-valued pointer or NULL if failed. - */ -struct dma_async_tx_descriptor *rio_dma_prep_slave_sg(struct rio_dev *rdev, - struct dma_chan *dchan, struct rio_dma_data *data, - enum dma_transfer_direction direction, unsigned long flags) -{ - return rio_dma_prep_xfer(dchan, rdev->destid, data, direction, flags); -} -EXPORT_SYMBOL_GPL(rio_dma_prep_slave_sg); - #endif /* CONFIG_RAPIDIO_DMA_ENGINE */ /** - * rio_find_mport - find RIO mport by its ID - * @mport_id: number (ID) of mport device - * - * Given a RIO mport number, the desired mport is located - * in the global list of mports. If the mport is found, a pointer to its - * data structure is returned. If no mport is found, %NULL is returned. 
- */ -struct rio_mport *rio_find_mport(int mport_id) -{ - struct rio_mport *port; - - mutex_lock(&rio_mport_list_lock); - list_for_each_entry(port, &rio_mports, node) { - if (port->id == mport_id) - goto found; - } - port = NULL; -found: - mutex_unlock(&rio_mport_list_lock); - - return port; -} - -/** * rio_register_scan - enumeration/discovery method registration interface * @mport_id: mport device ID for which fabric scan routine has to be set * (RIO_MPORT_ANY = set for all available mports) @@ -1962,48 +1901,6 @@ err_out: EXPORT_SYMBOL_GPL(rio_register_scan); /** - * rio_unregister_scan - removes enumeration/discovery method from mport - * @mport_id: mport device ID for which fabric scan routine has to be - * unregistered (RIO_MPORT_ANY = apply to all mports that use - * the specified scan_ops) - * @scan_ops: enumeration/discovery operations structure - * - * Removes enumeration or discovery method assigned to the specified mport - * device. If RIO_MPORT_ANY is specified, removes the specified operations from - * all mports that have them attached. - */ -int rio_unregister_scan(int mport_id, struct rio_scan *scan_ops) -{ - struct rio_mport *port; - struct rio_scan_node *scan; - - pr_debug("RIO: %s for mport_id=%d\n", __func__, mport_id); - - if (mport_id != RIO_MPORT_ANY && mport_id >= RIO_MAX_MPORTS) - return -EINVAL; - - mutex_lock(&rio_mport_list_lock); - - list_for_each_entry(port, &rio_mports, node) - if (port->id == mport_id || - (mport_id == RIO_MPORT_ANY && port->nscan == scan_ops)) - port->nscan = NULL; - - list_for_each_entry(scan, &rio_scans, node) { - if (scan->mport_id == mport_id) { - list_del(&scan->node); - kfree(scan); - break; - } - } - - mutex_unlock(&rio_mport_list_lock); - - return 0; -} -EXPORT_SYMBOL_GPL(rio_unregister_scan); - -/** * rio_mport_scan - execute enumeration/discovery on the specified mport * @mport_id: number (ID) of mport device */ diff --git a/drivers/rapidio/rio.h b/drivers/rapidio/rio.h index f482de0d0370..a0e2a09ddb8e 100644 --- a/drivers/rapidio/rio.h +++ b/drivers/rapidio/rio.h @@ -41,9 +41,7 @@ extern void rio_del_device(struct rio_dev *rdev, enum rio_device_state state); extern int rio_enable_rx_tx_port(struct rio_mport *port, int local, u16 destid, u8 hopcount, u8 port_num); extern int rio_register_scan(int mport_id, struct rio_scan *scan_ops); -extern int rio_unregister_scan(int mport_id, struct rio_scan *scan_ops); extern void rio_attach_device(struct rio_dev *rdev); -extern struct rio_mport *rio_find_mport(int mport_id); extern int rio_mport_scan(int mport_id); /* Structures internal to the RIO core code */ diff --git a/drivers/rapidio/rio_cm.c b/drivers/rapidio/rio_cm.c index 9135227301c8..97287e838ce1 100644 --- a/drivers/rapidio/rio_cm.c +++ b/drivers/rapidio/rio_cm.c @@ -198,12 +198,6 @@ struct cm_peer { struct rio_dev *rdev; }; -struct rio_cm_work { - struct work_struct work; - struct cm_dev *cm; - void *data; -}; - struct conn_req { struct list_head node; u32 destid; /* requester destID */ diff --git a/drivers/remoteproc/Makefile b/drivers/remoteproc/Makefile index 5ff4e2fee4ab..1c7598b8475d 100644 --- a/drivers/remoteproc/Makefile +++ b/drivers/remoteproc/Makefile @@ -36,7 +36,7 @@ obj-$(CONFIG_RCAR_REMOTEPROC) += rcar_rproc.o obj-$(CONFIG_ST_REMOTEPROC) += st_remoteproc.o obj-$(CONFIG_ST_SLIM_REMOTEPROC) += st_slim_rproc.o obj-$(CONFIG_STM32_RPROC) += stm32_rproc.o -obj-$(CONFIG_TI_K3_DSP_REMOTEPROC) += ti_k3_dsp_remoteproc.o -obj-$(CONFIG_TI_K3_M4_REMOTEPROC) += ti_k3_m4_remoteproc.o -obj-$(CONFIG_TI_K3_R5_REMOTEPROC) += 
ti_k3_r5_remoteproc.o +obj-$(CONFIG_TI_K3_DSP_REMOTEPROC) += ti_k3_dsp_remoteproc.o ti_k3_common.o +obj-$(CONFIG_TI_K3_M4_REMOTEPROC) += ti_k3_m4_remoteproc.o ti_k3_common.o +obj-$(CONFIG_TI_K3_R5_REMOTEPROC) += ti_k3_r5_remoteproc.o ti_k3_common.o obj-$(CONFIG_XLNX_R5_REMOTEPROC) += xlnx_r5_remoteproc.o diff --git a/drivers/remoteproc/imx_dsp_rproc.c b/drivers/remoteproc/imx_dsp_rproc.c index 90cb1fc13e71..5ee622bf5352 100644 --- a/drivers/remoteproc/imx_dsp_rproc.c +++ b/drivers/remoteproc/imx_dsp_rproc.c @@ -36,9 +36,18 @@ module_param_named(no_mailboxes, no_mailboxes, int, 0644); MODULE_PARM_DESC(no_mailboxes, "There is no mailbox between cores, so ignore remote proc reply after start, default is 0 (off)."); +/* Flag indicating that the remote is up and running */ #define REMOTE_IS_READY BIT(0) +/* Flag indicating that the host should wait for a firmware-ready response */ +#define WAIT_FW_READY BIT(1) #define REMOTE_READY_WAIT_MAX_RETRIES 500 +/* + * This flag is set in the DSP resource table's features field to indicate + * that the firmware requires the host NOT to wait for a FW_READY response. + */ +#define FEATURE_DONT_WAIT_FW_READY BIT(0) + /* att flags */ /* DSP own area */ #define ATT_OWN BIT(31) @@ -73,6 +82,10 @@ MODULE_PARM_DESC(no_mailboxes, #define IMX8ULP_SIP_HIFI_XRDC 0xc200000e +#define FW_RSC_NXP_S_MAGIC ((uint32_t)'n' << 24 | \ + (uint32_t)'x' << 16 | \ + (uint32_t)'p' << 8 | \ + (uint32_t)'s') /* * enum - Predefined Mailbox Messages * @@ -139,6 +152,24 @@ struct imx_dsp_rproc_dcfg { int (*reset)(struct imx_dsp_rproc *priv); }; +/** + * struct fw_rsc_imx_dsp - i.MX DSP specific info + * + * @len: length of the resource entry + * @magic_num: 32-bit magic number + * @version: version of data structure + * @features: feature flags supported by the i.MX DSP firmware + * + * This represents a DSP-specific resource in the firmware's + * resource table, providing information on supported features. + */ +struct fw_rsc_imx_dsp { + uint32_t len; + uint32_t magic_num; + uint32_t version; + uint32_t features; +} __packed; + static const struct imx_rproc_att imx_dsp_rproc_att_imx8qm[] = { /* dev addr , sys addr , size , flags */ { 0x596e8000, 0x556e8000, 0x00008000, ATT_OWN }, @@ -297,6 +328,66 @@ static int imx_dsp_rproc_ready(struct rproc *rproc) return -ETIMEDOUT; } +/** + * imx_dsp_rproc_handle_rsc() - Handle DSP-specific resource table entries + * @rproc: remote processor instance + * @rsc_type: resource type identifier + * @rsc: pointer to the resource entry + * @offset: offset of the resource entry + * @avail: available space in the resource table + * + * Parse the DSP-specific resource entry and update flags accordingly. + * If the WAIT_FW_READY feature is set, the host must wait for the firmware + * to signal readiness before proceeding with execution. + * + * Return: RSC_HANDLED if processed successfully, RSC_IGNORED otherwise. 
+ */ +static int imx_dsp_rproc_handle_rsc(struct rproc *rproc, u32 rsc_type, + void *rsc, int offset, int avail) +{ + struct imx_dsp_rproc *priv = rproc->priv; + struct fw_rsc_imx_dsp *imx_dsp_rsc = rsc; + struct device *dev = rproc->dev.parent; + + if (!imx_dsp_rsc) { + dev_dbg(dev, "Invalid fw_rsc_imx_dsp.\n"); + return RSC_IGNORED; + } + + /* Make sure resource isn't truncated */ + if (sizeof(struct fw_rsc_imx_dsp) > avail || + sizeof(struct fw_rsc_imx_dsp) != imx_dsp_rsc->len) { + dev_dbg(dev, "Resource fw_rsc_imx_dsp is truncated.\n"); + return RSC_IGNORED; + } + + /* + * If FW_RSC_NXP_S_MAGIC number is not found then + * wait for fw_ready reply (default work flow) + */ + if (imx_dsp_rsc->magic_num != FW_RSC_NXP_S_MAGIC) { + dev_dbg(dev, "Invalid resource table magic number.\n"); + return RSC_IGNORED; + } + + /* + * For now, in struct fw_rsc_imx_dsp, version 0, + * only FEATURE_DONT_WAIT_FW_READY is valid. + * + * When adding new features, please upgrade version. + */ + if (imx_dsp_rsc->version > 0) { + dev_warn(dev, "Unexpected fw_rsc_imx_dsp version %d.\n", + imx_dsp_rsc->version); + return RSC_IGNORED; + } + + if (imx_dsp_rsc->features & FEATURE_DONT_WAIT_FW_READY) + priv->flags &= ~WAIT_FW_READY; + + return RSC_HANDLED; +} + /* * Start function for rproc_ops * @@ -335,8 +426,8 @@ static int imx_dsp_rproc_start(struct rproc *rproc) if (ret) dev_err(dev, "Failed to enable remote core!\n"); - else - ret = imx_dsp_rproc_ready(rproc); + else if (priv->flags & WAIT_FW_READY) + return imx_dsp_rproc_ready(rproc); return ret; } @@ -939,6 +1030,7 @@ static const struct rproc_ops imx_dsp_rproc_ops = { .kick = imx_dsp_rproc_kick, .load = imx_dsp_rproc_elf_load_segments, .parse_fw = imx_dsp_rproc_parse_fw, + .handle_rsc = imx_dsp_rproc_handle_rsc, .find_loaded_rsc_table = rproc_elf_find_loaded_rsc_table, .sanity_check = rproc_elf_sanity_check, .get_boot_addr = rproc_elf_get_boot_addr, @@ -1058,6 +1150,8 @@ static int imx_dsp_rproc_probe(struct platform_device *pdev) priv = rproc->priv; priv->rproc = rproc; priv->dsp_dcfg = dsp_dcfg; + /* By default, host waits for fw_ready reply */ + priv->flags |= WAIT_FW_READY; if (no_mailboxes) imx_dsp_rproc_mbox_init = imx_dsp_rproc_mbox_no_alloc; diff --git a/drivers/remoteproc/qcom_wcnss_iris.c b/drivers/remoteproc/qcom_wcnss_iris.c index b989718776bd..2b52b403eb3f 100644 --- a/drivers/remoteproc/qcom_wcnss_iris.c +++ b/drivers/remoteproc/qcom_wcnss_iris.c @@ -196,6 +196,7 @@ struct qcom_iris *qcom_iris_probe(struct device *parent, bool *use_48mhz_xo) err_device_del: device_del(&iris->dev); + put_device(&iris->dev); return ERR_PTR(ret); } @@ -203,4 +204,5 @@ err_device_del: void qcom_iris_remove(struct qcom_iris *iris) { device_del(&iris->dev); + put_device(&iris->dev); } diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c index b21eedefff87..81b2ccf988e8 100644 --- a/drivers/remoteproc/remoteproc_core.c +++ b/drivers/remoteproc/remoteproc_core.c @@ -1617,7 +1617,7 @@ static int rproc_attach(struct rproc *rproc) ret = rproc_set_rsc_table(rproc); if (ret) { dev_err(dev, "can't load resource table: %d\n", ret); - goto unprepare_device; + goto clean_up_resources; } /* reset max_notifyid */ @@ -1634,7 +1634,7 @@ static int rproc_attach(struct rproc *rproc) ret = rproc_handle_resources(rproc, rproc_loading_handlers); if (ret) { dev_err(dev, "Failed to process resources: %d\n", ret); - goto unprepare_device; + goto clean_up_resources; } /* Allocate carveout resources associated to rproc */ @@ -1653,9 +1653,9 @@ static int 
rproc_attach(struct rproc *rproc) clean_up_resources: rproc_resource_cleanup(rproc); -unprepare_device: /* release HW resources if needed */ rproc_unprepare_device(rproc); + kfree(rproc->clean_table); disable_iommu: rproc_disable_iommu(rproc); return ret; @@ -2025,7 +2025,6 @@ int rproc_shutdown(struct rproc *rproc) kfree(rproc->cached_table); rproc->cached_table = NULL; rproc->table_ptr = NULL; - rproc->table_sz = 0; out: mutex_unlock(&rproc->lock); return ret; diff --git a/drivers/remoteproc/stm32_rproc.c b/drivers/remoteproc/stm32_rproc.c index b02b36a3f515..431648607d53 100644 --- a/drivers/remoteproc/stm32_rproc.c +++ b/drivers/remoteproc/stm32_rproc.c @@ -835,6 +835,7 @@ static int stm32_rproc_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct stm32_rproc *ddata; struct device_node *np = dev->of_node; + const char *fw_name; struct rproc *rproc; unsigned int state; int ret; @@ -843,7 +844,12 @@ static int stm32_rproc_probe(struct platform_device *pdev) if (ret) return ret; - rproc = devm_rproc_alloc(dev, np->name, &st_rproc_ops, NULL, sizeof(*ddata)); + /* Look for an optional firmware name */ + ret = rproc_of_parse_firmware(dev, 0, &fw_name); + if (ret < 0 && ret != -EINVAL) + return ret; + + rproc = devm_rproc_alloc(dev, np->name, &st_rproc_ops, fw_name, sizeof(*ddata)); if (!rproc) return -ENOMEM; diff --git a/drivers/remoteproc/ti_k3_common.c b/drivers/remoteproc/ti_k3_common.c new file mode 100644 index 000000000000..d5dccc81d460 --- /dev/null +++ b/drivers/remoteproc/ti_k3_common.c @@ -0,0 +1,551 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * TI K3 Remote Processor(s) driver common code + * + * Refactored out of ti_k3_r5_remoteproc.c, ti_k3_dsp_remoteproc.c and + * ti_k3_m4_remoteproc.c. + * + * ti_k3_r5_remoteproc.c: + * Copyright (C) 2017-2022 Texas Instruments Incorporated - https://www.ti.com/ + * Suman Anna <s-anna@ti.com> + * + * ti_k3_dsp_remoteproc.c: + * Copyright (C) 2018-2022 Texas Instruments Incorporated - https://www.ti.com/ + * Suman Anna <s-anna@ti.com> + * + * ti_k3_m4_remoteproc.c: + * Copyright (C) 2021-2024 Texas Instruments Incorporated - https://www.ti.com/ + * Hari Nagalla <hnagalla@ti.com> + */ + +#include <linux/io.h> +#include <linux/mailbox_client.h> +#include <linux/module.h> +#include <linux/of_address.h> +#include <linux/of_device.h> +#include <linux/of_reserved_mem.h> +#include <linux/omap-mailbox.h> +#include <linux/platform_device.h> +#include <linux/remoteproc.h> +#include <linux/reset.h> +#include <linux/slab.h> + +#include "omap_remoteproc.h" +#include "remoteproc_internal.h" +#include "ti_sci_proc.h" +#include "ti_k3_common.h" + +/** + * k3_rproc_mbox_callback() - inbound mailbox message handler + * @client: mailbox client pointer used for requesting the mailbox channel + * @data: mailbox payload + * + * This handler is invoked by the K3 mailbox driver whenever a mailbox + * message is received. Usually, the mailbox payload simply contains + * the index of the virtqueue that is kicked by the remote processor, + * and we let remoteproc core handle it. + * + * In addition to virtqueue indices, we also have some out-of-band values + * that indicate different events. Those values are deliberately very + * large so they don't coincide with virtqueue indices. 
+ */ +void k3_rproc_mbox_callback(struct mbox_client *client, void *data) +{ + struct k3_rproc *kproc = container_of(client, struct k3_rproc, client); + struct device *dev = kproc->rproc->dev.parent; + struct rproc *rproc = kproc->rproc; + u32 msg = (u32)(uintptr_t)(data); + + dev_dbg(dev, "mbox msg: 0x%x\n", msg); + + switch (msg) { + case RP_MBOX_CRASH: + /* + * remoteproc detected an exception, but error recovery is not + * supported. So, just log this for now + */ + dev_err(dev, "K3 rproc %s crashed\n", rproc->name); + break; + case RP_MBOX_ECHO_REPLY: + dev_info(dev, "received echo reply from %s\n", rproc->name); + break; + default: + /* silently handle all other valid messages */ + if (msg >= RP_MBOX_READY && msg < RP_MBOX_END_MSG) + return; + if (msg > rproc->max_notifyid) { + dev_dbg(dev, "dropping unknown message 0x%x", msg); + return; + } + /* msg contains the index of the triggered vring */ + if (rproc_vq_interrupt(rproc, msg) == IRQ_NONE) + dev_dbg(dev, "no message was found in vqid %d\n", msg); + } +} +EXPORT_SYMBOL_GPL(k3_rproc_mbox_callback); + +/* + * Kick the remote processor to notify about pending unprocessed messages. + * The vqid usage is not used and is inconsequential, as the kick is performed + * through a simulated GPIO (a bit in an IPC interrupt-triggering register), + * the remote processor is expected to process both its Tx and Rx virtqueues. + */ +void k3_rproc_kick(struct rproc *rproc, int vqid) +{ + struct k3_rproc *kproc = rproc->priv; + struct device *dev = kproc->dev; + u32 msg = (u32)vqid; + int ret; + + /* + * Send the index of the triggered virtqueue in the mailbox payload. + * NOTE: msg is cast to uintptr_t to prevent compiler warnings when + * void* is 64bit. It is safely cast back to u32 in the mailbox driver. 
+ */ + ret = mbox_send_message(kproc->mbox, (void *)(uintptr_t)msg); + if (ret < 0) + dev_err(dev, "failed to send mailbox message, status = %d\n", + ret); +} +EXPORT_SYMBOL_GPL(k3_rproc_kick); + +/* Put the remote processor into reset */ +int k3_rproc_reset(struct k3_rproc *kproc) +{ + struct device *dev = kproc->dev; + int ret; + + if (kproc->data->uses_lreset) { + ret = reset_control_assert(kproc->reset); + if (ret) + dev_err(dev, "local-reset assert failed (%pe)\n", ERR_PTR(ret)); + } else { + ret = kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci, + kproc->ti_sci_id); + if (ret) + dev_err(dev, "module-reset assert failed (%pe)\n", ERR_PTR(ret)); + } + + return ret; +} +EXPORT_SYMBOL_GPL(k3_rproc_reset); + +/* Release the remote processor from reset */ +int k3_rproc_release(struct k3_rproc *kproc) +{ + struct device *dev = kproc->dev; + int ret; + + if (kproc->data->uses_lreset) { + ret = reset_control_deassert(kproc->reset); + if (ret) { + dev_err(dev, "local-reset deassert failed, (%pe)\n", ERR_PTR(ret)); + if (kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci, + kproc->ti_sci_id)) + dev_warn(dev, "module-reset assert back failed\n"); + } + } else { + ret = kproc->ti_sci->ops.dev_ops.get_device(kproc->ti_sci, + kproc->ti_sci_id); + if (ret) + dev_err(dev, "module-reset deassert failed (%pe)\n", ERR_PTR(ret)); + } + + return ret; +} +EXPORT_SYMBOL_GPL(k3_rproc_release); + +int k3_rproc_request_mbox(struct rproc *rproc) +{ + struct k3_rproc *kproc = rproc->priv; + struct mbox_client *client = &kproc->client; + struct device *dev = kproc->dev; + int ret; + + client->dev = dev; + client->tx_done = NULL; + client->rx_callback = k3_rproc_mbox_callback; + client->tx_block = false; + client->knows_txdone = false; + + kproc->mbox = mbox_request_channel(client, 0); + if (IS_ERR(kproc->mbox)) + return dev_err_probe(dev, PTR_ERR(kproc->mbox), + "mbox_request_channel failed\n"); + + /* + * Ping the remote processor, this is only for sanity-sake for now; + * there is no functional effect whatsoever. + * + * Note that the reply will _not_ arrive immediately: this message + * will wait in the mailbox fifo until the remote processor is booted. + */ + ret = mbox_send_message(kproc->mbox, (void *)RP_MBOX_ECHO_REQUEST); + if (ret < 0) { + dev_err(dev, "mbox_send_message failed (%pe)\n", ERR_PTR(ret)); + mbox_free_channel(kproc->mbox); + return ret; + } + + return 0; +} +EXPORT_SYMBOL_GPL(k3_rproc_request_mbox); + +/* + * The K3 DSP and M4 cores have a local reset that affects only the CPU, and a + * generic module reset that powers on the device and allows the internal + * memories to be accessed while the local reset is asserted. This function is + * used to release the global reset on remote cores to allow loading into the + * internal RAMs. The .prepare() ops is invoked by remoteproc core before any + * firmware loading, and is followed by the .start() ops after loading to + * actually let the remote cores to run. + */ +int k3_rproc_prepare(struct rproc *rproc) +{ + struct k3_rproc *kproc = rproc->priv; + struct device *dev = kproc->dev; + int ret; + + /* If the core is running already no need to deassert the module reset */ + if (rproc->state == RPROC_DETACHED) + return 0; + + /* + * Ensure the local reset is asserted so the core doesn't + * execute bogus code when the module reset is released. 
+ */ + if (kproc->data->uses_lreset) { + ret = k3_rproc_reset(kproc); + if (ret) + return ret; + + ret = reset_control_status(kproc->reset); + if (ret <= 0) { + dev_err(dev, "local reset still not asserted\n"); + return ret; + } + } + + ret = kproc->ti_sci->ops.dev_ops.get_device(kproc->ti_sci, + kproc->ti_sci_id); + if (ret) { + dev_err(dev, "could not deassert module-reset for internal RAM loading\n"); + return ret; + } + + return 0; +} +EXPORT_SYMBOL_GPL(k3_rproc_prepare); + +/* + * This function implements the .unprepare() ops and performs the complimentary + * operations to that of the .prepare() ops. The function is used to assert the + * global reset on applicable K3 DSP and M4 cores. This completes the second + * portion of powering down the remote core. The cores themselves are only + * halted in the .stop() callback through the local reset, and the .unprepare() + * ops is invoked by the remoteproc core after the remoteproc is stopped to + * balance the global reset. + */ +int k3_rproc_unprepare(struct rproc *rproc) +{ + struct k3_rproc *kproc = rproc->priv; + struct device *dev = kproc->dev; + int ret; + + /* If the core is going to be detached do not assert the module reset */ + if (rproc->state == RPROC_DETACHED) + return 0; + + ret = kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci, + kproc->ti_sci_id); + if (ret) { + dev_err(dev, "module-reset assert failed\n"); + return ret; + } + + return 0; +} +EXPORT_SYMBOL_GPL(k3_rproc_unprepare); + +/* + * Power up the remote processor. + * + * This function will be invoked only after the firmware for this rproc + * was loaded, parsed successfully, and all of its resource requirements + * were met. This callback is invoked only in remoteproc mode. + */ +int k3_rproc_start(struct rproc *rproc) +{ + struct k3_rproc *kproc = rproc->priv; + + return k3_rproc_release(kproc); +} +EXPORT_SYMBOL_GPL(k3_rproc_start); + +/* + * Stop the remote processor. + * + * This function puts the remote processor into reset, and finishes processing + * of any pending messages. This callback is invoked only in remoteproc mode. + */ +int k3_rproc_stop(struct rproc *rproc) +{ + struct k3_rproc *kproc = rproc->priv; + + return k3_rproc_reset(kproc); +} +EXPORT_SYMBOL_GPL(k3_rproc_stop); + +/* + * Attach to a running remote processor (IPC-only mode) + * + * The rproc attach callback is a NOP. The remote processor is already booted, + * and all required resources have been acquired during probe routine, so there + * is no need to issue any TI-SCI commands to boot the remote cores in IPC-only + * mode. This callback is invoked only in IPC-only mode and exists because + * rproc_validate() checks for its existence. + */ +int k3_rproc_attach(struct rproc *rproc) { return 0; } +EXPORT_SYMBOL_GPL(k3_rproc_attach); + +/* + * Detach from a running remote processor (IPC-only mode) + * + * The rproc detach callback is a NOP. The remote processor is not stopped and + * will be left in booted state in IPC-only mode. This callback is invoked only + * in IPC-only mode and exists for sanity sake + */ +int k3_rproc_detach(struct rproc *rproc) { return 0; } +EXPORT_SYMBOL_GPL(k3_rproc_detach); + +/* + * This function implements the .get_loaded_rsc_table() callback and is used + * to provide the resource table for a booted remote processor in IPC-only + * mode. The remote processor firmwares follow a design-by-contract approach + * and are expected to have the resource table at the base of the DDR region + * reserved for firmware usage. 
This provides flexibility for the remote + * processor to be booted by different bootloaders that may or may not have the + * ability to publish the resource table address and size through a DT + * property. + */ +struct resource_table *k3_get_loaded_rsc_table(struct rproc *rproc, + size_t *rsc_table_sz) +{ + struct k3_rproc *kproc = rproc->priv; + struct device *dev = kproc->dev; + + if (!kproc->rmem[0].cpu_addr) { + dev_err(dev, "memory-region #1 does not exist, loaded rsc table can't be found"); + return ERR_PTR(-ENOMEM); + } + + /* + * NOTE: The resource table size is currently hard-coded to a maximum + * of 256 bytes. The most common resource table usage for K3 firmwares + * is to only have the vdev resource entry and an optional trace entry. + * The exact size could be computed based on resource table address, but + * the hard-coded value suffices to support the IPC-only mode. + */ + *rsc_table_sz = 256; + return (__force struct resource_table *)kproc->rmem[0].cpu_addr; +} +EXPORT_SYMBOL_GPL(k3_get_loaded_rsc_table); + +/* + * Custom function to translate a remote processor device address (internal + * RAMs only) to a kernel virtual address. The remote processors can access + * their RAMs at either an internal address visible only from a remote + * processor, or at the SoC-level bus address. Both these addresses need to be + * looked through for translation. The translated addresses can be used either + * by the remoteproc core for loading (when using kernel remoteproc loader), or + * by any rpmsg bus drivers. + */ +void *k3_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem) +{ + struct k3_rproc *kproc = rproc->priv; + void __iomem *va = NULL; + phys_addr_t bus_addr; + u32 dev_addr, offset; + size_t size; + int i; + + if (len == 0) + return NULL; + + for (i = 0; i < kproc->num_mems; i++) { + bus_addr = kproc->mem[i].bus_addr; + dev_addr = kproc->mem[i].dev_addr; + size = kproc->mem[i].size; + + /* handle rproc-view addresses */ + if (da >= dev_addr && ((da + len) <= (dev_addr + size))) { + offset = da - dev_addr; + va = kproc->mem[i].cpu_addr + offset; + return (__force void *)va; + } + + /* handle SoC-view addresses */ + if (da >= bus_addr && (da + len) <= (bus_addr + size)) { + offset = da - bus_addr; + va = kproc->mem[i].cpu_addr + offset; + return (__force void *)va; + } + } + + /* handle static DDR reserved memory regions */ + for (i = 0; i < kproc->num_rmems; i++) { + dev_addr = kproc->rmem[i].dev_addr; + size = kproc->rmem[i].size; + + if (da >= dev_addr && ((da + len) <= (dev_addr + size))) { + offset = da - dev_addr; + va = kproc->rmem[i].cpu_addr + offset; + return (__force void *)va; + } + } + + return NULL; +} +EXPORT_SYMBOL_GPL(k3_rproc_da_to_va); + +int k3_rproc_of_get_memories(struct platform_device *pdev, + struct k3_rproc *kproc) +{ + const struct k3_rproc_dev_data *data = kproc->data; + struct device *dev = &pdev->dev; + struct resource *res; + int num_mems = 0; + int i; + + num_mems = data->num_mems; + kproc->mem = devm_kcalloc(kproc->dev, num_mems, + sizeof(*kproc->mem), GFP_KERNEL); + if (!kproc->mem) + return -ENOMEM; + + for (i = 0; i < num_mems; i++) { + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + data->mems[i].name); + if (!res) { + dev_err(dev, "found no memory resource for %s\n", + data->mems[i].name); + return -EINVAL; + } + if (!devm_request_mem_region(dev, res->start, + resource_size(res), + dev_name(dev))) { + dev_err(dev, "could not request %s region for resource\n", + data->mems[i].name); + return -EBUSY; + } + + 
kproc->mem[i].cpu_addr = devm_ioremap_wc(dev, res->start, + resource_size(res)); + if (!kproc->mem[i].cpu_addr) { + dev_err(dev, "failed to map %s memory\n", + data->mems[i].name); + return -ENOMEM; + } + kproc->mem[i].bus_addr = res->start; + kproc->mem[i].dev_addr = data->mems[i].dev_addr; + kproc->mem[i].size = resource_size(res); + + dev_dbg(dev, "memory %8s: bus addr %pa size 0x%zx va %pK da 0x%x\n", + data->mems[i].name, &kproc->mem[i].bus_addr, + kproc->mem[i].size, kproc->mem[i].cpu_addr, + kproc->mem[i].dev_addr); + } + kproc->num_mems = num_mems; + + return 0; +} +EXPORT_SYMBOL_GPL(k3_rproc_of_get_memories); + +void k3_mem_release(void *data) +{ + struct device *dev = data; + + of_reserved_mem_device_release(dev); +} +EXPORT_SYMBOL_GPL(k3_mem_release); + +int k3_reserved_mem_init(struct k3_rproc *kproc) +{ + struct device *dev = kproc->dev; + struct device_node *np = dev->of_node; + struct device_node *rmem_np; + struct reserved_mem *rmem; + int num_rmems; + int ret, i; + + num_rmems = of_property_count_elems_of_size(np, "memory-region", + sizeof(phandle)); + if (num_rmems < 0) { + dev_err(dev, "device does not reserved memory regions (%d)\n", + num_rmems); + return -EINVAL; + } + if (num_rmems < 2) { + dev_err(dev, "device needs at least two memory regions to be defined, num = %d\n", + num_rmems); + return -EINVAL; + } + + /* use reserved memory region 0 for vring DMA allocations */ + ret = of_reserved_mem_device_init_by_idx(dev, np, 0); + if (ret) { + dev_err(dev, "device cannot initialize DMA pool (%d)\n", ret); + return ret; + } + ret = devm_add_action_or_reset(dev, k3_mem_release, dev); + if (ret) + return ret; + + num_rmems--; + kproc->rmem = devm_kcalloc(dev, num_rmems, sizeof(*kproc->rmem), GFP_KERNEL); + if (!kproc->rmem) + return -ENOMEM; + + /* use remaining reserved memory regions for static carveouts */ + for (i = 0; i < num_rmems; i++) { + rmem_np = of_parse_phandle(np, "memory-region", i + 1); + if (!rmem_np) + return -EINVAL; + + rmem = of_reserved_mem_lookup(rmem_np); + of_node_put(rmem_np); + if (!rmem) + return -EINVAL; + + kproc->rmem[i].bus_addr = rmem->base; + /* 64-bit address regions currently not supported */ + kproc->rmem[i].dev_addr = (u32)rmem->base; + kproc->rmem[i].size = rmem->size; + kproc->rmem[i].cpu_addr = devm_ioremap_wc(dev, rmem->base, rmem->size); + if (!kproc->rmem[i].cpu_addr) { + dev_err(dev, "failed to map reserved memory#%d at %pa of size %pa\n", + i + 1, &rmem->base, &rmem->size); + return -ENOMEM; + } + + dev_dbg(dev, "reserved memory%d: bus addr %pa size 0x%zx va %pK da 0x%x\n", + i + 1, &kproc->rmem[i].bus_addr, + kproc->rmem[i].size, kproc->rmem[i].cpu_addr, + kproc->rmem[i].dev_addr); + } + kproc->num_rmems = num_rmems; + + return 0; +} +EXPORT_SYMBOL_GPL(k3_reserved_mem_init); + +void k3_release_tsp(void *data) +{ + struct ti_sci_proc *tsp = data; + + ti_sci_proc_release(tsp); +} +EXPORT_SYMBOL_GPL(k3_release_tsp); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("TI K3 common Remoteproc code"); diff --git a/drivers/remoteproc/ti_k3_common.h b/drivers/remoteproc/ti_k3_common.h new file mode 100644 index 000000000000..aee3c28dbe51 --- /dev/null +++ b/drivers/remoteproc/ti_k3_common.h @@ -0,0 +1,118 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * TI K3 Remote Processor(s) driver common code + * + * Refactored out of ti_k3_r5_remoteproc.c, ti_k3_dsp_remoteproc.c and + * ti_k3_m4_remoteproc.c. 
+ * + * ti_k3_r5_remoteproc.c: + * Copyright (C) 2017-2022 Texas Instruments Incorporated - https://www.ti.com/ + * Suman Anna <s-anna@ti.com> + * + * ti_k3_dsp_remoteproc.c: + * Copyright (C) 2018-2022 Texas Instruments Incorporated - https://www.ti.com/ + * Suman Anna <s-anna@ti.com> + * + * ti_k3_m4_remoteproc.c: + * Copyright (C) 2021-2024 Texas Instruments Incorporated - https://www.ti.com/ + * Hari Nagalla <hnagalla@ti.com> + */ + +#ifndef REMOTEPROC_TI_K3_COMMON_H +#define REMOTEPROC_TI_K3_COMMON_H + +#define KEYSTONE_RPROC_LOCAL_ADDRESS_MASK (SZ_16M - 1) + +/** + * struct k3_rproc_mem - internal memory structure + * @cpu_addr: MPU virtual address of the memory region + * @bus_addr: Bus address used to access the memory region + * @dev_addr: Device address of the memory region from remote processor view + * @size: Size of the memory region + */ +struct k3_rproc_mem { + void __iomem *cpu_addr; + phys_addr_t bus_addr; + u32 dev_addr; + size_t size; +}; + +/** + * struct k3_rproc_mem_data - memory definitions for a remote processor + * @name: name for this memory entry + * @dev_addr: device address for the memory entry + */ +struct k3_rproc_mem_data { + const char *name; + const u32 dev_addr; +}; + +/** + * struct k3_rproc_dev_data - device data structure for a remote processor + * @mems: pointer to memory definitions for a remote processor + * @num_mems: number of memory regions in @mems + * @boot_align_addr: boot vector address alignment granularity + * @uses_lreset: flag to denote the need for local reset management + */ +struct k3_rproc_dev_data { + const struct k3_rproc_mem_data *mems; + u32 num_mems; + u32 boot_align_addr; + bool uses_lreset; +}; + +/** + * struct k3_rproc - k3 remote processor driver structure + * @dev: cached device pointer + * @rproc: remoteproc device handle + * @mem: internal memory regions data + * @num_mems: number of internal memory regions + * @rmem: reserved memory regions data + * @num_rmems: number of reserved memory regions + * @reset: reset control handle + * @data: pointer to DSP-specific device data + * @tsp: TI-SCI processor control handle + * @ti_sci: TI-SCI handle + * @ti_sci_id: TI-SCI device identifier + * @mbox: mailbox channel handle + * @client: mailbox client to request the mailbox channel + * @priv: void pointer to carry any private data + */ +struct k3_rproc { + struct device *dev; + struct rproc *rproc; + struct k3_rproc_mem *mem; + int num_mems; + struct k3_rproc_mem *rmem; + int num_rmems; + struct reset_control *reset; + const struct k3_rproc_dev_data *data; + struct ti_sci_proc *tsp; + const struct ti_sci_handle *ti_sci; + u32 ti_sci_id; + struct mbox_chan *mbox; + struct mbox_client client; + void *priv; +}; + +void k3_rproc_mbox_callback(struct mbox_client *client, void *data); +void k3_rproc_kick(struct rproc *rproc, int vqid); +int k3_rproc_reset(struct k3_rproc *kproc); +int k3_rproc_release(struct k3_rproc *kproc); +int k3_rproc_request_mbox(struct rproc *rproc); +int k3_rproc_prepare(struct rproc *rproc); +int k3_rproc_unprepare(struct rproc *rproc); +int k3_rproc_start(struct rproc *rproc); +int k3_rproc_stop(struct rproc *rproc); +int k3_rproc_attach(struct rproc *rproc); +int k3_rproc_detach(struct rproc *rproc); +struct resource_table *k3_get_loaded_rsc_table(struct rproc *rproc, + size_t *rsc_table_sz); +void *k3_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, + bool *is_iomem); +int k3_rproc_of_get_memories(struct platform_device *pdev, + struct k3_rproc *kproc); +void k3_mem_release(void *data); +int 
k3_reserved_mem_init(struct k3_rproc *kproc); +void k3_release_tsp(void *data); +#endif /* REMOTEPROC_TI_K3_COMMON_H */ diff --git a/drivers/remoteproc/ti_k3_dsp_remoteproc.c b/drivers/remoteproc/ti_k3_dsp_remoteproc.c index a695890254ff..7a72933bd403 100644 --- a/drivers/remoteproc/ti_k3_dsp_remoteproc.c +++ b/drivers/remoteproc/ti_k3_dsp_remoteproc.c @@ -20,291 +20,7 @@ #include "omap_remoteproc.h" #include "remoteproc_internal.h" #include "ti_sci_proc.h" - -#define KEYSTONE_RPROC_LOCAL_ADDRESS_MASK (SZ_16M - 1) - -/** - * struct k3_dsp_mem - internal memory structure - * @cpu_addr: MPU virtual address of the memory region - * @bus_addr: Bus address used to access the memory region - * @dev_addr: Device address of the memory region from DSP view - * @size: Size of the memory region - */ -struct k3_dsp_mem { - void __iomem *cpu_addr; - phys_addr_t bus_addr; - u32 dev_addr; - size_t size; -}; - -/** - * struct k3_dsp_mem_data - memory definitions for a DSP - * @name: name for this memory entry - * @dev_addr: device address for the memory entry - */ -struct k3_dsp_mem_data { - const char *name; - const u32 dev_addr; -}; - -/** - * struct k3_dsp_dev_data - device data structure for a DSP - * @mems: pointer to memory definitions for a DSP - * @num_mems: number of memory regions in @mems - * @boot_align_addr: boot vector address alignment granularity - * @uses_lreset: flag to denote the need for local reset management - */ -struct k3_dsp_dev_data { - const struct k3_dsp_mem_data *mems; - u32 num_mems; - u32 boot_align_addr; - bool uses_lreset; -}; - -/** - * struct k3_dsp_rproc - k3 DSP remote processor driver structure - * @dev: cached device pointer - * @rproc: remoteproc device handle - * @mem: internal memory regions data - * @num_mems: number of internal memory regions - * @rmem: reserved memory regions data - * @num_rmems: number of reserved memory regions - * @reset: reset control handle - * @data: pointer to DSP-specific device data - * @tsp: TI-SCI processor control handle - * @ti_sci: TI-SCI handle - * @ti_sci_id: TI-SCI device identifier - * @mbox: mailbox channel handle - * @client: mailbox client to request the mailbox channel - */ -struct k3_dsp_rproc { - struct device *dev; - struct rproc *rproc; - struct k3_dsp_mem *mem; - int num_mems; - struct k3_dsp_mem *rmem; - int num_rmems; - struct reset_control *reset; - const struct k3_dsp_dev_data *data; - struct ti_sci_proc *tsp; - const struct ti_sci_handle *ti_sci; - u32 ti_sci_id; - struct mbox_chan *mbox; - struct mbox_client client; -}; - -/** - * k3_dsp_rproc_mbox_callback() - inbound mailbox message handler - * @client: mailbox client pointer used for requesting the mailbox channel - * @data: mailbox payload - * - * This handler is invoked by the OMAP mailbox driver whenever a mailbox - * message is received. Usually, the mailbox payload simply contains - * the index of the virtqueue that is kicked by the remote processor, - * and we let remoteproc core handle it. - * - * In addition to virtqueue indices, we also have some out-of-band values - * that indicate different events. Those values are deliberately very - * large so they don't coincide with virtqueue indices. 
- */ -static void k3_dsp_rproc_mbox_callback(struct mbox_client *client, void *data) -{ - struct k3_dsp_rproc *kproc = container_of(client, struct k3_dsp_rproc, - client); - struct device *dev = kproc->rproc->dev.parent; - const char *name = kproc->rproc->name; - u32 msg = omap_mbox_message(data); - - /* Do not forward messages from a detached core */ - if (kproc->rproc->state == RPROC_DETACHED) - return; - - dev_dbg(dev, "mbox msg: 0x%x\n", msg); - - switch (msg) { - case RP_MBOX_CRASH: - /* - * remoteproc detected an exception, but error recovery is not - * supported. So, just log this for now - */ - dev_err(dev, "K3 DSP rproc %s crashed\n", name); - break; - case RP_MBOX_ECHO_REPLY: - dev_info(dev, "received echo reply from %s\n", name); - break; - default: - /* silently handle all other valid messages */ - if (msg >= RP_MBOX_READY && msg < RP_MBOX_END_MSG) - return; - if (msg > kproc->rproc->max_notifyid) { - dev_dbg(dev, "dropping unknown message 0x%x", msg); - return; - } - /* msg contains the index of the triggered vring */ - if (rproc_vq_interrupt(kproc->rproc, msg) == IRQ_NONE) - dev_dbg(dev, "no message was found in vqid %d\n", msg); - } -} - -/* - * Kick the remote processor to notify about pending unprocessed messages. - * The vqid usage is not used and is inconsequential, as the kick is performed - * through a simulated GPIO (a bit in an IPC interrupt-triggering register), - * the remote processor is expected to process both its Tx and Rx virtqueues. - */ -static void k3_dsp_rproc_kick(struct rproc *rproc, int vqid) -{ - struct k3_dsp_rproc *kproc = rproc->priv; - struct device *dev = rproc->dev.parent; - mbox_msg_t msg = (mbox_msg_t)vqid; - int ret; - - /* Do not forward messages to a detached core */ - if (kproc->rproc->state == RPROC_DETACHED) - return; - - /* send the index of the triggered virtqueue in the mailbox payload */ - ret = mbox_send_message(kproc->mbox, (void *)msg); - if (ret < 0) - dev_err(dev, "failed to send mailbox message (%pe)\n", - ERR_PTR(ret)); -} - -/* Put the DSP processor into reset */ -static int k3_dsp_rproc_reset(struct k3_dsp_rproc *kproc) -{ - struct device *dev = kproc->dev; - int ret; - - ret = reset_control_assert(kproc->reset); - if (ret) { - dev_err(dev, "local-reset assert failed (%pe)\n", ERR_PTR(ret)); - return ret; - } - - if (kproc->data->uses_lreset) - return ret; - - ret = kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci, - kproc->ti_sci_id); - if (ret) { - dev_err(dev, "module-reset assert failed (%pe)\n", ERR_PTR(ret)); - if (reset_control_deassert(kproc->reset)) - dev_warn(dev, "local-reset deassert back failed\n"); - } - - return ret; -} - -/* Release the DSP processor from reset */ -static int k3_dsp_rproc_release(struct k3_dsp_rproc *kproc) -{ - struct device *dev = kproc->dev; - int ret; - - if (kproc->data->uses_lreset) - goto lreset; - - ret = kproc->ti_sci->ops.dev_ops.get_device(kproc->ti_sci, - kproc->ti_sci_id); - if (ret) { - dev_err(dev, "module-reset deassert failed (%pe)\n", ERR_PTR(ret)); - return ret; - } - -lreset: - ret = reset_control_deassert(kproc->reset); - if (ret) { - dev_err(dev, "local-reset deassert failed, (%pe)\n", ERR_PTR(ret)); - if (kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci, - kproc->ti_sci_id)) - dev_warn(dev, "module-reset assert back failed\n"); - } - - return ret; -} - -static int k3_dsp_rproc_request_mbox(struct rproc *rproc) -{ - struct k3_dsp_rproc *kproc = rproc->priv; - struct mbox_client *client = &kproc->client; - struct device *dev = kproc->dev; - int ret; - - client->dev = 
dev; - client->tx_done = NULL; - client->rx_callback = k3_dsp_rproc_mbox_callback; - client->tx_block = false; - client->knows_txdone = false; - - kproc->mbox = mbox_request_channel(client, 0); - if (IS_ERR(kproc->mbox)) - return dev_err_probe(dev, PTR_ERR(kproc->mbox), - "mbox_request_channel failed\n"); - - /* - * Ping the remote processor, this is only for sanity-sake for now; - * there is no functional effect whatsoever. - * - * Note that the reply will _not_ arrive immediately: this message - * will wait in the mailbox fifo until the remote processor is booted. - */ - ret = mbox_send_message(kproc->mbox, (void *)RP_MBOX_ECHO_REQUEST); - if (ret < 0) { - dev_err(dev, "mbox_send_message failed (%pe)\n", ERR_PTR(ret)); - mbox_free_channel(kproc->mbox); - return ret; - } - - return 0; -} -/* - * The C66x DSP cores have a local reset that affects only the CPU, and a - * generic module reset that powers on the device and allows the DSP internal - * memories to be accessed while the local reset is asserted. This function is - * used to release the global reset on C66x DSPs to allow loading into the DSP - * internal RAMs. The .prepare() ops is invoked by remoteproc core before any - * firmware loading, and is followed by the .start() ops after loading to - * actually let the C66x DSP cores run. This callback is invoked only in - * remoteproc mode. - */ -static int k3_dsp_rproc_prepare(struct rproc *rproc) -{ - struct k3_dsp_rproc *kproc = rproc->priv; - struct device *dev = kproc->dev; - int ret; - - ret = kproc->ti_sci->ops.dev_ops.get_device(kproc->ti_sci, - kproc->ti_sci_id); - if (ret) - dev_err(dev, "module-reset deassert failed, cannot enable internal RAM loading (%pe)\n", - ERR_PTR(ret)); - - return ret; -} - -/* - * This function implements the .unprepare() ops and performs the complimentary - * operations to that of the .prepare() ops. The function is used to assert the - * global reset on applicable C66x cores. This completes the second portion of - * powering down the C66x DSP cores. The cores themselves are only halted in the - * .stop() callback through the local reset, and the .unprepare() ops is invoked - * by the remoteproc core after the remoteproc is stopped to balance the global - * reset. This callback is invoked only in remoteproc mode. - */ -static int k3_dsp_rproc_unprepare(struct rproc *rproc) -{ - struct k3_dsp_rproc *kproc = rproc->priv; - struct device *dev = kproc->dev; - int ret; - - ret = kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci, - kproc->ti_sci_id); - if (ret) - dev_err(dev, "module-reset assert failed (%pe)\n", ERR_PTR(ret)); - - return ret; -} +#include "ti_k3_common.h" /* * Power up the DSP remote processor. @@ -315,7 +31,7 @@ static int k3_dsp_rproc_unprepare(struct rproc *rproc) */ static int k3_dsp_rproc_start(struct rproc *rproc) { - struct k3_dsp_rproc *kproc = rproc->priv; + struct k3_rproc *kproc = rproc->priv; struct device *dev = kproc->dev; u32 boot_addr; int ret; @@ -332,288 +48,30 @@ static int k3_dsp_rproc_start(struct rproc *rproc) if (ret) return ret; - ret = k3_dsp_rproc_release(kproc); + /* Call the K3 common start function after doing DSP specific stuff */ + ret = k3_rproc_start(rproc); if (ret) return ret; return 0; } -/* - * Stop the DSP remote processor. - * - * This function puts the DSP processor into reset, and finishes processing - * of any pending messages. This callback is invoked only in remoteproc mode. 
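Note on the conversion above: the k3_rproc_start()/k3_rproc_stop() helpers now used by the DSP ops come from the new ti_k3_common.c, whose body is not shown in this part of the diff. The following is only a rough, non-authoritative sketch of what those helpers presumably consolidate, reconstructed from the per-driver reset/release code being removed here; the actual common implementation may differ in structure, naming, and error handling.

/* Editorial sketch, not part of the patch */
static int k3_rproc_start(struct rproc *rproc)
{
	struct k3_rproc *kproc = rproc->priv;
	int ret;

	/* power on the module via TI-SCI unless only the local reset is used */
	if (!kproc->data->uses_lreset) {
		ret = kproc->ti_sci->ops.dev_ops.get_device(kproc->ti_sci,
							    kproc->ti_sci_id);
		if (ret)
			return ret;
	}

	/* release the local reset so the core starts executing */
	return reset_control_deassert(kproc->reset);
}

static int k3_rproc_stop(struct rproc *rproc)
{
	struct k3_rproc *kproc = rproc->priv;

	/* halt the core by asserting its local reset; module power-down
	 * (put_device) for non-lreset devices is omitted in this sketch
	 */
	return reset_control_assert(kproc->reset);
}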
- */ -static int k3_dsp_rproc_stop(struct rproc *rproc) -{ - struct k3_dsp_rproc *kproc = rproc->priv; - - k3_dsp_rproc_reset(kproc); - - return 0; -} - -/* - * Attach to a running DSP remote processor (IPC-only mode) - * - * This rproc attach callback is a NOP. The remote processor is already booted, - * and all required resources have been acquired during probe routine, so there - * is no need to issue any TI-SCI commands to boot the DSP core. This callback - * is invoked only in IPC-only mode and exists because rproc_validate() checks - * for its existence. - */ -static int k3_dsp_rproc_attach(struct rproc *rproc) { return 0; } - -/* - * Detach from a running DSP remote processor (IPC-only mode) - * - * This rproc detach callback is a NOP. The DSP core is not stopped and will be - * left to continue to run its booted firmware. This callback is invoked only in - * IPC-only mode and exists for sanity sake. - */ -static int k3_dsp_rproc_detach(struct rproc *rproc) { return 0; } - -/* - * This function implements the .get_loaded_rsc_table() callback and is used - * to provide the resource table for a booted DSP in IPC-only mode. The K3 DSP - * firmwares follow a design-by-contract approach and are expected to have the - * resource table at the base of the DDR region reserved for firmware usage. - * This provides flexibility for the remote processor to be booted by different - * bootloaders that may or may not have the ability to publish the resource table - * address and size through a DT property. This callback is invoked only in - * IPC-only mode. - */ -static struct resource_table *k3_dsp_get_loaded_rsc_table(struct rproc *rproc, - size_t *rsc_table_sz) -{ - struct k3_dsp_rproc *kproc = rproc->priv; - struct device *dev = kproc->dev; - - if (!kproc->rmem[0].cpu_addr) { - dev_err(dev, "memory-region #1 does not exist, loaded rsc table can't be found"); - return ERR_PTR(-ENOMEM); - } - - /* - * NOTE: The resource table size is currently hard-coded to a maximum - * of 256 bytes. The most common resource table usage for K3 firmwares - * is to only have the vdev resource entry and an optional trace entry. - * The exact size could be computed based on resource table address, but - * the hard-coded value suffices to support the IPC-only mode. - */ - *rsc_table_sz = 256; - return (__force struct resource_table *)kproc->rmem[0].cpu_addr; -} - -/* - * Custom function to translate a DSP device address (internal RAMs only) to a - * kernel virtual address. The DSPs can access their RAMs at either an internal - * address visible only from a DSP, or at the SoC-level bus address. Both these - * addresses need to be looked through for translation. The translated addresses - * can be used either by the remoteproc core for loading (when using kernel - * remoteproc loader), or by any rpmsg bus drivers. 
- */ -static void *k3_dsp_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem) -{ - struct k3_dsp_rproc *kproc = rproc->priv; - void __iomem *va = NULL; - phys_addr_t bus_addr; - u32 dev_addr, offset; - size_t size; - int i; - - if (len == 0) - return NULL; - - for (i = 0; i < kproc->num_mems; i++) { - bus_addr = kproc->mem[i].bus_addr; - dev_addr = kproc->mem[i].dev_addr; - size = kproc->mem[i].size; - - if (da < KEYSTONE_RPROC_LOCAL_ADDRESS_MASK) { - /* handle DSP-view addresses */ - if (da >= dev_addr && - ((da + len) <= (dev_addr + size))) { - offset = da - dev_addr; - va = kproc->mem[i].cpu_addr + offset; - return (__force void *)va; - } - } else { - /* handle SoC-view addresses */ - if (da >= bus_addr && - (da + len) <= (bus_addr + size)) { - offset = da - bus_addr; - va = kproc->mem[i].cpu_addr + offset; - return (__force void *)va; - } - } - } - - /* handle static DDR reserved memory regions */ - for (i = 0; i < kproc->num_rmems; i++) { - dev_addr = kproc->rmem[i].dev_addr; - size = kproc->rmem[i].size; - - if (da >= dev_addr && ((da + len) <= (dev_addr + size))) { - offset = da - dev_addr; - va = kproc->rmem[i].cpu_addr + offset; - return (__force void *)va; - } - } - - return NULL; -} - static const struct rproc_ops k3_dsp_rproc_ops = { - .start = k3_dsp_rproc_start, - .stop = k3_dsp_rproc_stop, - .kick = k3_dsp_rproc_kick, - .da_to_va = k3_dsp_rproc_da_to_va, + .start = k3_dsp_rproc_start, + .stop = k3_rproc_stop, + .attach = k3_rproc_attach, + .detach = k3_rproc_detach, + .kick = k3_rproc_kick, + .da_to_va = k3_rproc_da_to_va, + .get_loaded_rsc_table = k3_get_loaded_rsc_table, }; -static int k3_dsp_rproc_of_get_memories(struct platform_device *pdev, - struct k3_dsp_rproc *kproc) -{ - const struct k3_dsp_dev_data *data = kproc->data; - struct device *dev = &pdev->dev; - struct resource *res; - int num_mems = 0; - int i; - - num_mems = kproc->data->num_mems; - kproc->mem = devm_kcalloc(kproc->dev, num_mems, - sizeof(*kproc->mem), GFP_KERNEL); - if (!kproc->mem) - return -ENOMEM; - - for (i = 0; i < num_mems; i++) { - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, - data->mems[i].name); - if (!res) { - dev_err(dev, "found no memory resource for %s\n", - data->mems[i].name); - return -EINVAL; - } - if (!devm_request_mem_region(dev, res->start, - resource_size(res), - dev_name(dev))) { - dev_err(dev, "could not request %s region for resource\n", - data->mems[i].name); - return -EBUSY; - } - - kproc->mem[i].cpu_addr = devm_ioremap_wc(dev, res->start, - resource_size(res)); - if (!kproc->mem[i].cpu_addr) { - dev_err(dev, "failed to map %s memory\n", - data->mems[i].name); - return -ENOMEM; - } - kproc->mem[i].bus_addr = res->start; - kproc->mem[i].dev_addr = data->mems[i].dev_addr; - kproc->mem[i].size = resource_size(res); - - dev_dbg(dev, "memory %8s: bus addr %pa size 0x%zx va %pK da 0x%x\n", - data->mems[i].name, &kproc->mem[i].bus_addr, - kproc->mem[i].size, kproc->mem[i].cpu_addr, - kproc->mem[i].dev_addr); - } - kproc->num_mems = num_mems; - - return 0; -} - -static void k3_dsp_mem_release(void *data) -{ - struct device *dev = data; - - of_reserved_mem_device_release(dev); -} - -static int k3_dsp_reserved_mem_init(struct k3_dsp_rproc *kproc) -{ - struct device *dev = kproc->dev; - struct device_node *np = dev->of_node; - struct device_node *rmem_np; - struct reserved_mem *rmem; - int num_rmems; - int ret, i; - - num_rmems = of_property_count_elems_of_size(np, "memory-region", - sizeof(phandle)); - if (num_rmems < 0) { - dev_err(dev, "device does not 
reserved memory regions (%pe)\n", - ERR_PTR(num_rmems)); - return -EINVAL; - } - if (num_rmems < 2) { - dev_err(dev, "device needs at least two memory regions to be defined, num = %d\n", - num_rmems); - return -EINVAL; - } - - /* use reserved memory region 0 for vring DMA allocations */ - ret = of_reserved_mem_device_init_by_idx(dev, np, 0); - if (ret) { - dev_err(dev, "device cannot initialize DMA pool (%pe)\n", - ERR_PTR(ret)); - return ret; - } - ret = devm_add_action_or_reset(dev, k3_dsp_mem_release, dev); - if (ret) - return ret; - - num_rmems--; - kproc->rmem = devm_kcalloc(dev, num_rmems, sizeof(*kproc->rmem), GFP_KERNEL); - if (!kproc->rmem) - return -ENOMEM; - - /* use remaining reserved memory regions for static carveouts */ - for (i = 0; i < num_rmems; i++) { - rmem_np = of_parse_phandle(np, "memory-region", i + 1); - if (!rmem_np) - return -EINVAL; - - rmem = of_reserved_mem_lookup(rmem_np); - of_node_put(rmem_np); - if (!rmem) - return -EINVAL; - - kproc->rmem[i].bus_addr = rmem->base; - /* 64-bit address regions currently not supported */ - kproc->rmem[i].dev_addr = (u32)rmem->base; - kproc->rmem[i].size = rmem->size; - kproc->rmem[i].cpu_addr = devm_ioremap_wc(dev, rmem->base, rmem->size); - if (!kproc->rmem[i].cpu_addr) { - dev_err(dev, "failed to map reserved memory#%d at %pa of size %pa\n", - i + 1, &rmem->base, &rmem->size); - return -ENOMEM; - } - - dev_dbg(dev, "reserved memory%d: bus addr %pa size 0x%zx va %pK da 0x%x\n", - i + 1, &kproc->rmem[i].bus_addr, - kproc->rmem[i].size, kproc->rmem[i].cpu_addr, - kproc->rmem[i].dev_addr); - } - kproc->num_rmems = num_rmems; - - return 0; -} - -static void k3_dsp_release_tsp(void *data) -{ - struct ti_sci_proc *tsp = data; - - ti_sci_proc_release(tsp); -} - static int k3_dsp_rproc_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct device_node *np = dev->of_node; - const struct k3_dsp_dev_data *data; - struct k3_dsp_rproc *kproc; + const struct k3_rproc_dev_data *data; + struct k3_rproc *kproc; struct rproc *rproc; const char *fw_name; bool p_state = false; @@ -635,15 +93,15 @@ static int k3_dsp_rproc_probe(struct platform_device *pdev) rproc->has_iommu = false; rproc->recovery_disabled = true; if (data->uses_lreset) { - rproc->ops->prepare = k3_dsp_rproc_prepare; - rproc->ops->unprepare = k3_dsp_rproc_unprepare; + rproc->ops->prepare = k3_rproc_prepare; + rproc->ops->unprepare = k3_rproc_unprepare; } kproc = rproc->priv; kproc->rproc = rproc; kproc->dev = dev; kproc->data = data; - ret = k3_dsp_rproc_request_mbox(rproc); + ret = k3_rproc_request_mbox(rproc); if (ret) return ret; @@ -671,15 +129,15 @@ static int k3_dsp_rproc_probe(struct platform_device *pdev) dev_err_probe(dev, ret, "ti_sci_proc_request failed\n"); return ret; } - ret = devm_add_action_or_reset(dev, k3_dsp_release_tsp, kproc->tsp); + ret = devm_add_action_or_reset(dev, k3_release_tsp, kproc->tsp); if (ret) return ret; - ret = k3_dsp_rproc_of_get_memories(pdev, kproc); + ret = k3_rproc_of_get_memories(pdev, kproc); if (ret) return ret; - ret = k3_dsp_reserved_mem_init(kproc); + ret = k3_reserved_mem_init(kproc); if (ret) return dev_err_probe(dev, ret, "reserved memory init failed\n"); @@ -692,30 +150,8 @@ static int k3_dsp_rproc_probe(struct platform_device *pdev) if (p_state) { dev_info(dev, "configured DSP for IPC-only mode\n"); rproc->state = RPROC_DETACHED; - /* override rproc ops with only required IPC-only mode ops */ - rproc->ops->prepare = NULL; - rproc->ops->unprepare = NULL; - rproc->ops->start = NULL; - rproc->ops->stop = 
NULL; - rproc->ops->attach = k3_dsp_rproc_attach; - rproc->ops->detach = k3_dsp_rproc_detach; - rproc->ops->get_loaded_rsc_table = k3_dsp_get_loaded_rsc_table; } else { dev_info(dev, "configured DSP for remoteproc mode\n"); - /* - * ensure the DSP local reset is asserted to ensure the DSP - * doesn't execute bogus code in .prepare() when the module - * reset is released. - */ - if (data->uses_lreset) { - ret = reset_control_status(kproc->reset); - if (ret < 0) { - return dev_err_probe(dev, ret, "failed to get reset status\n"); - } else if (ret == 0) { - dev_warn(dev, "local reset is deasserted for device\n"); - k3_dsp_rproc_reset(kproc); - } - } } ret = devm_rproc_add(dev, rproc); @@ -729,7 +165,7 @@ static int k3_dsp_rproc_probe(struct platform_device *pdev) static void k3_dsp_rproc_remove(struct platform_device *pdev) { - struct k3_dsp_rproc *kproc = platform_get_drvdata(pdev); + struct k3_rproc *kproc = platform_get_drvdata(pdev); struct rproc *rproc = kproc->rproc; struct device *dev = &pdev->dev; int ret; @@ -743,37 +179,37 @@ static void k3_dsp_rproc_remove(struct platform_device *pdev) mbox_free_channel(kproc->mbox); } -static const struct k3_dsp_mem_data c66_mems[] = { +static const struct k3_rproc_mem_data c66_mems[] = { { .name = "l2sram", .dev_addr = 0x800000 }, { .name = "l1pram", .dev_addr = 0xe00000 }, { .name = "l1dram", .dev_addr = 0xf00000 }, }; /* C71x cores only have a L1P Cache, there are no L1P SRAMs */ -static const struct k3_dsp_mem_data c71_mems[] = { +static const struct k3_rproc_mem_data c71_mems[] = { { .name = "l2sram", .dev_addr = 0x800000 }, { .name = "l1dram", .dev_addr = 0xe00000 }, }; -static const struct k3_dsp_mem_data c7xv_mems[] = { +static const struct k3_rproc_mem_data c7xv_mems[] = { { .name = "l2sram", .dev_addr = 0x800000 }, }; -static const struct k3_dsp_dev_data c66_data = { +static const struct k3_rproc_dev_data c66_data = { .mems = c66_mems, .num_mems = ARRAY_SIZE(c66_mems), .boot_align_addr = SZ_1K, .uses_lreset = true, }; -static const struct k3_dsp_dev_data c71_data = { +static const struct k3_rproc_dev_data c71_data = { .mems = c71_mems, .num_mems = ARRAY_SIZE(c71_mems), .boot_align_addr = SZ_2M, .uses_lreset = false, }; -static const struct k3_dsp_dev_data c7xv_data = { +static const struct k3_rproc_dev_data c7xv_data = { .mems = c7xv_mems, .num_mems = ARRAY_SIZE(c7xv_mems), .boot_align_addr = SZ_2M, diff --git a/drivers/remoteproc/ti_k3_m4_remoteproc.c b/drivers/remoteproc/ti_k3_m4_remoteproc.c index a16fb165fced..3a11fd24eb52 100644 --- a/drivers/remoteproc/ti_k3_m4_remoteproc.c +++ b/drivers/remoteproc/ti_k3_m4_remoteproc.c @@ -19,552 +19,35 @@ #include "omap_remoteproc.h" #include "remoteproc_internal.h" #include "ti_sci_proc.h" - -#define K3_M4_IRAM_DEV_ADDR 0x00000 -#define K3_M4_DRAM_DEV_ADDR 0x30000 - -/** - * struct k3_m4_rproc_mem - internal memory structure - * @cpu_addr: MPU virtual address of the memory region - * @bus_addr: Bus address used to access the memory region - * @dev_addr: Device address of the memory region from remote processor view - * @size: Size of the memory region - */ -struct k3_m4_rproc_mem { - void __iomem *cpu_addr; - phys_addr_t bus_addr; - u32 dev_addr; - size_t size; -}; - -/** - * struct k3_m4_rproc_mem_data - memory definitions for a remote processor - * @name: name for this memory entry - * @dev_addr: device address for the memory entry - */ -struct k3_m4_rproc_mem_data { - const char *name; - const u32 dev_addr; -}; - -/** - * struct k3_m4_rproc - k3 remote processor driver structure - * @dev: 
cached device pointer - * @mem: internal memory regions data - * @num_mems: number of internal memory regions - * @rmem: reserved memory regions data - * @num_rmems: number of reserved memory regions - * @reset: reset control handle - * @tsp: TI-SCI processor control handle - * @ti_sci: TI-SCI handle - * @ti_sci_id: TI-SCI device identifier - * @mbox: mailbox channel handle - * @client: mailbox client to request the mailbox channel - */ -struct k3_m4_rproc { - struct device *dev; - struct k3_m4_rproc_mem *mem; - int num_mems; - struct k3_m4_rproc_mem *rmem; - int num_rmems; - struct reset_control *reset; - struct ti_sci_proc *tsp; - const struct ti_sci_handle *ti_sci; - u32 ti_sci_id; - struct mbox_chan *mbox; - struct mbox_client client; -}; - -/** - * k3_m4_rproc_mbox_callback() - inbound mailbox message handler - * @client: mailbox client pointer used for requesting the mailbox channel - * @data: mailbox payload - * - * This handler is invoked by the K3 mailbox driver whenever a mailbox - * message is received. Usually, the mailbox payload simply contains - * the index of the virtqueue that is kicked by the remote processor, - * and we let remoteproc core handle it. - * - * In addition to virtqueue indices, we also have some out-of-band values - * that indicate different events. Those values are deliberately very - * large so they don't coincide with virtqueue indices. - */ -static void k3_m4_rproc_mbox_callback(struct mbox_client *client, void *data) -{ - struct device *dev = client->dev; - struct rproc *rproc = dev_get_drvdata(dev); - u32 msg = (u32)(uintptr_t)(data); - - dev_dbg(dev, "mbox msg: 0x%x\n", msg); - - switch (msg) { - case RP_MBOX_CRASH: - /* - * remoteproc detected an exception, but error recovery is not - * supported. So, just log this for now - */ - dev_err(dev, "K3 rproc %s crashed\n", rproc->name); - break; - case RP_MBOX_ECHO_REPLY: - dev_info(dev, "received echo reply from %s\n", rproc->name); - break; - default: - /* silently handle all other valid messages */ - if (msg >= RP_MBOX_READY && msg < RP_MBOX_END_MSG) - return; - if (msg > rproc->max_notifyid) { - dev_dbg(dev, "dropping unknown message 0x%x", msg); - return; - } - /* msg contains the index of the triggered vring */ - if (rproc_vq_interrupt(rproc, msg) == IRQ_NONE) - dev_dbg(dev, "no message was found in vqid %d\n", msg); - } -} - -/* - * Kick the remote processor to notify about pending unprocessed messages. - * The vqid usage is not used and is inconsequential, as the kick is performed - * through a simulated GPIO (a bit in an IPC interrupt-triggering register), - * the remote processor is expected to process both its Tx and Rx virtqueues. - */ -static void k3_m4_rproc_kick(struct rproc *rproc, int vqid) -{ - struct k3_m4_rproc *kproc = rproc->priv; - struct device *dev = kproc->dev; - u32 msg = (u32)vqid; - int ret; - - /* - * Send the index of the triggered virtqueue in the mailbox payload. - * NOTE: msg is cast to uintptr_t to prevent compiler warnings when - * void* is 64bit. It is safely cast back to u32 in the mailbox driver. - */ - ret = mbox_send_message(kproc->mbox, (void *)(uintptr_t)msg); - if (ret < 0) - dev_err(dev, "failed to send mailbox message, status = %d\n", - ret); -} - -static int k3_m4_rproc_ping_mbox(struct k3_m4_rproc *kproc) -{ - struct device *dev = kproc->dev; - int ret; - - /* - * Ping the remote processor, this is only for sanity-sake for now; - * there is no functional effect whatsoever. 
- * - * Note that the reply will _not_ arrive immediately: this message - * will wait in the mailbox fifo until the remote processor is booted. - */ - ret = mbox_send_message(kproc->mbox, (void *)RP_MBOX_ECHO_REQUEST); - if (ret < 0) { - dev_err(dev, "mbox_send_message failed: %d\n", ret); - return ret; - } - - return 0; -} - -/* - * The M4 cores have a local reset that affects only the CPU, and a - * generic module reset that powers on the device and allows the internal - * memories to be accessed while the local reset is asserted. This function is - * used to release the global reset on remote cores to allow loading into the - * internal RAMs. The .prepare() ops is invoked by remoteproc core before any - * firmware loading, and is followed by the .start() ops after loading to - * actually let the remote cores to run. - */ -static int k3_m4_rproc_prepare(struct rproc *rproc) -{ - struct k3_m4_rproc *kproc = rproc->priv; - struct device *dev = kproc->dev; - int ret; - - /* If the core is running already no need to deassert the module reset */ - if (rproc->state == RPROC_DETACHED) - return 0; - - /* - * Ensure the local reset is asserted so the core doesn't - * execute bogus code when the module reset is released. - */ - ret = reset_control_assert(kproc->reset); - if (ret) { - dev_err(dev, "could not assert local reset\n"); - return ret; - } - - ret = reset_control_status(kproc->reset); - if (ret <= 0) { - dev_err(dev, "local reset still not asserted\n"); - return ret; - } - - ret = kproc->ti_sci->ops.dev_ops.get_device(kproc->ti_sci, - kproc->ti_sci_id); - if (ret) { - dev_err(dev, "could not deassert module-reset for internal RAM loading\n"); - return ret; - } - - return 0; -} - -/* - * This function implements the .unprepare() ops and performs the complimentary - * operations to that of the .prepare() ops. The function is used to assert the - * global reset on applicable cores. This completes the second portion of - * powering down the remote core. The cores themselves are only halted in the - * .stop() callback through the local reset, and the .unprepare() ops is invoked - * by the remoteproc core after the remoteproc is stopped to balance the global - * reset. - */ -static int k3_m4_rproc_unprepare(struct rproc *rproc) -{ - struct k3_m4_rproc *kproc = rproc->priv; - struct device *dev = kproc->dev; - int ret; - - /* If the core is going to be detached do not assert the module reset */ - if (rproc->state == RPROC_ATTACHED) - return 0; - - ret = kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci, - kproc->ti_sci_id); - if (ret) { - dev_err(dev, "module-reset assert failed\n"); - return ret; - } - - return 0; -} - -/* - * This function implements the .get_loaded_rsc_table() callback and is used - * to provide the resource table for a booted remote processor in IPC-only - * mode. The remote processor firmwares follow a design-by-contract approach - * and are expected to have the resource table at the base of the DDR region - * reserved for firmware usage. This provides flexibility for the remote - * processor to be booted by different bootloaders that may or may not have the - * ability to publish the resource table address and size through a DT - * property. 
- */ -static struct resource_table *k3_m4_get_loaded_rsc_table(struct rproc *rproc, - size_t *rsc_table_sz) -{ - struct k3_m4_rproc *kproc = rproc->priv; - struct device *dev = kproc->dev; - - if (!kproc->rmem[0].cpu_addr) { - dev_err(dev, "memory-region #1 does not exist, loaded rsc table can't be found"); - return ERR_PTR(-ENOMEM); - } - - /* - * NOTE: The resource table size is currently hard-coded to a maximum - * of 256 bytes. The most common resource table usage for K3 firmwares - * is to only have the vdev resource entry and an optional trace entry. - * The exact size could be computed based on resource table address, but - * the hard-coded value suffices to support the IPC-only mode. - */ - *rsc_table_sz = 256; - return (__force struct resource_table *)kproc->rmem[0].cpu_addr; -} - -/* - * Custom function to translate a remote processor device address (internal - * RAMs only) to a kernel virtual address. The remote processors can access - * their RAMs at either an internal address visible only from a remote - * processor, or at the SoC-level bus address. Both these addresses need to be - * looked through for translation. The translated addresses can be used either - * by the remoteproc core for loading (when using kernel remoteproc loader), or - * by any rpmsg bus drivers. - */ -static void *k3_m4_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem) -{ - struct k3_m4_rproc *kproc = rproc->priv; - void __iomem *va = NULL; - phys_addr_t bus_addr; - u32 dev_addr, offset; - size_t size; - int i; - - if (len == 0) - return NULL; - - for (i = 0; i < kproc->num_mems; i++) { - bus_addr = kproc->mem[i].bus_addr; - dev_addr = kproc->mem[i].dev_addr; - size = kproc->mem[i].size; - - /* handle M4-view addresses */ - if (da >= dev_addr && ((da + len) <= (dev_addr + size))) { - offset = da - dev_addr; - va = kproc->mem[i].cpu_addr + offset; - return (__force void *)va; - } - - /* handle SoC-view addresses */ - if (da >= bus_addr && ((da + len) <= (bus_addr + size))) { - offset = da - bus_addr; - va = kproc->mem[i].cpu_addr + offset; - return (__force void *)va; - } - } - - /* handle static DDR reserved memory regions */ - for (i = 0; i < kproc->num_rmems; i++) { - dev_addr = kproc->rmem[i].dev_addr; - size = kproc->rmem[i].size; - - if (da >= dev_addr && ((da + len) <= (dev_addr + size))) { - offset = da - dev_addr; - va = kproc->rmem[i].cpu_addr + offset; - return (__force void *)va; - } - } - - return NULL; -} - -static int k3_m4_rproc_of_get_memories(struct platform_device *pdev, - struct k3_m4_rproc *kproc) -{ - static const char * const mem_names[] = { "iram", "dram" }; - static const u32 mem_addrs[] = { K3_M4_IRAM_DEV_ADDR, K3_M4_DRAM_DEV_ADDR }; - struct device *dev = &pdev->dev; - struct resource *res; - int num_mems; - int i; - - num_mems = ARRAY_SIZE(mem_names); - kproc->mem = devm_kcalloc(kproc->dev, num_mems, - sizeof(*kproc->mem), GFP_KERNEL); - if (!kproc->mem) - return -ENOMEM; - - for (i = 0; i < num_mems; i++) { - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, - mem_names[i]); - if (!res) { - dev_err(dev, "found no memory resource for %s\n", - mem_names[i]); - return -EINVAL; - } - if (!devm_request_mem_region(dev, res->start, - resource_size(res), - dev_name(dev))) { - dev_err(dev, "could not request %s region for resource\n", - mem_names[i]); - return -EBUSY; - } - - kproc->mem[i].cpu_addr = devm_ioremap_wc(dev, res->start, - resource_size(res)); - if (!kproc->mem[i].cpu_addr) { - dev_err(dev, "failed to map %s memory\n", - mem_names[i]); - return 
-ENOMEM; - } - kproc->mem[i].bus_addr = res->start; - kproc->mem[i].dev_addr = mem_addrs[i]; - kproc->mem[i].size = resource_size(res); - - dev_dbg(dev, "memory %8s: bus addr %pa size 0x%zx va %pK da 0x%x\n", - mem_names[i], &kproc->mem[i].bus_addr, - kproc->mem[i].size, kproc->mem[i].cpu_addr, - kproc->mem[i].dev_addr); - } - kproc->num_mems = num_mems; - - return 0; -} - -static void k3_m4_rproc_dev_mem_release(void *data) -{ - struct device *dev = data; - - of_reserved_mem_device_release(dev); -} - -static int k3_m4_reserved_mem_init(struct k3_m4_rproc *kproc) -{ - struct device *dev = kproc->dev; - struct device_node *np = dev->of_node; - struct device_node *rmem_np; - struct reserved_mem *rmem; - int num_rmems; - int ret, i; - - num_rmems = of_property_count_elems_of_size(np, "memory-region", - sizeof(phandle)); - if (num_rmems < 0) { - dev_err(dev, "device does not reserved memory regions (%d)\n", - num_rmems); - return -EINVAL; - } - if (num_rmems < 2) { - dev_err(dev, "device needs at least two memory regions to be defined, num = %d\n", - num_rmems); - return -EINVAL; - } - - /* use reserved memory region 0 for vring DMA allocations */ - ret = of_reserved_mem_device_init_by_idx(dev, np, 0); - if (ret) { - dev_err(dev, "device cannot initialize DMA pool (%d)\n", ret); - return ret; - } - ret = devm_add_action_or_reset(dev, k3_m4_rproc_dev_mem_release, dev); - if (ret) - return ret; - - num_rmems--; - kproc->rmem = devm_kcalloc(dev, num_rmems, sizeof(*kproc->rmem), GFP_KERNEL); - if (!kproc->rmem) - return -ENOMEM; - - /* use remaining reserved memory regions for static carveouts */ - for (i = 0; i < num_rmems; i++) { - rmem_np = of_parse_phandle(np, "memory-region", i + 1); - if (!rmem_np) - return -EINVAL; - - rmem = of_reserved_mem_lookup(rmem_np); - of_node_put(rmem_np); - if (!rmem) - return -EINVAL; - - kproc->rmem[i].bus_addr = rmem->base; - /* 64-bit address regions currently not supported */ - kproc->rmem[i].dev_addr = (u32)rmem->base; - kproc->rmem[i].size = rmem->size; - kproc->rmem[i].cpu_addr = devm_ioremap_wc(dev, rmem->base, rmem->size); - if (!kproc->rmem[i].cpu_addr) { - dev_err(dev, "failed to map reserved memory#%d at %pa of size %pa\n", - i + 1, &rmem->base, &rmem->size); - return -ENOMEM; - } - - dev_dbg(dev, "reserved memory%d: bus addr %pa size 0x%zx va %pK da 0x%x\n", - i + 1, &kproc->rmem[i].bus_addr, - kproc->rmem[i].size, kproc->rmem[i].cpu_addr, - kproc->rmem[i].dev_addr); - } - kproc->num_rmems = num_rmems; - - return 0; -} - -static void k3_m4_release_tsp(void *data) -{ - struct ti_sci_proc *tsp = data; - - ti_sci_proc_release(tsp); -} - -/* - * Power up the M4 remote processor. - * - * This function will be invoked only after the firmware for this rproc - * was loaded, parsed successfully, and all of its resource requirements - * were met. This callback is invoked only in remoteproc mode. - */ -static int k3_m4_rproc_start(struct rproc *rproc) -{ - struct k3_m4_rproc *kproc = rproc->priv; - struct device *dev = kproc->dev; - int ret; - - ret = k3_m4_rproc_ping_mbox(kproc); - if (ret) - return ret; - - ret = reset_control_deassert(kproc->reset); - if (ret) { - dev_err(dev, "local-reset deassert failed, ret = %d\n", ret); - return ret; - } - - return 0; -} - -/* - * Stop the M4 remote processor. - * - * This function puts the M4 processor into reset, and finishes processing - * of any pending messages. This callback is invoked only in remoteproc mode. 
- */ -static int k3_m4_rproc_stop(struct rproc *rproc) -{ - struct k3_m4_rproc *kproc = rproc->priv; - struct device *dev = kproc->dev; - int ret; - - ret = reset_control_assert(kproc->reset); - if (ret) { - dev_err(dev, "local-reset assert failed, ret = %d\n", ret); - return ret; - } - - return 0; -} - -/* - * Attach to a running M4 remote processor (IPC-only mode) - * - * The remote processor is already booted, so there is no need to issue any - * TI-SCI commands to boot the M4 core. This callback is used only in IPC-only - * mode. - */ -static int k3_m4_rproc_attach(struct rproc *rproc) -{ - struct k3_m4_rproc *kproc = rproc->priv; - int ret; - - ret = k3_m4_rproc_ping_mbox(kproc); - if (ret) - return ret; - - return 0; -} - -/* - * Detach from a running M4 remote processor (IPC-only mode) - * - * This rproc detach callback performs the opposite operation to attach - * callback, the M4 core is not stopped and will be left to continue to - * run its booted firmware. This callback is invoked only in IPC-only mode. - */ -static int k3_m4_rproc_detach(struct rproc *rproc) -{ - return 0; -} +#include "ti_k3_common.h" static const struct rproc_ops k3_m4_rproc_ops = { - .prepare = k3_m4_rproc_prepare, - .unprepare = k3_m4_rproc_unprepare, - .start = k3_m4_rproc_start, - .stop = k3_m4_rproc_stop, - .attach = k3_m4_rproc_attach, - .detach = k3_m4_rproc_detach, - .kick = k3_m4_rproc_kick, - .da_to_va = k3_m4_rproc_da_to_va, - .get_loaded_rsc_table = k3_m4_get_loaded_rsc_table, + .prepare = k3_rproc_prepare, + .unprepare = k3_rproc_unprepare, + .start = k3_rproc_start, + .stop = k3_rproc_stop, + .attach = k3_rproc_attach, + .detach = k3_rproc_detach, + .kick = k3_rproc_kick, + .da_to_va = k3_rproc_da_to_va, + .get_loaded_rsc_table = k3_get_loaded_rsc_table, }; static int k3_m4_rproc_probe(struct platform_device *pdev) { + const struct k3_rproc_dev_data *data; struct device *dev = &pdev->dev; - struct k3_m4_rproc *kproc; + struct k3_rproc *kproc; struct rproc *rproc; const char *fw_name; bool r_state = false; bool p_state = false; int ret; + data = of_device_get_match_data(dev); + if (!data) + return -ENODEV; + ret = rproc_of_parse_firmware(dev, 0, &fw_name); if (ret) return dev_err_probe(dev, ret, "failed to parse firmware-name property\n"); @@ -578,6 +61,8 @@ static int k3_m4_rproc_probe(struct platform_device *pdev) rproc->recovery_disabled = true; kproc = rproc->priv; kproc->dev = dev; + kproc->rproc = rproc; + kproc->data = data; platform_set_drvdata(pdev, rproc); kproc->ti_sci = devm_ti_sci_get_by_phandle(dev, "ti,sci"); @@ -601,15 +86,15 @@ static int k3_m4_rproc_probe(struct platform_device *pdev) ret = ti_sci_proc_request(kproc->tsp); if (ret < 0) return dev_err_probe(dev, ret, "ti_sci_proc_request failed\n"); - ret = devm_add_action_or_reset(dev, k3_m4_release_tsp, kproc->tsp); + ret = devm_add_action_or_reset(dev, k3_release_tsp, kproc->tsp); if (ret) return ret; - ret = k3_m4_rproc_of_get_memories(pdev, kproc); + ret = k3_rproc_of_get_memories(pdev, kproc); if (ret) return ret; - ret = k3_m4_reserved_mem_init(kproc); + ret = k3_reserved_mem_init(kproc); if (ret) return dev_err_probe(dev, ret, "reserved memory init failed\n"); @@ -627,15 +112,9 @@ static int k3_m4_rproc_probe(struct platform_device *pdev) dev_info(dev, "configured M4F for remoteproc mode\n"); } - kproc->client.dev = dev; - kproc->client.tx_done = NULL; - kproc->client.rx_callback = k3_m4_rproc_mbox_callback; - kproc->client.tx_block = false; - kproc->client.knows_txdone = false; - kproc->mbox = 
mbox_request_channel(&kproc->client, 0); - if (IS_ERR(kproc->mbox)) - return dev_err_probe(dev, PTR_ERR(kproc->mbox), - "mbox_request_channel failed\n"); + ret = k3_rproc_request_mbox(rproc); + if (ret) + return ret; ret = devm_rproc_add(dev, rproc); if (ret) @@ -645,8 +124,20 @@ static int k3_m4_rproc_probe(struct platform_device *pdev) return 0; } +static const struct k3_rproc_mem_data am64_m4_mems[] = { + { .name = "iram", .dev_addr = 0x0 }, + { .name = "dram", .dev_addr = 0x30000 }, +}; + +static const struct k3_rproc_dev_data am64_m4_data = { + .mems = am64_m4_mems, + .num_mems = ARRAY_SIZE(am64_m4_mems), + .boot_align_addr = SZ_1K, + .uses_lreset = true, +}; + static const struct of_device_id k3_m4_of_match[] = { - { .compatible = "ti,am64-m4fss", }, + { .compatible = "ti,am64-m4fss", .data = &am64_m4_data, }, { /* sentinel */ }, }; MODULE_DEVICE_TABLE(of, k3_m4_of_match); diff --git a/drivers/remoteproc/ti_k3_r5_remoteproc.c b/drivers/remoteproc/ti_k3_r5_remoteproc.c index dbc513c5569c..e34c04c135fc 100644 --- a/drivers/remoteproc/ti_k3_r5_remoteproc.c +++ b/drivers/remoteproc/ti_k3_r5_remoteproc.c @@ -26,6 +26,7 @@ #include "omap_remoteproc.h" #include "remoteproc_internal.h" #include "ti_sci_proc.h" +#include "ti_k3_common.h" /* This address can either be for ATCM or BTCM with the other at address 0x0 */ #define K3_R5_TCM_DEV_ADDR 0x41010000 @@ -55,20 +56,6 @@ /* Applicable to only AM64x SoCs */ #define PROC_BOOT_STATUS_FLAG_R5_SINGLECORE_ONLY 0x00000200 -/** - * struct k3_r5_mem - internal memory structure - * @cpu_addr: MPU virtual address of the memory region - * @bus_addr: Bus address used to access the memory region - * @dev_addr: Device address from remoteproc view - * @size: Size of the memory region - */ -struct k3_r5_mem { - void __iomem *cpu_addr; - phys_addr_t bus_addr; - u32 dev_addr; - size_t size; -}; - /* * All cluster mode values are not applicable on all SoCs. 
The following * are the modes supported on various SoCs: @@ -90,12 +77,14 @@ enum cluster_mode { * @tcm_ecc_autoinit: flag to denote the auto-initialization of TCMs for ECC * @single_cpu_mode: flag to denote if SoC/IP supports Single-CPU mode * @is_single_core: flag to denote if SoC/IP has only single core R5 + * @core_data: pointer to R5-core-specific device data */ struct k3_r5_soc_data { bool tcm_is_double; bool tcm_ecc_autoinit; bool single_cpu_mode; bool is_single_core; + const struct k3_rproc_dev_data *core_data; }; /** @@ -118,15 +107,10 @@ struct k3_r5_cluster { * struct k3_r5_core - K3 R5 core structure * @elem: linked list item * @dev: cached device pointer - * @rproc: rproc handle representing this core - * @mem: internal memory regions data + * @kproc: K3 rproc handle representing this core + * @cluster: cached pointer to parent cluster structure * @sram: on-chip SRAM memory regions data - * @num_mems: number of internal memory regions * @num_sram: number of on-chip SRAM memory regions - * @reset: reset control handle - * @tsp: TI-SCI processor control handle - * @ti_sci: TI-SCI handle - * @ti_sci_id: TI-SCI device identifier * @atcm_enable: flag to control ATCM enablement * @btcm_enable: flag to control BTCM enablement * @loczrama: flag to dictate which TCM is at device address 0x0 @@ -135,157 +119,58 @@ struct k3_r5_cluster { struct k3_r5_core { struct list_head elem; struct device *dev; - struct rproc *rproc; - struct k3_r5_mem *mem; - struct k3_r5_mem *sram; - int num_mems; + struct k3_rproc *kproc; + struct k3_r5_cluster *cluster; + struct k3_rproc_mem *sram; int num_sram; - struct reset_control *reset; - struct ti_sci_proc *tsp; - const struct ti_sci_handle *ti_sci; - u32 ti_sci_id; u32 atcm_enable; u32 btcm_enable; u32 loczrama; bool released_from_reset; }; -/** - * struct k3_r5_rproc - K3 remote processor state - * @dev: cached device pointer - * @cluster: cached pointer to parent cluster structure - * @mbox: mailbox channel handle - * @client: mailbox client to request the mailbox channel - * @rproc: rproc handle - * @core: cached pointer to r5 core structure being used - * @rmem: reserved memory regions data - * @num_rmems: number of reserved memory regions - */ -struct k3_r5_rproc { - struct device *dev; - struct k3_r5_cluster *cluster; - struct mbox_chan *mbox; - struct mbox_client client; - struct rproc *rproc; - struct k3_r5_core *core; - struct k3_r5_mem *rmem; - int num_rmems; -}; - -/** - * k3_r5_rproc_mbox_callback() - inbound mailbox message handler - * @client: mailbox client pointer used for requesting the mailbox channel - * @data: mailbox payload - * - * This handler is invoked by the OMAP mailbox driver whenever a mailbox - * message is received. Usually, the mailbox payload simply contains - * the index of the virtqueue that is kicked by the remote processor, - * and we let remoteproc core handle it. - * - * In addition to virtqueue indices, we also have some out-of-band values - * that indicate different events. Those values are deliberately very - * large so they don't coincide with virtqueue indices. 
- */ -static void k3_r5_rproc_mbox_callback(struct mbox_client *client, void *data) -{ - struct k3_r5_rproc *kproc = container_of(client, struct k3_r5_rproc, - client); - struct device *dev = kproc->rproc->dev.parent; - const char *name = kproc->rproc->name; - u32 msg = omap_mbox_message(data); - - /* Do not forward message from a detached core */ - if (kproc->rproc->state == RPROC_DETACHED) - return; - - dev_dbg(dev, "mbox msg: 0x%x\n", msg); - - switch (msg) { - case RP_MBOX_CRASH: - /* - * remoteproc detected an exception, but error recovery is not - * supported. So, just log this for now - */ - dev_err(dev, "K3 R5F rproc %s crashed\n", name); - break; - case RP_MBOX_ECHO_REPLY: - dev_info(dev, "received echo reply from %s\n", name); - break; - default: - /* silently handle all other valid messages */ - if (msg >= RP_MBOX_READY && msg < RP_MBOX_END_MSG) - return; - if (msg > kproc->rproc->max_notifyid) { - dev_dbg(dev, "dropping unknown message 0x%x", msg); - return; - } - /* msg contains the index of the triggered vring */ - if (rproc_vq_interrupt(kproc->rproc, msg) == IRQ_NONE) - dev_dbg(dev, "no message was found in vqid %d\n", msg); - } -} - -/* kick a virtqueue */ -static void k3_r5_rproc_kick(struct rproc *rproc, int vqid) -{ - struct k3_r5_rproc *kproc = rproc->priv; - struct device *dev = rproc->dev.parent; - mbox_msg_t msg = (mbox_msg_t)vqid; - int ret; - - /* Do not forward message to a detached core */ - if (kproc->rproc->state == RPROC_DETACHED) - return; - - /* send the index of the triggered virtqueue in the mailbox payload */ - ret = mbox_send_message(kproc->mbox, (void *)msg); - if (ret < 0) - dev_err(dev, "failed to send mailbox message, status = %d\n", - ret); -} - -static int k3_r5_split_reset(struct k3_r5_core *core) +static int k3_r5_split_reset(struct k3_rproc *kproc) { int ret; - ret = reset_control_assert(core->reset); + ret = reset_control_assert(kproc->reset); if (ret) { - dev_err(core->dev, "local-reset assert failed, ret = %d\n", + dev_err(kproc->dev, "local-reset assert failed, ret = %d\n", ret); return ret; } - ret = core->ti_sci->ops.dev_ops.put_device(core->ti_sci, - core->ti_sci_id); + ret = kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci, + kproc->ti_sci_id); if (ret) { - dev_err(core->dev, "module-reset assert failed, ret = %d\n", + dev_err(kproc->dev, "module-reset assert failed, ret = %d\n", ret); - if (reset_control_deassert(core->reset)) - dev_warn(core->dev, "local-reset deassert back failed\n"); + if (reset_control_deassert(kproc->reset)) + dev_warn(kproc->dev, "local-reset deassert back failed\n"); } return ret; } -static int k3_r5_split_release(struct k3_r5_core *core) +static int k3_r5_split_release(struct k3_rproc *kproc) { int ret; - ret = core->ti_sci->ops.dev_ops.get_device(core->ti_sci, - core->ti_sci_id); + ret = kproc->ti_sci->ops.dev_ops.get_device(kproc->ti_sci, + kproc->ti_sci_id); if (ret) { - dev_err(core->dev, "module-reset deassert failed, ret = %d\n", + dev_err(kproc->dev, "module-reset deassert failed, ret = %d\n", ret); return ret; } - ret = reset_control_deassert(core->reset); + ret = reset_control_deassert(kproc->reset); if (ret) { - dev_err(core->dev, "local-reset deassert failed, ret = %d\n", + dev_err(kproc->dev, "local-reset deassert failed, ret = %d\n", ret); - if (core->ti_sci->ops.dev_ops.put_device(core->ti_sci, - core->ti_sci_id)) - dev_warn(core->dev, "module-reset assert back failed\n"); + if (kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci, + kproc->ti_sci_id)) + dev_warn(kproc->dev, "module-reset assert 
back failed\n"); } return ret; @@ -294,11 +179,12 @@ static int k3_r5_split_release(struct k3_r5_core *core) static int k3_r5_lockstep_reset(struct k3_r5_cluster *cluster) { struct k3_r5_core *core; + struct k3_rproc *kproc; int ret; /* assert local reset on all applicable cores */ list_for_each_entry(core, &cluster->cores, elem) { - ret = reset_control_assert(core->reset); + ret = reset_control_assert(core->kproc->reset); if (ret) { dev_err(core->dev, "local-reset assert failed, ret = %d\n", ret); @@ -309,8 +195,9 @@ static int k3_r5_lockstep_reset(struct k3_r5_cluster *cluster) /* disable PSC modules on all applicable cores */ list_for_each_entry(core, &cluster->cores, elem) { - ret = core->ti_sci->ops.dev_ops.put_device(core->ti_sci, - core->ti_sci_id); + kproc = core->kproc; + ret = kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci, + kproc->ti_sci_id); if (ret) { dev_err(core->dev, "module-reset assert failed, ret = %d\n", ret); @@ -322,14 +209,15 @@ static int k3_r5_lockstep_reset(struct k3_r5_cluster *cluster) unroll_module_reset: list_for_each_entry_continue_reverse(core, &cluster->cores, elem) { - if (core->ti_sci->ops.dev_ops.put_device(core->ti_sci, - core->ti_sci_id)) + kproc = core->kproc; + if (kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci, + kproc->ti_sci_id)) dev_warn(core->dev, "module-reset assert back failed\n"); } core = list_last_entry(&cluster->cores, struct k3_r5_core, elem); unroll_local_reset: list_for_each_entry_from_reverse(core, &cluster->cores, elem) { - if (reset_control_deassert(core->reset)) + if (reset_control_deassert(core->kproc->reset)) dev_warn(core->dev, "local-reset deassert back failed\n"); } @@ -339,12 +227,14 @@ unroll_local_reset: static int k3_r5_lockstep_release(struct k3_r5_cluster *cluster) { struct k3_r5_core *core; + struct k3_rproc *kproc; int ret; /* enable PSC modules on all applicable cores */ list_for_each_entry_reverse(core, &cluster->cores, elem) { - ret = core->ti_sci->ops.dev_ops.get_device(core->ti_sci, - core->ti_sci_id); + kproc = core->kproc; + ret = kproc->ti_sci->ops.dev_ops.get_device(kproc->ti_sci, + kproc->ti_sci_id); if (ret) { dev_err(core->dev, "module-reset deassert failed, ret = %d\n", ret); @@ -355,7 +245,7 @@ static int k3_r5_lockstep_release(struct k3_r5_cluster *cluster) /* deassert local reset on all applicable cores */ list_for_each_entry_reverse(core, &cluster->cores, elem) { - ret = reset_control_deassert(core->reset); + ret = reset_control_deassert(core->kproc->reset); if (ret) { dev_err(core->dev, "module-reset deassert failed, ret = %d\n", ret); @@ -367,67 +257,33 @@ static int k3_r5_lockstep_release(struct k3_r5_cluster *cluster) unroll_local_reset: list_for_each_entry_continue(core, &cluster->cores, elem) { - if (reset_control_assert(core->reset)) + if (reset_control_assert(core->kproc->reset)) dev_warn(core->dev, "local-reset assert back failed\n"); } core = list_first_entry(&cluster->cores, struct k3_r5_core, elem); unroll_module_reset: list_for_each_entry_from(core, &cluster->cores, elem) { - if (core->ti_sci->ops.dev_ops.put_device(core->ti_sci, - core->ti_sci_id)) + kproc = core->kproc; + if (kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci, + kproc->ti_sci_id)) dev_warn(core->dev, "module-reset assert back failed\n"); } return ret; } -static inline int k3_r5_core_halt(struct k3_r5_core *core) +static inline int k3_r5_core_halt(struct k3_rproc *kproc) { - return ti_sci_proc_set_control(core->tsp, + return ti_sci_proc_set_control(kproc->tsp, PROC_BOOT_CTRL_FLAG_R5_CORE_HALT, 0); } -static 
inline int k3_r5_core_run(struct k3_r5_core *core) +static inline int k3_r5_core_run(struct k3_rproc *kproc) { - return ti_sci_proc_set_control(core->tsp, + return ti_sci_proc_set_control(kproc->tsp, 0, PROC_BOOT_CTRL_FLAG_R5_CORE_HALT); } -static int k3_r5_rproc_request_mbox(struct rproc *rproc) -{ - struct k3_r5_rproc *kproc = rproc->priv; - struct mbox_client *client = &kproc->client; - struct device *dev = kproc->dev; - int ret; - - client->dev = dev; - client->tx_done = NULL; - client->rx_callback = k3_r5_rproc_mbox_callback; - client->tx_block = false; - client->knows_txdone = false; - - kproc->mbox = mbox_request_channel(client, 0); - if (IS_ERR(kproc->mbox)) - return dev_err_probe(dev, PTR_ERR(kproc->mbox), - "mbox_request_channel failed\n"); - - /* - * Ping the remote processor, this is only for sanity-sake for now; - * there is no functional effect whatsoever. - * - * Note that the reply will _not_ arrive immediately: this message - * will wait in the mailbox fifo until the remote processor is booted. - */ - ret = mbox_send_message(kproc->mbox, (void *)RP_MBOX_ECHO_REQUEST); - if (ret < 0) { - dev_err(dev, "mbox_send_message failed: %d\n", ret); - mbox_free_channel(kproc->mbox); - return ret; - } - - return 0; -} - /* * The R5F cores have controls for both a reset and a halt/run. The code * execution from DDR requires the initial boot-strapping code to be run @@ -446,16 +302,39 @@ static int k3_r5_rproc_request_mbox(struct rproc *rproc) */ static int k3_r5_rproc_prepare(struct rproc *rproc) { - struct k3_r5_rproc *kproc = rproc->priv; - struct k3_r5_cluster *cluster = kproc->cluster; - struct k3_r5_core *core = kproc->core; + struct k3_rproc *kproc = rproc->priv; + struct k3_r5_core *core = kproc->priv, *core0, *core1; + struct k3_r5_cluster *cluster = core->cluster; struct device *dev = kproc->dev; u32 ctrl = 0, cfg = 0, stat = 0; u64 boot_vec = 0; bool mem_init_dis; int ret; - ret = ti_sci_proc_get_status(core->tsp, &boot_vec, &cfg, &ctrl, &stat); + /* + * R5 cores require to be powered on sequentially, core0 should be in + * higher power state than core1 in a cluster. So, wait for core0 to + * power up before proceeding to core1 and put timeout of 2sec. This + * waiting mechanism is necessary because rproc_auto_boot_callback() for + * core1 can be called before core0 due to thread execution order. + * + * By placing the wait mechanism here in .prepare() ops, this condition + * is enforced for rproc boot requests from sysfs as well. + */ + core0 = list_first_entry(&cluster->cores, struct k3_r5_core, elem); + core1 = list_last_entry(&cluster->cores, struct k3_r5_core, elem); + if (cluster->mode == CLUSTER_MODE_SPLIT && core == core1 && + !core0->released_from_reset) { + ret = wait_event_interruptible_timeout(cluster->core_transition, + core0->released_from_reset, + msecs_to_jiffies(2000)); + if (ret <= 0) { + dev_err(dev, "can not power up core1 before core0"); + return -EPERM; + } + } + + ret = ti_sci_proc_get_status(kproc->tsp, &boot_vec, &cfg, &ctrl, &stat); if (ret < 0) return ret; mem_init_dis = !!(cfg & PROC_BOOT_CFG_FLAG_R5_MEM_INIT_DIS); @@ -463,7 +342,7 @@ static int k3_r5_rproc_prepare(struct rproc *rproc) /* Re-use LockStep-mode reset logic for Single-CPU mode */ ret = (cluster->mode == CLUSTER_MODE_LOCKSTEP || cluster->mode == CLUSTER_MODE_SINGLECPU) ? 
- k3_r5_lockstep_release(cluster) : k3_r5_split_release(core); + k3_r5_lockstep_release(cluster) : k3_r5_split_release(kproc); if (ret) { dev_err(dev, "unable to enable cores for TCM loading, ret = %d\n", ret); @@ -471,6 +350,14 @@ static int k3_r5_rproc_prepare(struct rproc *rproc) } /* + * Notify all threads in the wait queue when core0 state has changed so + * that threads waiting for this condition can be executed. + */ + core->released_from_reset = true; + if (core == core0) + wake_up_interruptible(&cluster->core_transition); + + /* * Newer IP revisions like on J7200 SoCs support h/w auto-initialization * of TCMs, so there is no need to perform the s/w memzero. This bit is * configurable through System Firmware, the default value does perform @@ -487,10 +374,10 @@ static int k3_r5_rproc_prepare(struct rproc *rproc) * can be effective on all TCM addresses. */ dev_dbg(dev, "zeroing out ATCM memory\n"); - memset_io(core->mem[0].cpu_addr, 0x00, core->mem[0].size); + memset_io(kproc->mem[0].cpu_addr, 0x00, kproc->mem[0].size); dev_dbg(dev, "zeroing out BTCM memory\n"); - memset_io(core->mem[1].cpu_addr, 0x00, core->mem[1].size); + memset_io(kproc->mem[1].cpu_addr, 0x00, kproc->mem[1].size); return 0; } @@ -513,19 +400,47 @@ static int k3_r5_rproc_prepare(struct rproc *rproc) */ static int k3_r5_rproc_unprepare(struct rproc *rproc) { - struct k3_r5_rproc *kproc = rproc->priv; - struct k3_r5_cluster *cluster = kproc->cluster; - struct k3_r5_core *core = kproc->core; + struct k3_rproc *kproc = rproc->priv; + struct k3_r5_core *core = kproc->priv, *core0, *core1; + struct k3_r5_cluster *cluster = core->cluster; struct device *dev = kproc->dev; int ret; + /* + * Ensure power-down of cores is sequential in split mode. Core1 must + * power down before Core0 to maintain the expected state. By placing + * the wait mechanism here in .unprepare() ops, this condition is + * enforced for rproc stop or shutdown requests from sysfs and device + * removal as well. + */ + core0 = list_first_entry(&cluster->cores, struct k3_r5_core, elem); + core1 = list_last_entry(&cluster->cores, struct k3_r5_core, elem); + if (cluster->mode == CLUSTER_MODE_SPLIT && core == core0 && + core1->released_from_reset) { + ret = wait_event_interruptible_timeout(cluster->core_transition, + !core1->released_from_reset, + msecs_to_jiffies(2000)); + if (ret <= 0) { + dev_err(dev, "can not power down core0 before core1"); + return -EPERM; + } + } + /* Re-use LockStep-mode reset logic for Single-CPU mode */ ret = (cluster->mode == CLUSTER_MODE_LOCKSTEP || cluster->mode == CLUSTER_MODE_SINGLECPU) ? - k3_r5_lockstep_reset(cluster) : k3_r5_split_reset(core); + k3_r5_lockstep_reset(cluster) : k3_r5_split_reset(kproc); if (ret) dev_err(dev, "unable to disable cores, ret = %d\n", ret); + /* + * Notify all threads in the wait queue when core1 state has changed so + * that threads waiting for this condition can be executed. 
+ */ + core->released_from_reset = false; + if (core == core1) + wake_up_interruptible(&cluster->core_transition); + return ret; } @@ -548,10 +463,10 @@ static int k3_r5_rproc_unprepare(struct rproc *rproc) */ static int k3_r5_rproc_start(struct rproc *rproc) { - struct k3_r5_rproc *kproc = rproc->priv; - struct k3_r5_cluster *cluster = kproc->cluster; + struct k3_rproc *kproc = rproc->priv; + struct k3_r5_core *core = kproc->priv; + struct k3_r5_cluster *cluster = core->cluster; struct device *dev = kproc->dev; - struct k3_r5_core *core0, *core; u32 boot_addr; int ret; @@ -560,41 +475,28 @@ static int k3_r5_rproc_start(struct rproc *rproc) dev_dbg(dev, "booting R5F core using boot addr = 0x%x\n", boot_addr); /* boot vector need not be programmed for Core1 in LockStep mode */ - core = kproc->core; - ret = ti_sci_proc_set_config(core->tsp, boot_addr, 0, 0); + ret = ti_sci_proc_set_config(kproc->tsp, boot_addr, 0, 0); if (ret) return ret; /* unhalt/run all applicable cores */ if (cluster->mode == CLUSTER_MODE_LOCKSTEP) { list_for_each_entry_reverse(core, &cluster->cores, elem) { - ret = k3_r5_core_run(core); + ret = k3_r5_core_run(core->kproc); if (ret) goto unroll_core_run; } } else { - /* do not allow core 1 to start before core 0 */ - core0 = list_first_entry(&cluster->cores, struct k3_r5_core, - elem); - if (core != core0 && core0->rproc->state == RPROC_OFFLINE) { - dev_err(dev, "%s: can not start core 1 before core 0\n", - __func__); - return -EPERM; - } - - ret = k3_r5_core_run(core); + ret = k3_r5_core_run(core->kproc); if (ret) return ret; - - core->released_from_reset = true; - wake_up_interruptible(&cluster->core_transition); } return 0; unroll_core_run: list_for_each_entry_continue(core, &cluster->cores, elem) { - if (k3_r5_core_halt(core)) + if (k3_r5_core_halt(core->kproc)) dev_warn(core->dev, "core halt back failed\n"); } return ret; @@ -626,33 +528,22 @@ unroll_core_run: */ static int k3_r5_rproc_stop(struct rproc *rproc) { - struct k3_r5_rproc *kproc = rproc->priv; - struct k3_r5_cluster *cluster = kproc->cluster; - struct device *dev = kproc->dev; - struct k3_r5_core *core1, *core = kproc->core; + struct k3_rproc *kproc = rproc->priv; + struct k3_r5_core *core = kproc->priv; + struct k3_r5_cluster *cluster = core->cluster; int ret; /* halt all applicable cores */ if (cluster->mode == CLUSTER_MODE_LOCKSTEP) { list_for_each_entry(core, &cluster->cores, elem) { - ret = k3_r5_core_halt(core); + ret = k3_r5_core_halt(core->kproc); if (ret) { core = list_prev_entry(core, elem); goto unroll_core_halt; } } } else { - /* do not allow core 0 to stop before core 1 */ - core1 = list_last_entry(&cluster->cores, struct k3_r5_core, - elem); - if (core != core1 && core1->rproc->state != RPROC_OFFLINE) { - dev_err(dev, "%s: can not stop core 0 before core 1\n", - __func__); - ret = -EPERM; - goto out; - } - - ret = k3_r5_core_halt(core); + ret = k3_r5_core_halt(core->kproc); if (ret) goto out; } @@ -661,7 +552,7 @@ static int k3_r5_rproc_stop(struct rproc *rproc) unroll_core_halt: list_for_each_entry_from_reverse(core, &cluster->cores, elem) { - if (k3_r5_core_run(core)) + if (k3_r5_core_run(core->kproc)) dev_warn(core->dev, "core run back failed\n"); } out: @@ -669,58 +560,6 @@ out: } /* - * Attach to a running R5F remote processor (IPC-only mode) - * - * The R5F attach callback is a NOP. The remote processor is already booted, and - * all required resources have been acquired during probe routine, so there is - * no need to issue any TI-SCI commands to boot the R5F cores in IPC-only mode. 
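The core-ordering constraint added in the .prepare()/.unprepare() hunks above (in split mode, core0 must be released from reset before core1 powers up, and core1 must go back into reset before core0 powers down) is enforced with a plain wait-queue handshake on the released_from_reset flag. A condensed illustration of the pattern follows; the helper names are purely illustrative and do not exist in the patch.

/* Editorial sketch of the split-mode wait/wake handshake */
static int wait_for_core0_up(struct k3_r5_cluster *cluster,
			     struct k3_r5_core *core0)
{
	long ret;

	/* core1's .prepare() blocks until core0 has been released from reset */
	ret = wait_event_interruptible_timeout(cluster->core_transition,
					       core0->released_from_reset,
					       msecs_to_jiffies(2000));
	return ret <= 0 ? -EPERM : 0;
}

static void mark_core0_up(struct k3_r5_cluster *cluster,
			  struct k3_r5_core *core0)
{
	/* core0's .prepare() publishes its state and wakes any waiter */
	core0->released_from_reset = true;
	wake_up_interruptible(&cluster->core_transition);
}

The same handshake runs in reverse at power-down: core0's .unprepare() waits for core1's flag to clear, and core1's .unprepare() clears the flag and wakes the waiter.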
- * This callback is invoked only in IPC-only mode and exists because - * rproc_validate() checks for its existence. - */ -static int k3_r5_rproc_attach(struct rproc *rproc) { return 0; } - -/* - * Detach from a running R5F remote processor (IPC-only mode) - * - * The R5F detach callback is a NOP. The R5F cores are not stopped and will be - * left in booted state in IPC-only mode. This callback is invoked only in - * IPC-only mode and exists for sanity sake. - */ -static int k3_r5_rproc_detach(struct rproc *rproc) { return 0; } - -/* - * This function implements the .get_loaded_rsc_table() callback and is used - * to provide the resource table for the booted R5F in IPC-only mode. The K3 R5F - * firmwares follow a design-by-contract approach and are expected to have the - * resource table at the base of the DDR region reserved for firmware usage. - * This provides flexibility for the remote processor to be booted by different - * bootloaders that may or may not have the ability to publish the resource table - * address and size through a DT property. This callback is invoked only in - * IPC-only mode. - */ -static struct resource_table *k3_r5_get_loaded_rsc_table(struct rproc *rproc, - size_t *rsc_table_sz) -{ - struct k3_r5_rproc *kproc = rproc->priv; - struct device *dev = kproc->dev; - - if (!kproc->rmem[0].cpu_addr) { - dev_err(dev, "memory-region #1 does not exist, loaded rsc table can't be found"); - return ERR_PTR(-ENOMEM); - } - - /* - * NOTE: The resource table size is currently hard-coded to a maximum - * of 256 bytes. The most common resource table usage for K3 firmwares - * is to only have the vdev resource entry and an optional trace entry. - * The exact size could be computed based on resource table address, but - * the hard-coded value suffices to support the IPC-only mode. 
- */ - *rsc_table_sz = 256; - return (__force struct resource_table *)kproc->rmem[0].cpu_addr; -} - -/* * Internal Memory translation helper * * Custom function implementing the rproc .da_to_va ops to provide address @@ -730,10 +569,9 @@ static struct resource_table *k3_r5_get_loaded_rsc_table(struct rproc *rproc, */ static void *k3_r5_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem) { - struct k3_r5_rproc *kproc = rproc->priv; - struct k3_r5_core *core = kproc->core; + struct k3_rproc *kproc = rproc->priv; + struct k3_r5_core *core = kproc->priv; void __iomem *va = NULL; - phys_addr_t bus_addr; u32 dev_addr, offset; size_t size; int i; @@ -741,27 +579,6 @@ static void *k3_r5_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool if (len == 0) return NULL; - /* handle both R5 and SoC views of ATCM and BTCM */ - for (i = 0; i < core->num_mems; i++) { - bus_addr = core->mem[i].bus_addr; - dev_addr = core->mem[i].dev_addr; - size = core->mem[i].size; - - /* handle R5-view addresses of TCMs */ - if (da >= dev_addr && ((da + len) <= (dev_addr + size))) { - offset = da - dev_addr; - va = core->mem[i].cpu_addr + offset; - return (__force void *)va; - } - - /* handle SoC-view addresses of TCMs */ - if (da >= bus_addr && ((da + len) <= (bus_addr + size))) { - offset = da - bus_addr; - va = core->mem[i].cpu_addr + offset; - return (__force void *)va; - } - } - /* handle any SRAM regions using SoC-view addresses */ for (i = 0; i < core->num_sram; i++) { dev_addr = core->sram[i].dev_addr; @@ -774,19 +591,8 @@ static void *k3_r5_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool } } - /* handle static DDR reserved memory regions */ - for (i = 0; i < kproc->num_rmems; i++) { - dev_addr = kproc->rmem[i].dev_addr; - size = kproc->rmem[i].size; - - if (da >= dev_addr && ((da + len) <= (dev_addr + size))) { - offset = da - dev_addr; - va = kproc->rmem[i].cpu_addr + offset; - return (__force void *)va; - } - } - - return NULL; + /* handle both TCM and DDR memory regions */ + return k3_rproc_da_to_va(rproc, da, len, is_iomem); } static const struct rproc_ops k3_r5_rproc_ops = { @@ -794,7 +600,7 @@ static const struct rproc_ops k3_r5_rproc_ops = { .unprepare = k3_r5_rproc_unprepare, .start = k3_r5_rproc_start, .stop = k3_r5_rproc_stop, - .kick = k3_r5_rproc_kick, + .kick = k3_rproc_kick, .da_to_va = k3_r5_rproc_da_to_va, }; @@ -833,11 +639,11 @@ static const struct rproc_ops k3_r5_rproc_ops = { * both the cores with the same settings, before reconfiguing again for * LockStep mode. */ -static int k3_r5_rproc_configure(struct k3_r5_rproc *kproc) +static int k3_r5_rproc_configure(struct k3_rproc *kproc) { - struct k3_r5_cluster *cluster = kproc->cluster; + struct k3_r5_core *temp, *core0, *core = kproc->priv; + struct k3_r5_cluster *cluster = core->cluster; struct device *dev = kproc->dev; - struct k3_r5_core *core0, *core, *temp; u32 ctrl = 0, cfg = 0, stat = 0; u32 set_cfg = 0, clr_cfg = 0; u64 boot_vec = 0; @@ -851,10 +657,10 @@ static int k3_r5_rproc_configure(struct k3_r5_rproc *kproc) cluster->mode == CLUSTER_MODE_SINGLECORE) { core = core0; } else { - core = kproc->core; + core = kproc->priv; } - ret = ti_sci_proc_get_status(core->tsp, &boot_vec, &cfg, &ctrl, + ret = ti_sci_proc_get_status(core->kproc->tsp, &boot_vec, &cfg, &ctrl, &stat); if (ret < 0) return ret; @@ -924,7 +730,7 @@ static int k3_r5_rproc_configure(struct k3_r5_rproc *kproc) * and TEINIT config is only allowed with Core0. 
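
The address translation performed by the removed loops above is a plain range check per region; a self-contained sketch (the struct below is illustrative, not the driver's):

#include <stddef.h>
#include <stdint.h>

struct mem_region {
	uint64_t dev_addr;	/* device-view (or SoC-view) base address */
	size_t size;
	void *cpu_addr;		/* kernel mapping of the region */
};

/* Return the CPU address backing [da, da + len), or NULL if the range does
 * not fall entirely inside this region. */
static void *region_da_to_va(const struct mem_region *r, uint64_t da,
			     size_t len)
{
	if (da >= r->dev_addr && da + len <= r->dev_addr + r->size)
		return (char *)r->cpu_addr + (da - r->dev_addr);
	return NULL;
}
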
*/ list_for_each_entry(temp, &cluster->cores, elem) { - ret = k3_r5_core_halt(temp); + ret = k3_r5_core_halt(temp->kproc); if (ret) goto out; @@ -932,7 +738,7 @@ static int k3_r5_rproc_configure(struct k3_r5_rproc *kproc) clr_cfg &= ~PROC_BOOT_CFG_FLAG_R5_LOCKSTEP; clr_cfg &= ~PROC_BOOT_CFG_FLAG_R5_TEINIT; } - ret = ti_sci_proc_set_config(temp->tsp, boot_vec, + ret = ti_sci_proc_set_config(temp->kproc->tsp, boot_vec, set_cfg, clr_cfg); if (ret) goto out; @@ -940,14 +746,14 @@ static int k3_r5_rproc_configure(struct k3_r5_rproc *kproc) set_cfg = PROC_BOOT_CFG_FLAG_R5_LOCKSTEP; clr_cfg = 0; - ret = ti_sci_proc_set_config(core->tsp, boot_vec, + ret = ti_sci_proc_set_config(core->kproc->tsp, boot_vec, set_cfg, clr_cfg); } else { - ret = k3_r5_core_halt(core); + ret = k3_r5_core_halt(core->kproc); if (ret) goto out; - ret = ti_sci_proc_set_config(core->tsp, boot_vec, + ret = ti_sci_proc_set_config(core->kproc->tsp, boot_vec, set_cfg, clr_cfg); } @@ -955,93 +761,6 @@ out: return ret; } -static void k3_r5_mem_release(void *data) -{ - struct device *dev = data; - - of_reserved_mem_device_release(dev); -} - -static int k3_r5_reserved_mem_init(struct k3_r5_rproc *kproc) -{ - struct device *dev = kproc->dev; - struct device_node *np = dev_of_node(dev); - struct device_node *rmem_np; - struct reserved_mem *rmem; - int num_rmems; - int ret, i; - - num_rmems = of_property_count_elems_of_size(np, "memory-region", - sizeof(phandle)); - if (num_rmems <= 0) { - dev_err(dev, "device does not have reserved memory regions, ret = %d\n", - num_rmems); - return -EINVAL; - } - if (num_rmems < 2) { - dev_err(dev, "device needs at least two memory regions to be defined, num = %d\n", - num_rmems); - return -EINVAL; - } - - /* use reserved memory region 0 for vring DMA allocations */ - ret = of_reserved_mem_device_init_by_idx(dev, np, 0); - if (ret) { - dev_err(dev, "device cannot initialize DMA pool, ret = %d\n", - ret); - return ret; - } - - ret = devm_add_action_or_reset(dev, k3_r5_mem_release, dev); - if (ret) - return ret; - - num_rmems--; - kproc->rmem = devm_kcalloc(dev, num_rmems, sizeof(*kproc->rmem), GFP_KERNEL); - if (!kproc->rmem) - return -ENOMEM; - - /* use remaining reserved memory regions for static carveouts */ - for (i = 0; i < num_rmems; i++) { - rmem_np = of_parse_phandle(np, "memory-region", i + 1); - if (!rmem_np) - return -EINVAL; - - rmem = of_reserved_mem_lookup(rmem_np); - of_node_put(rmem_np); - if (!rmem) - return -EINVAL; - - kproc->rmem[i].bus_addr = rmem->base; - /* - * R5Fs do not have an MMU, but have a Region Address Translator - * (RAT) module that provides a fixed entry translation between - * the 32-bit processor addresses to 64-bit bus addresses. The - * RAT is programmable only by the R5F cores. Support for RAT - * is currently not supported, so 64-bit address regions are not - * supported. 
The absence of MMUs implies that the R5F device - * addresses/supported memory regions are restricted to 32-bit - * bus addresses, and are identical - */ - kproc->rmem[i].dev_addr = (u32)rmem->base; - kproc->rmem[i].size = rmem->size; - kproc->rmem[i].cpu_addr = devm_ioremap_wc(dev, rmem->base, rmem->size); - if (!kproc->rmem[i].cpu_addr) { - dev_err(dev, "failed to map reserved memory#%d at %pa of size %pa\n", - i + 1, &rmem->base, &rmem->size); - return -ENOMEM; - } - - dev_dbg(dev, "reserved memory%d: bus addr %pa size 0x%zx va %pK da 0x%x\n", - i + 1, &kproc->rmem[i].bus_addr, - kproc->rmem[i].size, kproc->rmem[i].cpu_addr, - kproc->rmem[i].dev_addr); - } - kproc->num_rmems = num_rmems; - - return 0; -} - /* * Each R5F core within a typical R5FSS instance has a total of 64 KB of TCMs, * split equally into two 32 KB banks between ATCM and BTCM. The TCMs from both @@ -1055,12 +774,11 @@ static int k3_r5_reserved_mem_init(struct k3_r5_rproc *kproc) * supported SoCs. The Core0 TCM sizes therefore have to be adjusted to only * half the original size in Split mode. */ -static void k3_r5_adjust_tcm_sizes(struct k3_r5_rproc *kproc) +static void k3_r5_adjust_tcm_sizes(struct k3_rproc *kproc) { - struct k3_r5_cluster *cluster = kproc->cluster; - struct k3_r5_core *core = kproc->core; + struct k3_r5_core *core0, *core = kproc->priv; + struct k3_r5_cluster *cluster = core->cluster; struct device *cdev = core->dev; - struct k3_r5_core *core0; if (cluster->mode == CLUSTER_MODE_LOCKSTEP || cluster->mode == CLUSTER_MODE_SINGLECPU || @@ -1070,14 +788,14 @@ static void k3_r5_adjust_tcm_sizes(struct k3_r5_rproc *kproc) core0 = list_first_entry(&cluster->cores, struct k3_r5_core, elem); if (core == core0) { - WARN_ON(core->mem[0].size != SZ_64K); - WARN_ON(core->mem[1].size != SZ_64K); + WARN_ON(kproc->mem[0].size != SZ_64K); + WARN_ON(kproc->mem[1].size != SZ_64K); - core->mem[0].size /= 2; - core->mem[1].size /= 2; + kproc->mem[0].size /= 2; + kproc->mem[1].size /= 2; dev_dbg(cdev, "adjusted TCM sizes, ATCM = 0x%zx BTCM = 0x%zx\n", - core->mem[0].size, core->mem[1].size); + kproc->mem[0].size, kproc->mem[1].size); } } @@ -1094,24 +812,23 @@ static void k3_r5_adjust_tcm_sizes(struct k3_r5_rproc *kproc) * actual values configured by bootloader. The driver internal device memory * addresses for TCMs are also updated. 
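
Per the reserved-memory comment above, without an MMU (and with RAT support absent) a DDR carveout is only usable when its bus address range fits in 32 bits, since the R5F device address is the bus address itself. A trivial sketch of that constraint:

#include <stdbool.h>
#include <stdint.h>

/* A carveout has an identical 32-bit R5F view only if it lies entirely
 * below 4 GiB. */
static bool carveout_addressable(uint64_t bus_addr, uint64_t size)
{
	return bus_addr + size <= (1ULL << 32);
}
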
*/ -static int k3_r5_rproc_configure_mode(struct k3_r5_rproc *kproc) +static int k3_r5_rproc_configure_mode(struct k3_rproc *kproc) { - struct k3_r5_cluster *cluster = kproc->cluster; - struct k3_r5_core *core = kproc->core; + struct k3_r5_core *core0, *core = kproc->priv; + struct k3_r5_cluster *cluster = core->cluster; struct device *cdev = core->dev; bool r_state = false, c_state = false, lockstep_en = false, single_cpu = false; u32 ctrl = 0, cfg = 0, stat = 0, halted = 0; u64 boot_vec = 0; u32 atcm_enable, btcm_enable, loczrama; - struct k3_r5_core *core0; enum cluster_mode mode = cluster->mode; int reset_ctrl_status; int ret; core0 = list_first_entry(&cluster->cores, struct k3_r5_core, elem); - ret = core->ti_sci->ops.dev_ops.is_on(core->ti_sci, core->ti_sci_id, - &r_state, &c_state); + ret = kproc->ti_sci->ops.dev_ops.is_on(kproc->ti_sci, kproc->ti_sci_id, + &r_state, &c_state); if (ret) { dev_err(cdev, "failed to get initial state, mode cannot be determined, ret = %d\n", ret); @@ -1122,7 +839,7 @@ static int k3_r5_rproc_configure_mode(struct k3_r5_rproc *kproc) r_state, c_state); } - reset_ctrl_status = reset_control_status(core->reset); + reset_ctrl_status = reset_control_status(kproc->reset); if (reset_ctrl_status < 0) { dev_err(cdev, "failed to get initial local reset status, ret = %d\n", reset_ctrl_status); @@ -1135,7 +852,7 @@ static int k3_r5_rproc_configure_mode(struct k3_r5_rproc *kproc) */ core->released_from_reset = c_state; - ret = ti_sci_proc_get_status(core->tsp, &boot_vec, &cfg, &ctrl, + ret = ti_sci_proc_get_status(kproc->tsp, &boot_vec, &cfg, &ctrl, &stat); if (ret < 0) { dev_err(cdev, "failed to get initial processor status, ret = %d\n", @@ -1170,10 +887,10 @@ static int k3_r5_rproc_configure_mode(struct k3_r5_rproc *kproc) kproc->rproc->ops->unprepare = NULL; kproc->rproc->ops->start = NULL; kproc->rproc->ops->stop = NULL; - kproc->rproc->ops->attach = k3_r5_rproc_attach; - kproc->rproc->ops->detach = k3_r5_rproc_detach; + kproc->rproc->ops->attach = k3_rproc_attach; + kproc->rproc->ops->detach = k3_rproc_detach; kproc->rproc->ops->get_loaded_rsc_table = - k3_r5_get_loaded_rsc_table; + k3_get_loaded_rsc_table; } else if (!c_state) { dev_info(cdev, "configured R5F for remoteproc mode\n"); ret = 0; @@ -1192,19 +909,121 @@ static int k3_r5_rproc_configure_mode(struct k3_r5_rproc *kproc) core->atcm_enable = atcm_enable; core->btcm_enable = btcm_enable; core->loczrama = loczrama; - core->mem[0].dev_addr = loczrama ? 0 : K3_R5_TCM_DEV_ADDR; - core->mem[1].dev_addr = loczrama ? K3_R5_TCM_DEV_ADDR : 0; + kproc->mem[0].dev_addr = loczrama ? 0 : K3_R5_TCM_DEV_ADDR; + kproc->mem[1].dev_addr = loczrama ? K3_R5_TCM_DEV_ADDR : 0; } return ret; } +static int k3_r5_core_of_get_internal_memories(struct platform_device *pdev, + struct k3_rproc *kproc) +{ + const struct k3_rproc_dev_data *data = kproc->data; + struct device *dev = &pdev->dev; + struct k3_r5_core *core = kproc->priv; + int num_mems; + int i, ret; + + num_mems = data->num_mems; + kproc->mem = devm_kcalloc(kproc->dev, num_mems, sizeof(*kproc->mem), + GFP_KERNEL); + if (!kproc->mem) + return -ENOMEM; + + ret = k3_rproc_of_get_memories(pdev, kproc); + if (ret) + return ret; + + for (i = 0; i < num_mems; i++) { + /* + * TODO: + * The R5F cores can place ATCM & BTCM anywhere in its address + * based on the corresponding Region Registers in the System + * Control coprocessor. 
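
The loczrama handling above reduces to choosing which TCM bank the core sees at device address 0, with the other bank at the fixed alternate address; the struct and helper below are an illustrative condensation, not driver code.

#include <stdbool.h>
#include <stdint.h>

#define K3_R5_TCM_DEV_ADDR	0x41010000U	/* per the TODO comment in this file */

struct tcm_map {
	uint32_t atcm_da;
	uint32_t btcm_da;
};

/* loczrama set: ATCM at device address 0, BTCM at the alternate address;
 * loczrama clear: the two banks swap places. */
static struct tcm_map r5_tcm_device_addresses(bool loczrama)
{
	struct tcm_map map = {
		.atcm_da = loczrama ? 0 : K3_R5_TCM_DEV_ADDR,
		.btcm_da = loczrama ? K3_R5_TCM_DEV_ADDR : 0,
	};

	return map;
}
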
For now, place ATCM and BTCM at + * addresses 0 and 0x41010000 (same as the bus address on AM65x + * SoCs) based on loczrama setting overriding default assignment + * done by k3_rproc_of_get_memories(). + */ + if (!strcmp(data->mems[i].name, "atcm")) { + kproc->mem[i].dev_addr = core->loczrama ? + 0 : K3_R5_TCM_DEV_ADDR; + } else { + kproc->mem[i].dev_addr = core->loczrama ? + K3_R5_TCM_DEV_ADDR : 0; + } + + dev_dbg(dev, "Updating bus addr %pa of memory %5s\n", + &kproc->mem[i].bus_addr, data->mems[i].name); + } + + return 0; +} + +static int k3_r5_core_of_get_sram_memories(struct platform_device *pdev, + struct k3_r5_core *core) +{ + struct device_node *np = pdev->dev.of_node; + struct device *dev = &pdev->dev; + struct device_node *sram_np; + struct resource res; + int num_sram; + int i, ret; + + num_sram = of_property_count_elems_of_size(np, "sram", sizeof(phandle)); + if (num_sram <= 0) { + dev_dbg(dev, "device does not use reserved on-chip memories, num_sram = %d\n", + num_sram); + return 0; + } + + core->sram = devm_kcalloc(dev, num_sram, sizeof(*core->sram), GFP_KERNEL); + if (!core->sram) + return -ENOMEM; + + for (i = 0; i < num_sram; i++) { + sram_np = of_parse_phandle(np, "sram", i); + if (!sram_np) + return -EINVAL; + + if (!of_device_is_available(sram_np)) { + of_node_put(sram_np); + return -EINVAL; + } + + ret = of_address_to_resource(sram_np, 0, &res); + of_node_put(sram_np); + if (ret) + return -EINVAL; + + core->sram[i].bus_addr = res.start; + core->sram[i].dev_addr = res.start; + core->sram[i].size = resource_size(&res); + core->sram[i].cpu_addr = devm_ioremap_wc(dev, res.start, + resource_size(&res)); + if (!core->sram[i].cpu_addr) { + dev_err(dev, "failed to parse and map sram%d memory at %pad\n", + i, &res.start); + return -ENOMEM; + } + + dev_dbg(dev, "memory sram%d: bus addr %pa size 0x%zx va %pK da 0x%x\n", + i, &core->sram[i].bus_addr, + core->sram[i].size, core->sram[i].cpu_addr, + core->sram[i].dev_addr); + } + core->num_sram = num_sram; + + return 0; +} + static int k3_r5_cluster_rproc_init(struct platform_device *pdev) { struct k3_r5_cluster *cluster = platform_get_drvdata(pdev); struct device *dev = &pdev->dev; - struct k3_r5_rproc *kproc; + struct k3_rproc *kproc; struct k3_r5_core *core, *core1; + struct device_node *np; struct device *cdev; const char *fw_name; struct rproc *rproc; @@ -1213,6 +1032,7 @@ static int k3_r5_cluster_rproc_init(struct platform_device *pdev) core1 = list_last_entry(&cluster->cores, struct k3_r5_core, elem); list_for_each_entry(core, &cluster->cores, elem) { cdev = core->dev; + np = dev_of_node(cdev); ret = rproc_of_parse_firmware(cdev, 0, &fw_name); if (ret) { dev_err(dev, "failed to parse firmware-name property, ret = %d\n", @@ -1233,13 +1053,66 @@ static int k3_r5_cluster_rproc_init(struct platform_device *pdev) rproc->recovery_disabled = true; kproc = rproc->priv; - kproc->cluster = cluster; - kproc->core = core; + kproc->priv = core; kproc->dev = cdev; kproc->rproc = rproc; - core->rproc = rproc; + kproc->data = cluster->soc_data->core_data; + core->kproc = kproc; + + kproc->ti_sci = devm_ti_sci_get_by_phandle(cdev, "ti,sci"); + if (IS_ERR(kproc->ti_sci)) { + ret = dev_err_probe(cdev, PTR_ERR(kproc->ti_sci), + "failed to get ti-sci handle\n"); + kproc->ti_sci = NULL; + goto out; + } + + ret = of_property_read_u32(np, "ti,sci-dev-id", &kproc->ti_sci_id); + if (ret) { + dev_err(cdev, "missing 'ti,sci-dev-id' property\n"); + goto out; + } - ret = k3_r5_rproc_request_mbox(rproc); + kproc->reset = 
devm_reset_control_get_exclusive(cdev, NULL); + if (IS_ERR_OR_NULL(kproc->reset)) { + ret = PTR_ERR_OR_ZERO(kproc->reset); + if (!ret) + ret = -ENODEV; + dev_err_probe(cdev, ret, "failed to get reset handle\n"); + goto out; + } + + kproc->tsp = ti_sci_proc_of_get_tsp(cdev, kproc->ti_sci); + if (IS_ERR(kproc->tsp)) { + ret = dev_err_probe(cdev, PTR_ERR(kproc->tsp), + "failed to construct ti-sci proc control\n"); + goto out; + } + + ret = k3_r5_core_of_get_internal_memories(to_platform_device(cdev), kproc); + if (ret) { + dev_err(cdev, "failed to get internal memories, ret = %d\n", + ret); + goto out; + } + + ret = ti_sci_proc_request(kproc->tsp); + if (ret < 0) { + dev_err(cdev, "ti_sci_proc_request failed, ret = %d\n", ret); + goto out; + } + + ret = devm_add_action_or_reset(cdev, k3_release_tsp, kproc->tsp); + if (ret) + goto out; + } + + list_for_each_entry(core, &cluster->cores, elem) { + cdev = core->dev; + kproc = core->kproc; + rproc = kproc->rproc; + + ret = k3_rproc_request_mbox(rproc); if (ret) return ret; @@ -1251,7 +1124,7 @@ static int k3_r5_cluster_rproc_init(struct platform_device *pdev) ret = k3_r5_rproc_configure(kproc); if (ret) { - dev_err(dev, "initial configure failed, ret = %d\n", + dev_err(cdev, "initial configure failed, ret = %d\n", ret); goto out; } @@ -1259,16 +1132,16 @@ static int k3_r5_cluster_rproc_init(struct platform_device *pdev) init_rmem: k3_r5_adjust_tcm_sizes(kproc); - ret = k3_r5_reserved_mem_init(kproc); + ret = k3_reserved_mem_init(kproc); if (ret) { - dev_err(dev, "reserved memory init failed, ret = %d\n", + dev_err(cdev, "reserved memory init failed, ret = %d\n", ret); goto out; } - ret = devm_rproc_add(dev, rproc); + ret = devm_rproc_add(cdev, rproc); if (ret) { - dev_err_probe(dev, ret, "rproc_add failed\n"); + dev_err_probe(cdev, ret, "rproc_add failed\n"); goto out; } @@ -1279,26 +1152,6 @@ init_rmem: cluster->mode == CLUSTER_MODE_SINGLECPU || cluster->mode == CLUSTER_MODE_SINGLECORE) break; - - /* - * R5 cores require to be powered on sequentially, core0 - * should be in higher power state than core1 in a cluster - * So, wait for current core to power up before proceeding - * to next core and put timeout of 2sec for each core. - * - * This waiting mechanism is necessary because - * rproc_auto_boot_callback() for core1 can be called before - * core0 due to thread execution order. 
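
The sequencing removed above boils down to a wait/wake pair: core1's boot path waits up to two seconds for core0 to signal that it has been released from reset. A simplified sketch using the same kernel primitives; the real driver keys this off cluster->core_transition and core->released_from_reset.

#include <linux/wait.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

static DECLARE_WAIT_QUEUE_HEAD(core_transition);
static bool core0_released;

/* core1 boot path: do not proceed until core0 is out of reset */
static int wait_for_core0(void)
{
	long ret;

	ret = wait_event_interruptible_timeout(core_transition, core0_released,
					       msecs_to_jiffies(2000));
	return ret <= 0 ? -ETIMEDOUT : 0;
}

/* core0 boot path, once its reset has been deasserted */
static void signal_core0_released(void)
{
	core0_released = true;
	wake_up_interruptible(&core_transition);
}
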
- */ - ret = wait_event_interruptible_timeout(cluster->core_transition, - core->released_from_reset, - msecs_to_jiffies(2000)); - if (ret <= 0) { - dev_err(dev, - "Timed out waiting for %s core to power up!\n", - rproc->name); - goto out; - } } return 0; @@ -1317,8 +1170,8 @@ out: /* undo core0 upon any failures on core1 in split-mode */ if (cluster->mode == CLUSTER_MODE_SPLIT && core == core1) { core = list_prev_entry(core, elem); - rproc = core->rproc; - kproc = rproc->priv; + kproc = core->kproc; + rproc = kproc->rproc; goto err_split; } return ret; @@ -1327,7 +1180,7 @@ out: static void k3_r5_cluster_rproc_exit(void *data) { struct k3_r5_cluster *cluster = platform_get_drvdata(data); - struct k3_r5_rproc *kproc; + struct k3_rproc *kproc; struct k3_r5_core *core; struct rproc *rproc; int ret; @@ -1343,8 +1196,8 @@ static void k3_r5_cluster_rproc_exit(void *data) list_last_entry(&cluster->cores, struct k3_r5_core, elem); list_for_each_entry_from_reverse(core, &cluster->cores, elem) { - rproc = core->rproc; - kproc = rproc->priv; + kproc = core->kproc; + rproc = kproc->rproc; if (rproc->state == RPROC_ATTACHED) { ret = rproc_detach(rproc); @@ -1358,142 +1211,6 @@ static void k3_r5_cluster_rproc_exit(void *data) } } -static int k3_r5_core_of_get_internal_memories(struct platform_device *pdev, - struct k3_r5_core *core) -{ - static const char * const mem_names[] = {"atcm", "btcm"}; - struct device *dev = &pdev->dev; - struct resource *res; - int num_mems; - int i; - - num_mems = ARRAY_SIZE(mem_names); - core->mem = devm_kcalloc(dev, num_mems, sizeof(*core->mem), GFP_KERNEL); - if (!core->mem) - return -ENOMEM; - - for (i = 0; i < num_mems; i++) { - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, - mem_names[i]); - if (!res) { - dev_err(dev, "found no memory resource for %s\n", - mem_names[i]); - return -EINVAL; - } - if (!devm_request_mem_region(dev, res->start, - resource_size(res), - dev_name(dev))) { - dev_err(dev, "could not request %s region for resource\n", - mem_names[i]); - return -EBUSY; - } - - /* - * TCMs are designed in general to support RAM-like backing - * memories. So, map these as Normal Non-Cached memories. This - * also avoids/fixes any potential alignment faults due to - * unaligned data accesses when using memcpy() or memset() - * functions (normally seen with device type memory). - */ - core->mem[i].cpu_addr = devm_ioremap_wc(dev, res->start, - resource_size(res)); - if (!core->mem[i].cpu_addr) { - dev_err(dev, "failed to map %s memory\n", mem_names[i]); - return -ENOMEM; - } - core->mem[i].bus_addr = res->start; - - /* - * TODO: - * The R5F cores can place ATCM & BTCM anywhere in its address - * based on the corresponding Region Registers in the System - * Control coprocessor. For now, place ATCM and BTCM at - * addresses 0 and 0x41010000 (same as the bus address on AM65x - * SoCs) based on loczrama setting - */ - if (!strcmp(mem_names[i], "atcm")) { - core->mem[i].dev_addr = core->loczrama ? - 0 : K3_R5_TCM_DEV_ADDR; - } else { - core->mem[i].dev_addr = core->loczrama ? 
- K3_R5_TCM_DEV_ADDR : 0; - } - core->mem[i].size = resource_size(res); - - dev_dbg(dev, "memory %5s: bus addr %pa size 0x%zx va %pK da 0x%x\n", - mem_names[i], &core->mem[i].bus_addr, - core->mem[i].size, core->mem[i].cpu_addr, - core->mem[i].dev_addr); - } - core->num_mems = num_mems; - - return 0; -} - -static int k3_r5_core_of_get_sram_memories(struct platform_device *pdev, - struct k3_r5_core *core) -{ - struct device_node *np = pdev->dev.of_node; - struct device *dev = &pdev->dev; - struct device_node *sram_np; - struct resource res; - int num_sram; - int i, ret; - - num_sram = of_property_count_elems_of_size(np, "sram", sizeof(phandle)); - if (num_sram <= 0) { - dev_dbg(dev, "device does not use reserved on-chip memories, num_sram = %d\n", - num_sram); - return 0; - } - - core->sram = devm_kcalloc(dev, num_sram, sizeof(*core->sram), GFP_KERNEL); - if (!core->sram) - return -ENOMEM; - - for (i = 0; i < num_sram; i++) { - sram_np = of_parse_phandle(np, "sram", i); - if (!sram_np) - return -EINVAL; - - if (!of_device_is_available(sram_np)) { - of_node_put(sram_np); - return -EINVAL; - } - - ret = of_address_to_resource(sram_np, 0, &res); - of_node_put(sram_np); - if (ret) - return -EINVAL; - - core->sram[i].bus_addr = res.start; - core->sram[i].dev_addr = res.start; - core->sram[i].size = resource_size(&res); - core->sram[i].cpu_addr = devm_ioremap_wc(dev, res.start, - resource_size(&res)); - if (!core->sram[i].cpu_addr) { - dev_err(dev, "failed to parse and map sram%d memory at %pad\n", - i, &res.start); - return -ENOMEM; - } - - dev_dbg(dev, "memory sram%d: bus addr %pa size 0x%zx va %pK da 0x%x\n", - i, &core->sram[i].bus_addr, - core->sram[i].size, core->sram[i].cpu_addr, - core->sram[i].dev_addr); - } - core->num_sram = num_sram; - - return 0; -} - -static void k3_r5_release_tsp(void *data) -{ - struct ti_sci_proc *tsp = data; - - ti_sci_proc_release(tsp); -} - static int k3_r5_core_of_init(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -1539,58 +1256,12 @@ static int k3_r5_core_of_init(struct platform_device *pdev) goto err; } - core->ti_sci = devm_ti_sci_get_by_phandle(dev, "ti,sci"); - if (IS_ERR(core->ti_sci)) { - ret = dev_err_probe(dev, PTR_ERR(core->ti_sci), "failed to get ti-sci handle\n"); - core->ti_sci = NULL; - goto err; - } - - ret = of_property_read_u32(np, "ti,sci-dev-id", &core->ti_sci_id); - if (ret) { - dev_err(dev, "missing 'ti,sci-dev-id' property\n"); - goto err; - } - - core->reset = devm_reset_control_get_exclusive(dev, NULL); - if (IS_ERR_OR_NULL(core->reset)) { - ret = PTR_ERR_OR_ZERO(core->reset); - if (!ret) - ret = -ENODEV; - dev_err_probe(dev, ret, "failed to get reset handle\n"); - goto err; - } - - core->tsp = ti_sci_proc_of_get_tsp(dev, core->ti_sci); - if (IS_ERR(core->tsp)) { - ret = dev_err_probe(dev, PTR_ERR(core->tsp), - "failed to construct ti-sci proc control\n"); - goto err; - } - - ret = k3_r5_core_of_get_internal_memories(pdev, core); - if (ret) { - dev_err(dev, "failed to get internal memories, ret = %d\n", - ret); - goto err; - } - ret = k3_r5_core_of_get_sram_memories(pdev, core); if (ret) { dev_err(dev, "failed to get sram memories, ret = %d\n", ret); goto err; } - ret = ti_sci_proc_request(core->tsp); - if (ret < 0) { - dev_err(dev, "ti_sci_proc_request failed, ret = %d\n", ret); - goto err; - } - - ret = devm_add_action_or_reset(dev, k3_r5_release_tsp, core->tsp); - if (ret) - goto err; - platform_set_drvdata(pdev, core); devres_close_group(dev, k3_r5_core_of_init); @@ -1652,6 +1323,7 @@ static int 
k3_r5_cluster_of_init(struct platform_device *pdev) } core = platform_get_drvdata(cpdev); + core->cluster = cluster; put_device(&cpdev->dev); list_add_tail(&core->elem, &cluster->cores); } @@ -1749,11 +1421,24 @@ static int k3_r5_probe(struct platform_device *pdev) return 0; } +static const struct k3_rproc_mem_data r5_mems[] = { + { .name = "atcm", .dev_addr = 0x0 }, + { .name = "btcm", .dev_addr = K3_R5_TCM_DEV_ADDR }, +}; + +static const struct k3_rproc_dev_data r5_data = { + .mems = r5_mems, + .num_mems = ARRAY_SIZE(r5_mems), + .boot_align_addr = 0, + .uses_lreset = true, +}; + static const struct k3_r5_soc_data am65_j721e_soc_data = { .tcm_is_double = false, .tcm_ecc_autoinit = false, .single_cpu_mode = false, .is_single_core = false, + .core_data = &r5_data, }; static const struct k3_r5_soc_data j7200_j721s2_soc_data = { @@ -1761,6 +1446,7 @@ static const struct k3_r5_soc_data j7200_j721s2_soc_data = { .tcm_ecc_autoinit = true, .single_cpu_mode = false, .is_single_core = false, + .core_data = &r5_data, }; static const struct k3_r5_soc_data am64_soc_data = { @@ -1768,6 +1454,7 @@ static const struct k3_r5_soc_data am64_soc_data = { .tcm_ecc_autoinit = true, .single_cpu_mode = true, .is_single_core = false, + .core_data = &r5_data, }; static const struct k3_r5_soc_data am62_soc_data = { @@ -1775,6 +1462,7 @@ static const struct k3_r5_soc_data am62_soc_data = { .tcm_ecc_autoinit = true, .single_cpu_mode = false, .is_single_core = true, + .core_data = &r5_data, }; static const struct of_device_id k3_r5_of_match[] = { diff --git a/drivers/remoteproc/xlnx_r5_remoteproc.c b/drivers/remoteproc/xlnx_r5_remoteproc.c index 5aeedeaf3c41..1af89782e116 100644 --- a/drivers/remoteproc/xlnx_r5_remoteproc.c +++ b/drivers/remoteproc/xlnx_r5_remoteproc.c @@ -380,6 +380,18 @@ static int zynqmp_r5_rproc_start(struct rproc *rproc) dev_dbg(r5_core->dev, "RPU boot addr 0x%llx from %s.", rproc->bootaddr, bootmem == PM_RPU_BOOTMEM_HIVEC ? "OCM" : "TCM"); + /* Request node before starting RPU core if new version of API is supported */ + if (zynqmp_pm_feature(PM_REQUEST_NODE) > 1) { + ret = zynqmp_pm_request_node(r5_core->pm_domain_id, + ZYNQMP_PM_CAPABILITY_ACCESS, 0, + ZYNQMP_PM_REQUEST_ACK_BLOCKING); + if (ret < 0) { + dev_err(r5_core->dev, "failed to request 0x%x", + r5_core->pm_domain_id); + return ret; + } + } + ret = zynqmp_pm_request_wake(r5_core->pm_domain_id, 1, bootmem, ZYNQMP_PM_REQUEST_ACK_NO); if (ret) @@ -401,10 +413,30 @@ static int zynqmp_r5_rproc_stop(struct rproc *rproc) struct zynqmp_r5_core *r5_core = rproc->priv; int ret; + /* Use release node API to stop core if new version of API is supported */ + if (zynqmp_pm_feature(PM_RELEASE_NODE) > 1) { + ret = zynqmp_pm_release_node(r5_core->pm_domain_id); + if (ret) + dev_err(r5_core->dev, "failed to stop remoteproc RPU %d\n", ret); + return ret; + } + + /* + * Check expected version of EEMI call before calling it. This avoids + * any error or warning prints from firmware as it is expected that fw + * doesn't support it. 
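
The zynqmp_r5 start-path change above follows a query-then-call pattern: probe the EEMI API version first and only issue the newer call when the firmware implements it. Condensed into one illustrative helper (the EEMI calls themselves are the ones used in the diff):

#include <linux/firmware/xlnx-zynqmp.h>

static int start_rpu(u32 pm_domain_id, u32 bootmem)
{
	int ret;

	/* newer firmware wants the node requested before the core is woken */
	if (zynqmp_pm_feature(PM_REQUEST_NODE) > 1) {
		ret = zynqmp_pm_request_node(pm_domain_id,
					     ZYNQMP_PM_CAPABILITY_ACCESS, 0,
					     ZYNQMP_PM_REQUEST_ACK_BLOCKING);
		if (ret < 0)
			return ret;
	}

	return zynqmp_pm_request_wake(pm_domain_id, 1, bootmem,
				      ZYNQMP_PM_REQUEST_ACK_NO);
}
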
+ */ + if (zynqmp_pm_feature(PM_FORCE_POWERDOWN) != 1) { + dev_dbg(r5_core->dev, "EEMI interface %d ver 1 not supported\n", + PM_FORCE_POWERDOWN); + return -EOPNOTSUPP; + } + + /* maintain force pwr down for backward compatibility */ ret = zynqmp_pm_force_pwrdwn(r5_core->pm_domain_id, ZYNQMP_PM_REQUEST_ACK_BLOCKING); if (ret) - dev_err(r5_core->dev, "failed to stop remoteproc RPU %d\n", ret); + dev_err(r5_core->dev, "core force power down failed\n"); return ret; } diff --git a/drivers/rpmsg/qcom_smd.c b/drivers/rpmsg/qcom_smd.c index 40d386809d6b..87c944d4b4f3 100644 --- a/drivers/rpmsg/qcom_smd.c +++ b/drivers/rpmsg/qcom_smd.c @@ -746,7 +746,7 @@ static int __qcom_smd_send(struct qcom_smd_channel *channel, const void *data, __le32 hdr[5] = { cpu_to_le32(len), }; int tlen = sizeof(hdr) + len; unsigned long flags; - int ret; + int ret = 0; /* Word aligned channels only accept word size aligned data */ if (channel->info_word && len % 4) @@ -1369,7 +1369,8 @@ static int qcom_smd_parse_edge(struct device *dev, edge->mbox_chan = mbox_request_channel(&edge->mbox_client, 0); if (IS_ERR(edge->mbox_chan)) { if (PTR_ERR(edge->mbox_chan) != -ENODEV) { - ret = PTR_ERR(edge->mbox_chan); + ret = dev_err_probe(dev, PTR_ERR(edge->mbox_chan), + "failed to acquire IPC mailbox\n"); goto put_node; } @@ -1386,6 +1387,7 @@ static int qcom_smd_parse_edge(struct device *dev, of_node_put(syscon_np); if (IS_ERR(edge->ipc_regmap)) { ret = PTR_ERR(edge->ipc_regmap); + dev_err(dev, "failed to get regmap from syscon: %d\n", ret); goto put_node; } @@ -1501,10 +1503,8 @@ struct qcom_smd_edge *qcom_smd_register_edge(struct device *parent, } ret = qcom_smd_parse_edge(&edge->dev, node, edge); - if (ret) { - dev_err(&edge->dev, "failed to parse smd edge\n"); + if (ret) goto unregister_dev; - } ret = qcom_smd_create_chrdev(edge); if (ret) { diff --git a/drivers/rpmsg/rpmsg_core.c b/drivers/rpmsg/rpmsg_core.c index 207b64c0a2fe..6ee36adcbdba 100644 --- a/drivers/rpmsg/rpmsg_core.c +++ b/drivers/rpmsg/rpmsg_core.c @@ -194,38 +194,6 @@ int rpmsg_sendto(struct rpmsg_endpoint *ept, void *data, int len, u32 dst) EXPORT_SYMBOL(rpmsg_sendto); /** - * rpmsg_send_offchannel() - send a message using explicit src/dst addresses - * @ept: the rpmsg endpoint - * @src: source address - * @dst: destination address - * @data: payload of message - * @len: length of payload - * - * This function sends @data of length @len to the remote @dst address, - * and uses @src as the source address. - * The message will be sent to the remote processor which the @ept - * endpoint belongs to. - * In case there are no TX buffers available, the function will block until - * one becomes available, or a timeout of 15 seconds elapses. When the latter - * happens, -ERESTARTSYS is returned. - * - * Can only be called from process context (for now). - * - * Return: 0 on success and an appropriate error value on failure. 
- */ -int rpmsg_send_offchannel(struct rpmsg_endpoint *ept, u32 src, u32 dst, - void *data, int len) -{ - if (WARN_ON(!ept)) - return -EINVAL; - if (!ept->ops->send_offchannel) - return -ENXIO; - - return ept->ops->send_offchannel(ept, src, dst, data, len); -} -EXPORT_SYMBOL(rpmsg_send_offchannel); - -/** * rpmsg_trysend() - send a message across to the remote processor * @ept: the rpmsg endpoint * @data: payload of message @@ -302,37 +270,6 @@ __poll_t rpmsg_poll(struct rpmsg_endpoint *ept, struct file *filp, EXPORT_SYMBOL(rpmsg_poll); /** - * rpmsg_trysend_offchannel() - send a message using explicit src/dst addresses - * @ept: the rpmsg endpoint - * @src: source address - * @dst: destination address - * @data: payload of message - * @len: length of payload - * - * This function sends @data of length @len to the remote @dst address, - * and uses @src as the source address. - * The message will be sent to the remote processor which the @ept - * endpoint belongs to. - * In case there are no TX buffers available, the function will immediately - * return -ENOMEM without waiting until one becomes available. - * - * Can only be called from process context (for now). - * - * Return: 0 on success and an appropriate error value on failure. - */ -int rpmsg_trysend_offchannel(struct rpmsg_endpoint *ept, u32 src, u32 dst, - void *data, int len) -{ - if (WARN_ON(!ept)) - return -EINVAL; - if (!ept->ops->trysend_offchannel) - return -ENXIO; - - return ept->ops->trysend_offchannel(ept, src, dst, data, len); -} -EXPORT_SYMBOL(rpmsg_trysend_offchannel); - -/** * rpmsg_set_flow_control() - request remote to pause/resume transmission * @ept: the rpmsg endpoint * @pause: pause transmission diff --git a/drivers/rpmsg/rpmsg_internal.h b/drivers/rpmsg/rpmsg_internal.h index 42c7007be1b5..397e4926bd02 100644 --- a/drivers/rpmsg/rpmsg_internal.h +++ b/drivers/rpmsg/rpmsg_internal.h @@ -50,10 +50,8 @@ struct rpmsg_device_ops { * @destroy_ept: see @rpmsg_destroy_ept(), required * @send: see @rpmsg_send(), required * @sendto: see @rpmsg_sendto(), optional - * @send_offchannel: see @rpmsg_send_offchannel(), optional * @trysend: see @rpmsg_trysend(), required * @trysendto: see @rpmsg_trysendto(), optional - * @trysend_offchannel: see @rpmsg_trysend_offchannel(), optional * @poll: see @rpmsg_poll(), optional * @set_flow_control: see @rpmsg_set_flow_control(), optional * @get_mtu: see @rpmsg_get_mtu(), optional @@ -67,13 +65,9 @@ struct rpmsg_endpoint_ops { int (*send)(struct rpmsg_endpoint *ept, void *data, int len); int (*sendto)(struct rpmsg_endpoint *ept, void *data, int len, u32 dst); - int (*send_offchannel)(struct rpmsg_endpoint *ept, u32 src, u32 dst, - void *data, int len); int (*trysend)(struct rpmsg_endpoint *ept, void *data, int len); int (*trysendto)(struct rpmsg_endpoint *ept, void *data, int len, u32 dst); - int (*trysend_offchannel)(struct rpmsg_endpoint *ept, u32 src, u32 dst, - void *data, int len); __poll_t (*poll)(struct rpmsg_endpoint *ept, struct file *filp, poll_table *wait); int (*set_flow_control)(struct rpmsg_endpoint *ept, bool pause, u32 dst); diff --git a/drivers/rpmsg/virtio_rpmsg_bus.c b/drivers/rpmsg/virtio_rpmsg_bus.c index 89d7a3b8c48b..4730b1c8b322 100644 --- a/drivers/rpmsg/virtio_rpmsg_bus.c +++ b/drivers/rpmsg/virtio_rpmsg_bus.c @@ -141,13 +141,9 @@ static void virtio_rpmsg_destroy_ept(struct rpmsg_endpoint *ept); static int virtio_rpmsg_send(struct rpmsg_endpoint *ept, void *data, int len); static int virtio_rpmsg_sendto(struct rpmsg_endpoint *ept, void *data, int len, u32 dst); 
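
With the offchannel variants removed, in-kernel clients are left with the endpoint-based send family (rpmsg_send(), rpmsg_sendto() and their trysend counterparts). A minimal sketch of a receive callback replying over the device's default endpoint; the echo policy is illustrative, not from this patch:

#include <linux/rpmsg.h>

static int my_rpmsg_cb(struct rpmsg_device *rpdev, void *data, int len,
		       void *priv, u32 src)
{
	/*
	 * Reply to the sender. rpmsg_sendto() may sleep waiting for a TX
	 * buffer; rpmsg_trysendto() is the non-blocking alternative.
	 */
	return rpmsg_sendto(rpdev->ept, data, len, src);
}
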
-static int virtio_rpmsg_send_offchannel(struct rpmsg_endpoint *ept, u32 src, - u32 dst, void *data, int len); static int virtio_rpmsg_trysend(struct rpmsg_endpoint *ept, void *data, int len); static int virtio_rpmsg_trysendto(struct rpmsg_endpoint *ept, void *data, int len, u32 dst); -static int virtio_rpmsg_trysend_offchannel(struct rpmsg_endpoint *ept, u32 src, - u32 dst, void *data, int len); static ssize_t virtio_rpmsg_get_mtu(struct rpmsg_endpoint *ept); static struct rpmsg_device *__rpmsg_create_channel(struct virtproc_info *vrp, struct rpmsg_channel_info *chinfo); @@ -156,10 +152,8 @@ static const struct rpmsg_endpoint_ops virtio_endpoint_ops = { .destroy_ept = virtio_rpmsg_destroy_ept, .send = virtio_rpmsg_send, .sendto = virtio_rpmsg_sendto, - .send_offchannel = virtio_rpmsg_send_offchannel, .trysend = virtio_rpmsg_trysend, .trysendto = virtio_rpmsg_trysendto, - .trysend_offchannel = virtio_rpmsg_trysend_offchannel, .get_mtu = virtio_rpmsg_get_mtu, }; @@ -545,7 +539,7 @@ static void rpmsg_downref_sleepers(struct virtproc_info *vrp) * the function will immediately fail, and -ENOMEM will be returned. * * Normally drivers shouldn't use this function directly; instead, drivers - * should use the appropriate rpmsg_{try}send{to, _offchannel} API + * should use the appropriate rpmsg_{try}send{to} API * (see include/linux/rpmsg.h). * * Return: 0 on success and an appropriate error value on failure. @@ -665,14 +659,6 @@ static int virtio_rpmsg_sendto(struct rpmsg_endpoint *ept, void *data, int len, return rpmsg_send_offchannel_raw(rpdev, src, dst, data, len, true); } -static int virtio_rpmsg_send_offchannel(struct rpmsg_endpoint *ept, u32 src, - u32 dst, void *data, int len) -{ - struct rpmsg_device *rpdev = ept->rpdev; - - return rpmsg_send_offchannel_raw(rpdev, src, dst, data, len, true); -} - static int virtio_rpmsg_trysend(struct rpmsg_endpoint *ept, void *data, int len) { struct rpmsg_device *rpdev = ept->rpdev; @@ -690,14 +676,6 @@ static int virtio_rpmsg_trysendto(struct rpmsg_endpoint *ept, void *data, return rpmsg_send_offchannel_raw(rpdev, src, dst, data, len, false); } -static int virtio_rpmsg_trysend_offchannel(struct rpmsg_endpoint *ept, u32 src, - u32 dst, void *data, int len) -{ - struct rpmsg_device *rpdev = ept->rpdev; - - return rpmsg_send_offchannel_raw(rpdev, src, dst, data, len, false); -} - static ssize_t virtio_rpmsg_get_mtu(struct rpmsg_endpoint *ept) { struct rpmsg_device *rpdev = ept->rpdev; diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c index dac85294d2f5..e284eea331d7 100644 --- a/drivers/s390/char/vmlogrdr.c +++ b/drivers/s390/char/vmlogrdr.c @@ -255,7 +255,7 @@ static int vmlogrdr_recording(struct vmlogrdr_priv_t * logptr, /* * The recording commands needs to be called with option QID - * for guests that have previlege classes A or B. + * for guests that have privilege classes A or B. * Purging has to be done as separate step, because recording * can't be switched on as long as records are on the queue. * Doing both at the same time doesn't work. @@ -557,7 +557,7 @@ static ssize_t vmlogrdr_purge_store(struct device * dev, /* * The recording command needs to be called with option QID - * for guests that have previlege classes A or B. + * for guests that have privilege classes A or B. * Other guests will not recognize the command and we have to * issue the same command without the QID parameter. 
*/ diff --git a/drivers/spi/spi-qpic-snand.c b/drivers/spi/spi-qpic-snand.c index fd129650434f..3f747fd61d19 100644 --- a/drivers/spi/spi-qpic-snand.c +++ b/drivers/spi/spi-qpic-snand.c @@ -1616,6 +1616,7 @@ static void qcom_spi_remove(struct platform_device *pdev) static const struct qcom_nandc_props ipq9574_snandc_props = { .dev_cmd_reg_start = 0x7000, + .bam_offset = 0x30000, .supports_bam = true, }; diff --git a/drivers/uio/uio_hv_generic.c b/drivers/uio/uio_hv_generic.c index 69c1df0f4ca5..aac67a4413ce 100644 --- a/drivers/uio/uio_hv_generic.c +++ b/drivers/uio/uio_hv_generic.c @@ -243,6 +243,9 @@ hv_uio_probe(struct hv_device *dev, if (!ring_size) ring_size = SZ_2M; + /* Adjust ring size if necessary to have it page aligned */ + ring_size = VMBUS_RING_SIZE(ring_size); + pdata = devm_kzalloc(&dev->device, sizeof(*pdata), GFP_KERNEL); if (!pdata) return -ENOMEM; @@ -274,13 +277,13 @@ hv_uio_probe(struct hv_device *dev, pdata->info.mem[INT_PAGE_MAP].name = "int_page"; pdata->info.mem[INT_PAGE_MAP].addr = (uintptr_t)vmbus_connection.int_page; - pdata->info.mem[INT_PAGE_MAP].size = PAGE_SIZE; + pdata->info.mem[INT_PAGE_MAP].size = HV_HYP_PAGE_SIZE; pdata->info.mem[INT_PAGE_MAP].memtype = UIO_MEM_LOGICAL; pdata->info.mem[MON_PAGE_MAP].name = "monitor_page"; pdata->info.mem[MON_PAGE_MAP].addr = (uintptr_t)vmbus_connection.monitor_pages[1]; - pdata->info.mem[MON_PAGE_MAP].size = PAGE_SIZE; + pdata->info.mem[MON_PAGE_MAP].size = HV_HYP_PAGE_SIZE; pdata->info.mem[MON_PAGE_MAP].memtype = UIO_MEM_LOGICAL; if (channel->device_id == HV_NIC) { diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c index 37bd18730fe0..f9cdbf8c53e3 100644 --- a/drivers/video/console/vgacon.c +++ b/drivers/video/console/vgacon.c @@ -1168,7 +1168,7 @@ static bool vgacon_scroll(struct vc_data *c, unsigned int t, unsigned int b, c->vc_screenbuf_size - delta); c->vc_origin = vga_vram_end - c->vc_screenbuf_size; vga_rolled_over = 0; - } else + } else if (oldo - delta >= (unsigned long)c->vc_screenbuf) c->vc_origin -= delta; c->vc_scr_end = c->vc_origin + c->vc_screenbuf_size; scr_memsetw((u16 *) (c->vc_origin), c->vc_video_erase_char, diff --git a/drivers/video/fbdev/arkfb.c b/drivers/video/fbdev/arkfb.c index 082501feceb9..ec084323115f 100644 --- a/drivers/video/fbdev/arkfb.c +++ b/drivers/video/fbdev/arkfb.c @@ -431,9 +431,10 @@ static struct dac_ops ics5342_ops = { static struct dac_info * ics5342_init(dac_read_regs_t drr, dac_write_regs_t dwr, void *data) { - struct dac_info *info = kzalloc(sizeof(struct ics5342_info), GFP_KERNEL); + struct ics5342_info *ics_info = kzalloc(sizeof(struct ics5342_info), GFP_KERNEL); + struct dac_info *info = &ics_info->dac; - if (! 
info) + if (!ics_info) return NULL; info->dacops = &ics5342_ops; diff --git a/drivers/video/fbdev/carminefb.c b/drivers/video/fbdev/carminefb.c index e56065cdba97..2bdd67595891 100644 --- a/drivers/video/fbdev/carminefb.c +++ b/drivers/video/fbdev/carminefb.c @@ -649,13 +649,13 @@ static int carminefb_probe(struct pci_dev *dev, const struct pci_device_id *ent) * is required for that largest resolution to avoid remaps at run * time */ - if (carminefb_fix.smem_len > CARMINE_TOTAL_DIPLAY_MEM) - carminefb_fix.smem_len = CARMINE_TOTAL_DIPLAY_MEM; + if (carminefb_fix.smem_len > CARMINE_TOTAL_DISPLAY_MEM) + carminefb_fix.smem_len = CARMINE_TOTAL_DISPLAY_MEM; - else if (carminefb_fix.smem_len < CARMINE_TOTAL_DIPLAY_MEM) { + else if (carminefb_fix.smem_len < CARMINE_TOTAL_DISPLAY_MEM) { printk(KERN_ERR "carminefb: Memory bar is only %d bytes, %d " "are required.", carminefb_fix.smem_len, - CARMINE_TOTAL_DIPLAY_MEM); + CARMINE_TOTAL_DISPLAY_MEM); goto err_unmap_vregs; } diff --git a/drivers/video/fbdev/carminefb.h b/drivers/video/fbdev/carminefb.h index 297688eba469..c9825481d96b 100644 --- a/drivers/video/fbdev/carminefb.h +++ b/drivers/video/fbdev/carminefb.h @@ -7,7 +7,7 @@ #define MAX_DISPLAY 2 #define CARMINE_DISPLAY_MEM (800 * 600 * 4) -#define CARMINE_TOTAL_DIPLAY_MEM (CARMINE_DISPLAY_MEM * MAX_DISPLAY) +#define CARMINE_TOTAL_DISPLAY_MEM (CARMINE_DISPLAY_MEM * MAX_DISPLAY) #define CARMINE_USE_DISPLAY0 (1 << 0) #define CARMINE_USE_DISPLAY1 (1 << 1) diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c index ac3c99ed92d1..2df48037688d 100644 --- a/drivers/video/fbdev/core/fbcon.c +++ b/drivers/video/fbdev/core/fbcon.c @@ -117,9 +117,14 @@ static signed char con2fb_map_boot[MAX_NR_CONSOLES]; static struct fb_info *fbcon_info_from_console(int console) { + signed char fb; WARN_CONSOLE_UNLOCKED(); - return fbcon_registered_fb[con2fb_map[console]]; + fb = con2fb_map[console]; + if (fb < 0 || fb >= ARRAY_SIZE(fbcon_registered_fb)) + return NULL; + + return fbcon_registered_fb[fb]; } static int logo_lines; diff --git a/drivers/video/fbdev/core/fbcvt.c b/drivers/video/fbdev/core/fbcvt.c index 64843464c661..cd3821bd82e5 100644 --- a/drivers/video/fbdev/core/fbcvt.c +++ b/drivers/video/fbdev/core/fbcvt.c @@ -312,7 +312,7 @@ int fb_find_mode_cvt(struct fb_videomode *mode, int margins, int rb) cvt.f_refresh = cvt.refresh; cvt.interlace = 1; - if (!cvt.xres || !cvt.yres || !cvt.refresh) { + if (!cvt.xres || !cvt.yres || !cvt.refresh || cvt.f_refresh > INT_MAX) { printk(KERN_INFO "fbcvt: Invalid input parameters\n"); return 1; } diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c index 3c568cff2913..eca2498f2436 100644 --- a/drivers/video/fbdev/core/fbmem.c +++ b/drivers/video/fbdev/core/fbmem.c @@ -328,8 +328,10 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var) !list_empty(&info->modelist)) ret = fb_add_videomode(&mode, &info->modelist); - if (ret) + if (ret) { + info->var = old_var; return ret; + } event.info = info; event.data = &mode; @@ -388,7 +390,7 @@ static int fb_check_foreignness(struct fb_info *fi) static int do_register_framebuffer(struct fb_info *fb_info) { - int i; + int i, err = 0; struct fb_videomode mode; if (fb_check_foreignness(fb_info)) @@ -397,10 +399,18 @@ static int do_register_framebuffer(struct fb_info *fb_info) if (num_registered_fb == FB_MAX) return -ENXIO; - num_registered_fb++; for (i = 0 ; i < FB_MAX; i++) if (!registered_fb[i]) break; + + if (!fb_info->modelist.prev || !fb_info->modelist.next) + 
INIT_LIST_HEAD(&fb_info->modelist); + + fb_var_to_videomode(&mode, &fb_info->var); + err = fb_add_videomode(&mode, &fb_info->modelist); + if (err < 0) + return err; + fb_info->node = i; refcount_set(&fb_info->count, 1); mutex_init(&fb_info->lock); @@ -426,16 +436,12 @@ static int do_register_framebuffer(struct fb_info *fb_info) if (bitmap_empty(fb_info->pixmap.blit_y, FB_MAX_BLIT_HEIGHT)) bitmap_fill(fb_info->pixmap.blit_y, FB_MAX_BLIT_HEIGHT); - if (!fb_info->modelist.prev || !fb_info->modelist.next) - INIT_LIST_HEAD(&fb_info->modelist); - if (fb_info->skip_vt_switch) pm_vt_switch_required(fb_info->device, false); else pm_vt_switch_required(fb_info->device, true); - fb_var_to_videomode(&mode, &fb_info->var); - fb_add_videomode(&mode, &fb_info->modelist); + num_registered_fb++; registered_fb[i] = fb_info; #ifdef CONFIG_GUMSTIX_AM200EPD diff --git a/drivers/video/fbdev/nvidia/nvidia.c b/drivers/video/fbdev/nvidia/nvidia.c index 8900f181f195..cfaf9454014d 100644 --- a/drivers/video/fbdev/nvidia/nvidia.c +++ b/drivers/video/fbdev/nvidia/nvidia.c @@ -1484,7 +1484,7 @@ static int nvidiafb_setup(char *options) flatpanel = 1; } else if (!strncmp(this_opt, "hwcur", 5)) { hwcur = 1; - } else if (!strncmp(this_opt, "noaccel", 6)) { + } else if (!strncmp(this_opt, "noaccel", 7)) { noaccel = 1; } else if (!strncmp(this_opt, "noscale", 7)) { noscale = 1; diff --git a/drivers/video/fbdev/via/via-gpio.c b/drivers/video/fbdev/via/via-gpio.c index 9577c2cd52c7..27226a8f3f42 100644 --- a/drivers/video/fbdev/via/via-gpio.c +++ b/drivers/video/fbdev/via/via-gpio.c @@ -81,8 +81,7 @@ struct viafb_gpio_cfg { /* * GPIO access functions */ -static void via_gpio_set(struct gpio_chip *chip, unsigned int nr, - int value) +static int via_gpio_set(struct gpio_chip *chip, unsigned int nr, int value) { struct viafb_gpio_cfg *cfg = gpiochip_get_data(chip); u8 reg; @@ -99,13 +98,14 @@ static void via_gpio_set(struct gpio_chip *chip, unsigned int nr, reg &= ~(0x10 << gpio->vg_mask_shift); via_write_reg(VIASR, gpio->vg_port_index, reg); spin_unlock_irqrestore(&cfg->vdev->reg_lock, flags); + + return 0; } static int via_gpio_dir_out(struct gpio_chip *chip, unsigned int nr, int value) { - via_gpio_set(chip, nr, value); - return 0; + return via_gpio_set(chip, nr, value); } /* @@ -146,7 +146,7 @@ static struct viafb_gpio_cfg viafb_gpio_config = { .label = "VIAFB onboard GPIO", .owner = THIS_MODULE, .direction_output = via_gpio_dir_out, - .set = via_gpio_set, + .set_rv = via_gpio_set, .direction_input = via_gpio_dir_input, .get = via_gpio_get, .base = -1, diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig index 0d8d37f712e8..0c25b2ed44eb 100644 --- a/drivers/watchdog/Kconfig +++ b/drivers/watchdog/Kconfig @@ -804,6 +804,15 @@ config IMX7ULP_WDT To compile this driver as a module, choose M here: the module will be called imx7ulp_wdt. +config S32G_WDT + tristate "S32G Watchdog" + depends on ARCH_S32 || COMPILE_TEST + select WATCHDOG_CORE + help + This is the driver for the hardware watchdog on the NXP + S32G platforms. If you wish to have watchdog support + enabled, say Y, otherwise say N. + config DB500_WATCHDOG tristate "ST-Ericsson DB800 watchdog" depends on MFD_DB8500_PRCMU @@ -1001,7 +1010,7 @@ config STM32_WATCHDOG tristate "STM32 Independent WatchDoG (IWDG) support" depends on ARCH_STM32 || COMPILE_TEST select WATCHDOG_CORE - default y + default ARCH_STM32 help Say Y here to include support for the watchdog timer in stm32 SoCs. 
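
The nvidiafb change above fixes an off-by-one in option matching: with a length of 6, strncmp() compares only "noacce", so any option sharing that six-character prefix would enable noaccel. A standalone illustration:

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *opt = "noaccess";	/* hypothetical mistyped option */

	/* length 6 compares only "noacce" and wrongly matches */
	printf("len 6: %s\n", strncmp(opt, "noaccel", 6) ? "no match" : "match");
	/* length 7 compares the whole keyword and correctly rejects it */
	printf("len 7: %s\n", strncmp(opt, "noaccel", 7) ? "no match" : "match");

	return 0;
}
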
@@ -1363,6 +1372,17 @@ config INTEL_MID_WATCHDOG To compile this driver as a module, choose M here. +config INTEL_OC_WATCHDOG + tristate "Intel OC Watchdog" + depends on (X86 || COMPILE_TEST) && ACPI && HAS_IOPORT + select WATCHDOG_CORE + help + Hardware driver for Intel Over-Clocking watchdog present in + Platform Controller Hub (PCH) chipsets. + + To compile this driver as a module, choose M here: the + module will be called intel_oc_wdt. + config ITCO_WDT tristate "Intel TCO Timer/Watchdog" depends on X86 && PCI @@ -1869,7 +1889,7 @@ config OCTEON_WDT config MARVELL_GTI_WDT tristate "Marvell GTI Watchdog driver" depends on ARCH_THUNDER || (COMPILE_TEST && 64BIT) - default y + default ARCH_THUNDER select WATCHDOG_CORE help Marvell GTI hardware supports watchdog timer. First timeout @@ -2035,7 +2055,7 @@ config 8xxx_WDT config PIKA_WDT tristate "PIKA FPGA Watchdog" depends on WARP || (PPC64 && COMPILE_TEST) - default y + default WARP help This enables the watchdog in the PIKA FPGA. Currently used on the Warp platform. diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile index c9482904bf87..bbd4d62d2cc3 100644 --- a/drivers/watchdog/Makefile +++ b/drivers/watchdog/Makefile @@ -69,6 +69,7 @@ obj-$(CONFIG_TS72XX_WATCHDOG) += ts72xx_wdt.o obj-$(CONFIG_IMX2_WDT) += imx2_wdt.o obj-$(CONFIG_IMX_SC_WDT) += imx_sc_wdt.o obj-$(CONFIG_IMX7ULP_WDT) += imx7ulp_wdt.o +obj-$(CONFIG_S32G_WDT) += s32g_wdt.o obj-$(CONFIG_DB500_WATCHDOG) += db8500_wdt.o obj-$(CONFIG_RETU_WATCHDOG) += retu_wdt.o obj-$(CONFIG_BCM2835_WDT) += bcm2835_wdt.o @@ -150,6 +151,7 @@ obj-$(CONFIG_W83977F_WDT) += w83977f_wdt.o obj-$(CONFIG_MACHZ_WDT) += machzwd.o obj-$(CONFIG_SBC_EPX_C3_WATCHDOG) += sbc_epx_c3.o obj-$(CONFIG_INTEL_MID_WATCHDOG) += intel-mid_wdt.o +obj-$(CONFIG_INTEL_OC_WATCHDOG) += intel_oc_wdt.o obj-$(CONFIG_INTEL_MEI_WDT) += mei_wdt.o obj-$(CONFIG_NI903X_WDT) += ni903x_wdt.o obj-$(CONFIG_NIC7018_WDT) += nic7018_wdt.o diff --git a/drivers/watchdog/apple_wdt.c b/drivers/watchdog/apple_wdt.c index 95d9e37df41c..66a158f67a71 100644 --- a/drivers/watchdog/apple_wdt.c +++ b/drivers/watchdog/apple_wdt.c @@ -95,9 +95,12 @@ static int apple_wdt_ping(struct watchdog_device *wdd) static int apple_wdt_set_timeout(struct watchdog_device *wdd, unsigned int s) { struct apple_wdt *wdt = to_apple_wdt(wdd); + u32 actual; writel_relaxed(0, wdt->regs + APPLE_WDT_WD1_CUR_TIME); - writel_relaxed(wdt->clk_rate * s, wdt->regs + APPLE_WDT_WD1_BITE_TIME); + + actual = min(s, wdd->max_hw_heartbeat_ms / 1000); + writel_relaxed(wdt->clk_rate * actual, wdt->regs + APPLE_WDT_WD1_BITE_TIME); wdd->timeout = s; @@ -177,7 +180,7 @@ static int apple_wdt_probe(struct platform_device *pdev) wdt->wdd.ops = &apple_wdt_ops; wdt->wdd.info = &apple_wdt_info; - wdt->wdd.max_timeout = U32_MAX / wdt->clk_rate; + wdt->wdd.max_hw_heartbeat_ms = U32_MAX / wdt->clk_rate * 1000; wdt->wdd.timeout = APPLE_WDT_TIMEOUT_DEFAULT; wdt_ctrl = readl_relaxed(wdt->regs + APPLE_WDT_WD1_CTRL); diff --git a/drivers/watchdog/arm_smc_wdt.c b/drivers/watchdog/arm_smc_wdt.c index 8f3d0c3a005f..bbba23ace7b8 100644 --- a/drivers/watchdog/arm_smc_wdt.c +++ b/drivers/watchdog/arm_smc_wdt.c @@ -46,6 +46,8 @@ static int smcwd_call(struct watchdog_device *wdd, enum smcwd_call call, return -ENODEV; if (res->a0 == PSCI_RET_INVALID_PARAMS) return -EINVAL; + if (res->a0 == PSCI_RET_DISABLED) + return -ENODATA; if (res->a0 != PSCI_RET_SUCCESS) return -EIO; return 0; @@ -131,10 +133,19 @@ static int smcwd_probe(struct platform_device *pdev) wdd->info = &smcwd_info; /* get_timeleft is 
optional */ - if (smcwd_call(wdd, SMCWD_GET_TIMELEFT, 0, NULL)) - wdd->ops = &smcwd_ops; - else + err = smcwd_call(wdd, SMCWD_GET_TIMELEFT, 0, NULL); + switch (err) { + case 0: + set_bit(WDOG_HW_RUNNING, &wdd->status); + fallthrough; + case -ENODATA: wdd->ops = &smcwd_timeleft_ops; + break; + default: + wdd->ops = &smcwd_ops; + break; + } + wdd->timeout = res.a2; wdd->max_timeout = res.a2; wdd->min_timeout = res.a1; diff --git a/drivers/watchdog/cros_ec_wdt.c b/drivers/watchdog/cros_ec_wdt.c index 716c23f4388c..9ffe7f505645 100644 --- a/drivers/watchdog/cros_ec_wdt.c +++ b/drivers/watchdog/cros_ec_wdt.c @@ -25,26 +25,22 @@ static int cros_ec_wdt_send_cmd(struct cros_ec_device *cros_ec, union cros_ec_wdt_data *arg) { int ret; - struct { - struct cros_ec_command msg; - union cros_ec_wdt_data data; - } __packed buf = { - .msg = { - .version = 0, - .command = EC_CMD_HANG_DETECT, - .insize = (arg->req.command == EC_HANG_DETECT_CMD_GET_STATUS) ? - sizeof(struct ec_response_hang_detect) : - 0, - .outsize = sizeof(struct ec_params_hang_detect), - }, - .data.req = arg->req - }; - - ret = cros_ec_cmd_xfer_status(cros_ec, &buf.msg); + DEFINE_RAW_FLEX(struct cros_ec_command, msg, data, + sizeof(union cros_ec_wdt_data)); + + msg->version = 0; + msg->command = EC_CMD_HANG_DETECT; + msg->insize = (arg->req.command == EC_HANG_DETECT_CMD_GET_STATUS) ? + sizeof(struct ec_response_hang_detect) : + 0; + msg->outsize = sizeof(struct ec_params_hang_detect); + *(struct ec_params_hang_detect *)msg->data = arg->req; + + ret = cros_ec_cmd_xfer_status(cros_ec, msg); if (ret < 0) return ret; - arg->resp = buf.data.resp; + arg->resp = *(struct ec_response_hang_detect *)msg->data; return 0; } diff --git a/drivers/watchdog/da9052_wdt.c b/drivers/watchdog/da9052_wdt.c index 77039f2f0be5..afb7887c3a1e 100644 --- a/drivers/watchdog/da9052_wdt.c +++ b/drivers/watchdog/da9052_wdt.c @@ -30,6 +30,18 @@ struct da9052_wdt_data { unsigned long jpast; }; +static bool nowayout = WATCHDOG_NOWAYOUT; +module_param(nowayout, bool, 0); +MODULE_PARM_DESC(nowayout, + "Watchdog cannot be stopped once started (default=" + __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); + +static int timeout; +module_param(timeout, int, 0); +MODULE_PARM_DESC(timeout, + "Watchdog timeout in seconds. 
(default = " + __MODULE_STRING(WDT_DEFAULT_TIMEOUT) ")"); + static const struct { u8 reg_val; int time; /* Seconds */ @@ -168,10 +180,13 @@ static int da9052_wdt_probe(struct platform_device *pdev) da9052_wdt = &driver_data->wdt; da9052_wdt->timeout = DA9052_DEF_TIMEOUT; + da9052_wdt->min_hw_heartbeat_ms = DA9052_TWDMIN; da9052_wdt->info = &da9052_wdt_info; da9052_wdt->ops = &da9052_wdt_ops; da9052_wdt->parent = dev; watchdog_set_drvdata(da9052_wdt, driver_data); + watchdog_init_timeout(da9052_wdt, timeout, dev); + watchdog_set_nowayout(da9052_wdt, nowayout); if (da9052->fault_log & DA9052_FAULTLOG_TWDERROR) da9052_wdt->bootstatus |= WDIOF_CARDRESET; @@ -180,11 +195,15 @@ static int da9052_wdt_probe(struct platform_device *pdev) if (da9052->fault_log & DA9052_FAULTLOG_VDDFAULT) da9052_wdt->bootstatus |= WDIOF_POWERUNDER; - ret = da9052_reg_update(da9052, DA9052_CONTROL_D_REG, - DA9052_CONTROLD_TWDSCALE, 0); - if (ret < 0) { - dev_err(dev, "Failed to disable watchdog bits, %d\n", ret); + ret = da9052_reg_read(da9052, DA9052_CONTROL_D_REG); + if (ret < 0) return ret; + + /* Check if FW enabled the watchdog */ + if (ret & DA9052_CONTROLD_TWDSCALE) { + /* Ensure proper initialization */ + da9052_wdt_start(da9052_wdt); + set_bit(WDOG_HW_RUNNING, &da9052_wdt->status); } return devm_watchdog_register_device(dev, &driver_data->wdt); diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c index 7672582fa407..9ab769aa0244 100644 --- a/drivers/watchdog/iTCO_wdt.c +++ b/drivers/watchdog/iTCO_wdt.c @@ -58,7 +58,6 @@ #include <linux/platform_device.h> /* For platform_driver framework */ #include <linux/pci.h> /* For pci functions */ #include <linux/ioport.h> /* For io-port access */ -#include <linux/spinlock.h> /* For spin_lock/spin_unlock/... */ #include <linux/uaccess.h> /* For copy_to_user/put_user/... */ #include <linux/io.h> /* For inb/outb/... */ #include <linux/platform_data/itco_wdt.h> @@ -102,8 +101,6 @@ struct iTCO_wdt_private { * or memory-mapped PMC register bit 4 (TCO version 3). 
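
The da9052 probe change above replaces the unconditional watchdog disable with detection of a timer that firmware already enabled. The general pattern it uses is the watchdog core's WDOG_HW_RUNNING handshake, sketched here with illustrative names:

#include <linux/watchdog.h>

/* If probe finds the hardware timer already ticking, tell the core so it
 * keeps pinging the device until userspace opens /dev/watchdog. */
static int my_wdt_register(struct device *dev, struct watchdog_device *wdd,
			   bool hw_already_running)
{
	if (hw_already_running)
		set_bit(WDOG_HW_RUNNING, &wdd->status);

	return devm_watchdog_register_device(dev, wdd);
}
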
*/ unsigned long __iomem *gcs_pmc; - /* the lock for io operations */ - spinlock_t io_lock; /* the PCI-device */ struct pci_dev *pci_dev; /* whether or not the watchdog has been suspended */ @@ -286,13 +283,10 @@ static int iTCO_wdt_start(struct watchdog_device *wd_dev) struct iTCO_wdt_private *p = watchdog_get_drvdata(wd_dev); unsigned int val; - spin_lock(&p->io_lock); - iTCO_vendor_pre_start(p->smi_res, wd_dev->timeout); /* disable chipset's NO_REBOOT bit */ if (p->update_no_reboot_bit(p->no_reboot_priv, false)) { - spin_unlock(&p->io_lock); dev_err(wd_dev->parent, "failed to reset NO_REBOOT flag, reboot disabled by hardware/BIOS\n"); return -EIO; } @@ -309,7 +303,6 @@ static int iTCO_wdt_start(struct watchdog_device *wd_dev) val &= 0xf7ff; outw(val, TCO1_CNT(p)); val = inw(TCO1_CNT(p)); - spin_unlock(&p->io_lock); if (val & 0x0800) return -1; @@ -321,8 +314,6 @@ static int iTCO_wdt_stop(struct watchdog_device *wd_dev) struct iTCO_wdt_private *p = watchdog_get_drvdata(wd_dev); unsigned int val; - spin_lock(&p->io_lock); - iTCO_vendor_pre_stop(p->smi_res); /* Bit 11: TCO Timer Halt -> 1 = The TCO timer is disabled */ @@ -334,8 +325,6 @@ static int iTCO_wdt_stop(struct watchdog_device *wd_dev) /* Set the NO_REBOOT bit to prevent later reboots, just for sure */ p->update_no_reboot_bit(p->no_reboot_priv, true); - spin_unlock(&p->io_lock); - if ((val & 0x0800) == 0) return -1; return 0; @@ -345,8 +334,6 @@ static int iTCO_wdt_ping(struct watchdog_device *wd_dev) { struct iTCO_wdt_private *p = watchdog_get_drvdata(wd_dev); - spin_lock(&p->io_lock); - /* Reload the timer by writing to the TCO Timer Counter register */ if (p->iTCO_version >= 2) { outw(0x01, TCO_RLD(p)); @@ -358,7 +345,6 @@ static int iTCO_wdt_ping(struct watchdog_device *wd_dev) outb(0x01, TCO_RLD(p)); } - spin_unlock(&p->io_lock); return 0; } @@ -385,24 +371,20 @@ static int iTCO_wdt_set_timeout(struct watchdog_device *wd_dev, unsigned int t) /* Write new heartbeat to watchdog */ if (p->iTCO_version >= 2) { - spin_lock(&p->io_lock); val16 = inw(TCOv2_TMR(p)); val16 &= 0xfc00; val16 |= tmrval; outw(val16, TCOv2_TMR(p)); val16 = inw(TCOv2_TMR(p)); - spin_unlock(&p->io_lock); if ((val16 & 0x3ff) != tmrval) return -EINVAL; } else if (p->iTCO_version == 1) { - spin_lock(&p->io_lock); val8 = inb(TCOv1_TMR(p)); val8 &= 0xc0; val8 |= (tmrval & 0xff); outb(val8, TCOv1_TMR(p)); val8 = inb(TCOv1_TMR(p)); - spin_unlock(&p->io_lock); if ((val8 & 0x3f) != tmrval) return -EINVAL; @@ -421,19 +403,15 @@ static unsigned int iTCO_wdt_get_timeleft(struct watchdog_device *wd_dev) /* read the TCO Timer */ if (p->iTCO_version >= 2) { - spin_lock(&p->io_lock); val16 = inw(TCO_RLD(p)); val16 &= 0x3ff; - spin_unlock(&p->io_lock); time_left = ticks_to_seconds(p, val16); } else if (p->iTCO_version == 1) { - spin_lock(&p->io_lock); val8 = inb(TCO_RLD(p)); val8 &= 0x3f; if (!(inw(TCO1_STS(p)) & 0x0008)) val8 += (inb(TCOv1_TMR(p)) & 0x3f); - spin_unlock(&p->io_lock); time_left = ticks_to_seconds(p, val8); } @@ -493,8 +471,6 @@ static int iTCO_wdt_probe(struct platform_device *pdev) if (!p) return -ENOMEM; - spin_lock_init(&p->io_lock); - p->tco_res = platform_get_resource(pdev, IORESOURCE_IO, ICH_RES_IO_TCO); if (!p->tco_res) return -ENODEV; @@ -604,6 +580,7 @@ static int iTCO_wdt_probe(struct platform_device *pdev) iTCO_wdt_set_timeout(&p->wddev, WATCHDOG_TIMEOUT); dev_info(dev, "timeout value out of range, using %d\n", WATCHDOG_TIMEOUT); + heartbeat = WATCHDOG_TIMEOUT; } watchdog_stop_on_reboot(&p->wddev); diff --git a/drivers/watchdog/intel_oc_wdt.c 
b/drivers/watchdog/intel_oc_wdt.c new file mode 100644 index 000000000000..7c0551106981 --- /dev/null +++ b/drivers/watchdog/intel_oc_wdt.c @@ -0,0 +1,233 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Intel OC Watchdog driver + * + * Copyright (C) 2025, Siemens + * Author: Diogo Ivo <diogo.ivo@siemens.com> + */ + +#define DRV_NAME "intel_oc_wdt" + +#include <linux/acpi.h> +#include <linux/bits.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/platform_device.h> +#include <linux/watchdog.h> + +#define INTEL_OC_WDT_TOV GENMASK(9, 0) +#define INTEL_OC_WDT_MIN_TOV 1 +#define INTEL_OC_WDT_MAX_TOV 1024 +#define INTEL_OC_WDT_DEF_TOV 60 + +/* + * One-time writable lock bit. If set forbids + * modification of itself, _TOV and _EN until + * next reboot. + */ +#define INTEL_OC_WDT_CTL_LCK BIT(12) + +#define INTEL_OC_WDT_EN BIT(14) +#define INTEL_OC_WDT_NO_ICCSURV_STS BIT(24) +#define INTEL_OC_WDT_ICCSURV_STS BIT(25) +#define INTEL_OC_WDT_RLD BIT(31) + +#define INTEL_OC_WDT_STS_BITS (INTEL_OC_WDT_NO_ICCSURV_STS | \ + INTEL_OC_WDT_ICCSURV_STS) + +#define INTEL_OC_WDT_CTRL_REG(wdt) ((wdt)->ctrl_res->start) + +struct intel_oc_wdt { + struct watchdog_device wdd; + struct resource *ctrl_res; + bool locked; +}; + +static int heartbeat; +module_param(heartbeat, uint, 0); +MODULE_PARM_DESC(heartbeat, "Watchdog heartbeats in seconds. (default=" + __MODULE_STRING(WDT_HEARTBEAT) ")"); + +static bool nowayout = WATCHDOG_NOWAYOUT; +module_param(nowayout, bool, 0); +MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" + __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); + +static int intel_oc_wdt_start(struct watchdog_device *wdd) +{ + struct intel_oc_wdt *oc_wdt = watchdog_get_drvdata(wdd); + + if (oc_wdt->locked) + return 0; + + outl(inl(INTEL_OC_WDT_CTRL_REG(oc_wdt)) | INTEL_OC_WDT_EN, + INTEL_OC_WDT_CTRL_REG(oc_wdt)); + + return 0; +} + +static int intel_oc_wdt_stop(struct watchdog_device *wdd) +{ + struct intel_oc_wdt *oc_wdt = watchdog_get_drvdata(wdd); + + outl(inl(INTEL_OC_WDT_CTRL_REG(oc_wdt)) & ~INTEL_OC_WDT_EN, + INTEL_OC_WDT_CTRL_REG(oc_wdt)); + + return 0; +} + +static int intel_oc_wdt_ping(struct watchdog_device *wdd) +{ + struct intel_oc_wdt *oc_wdt = watchdog_get_drvdata(wdd); + + outl(inl(INTEL_OC_WDT_CTRL_REG(oc_wdt)) | INTEL_OC_WDT_RLD, + INTEL_OC_WDT_CTRL_REG(oc_wdt)); + + return 0; +} + +static int intel_oc_wdt_set_timeout(struct watchdog_device *wdd, + unsigned int t) +{ + struct intel_oc_wdt *oc_wdt = watchdog_get_drvdata(wdd); + + outl((inl(INTEL_OC_WDT_CTRL_REG(oc_wdt)) & ~INTEL_OC_WDT_TOV) | (t - 1), + INTEL_OC_WDT_CTRL_REG(oc_wdt)); + + wdd->timeout = t; + + return 0; +} + +static const struct watchdog_info intel_oc_wdt_info = { + .options = WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING, + .identity = DRV_NAME, +}; + +static const struct watchdog_ops intel_oc_wdt_ops = { + .owner = THIS_MODULE, + .start = intel_oc_wdt_start, + .stop = intel_oc_wdt_stop, + .ping = intel_oc_wdt_ping, + .set_timeout = intel_oc_wdt_set_timeout, +}; + +static int intel_oc_wdt_setup(struct intel_oc_wdt *oc_wdt) +{ + struct watchdog_info *info; + unsigned long val; + + val = inl(INTEL_OC_WDT_CTRL_REG(oc_wdt)); + + if (val & INTEL_OC_WDT_STS_BITS) + oc_wdt->wdd.bootstatus |= WDIOF_CARDRESET; + + oc_wdt->locked = !!(val & INTEL_OC_WDT_CTL_LCK); + + if (val & INTEL_OC_WDT_EN) { + /* + * No need to issue a ping here to "commit" the new timeout + * value to hardware as the watchdog core schedules one + * immediately when registering the 
watchdog. + */ + set_bit(WDOG_HW_RUNNING, &oc_wdt->wdd.status); + + if (oc_wdt->locked) { + info = (struct watchdog_info *)&intel_oc_wdt_info; + /* + * Set nowayout unconditionally as we cannot stop + * the watchdog. + */ + nowayout = true; + /* + * If we are locked read the current timeout value + * and inform the core we can't change it. + */ + oc_wdt->wdd.timeout = (val & INTEL_OC_WDT_TOV) + 1; + info->options &= ~WDIOF_SETTIMEOUT; + + dev_info(oc_wdt->wdd.parent, + "Register access locked, heartbeat fixed at: %u s\n", + oc_wdt->wdd.timeout); + } + } else if (oc_wdt->locked) { + /* + * In case the watchdog is disabled and locked there + * is nothing we can do with it so just fail probing. + */ + return -EACCES; + } + + val &= ~INTEL_OC_WDT_TOV; + outl(val | (oc_wdt->wdd.timeout - 1), INTEL_OC_WDT_CTRL_REG(oc_wdt)); + + return 0; +} + +static int intel_oc_wdt_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct intel_oc_wdt *oc_wdt; + struct watchdog_device *wdd; + int ret; + + oc_wdt = devm_kzalloc(&pdev->dev, sizeof(*oc_wdt), GFP_KERNEL); + if (!oc_wdt) + return -ENOMEM; + + oc_wdt->ctrl_res = platform_get_resource(pdev, IORESOURCE_IO, 0); + if (!oc_wdt->ctrl_res) { + dev_err(&pdev->dev, "missing I/O resource\n"); + return -ENODEV; + } + + if (!devm_request_region(&pdev->dev, oc_wdt->ctrl_res->start, + resource_size(oc_wdt->ctrl_res), pdev->name)) { + dev_err(dev, "resource %pR already in use, device disabled\n", + oc_wdt->ctrl_res); + return -EBUSY; + } + + wdd = &oc_wdt->wdd; + wdd->min_timeout = INTEL_OC_WDT_MIN_TOV; + wdd->max_timeout = INTEL_OC_WDT_MAX_TOV; + wdd->timeout = INTEL_OC_WDT_DEF_TOV; + wdd->info = &intel_oc_wdt_info; + wdd->ops = &intel_oc_wdt_ops; + wdd->parent = dev; + + watchdog_init_timeout(wdd, heartbeat, dev); + + ret = intel_oc_wdt_setup(oc_wdt); + if (ret) + return ret; + + watchdog_set_drvdata(wdd, oc_wdt); + watchdog_set_nowayout(wdd, nowayout); + watchdog_stop_on_reboot(wdd); + watchdog_stop_on_unregister(wdd); + + return devm_watchdog_register_device(dev, wdd); +} + +static const struct acpi_device_id intel_oc_wdt_match[] = { + { "INT3F0D" }, + { "INTC1099" }, + { }, +}; +MODULE_DEVICE_TABLE(acpi, intel_oc_wdt_match); + +static struct platform_driver intel_oc_wdt_platform_driver = { + .driver = { + .name = DRV_NAME, + .acpi_match_table = intel_oc_wdt_match, + }, + .probe = intel_oc_wdt_probe, +}; + +module_platform_driver(intel_oc_wdt_platform_driver); + +MODULE_AUTHOR("Diogo Ivo <diogo.ivo@siemens.com>"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Intel OC Watchdog driver"); diff --git a/drivers/watchdog/lenovo_se30_wdt.c b/drivers/watchdog/lenovo_se30_wdt.c index 024b842499b3..1c73bb7eeeee 100644 --- a/drivers/watchdog/lenovo_se30_wdt.c +++ b/drivers/watchdog/lenovo_se30_wdt.c @@ -271,6 +271,8 @@ static int lenovo_se30_wdt_probe(struct platform_device *pdev) return -EBUSY; priv->shm_base_addr = devm_ioremap(dev, base_phys, SHM_WIN_SIZE); + if (!priv->shm_base_addr) + return -ENOMEM; priv->wdt_cfg.mod = WDT_MODULE; priv->wdt_cfg.idx = WDT_CFG_INDEX; diff --git a/drivers/watchdog/pcwd_usb.c b/drivers/watchdog/pcwd_usb.c index 132699e2f247..b636650b714b 100644 --- a/drivers/watchdog/pcwd_usb.c +++ b/drivers/watchdog/pcwd_usb.c @@ -579,7 +579,7 @@ static struct notifier_block usb_pcwd_notifier = { .notifier_call = usb_pcwd_notify_sys, }; -/** +/* * usb_pcwd_delete */ static inline void usb_pcwd_delete(struct usb_pcwd_private *usb_pcwd) @@ -590,7 +590,7 @@ static inline void usb_pcwd_delete(struct usb_pcwd_private *usb_pcwd) 
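/*
 * Background for the comment-only hunks in this file and in the
 * pretimeout helpers further down (a general kernel-doc note, not
 * wording from the patch): "/**" marks a comment as kernel-doc, so
 * scripts/kernel-doc warns when such a header lacks "@param: ..."
 * lines and a description. Demoting plain headers to "/*" and writing
 * "@wdd:" instead of "@wdd -" silences those warnings without touching
 * any code. A conforming header for a hypothetical helper would look
 * like:
 */

/**
 * example_wdt_ping() - reload a hypothetical watchdog counter
 * @wdd: watchdog device being serviced
 *
 * Return: 0 on success or a negative errno on failure.
 */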
kfree(usb_pcwd); } -/** +/* * usb_pcwd_probe * * Called by the usb core when a new device is connected that it thinks @@ -758,7 +758,7 @@ error: } -/** +/* * usb_pcwd_disconnect * * Called by the usb core when the device is removed from the system. diff --git a/drivers/watchdog/pretimeout_noop.c b/drivers/watchdog/pretimeout_noop.c index 4799551dd784..74ec02b9ffca 100644 --- a/drivers/watchdog/pretimeout_noop.c +++ b/drivers/watchdog/pretimeout_noop.c @@ -11,7 +11,7 @@ /** * pretimeout_noop - No operation on watchdog pretimeout event - * @wdd - watchdog_device + * @wdd: watchdog_device * * This function prints a message about pretimeout to kernel log. */ diff --git a/drivers/watchdog/pretimeout_panic.c b/drivers/watchdog/pretimeout_panic.c index 2cc3c41d2be5..8c3ac674dc45 100644 --- a/drivers/watchdog/pretimeout_panic.c +++ b/drivers/watchdog/pretimeout_panic.c @@ -11,7 +11,7 @@ /** * pretimeout_panic - Panic on watchdog pretimeout event - * @wdd - watchdog_device + * @wdd: watchdog_device * * Panic, watchdog has not been fed till pretimeout event. */ diff --git a/drivers/watchdog/qcom-wdt.c b/drivers/watchdog/qcom-wdt.c index 006f9c61aa64..dfaac5995c84 100644 --- a/drivers/watchdog/qcom-wdt.c +++ b/drivers/watchdog/qcom-wdt.c @@ -181,6 +181,12 @@ static const struct qcom_wdt_match_data match_data_apcs_tmr = { .max_tick_count = 0x10000000U, }; +static const struct qcom_wdt_match_data match_data_ipq5424 = { + .offset = reg_offset_data_kpss, + .pretimeout = true, + .max_tick_count = 0xFFFFFU, +}; + static const struct qcom_wdt_match_data match_data_kpss = { .offset = reg_offset_data_kpss, .pretimeout = true, @@ -322,6 +328,7 @@ static const struct dev_pm_ops qcom_wdt_pm_ops = { }; static const struct of_device_id qcom_wdt_of_table[] = { + { .compatible = "qcom,apss-wdt-ipq5424", .data = &match_data_ipq5424 }, { .compatible = "qcom,kpss-timer", .data = &match_data_apcs_tmr }, { .compatible = "qcom,scss-timer", .data = &match_data_apcs_tmr }, { .compatible = "qcom,kpss-wdt", .data = &match_data_kpss }, diff --git a/drivers/watchdog/s32g_wdt.c b/drivers/watchdog/s32g_wdt.c new file mode 100644 index 000000000000..ad55063060af --- /dev/null +++ b/drivers/watchdog/s32g_wdt.c @@ -0,0 +1,315 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Watchdog driver for S32G SoC + * + * Copyright 2017-2019, 2021-2025 NXP. 
+ * + */ +#include <linux/clk.h> +#include <linux/debugfs.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/watchdog.h> + +#define DRIVER_NAME "s32g-swt" + +#define S32G_SWT_CR(__base) ((__base) + 0x00) /* Control Register offset */ +#define S32G_SWT_CR_SM (BIT(9) | BIT(10)) /* -> Service Mode */ +#define S32G_SWT_CR_STP BIT(2) /* -> Stop Mode Control */ +#define S32G_SWT_CR_FRZ BIT(1) /* -> Debug Mode Control */ +#define S32G_SWT_CR_WEN BIT(0) /* -> Watchdog Enable */ + +#define S32G_SWT_TO(__base) ((__base) + 0x08) /* Timeout Register offset */ + +#define S32G_SWT_SR(__base) ((__base) + 0x10) /* Service Register offset */ +#define S32G_WDT_SEQ1 0xA602 /* -> service sequence number 1 */ +#define S32G_WDT_SEQ2 0xB480 /* -> service sequence number 2 */ + +#define S32G_SWT_CO(__base) ((__base) + 0x14) /* Counter output register */ + +#define S32G_WDT_DEFAULT_TIMEOUT 30 + +struct s32g_wdt_device { + int rate; + void __iomem *base; + struct watchdog_device wdog; +}; + +static bool nowayout = WATCHDOG_NOWAYOUT; +module_param(nowayout, bool, 0); +MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" + __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); + +static unsigned int timeout_param = S32G_WDT_DEFAULT_TIMEOUT; +module_param(timeout_param, uint, 0); +MODULE_PARM_DESC(timeout_param, "Watchdog timeout in seconds (default=" + __MODULE_STRING(S32G_WDT_DEFAULT_TIMEOUT) ")"); + +static bool early_enable; +module_param(early_enable, bool, 0); +MODULE_PARM_DESC(early_enable, + "Watchdog is started on module insertion (default=false)"); + +static const struct watchdog_info s32g_wdt_info = { + .identity = "s32g watchdog", + .options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE | + WDIOC_GETTIMEOUT | WDIOC_GETTIMELEFT, +}; + +static struct s32g_wdt_device *wdd_to_s32g_wdt(struct watchdog_device *wdd) +{ + return container_of(wdd, struct s32g_wdt_device, wdog); +} + +static unsigned int wdog_sec_to_count(struct s32g_wdt_device *wdev, unsigned int timeout) +{ + return wdev->rate * timeout; +} + +static int s32g_wdt_ping(struct watchdog_device *wdog) +{ + struct s32g_wdt_device *wdev = wdd_to_s32g_wdt(wdog); + + writel(S32G_WDT_SEQ1, S32G_SWT_SR(wdev->base)); + writel(S32G_WDT_SEQ2, S32G_SWT_SR(wdev->base)); + + return 0; +} + +static int s32g_wdt_start(struct watchdog_device *wdog) +{ + struct s32g_wdt_device *wdev = wdd_to_s32g_wdt(wdog); + unsigned long val; + + val = readl(S32G_SWT_CR(wdev->base)); + + val |= S32G_SWT_CR_WEN; + + writel(val, S32G_SWT_CR(wdev->base)); + + return 0; +} + +static int s32g_wdt_stop(struct watchdog_device *wdog) +{ + struct s32g_wdt_device *wdev = wdd_to_s32g_wdt(wdog); + unsigned long val; + + val = readl(S32G_SWT_CR(wdev->base)); + + val &= ~S32G_SWT_CR_WEN; + + writel(val, S32G_SWT_CR(wdev->base)); + + return 0; +} + +static int s32g_wdt_set_timeout(struct watchdog_device *wdog, unsigned int timeout) +{ + struct s32g_wdt_device *wdev = wdd_to_s32g_wdt(wdog); + + writel(wdog_sec_to_count(wdev, timeout), S32G_SWT_TO(wdev->base)); + + wdog->timeout = timeout; + + /* + * Conforming to the documentation, the timeout counter is + * loaded when servicing is operated (aka ping) or when the + * counter is enabled. In case the watchdog is already started + * it must be stopped and started again to update the timeout + * register or a ping can be sent to refresh the counter. 
Here + * we choose to send a ping to the watchdog which is harmless + * if the watchdog is stopped. + */ + return s32g_wdt_ping(wdog); +} + +static unsigned int s32g_wdt_get_timeleft(struct watchdog_device *wdog) +{ + struct s32g_wdt_device *wdev = wdd_to_s32g_wdt(wdog); + unsigned long counter; + bool is_running; + + /* + * The counter output can be read only if the SWT is + * disabled. Given the latency between the internal counter + * and the counter output update, there can be very small + * difference. However, we can accept this matter of fact + * given the resolution is a second based unit for the output. + */ + is_running = watchdog_hw_running(wdog); + + if (is_running) + s32g_wdt_stop(wdog); + + counter = readl(S32G_SWT_CO(wdev->base)); + + if (is_running) + s32g_wdt_start(wdog); + + return counter / wdev->rate; +} + +static const struct watchdog_ops s32g_wdt_ops = { + .owner = THIS_MODULE, + .start = s32g_wdt_start, + .stop = s32g_wdt_stop, + .ping = s32g_wdt_ping, + .set_timeout = s32g_wdt_set_timeout, + .get_timeleft = s32g_wdt_get_timeleft, +}; + +static void s32g_wdt_init(struct s32g_wdt_device *wdev) +{ + unsigned long val; + + /* Set the watchdog's Time-Out value */ + val = wdog_sec_to_count(wdev, wdev->wdog.timeout); + + writel(val, S32G_SWT_TO(wdev->base)); + + /* + * Get the control register content. We are at init time, the + * watchdog should not be started. + */ + val = readl(S32G_SWT_CR(wdev->base)); + + /* + * We want to allow the watchdog timer to be stopped when + * device enters debug mode. + */ + val |= S32G_SWT_CR_FRZ; + + /* + * However, when the CPU is in WFI or suspend mode, the + * watchdog must continue. The documentation refers it as the + * stopped mode. + */ + val &= ~S32G_SWT_CR_STP; + + /* + * Use Fixed Service Sequence to ping the watchdog which is + * 0x00 configuration value for the service mode. It should be + * already set because it is the default value but we reset it + * in case. + */ + val &= ~S32G_SWT_CR_SM; + + writel(val, S32G_SWT_CR(wdev->base)); + + /* + * When the 'early_enable' option is set, we start the + * watchdog from the kernel. + */ + if (early_enable) { + s32g_wdt_start(&wdev->wdog); + set_bit(WDOG_HW_RUNNING, &wdev->wdog.status); + } +} + +static int s32g_wdt_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct resource *res; + struct clk *clk; + struct s32g_wdt_device *wdev; + struct watchdog_device *wdog; + int ret; + + wdev = devm_kzalloc(dev, sizeof(*wdev), GFP_KERNEL); + if (!wdev) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + wdev->base = devm_ioremap_resource(dev, res); + if (IS_ERR(wdev->base)) + return dev_err_probe(&pdev->dev, PTR_ERR(wdev->base), "Can not get resource\n"); + + clk = devm_clk_get_enabled(dev, "counter"); + if (IS_ERR(clk)) + return dev_err_probe(dev, PTR_ERR(clk), "Can't get Watchdog clock\n"); + + wdev->rate = clk_get_rate(clk); + if (!wdev->rate) { + dev_err(dev, "Input clock rate is not valid\n"); + return -EINVAL; + } + + wdog = &wdev->wdog; + wdog->info = &s32g_wdt_info; + wdog->ops = &s32g_wdt_ops; + + /* + * The code converts the timeout into a counter a value, if + * the value is less than 0x100, then it is clamped by the SWT + * module, so it is safe to specify a zero value as the + * minimum timeout. + */ + wdog->min_timeout = 0; + + /* + * The counter register is a 32 bits long, so the maximum + * counter value is UINT_MAX and the timeout in second is the + * value divided by the rate. 
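 *
 * (Worked check of that bound, taking the 51 MHz figure quoted next as
 * an assumed SWT input clock: UINT_MAX / 51,000,000 Hz =
 * 4,294,967,295 / 51,000,000 ~= 84.2, truncated to 84 seconds.)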
+ * + * For instance, a rate of 51MHz lead to 84 seconds maximum + * timeout. + */ + wdog->max_timeout = UINT_MAX / wdev->rate; + + /* + * The module param and the DT 'timeout-sec' property will + * override the default value if they are specified. + */ + ret = watchdog_init_timeout(wdog, timeout_param, dev); + if (ret) + return ret; + + /* + * As soon as the watchdog is started, there is no way to stop + * it if the 'nowayout' option is set at boot time + */ + watchdog_set_nowayout(wdog, nowayout); + + /* + * The devm_ version of the watchdog_register_device() + * function will call watchdog_unregister_device() when the + * device is removed. + */ + watchdog_stop_on_unregister(wdog); + + s32g_wdt_init(wdev); + + ret = devm_watchdog_register_device(dev, wdog); + if (ret) + return dev_err_probe(dev, ret, "Cannot register watchdog device\n"); + + dev_info(dev, "S32G Watchdog Timer Registered, timeout=%ds, nowayout=%d, early_enable=%d\n", + wdog->timeout, nowayout, early_enable); + + return 0; +} + +static const struct of_device_id s32g_wdt_dt_ids[] = { + { .compatible = "nxp,s32g2-swt" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, s32g_wdt_dt_ids); + +static struct platform_driver s32g_wdt_driver = { + .probe = s32g_wdt_probe, + .driver = { + .name = DRIVER_NAME, + .of_match_table = s32g_wdt_dt_ids, + }, +}; + +module_platform_driver(s32g_wdt_driver); + +MODULE_AUTHOR("Daniel Lezcano <daniel.lezcano@linaro.org>"); +MODULE_DESCRIPTION("Watchdog driver for S32G SoC"); +MODULE_LICENSE("GPL"); diff --git a/drivers/watchdog/s3c2410_wdt.c b/drivers/watchdog/s3c2410_wdt.c index bdd81d8074b2..40901bdac426 100644 --- a/drivers/watchdog/s3c2410_wdt.c +++ b/drivers/watchdog/s3c2410_wdt.c @@ -82,6 +82,10 @@ #define GS_CLUSTER2_NONCPU_INT_EN 0x1644 #define GS_RST_STAT_REG_OFFSET 0x3B44 +#define EXYNOS990_CLUSTER2_NONCPU_OUT 0x1620 +#define EXYNOS990_CLUSTER2_NONCPU_INT_EN 0x1644 +#define EXYNOS990_CLUSTER2_WDTRESET_BIT 23 + /** * DOC: Quirk flags for different Samsung watchdog IP-cores * @@ -259,6 +263,32 @@ static const struct s3c2410_wdt_variant drv_data_exynos850_cl1 = { QUIRK_HAS_PMU_RST_STAT | QUIRK_HAS_PMU_CNT_EN, }; +static const struct s3c2410_wdt_variant drv_data_exynos990_cl0 = { + .mask_reset_reg = GS_CLUSTER0_NONCPU_INT_EN, + .mask_bit = 2, + .mask_reset_inv = true, + .rst_stat_reg = EXYNOS5_RST_STAT_REG_OFFSET, + .rst_stat_bit = EXYNOS850_CLUSTER0_WDTRESET_BIT, + .cnt_en_reg = EXYNOSAUTOV920_CLUSTER0_NONCPU_OUT, + .cnt_en_bit = 7, + .quirks = QUIRK_HAS_WTCLRINT_REG | QUIRK_HAS_PMU_MASK_RESET | + QUIRK_HAS_PMU_RST_STAT | QUIRK_HAS_PMU_CNT_EN | + QUIRK_HAS_DBGACK_BIT, +}; + +static const struct s3c2410_wdt_variant drv_data_exynos990_cl2 = { + .mask_reset_reg = EXYNOS990_CLUSTER2_NONCPU_INT_EN, + .mask_bit = 2, + .mask_reset_inv = true, + .rst_stat_reg = EXYNOS5_RST_STAT_REG_OFFSET, + .rst_stat_bit = EXYNOS990_CLUSTER2_WDTRESET_BIT, + .cnt_en_reg = EXYNOS990_CLUSTER2_NONCPU_OUT, + .cnt_en_bit = 7, + .quirks = QUIRK_HAS_WTCLRINT_REG | QUIRK_HAS_PMU_MASK_RESET | + QUIRK_HAS_PMU_RST_STAT | QUIRK_HAS_PMU_CNT_EN | + QUIRK_HAS_DBGACK_BIT, +}; + static const struct s3c2410_wdt_variant drv_data_exynosautov9_cl0 = { .mask_reset_reg = EXYNOS850_CLUSTER0_NONCPU_INT_EN, .mask_bit = 2, @@ -350,6 +380,8 @@ static const struct of_device_id s3c2410_wdt_match[] = { .data = &drv_data_exynos7 }, { .compatible = "samsung,exynos850-wdt", .data = &drv_data_exynos850_cl0 }, + { .compatible = "samsung,exynos990-wdt", + .data = &drv_data_exynos990_cl0 }, { .compatible = "samsung,exynosautov9-wdt", .data = 
&drv_data_exynosautov9_cl0 }, { .compatible = "samsung,exynosautov920-wdt", @@ -678,7 +710,8 @@ s3c2410_get_wdt_drv_data(struct platform_device *pdev, struct s3c2410_wdt *wdt) if (variant == &drv_data_exynos850_cl0 || variant == &drv_data_exynosautov9_cl0 || variant == &drv_data_gs101_cl0 || - variant == &drv_data_exynosautov920_cl0) { + variant == &drv_data_exynosautov920_cl0 || + variant == &drv_data_exynos990_cl0) { u32 index; int err; @@ -700,6 +733,10 @@ s3c2410_get_wdt_drv_data(struct platform_device *pdev, struct s3c2410_wdt *wdt) else if (variant == &drv_data_exynosautov920_cl0) variant = &drv_data_exynosautov920_cl1; break; + case 2: + if (variant == &drv_data_exynos990_cl0) + variant = &drv_data_exynos990_cl2; + break; default: return dev_err_probe(dev, -EINVAL, "wrong cluster index: %u\n", index); } diff --git a/drivers/watchdog/stm32_iwdg.c b/drivers/watchdog/stm32_iwdg.c index 8ad06b54c5ad..b356a272ff9a 100644 --- a/drivers/watchdog/stm32_iwdg.c +++ b/drivers/watchdog/stm32_iwdg.c @@ -291,7 +291,7 @@ static int stm32_iwdg_irq_init(struct platform_device *pdev, return 0; if (of_property_read_bool(np, "wakeup-source")) { - ret = device_init_wakeup(dev, true); + ret = devm_device_init_wakeup(dev); if (ret) return ret; diff --git a/drivers/watchdog/wdt_pci.c b/drivers/watchdog/wdt_pci.c index dc5f29560e9b..3918a600f2a0 100644 --- a/drivers/watchdog/wdt_pci.c +++ b/drivers/watchdog/wdt_pci.c @@ -264,7 +264,7 @@ static int wdtpci_get_status(int *status) return 0; } -/** +/* * wdtpci_get_temperature: * * Reports the temperature in degrees Fahrenheit. The API is in |
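/*
 * On the stm32_iwdg hunk above: switching to devm_device_init_wakeup()
 * lets the devres core disarm wakeup when the device goes away, so the
 * driver no longer needs an explicit device_init_wakeup(dev, false) on
 * the error and remove paths. A minimal sketch of what such a devm
 * wrapper amounts to (names other than the devres and wakeup helpers
 * are illustrative assumptions, not the kernel's implementation):
 */

#include <linux/device.h>
#include <linux/pm_wakeup.h>

static void example_wakeup_disable(void *dev)
{
	device_init_wakeup(dev, false);
}

static int example_init_wakeup(struct device *dev)
{
	int ret = device_init_wakeup(dev, true);

	if (ret)
		return ret;

	/* Undo the enable automatically when the device is detached. */
	return devm_add_action_or_reset(dev, example_wakeup_disable, dev);
}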