Diffstat (limited to 'drivers/firmware')
27 files changed, 1737 insertions, 448 deletions
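Note on one of the helpers introduced below: drivers/firmware/efi/cper.c gains cper_bits_to_str(), which builds a '|'-separated string from a bitmask and a string table, and cper-arm.c switches its error_type reporting to it. The following is a minimal usage sketch based on the helper's kernel-doc example in the diff; the function name, the GENMASK(3, 0) field and the assumption that the prototype is visible via <linux/cper.h> are illustrative, not taken from the patch.

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/cper.h>
#include <linux/printk.h>

static void example_decode_proc_error_type(u8 type)
{
	/* Indexed by bit position, like cper_proc_error_type_strs[] */
	static const char * const type_strs[] = {
		"cache error",
		"TLB error",
		"bus error",
		"micro-architectural error",
	};
	char str[120];

	/* Decode only the defined low bits; reserved bits are reported separately */
	cper_bits_to_str(str, sizeof(str), FIELD_GET(GENMASK(3, 0), type),
			 type_strs, ARRAY_SIZE(type_strs));
	pr_info("error_type: %#x: %s\n", type, str);
}

For type = BIT(0) | BIT(2) this prints "error_type: 0x5: cache error|bus error", matching the new cper-arm.c output style shown in the diff.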
diff --git a/drivers/firmware/cirrus/cs_dsp.c b/drivers/firmware/cirrus/cs_dsp.c index f51047d8ea64..525ac0f0a75d 100644 --- a/drivers/firmware/cirrus/cs_dsp.c +++ b/drivers/firmware/cirrus/cs_dsp.c @@ -9,9 +9,11 @@ * Cirrus Logic International Semiconductor Ltd. */ +#include <linux/cleanup.h> #include <linux/ctype.h> #include <linux/debugfs.h> #include <linux/delay.h> +#include <linux/math.h> #include <linux/minmax.h> #include <linux/module.h> #include <linux/moduleparam.h> @@ -316,44 +318,6 @@ struct cs_dsp_alg_region_list_item { struct cs_dsp_alg_region alg_region; }; -struct cs_dsp_buf { - struct list_head list; - void *buf; -}; - -static struct cs_dsp_buf *cs_dsp_buf_alloc(const void *src, size_t len, - struct list_head *list) -{ - struct cs_dsp_buf *buf = kzalloc(sizeof(*buf), GFP_KERNEL); - - if (buf == NULL) - return NULL; - - buf->buf = vmalloc(len); - if (!buf->buf) { - kfree(buf); - return NULL; - } - memcpy(buf->buf, src, len); - - if (list) - list_add_tail(&buf->list, list); - - return buf; -} - -static void cs_dsp_buf_free(struct list_head *list) -{ - while (!list_empty(list)) { - struct cs_dsp_buf *buf = list_first_entry(list, - struct cs_dsp_buf, - list); - list_del(&buf->list); - vfree(buf->buf); - kfree(buf); - } -} - /** * cs_dsp_mem_region_name() - Return a name string for a memory type * @type: the memory type to match @@ -388,18 +352,14 @@ EXPORT_SYMBOL_NS_GPL(cs_dsp_mem_region_name, "FW_CS_DSP"); #ifdef CONFIG_DEBUG_FS static void cs_dsp_debugfs_save_wmfwname(struct cs_dsp *dsp, const char *s) { - char *tmp = kasprintf(GFP_KERNEL, "%s\n", s); - kfree(dsp->wmfw_file_name); - dsp->wmfw_file_name = tmp; + dsp->wmfw_file_name = kstrdup(s, GFP_KERNEL); } static void cs_dsp_debugfs_save_binname(struct cs_dsp *dsp, const char *s) { - char *tmp = kasprintf(GFP_KERNEL, "%s\n", s); - kfree(dsp->bin_file_name); - dsp->bin_file_name = tmp; + dsp->bin_file_name = kstrdup(s, GFP_KERNEL); } static void cs_dsp_debugfs_clear(struct cs_dsp *dsp) @@ -410,24 +370,33 @@ static void cs_dsp_debugfs_clear(struct cs_dsp *dsp) dsp->bin_file_name = NULL; } +static ssize_t cs_dsp_debugfs_string_read(struct cs_dsp *dsp, + char __user *user_buf, + size_t count, loff_t *ppos, + const char **pstr) +{ + const char *str __free(kfree) = NULL; + + scoped_guard(mutex, &dsp->pwr_lock) { + if (!*pstr) + return 0; + + str = kasprintf(GFP_KERNEL, "%s\n", *pstr); + if (!str) + return -ENOMEM; + + return simple_read_from_buffer(user_buf, count, ppos, str, strlen(str)); + } +} + static ssize_t cs_dsp_debugfs_wmfw_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct cs_dsp *dsp = file->private_data; - ssize_t ret; - - mutex_lock(&dsp->pwr_lock); - if (!dsp->wmfw_file_name || !dsp->booted) - ret = 0; - else - ret = simple_read_from_buffer(user_buf, count, ppos, - dsp->wmfw_file_name, - strlen(dsp->wmfw_file_name)); - - mutex_unlock(&dsp->pwr_lock); - return ret; + return cs_dsp_debugfs_string_read(dsp, user_buf, count, ppos, + &dsp->wmfw_file_name); } static ssize_t cs_dsp_debugfs_bin_read(struct file *file, @@ -435,19 +404,9 @@ static ssize_t cs_dsp_debugfs_bin_read(struct file *file, size_t count, loff_t *ppos) { struct cs_dsp *dsp = file->private_data; - ssize_t ret; - - mutex_lock(&dsp->pwr_lock); - if (!dsp->bin_file_name || !dsp->booted) - ret = 0; - else - ret = simple_read_from_buffer(user_buf, count, ppos, - dsp->bin_file_name, - strlen(dsp->bin_file_name)); - - mutex_unlock(&dsp->pwr_lock); - return ret; + return cs_dsp_debugfs_string_read(dsp, user_buf, count, ppos, + 
&dsp->bin_file_name); } static const struct { @@ -479,9 +438,11 @@ static int cs_dsp_debugfs_read_controls_show(struct seq_file *s, void *ignored) struct cs_dsp_coeff_ctl *ctl; unsigned int reg; + guard(mutex)(&dsp->pwr_lock); + list_for_each_entry(ctl, &dsp->ctl_list, list) { cs_dsp_coeff_base_reg(ctl, ®, 0); - seq_printf(s, "%22.*s: %#8zx %s:%08x %#8x %s %#8x %#4x %c%c%c%c %s %s\n", + seq_printf(s, "%22.*s: %#8x %s:%08x %#8x %s %#8x %#4x %c%c%c%c %s %s\n", ctl->subname_len, ctl->subname, ctl->len, cs_dsp_mem_region_name(ctl->alg_region.type), ctl->offset, reg, ctl->fw_name, ctl->alg_region.alg, ctl->type, @@ -1028,7 +989,7 @@ static void cs_dsp_signal_event_controls(struct cs_dsp *dsp, static void cs_dsp_free_ctl_blk(struct cs_dsp_coeff_ctl *ctl) { - kfree(ctl->cache); + kvfree(ctl->cache); kfree(ctl->subname); kfree(ctl); } @@ -1078,7 +1039,7 @@ static int cs_dsp_create_control(struct cs_dsp *dsp, ctl->type = type; ctl->offset = offset; ctl->len = len; - ctl->cache = kzalloc(ctl->len, GFP_KERNEL); + ctl->cache = kvzalloc(ctl->len, GFP_KERNEL); if (!ctl->cache) { ret = -ENOMEM; goto err_ctl_subname; @@ -1096,7 +1057,7 @@ static int cs_dsp_create_control(struct cs_dsp *dsp, err_list_del: list_del(&ctl->list); - kfree(ctl->cache); + kvfree(ctl->cache); err_ctl_subname: kfree(ctl->subname); err_ctl: @@ -1485,7 +1446,9 @@ static int cs_dsp_load(struct cs_dsp *dsp, const struct firmware *firmware, const struct wmfw_region *region; const struct cs_dsp_region *mem; const char *region_name; - struct cs_dsp_buf *buf; + u8 *buf __free(kfree) = NULL; + size_t buf_len = 0; + size_t region_len; unsigned int reg; int regions = 0; int ret, offset, type; @@ -1605,23 +1568,23 @@ static int cs_dsp_load(struct cs_dsp *dsp, const struct firmware *firmware, region_name); if (reg) { - buf = cs_dsp_buf_alloc(region->data, - le32_to_cpu(region->len), - &buf_list); - if (!buf) { - cs_dsp_err(dsp, "Out of memory\n"); - ret = -ENOMEM; - goto out_fw; + region_len = le32_to_cpu(region->len); + if (region_len > buf_len) { + buf_len = round_up(region_len, PAGE_SIZE); + kfree(buf); + buf = kmalloc(buf_len, GFP_KERNEL | GFP_DMA); + if (!buf) { + ret = -ENOMEM; + goto out_fw; + } } - ret = regmap_raw_write(regmap, reg, buf->buf, - le32_to_cpu(region->len)); + memcpy(buf, region->data, region_len); + ret = regmap_raw_write(regmap, reg, buf, region_len); if (ret != 0) { cs_dsp_err(dsp, - "%s.%d: Failed to write %d bytes at %d in %s: %d\n", - file, regions, - le32_to_cpu(region->len), offset, - region_name, ret); + "%s.%d: Failed to write %zu bytes at %d in %s: %d\n", + file, regions, region_len, offset, region_name, ret); goto out_fw; } } @@ -1638,8 +1601,6 @@ static int cs_dsp_load(struct cs_dsp *dsp, const struct firmware *firmware, ret = 0; out_fw: - cs_dsp_buf_free(&buf_list); - if (ret == -EOVERFLOW) cs_dsp_err(dsp, "%s: file content overflows file data\n", file); @@ -2171,7 +2132,9 @@ static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware struct cs_dsp_alg_region *alg_region; const char *region_name; int ret, pos, blocks, type, offset, reg, version; - struct cs_dsp_buf *buf; + u8 *buf __free(kfree) = NULL; + size_t buf_len = 0; + size_t region_len; if (!firmware) return 0; @@ -2313,20 +2276,22 @@ static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware } if (reg) { - buf = cs_dsp_buf_alloc(blk->data, - le32_to_cpu(blk->len), - &buf_list); - if (!buf) { - cs_dsp_err(dsp, "Out of memory\n"); - ret = -ENOMEM; - goto out_fw; + region_len = le32_to_cpu(blk->len); + if 
(region_len > buf_len) { + buf_len = round_up(region_len, PAGE_SIZE); + kfree(buf); + buf = kmalloc(buf_len, GFP_KERNEL | GFP_DMA); + if (!buf) { + ret = -ENOMEM; + goto out_fw; + } } - cs_dsp_dbg(dsp, "%s.%d: Writing %d bytes at %x\n", - file, blocks, le32_to_cpu(blk->len), - reg); - ret = regmap_raw_write(regmap, reg, buf->buf, - le32_to_cpu(blk->len)); + memcpy(buf, blk->data, region_len); + + cs_dsp_dbg(dsp, "%s.%d: Writing %zu bytes at %x\n", + file, blocks, region_len, reg); + ret = regmap_raw_write(regmap, reg, buf, region_len); if (ret != 0) { cs_dsp_err(dsp, "%s.%d: Failed to write to %x in %s: %d\n", @@ -2346,8 +2311,6 @@ static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware ret = 0; out_fw: - cs_dsp_buf_free(&buf_list); - if (ret == -EOVERFLOW) cs_dsp_err(dsp, "%s: file content overflows file data\n", file); @@ -2366,6 +2329,9 @@ static int cs_dsp_create_name(struct cs_dsp *dsp) return 0; } +static const struct cs_dsp_client_ops cs_dsp_default_client_ops = { +}; + static int cs_dsp_common_init(struct cs_dsp *dsp) { int ret; @@ -2379,6 +2345,9 @@ static int cs_dsp_common_init(struct cs_dsp *dsp) mutex_init(&dsp->pwr_lock); + if (!dsp->client_ops) + dsp->client_ops = &cs_dsp_default_client_ops; + #ifdef CONFIG_DEBUG_FS /* Ensure this is invalid if client never provides a debugfs root */ dsp->debugfs_root = ERR_PTR(-ENODEV); diff --git a/drivers/firmware/cirrus/test/cs_dsp_test_callbacks.c b/drivers/firmware/cirrus/test/cs_dsp_test_callbacks.c index 8a9b66a3b7d3..e5a389808e5f 100644 --- a/drivers/firmware/cirrus/test/cs_dsp_test_callbacks.c +++ b/drivers/firmware/cirrus/test/cs_dsp_test_callbacks.c @@ -600,6 +600,7 @@ KUNIT_ARRAY_PARAM(cs_dsp_callbacks_ops, static const struct cs_dsp_callbacks_test_param cs_dsp_no_callbacks_cases[] = { { .ops = &cs_dsp_callback_test_empty_client_ops, .case_name = "empty ops" }, + { .ops = NULL, .case_name = "NULL ops" }, }; KUNIT_ARRAY_PARAM(cs_dsp_no_callbacks, diff --git a/drivers/firmware/efi/arm-runtime.c b/drivers/firmware/efi/arm-runtime.c index 83092d93f36a..53a5336cde5a 100644 --- a/drivers/firmware/efi/arm-runtime.c +++ b/drivers/firmware/efi/arm-runtime.c @@ -12,18 +12,18 @@ #include <linux/io.h> #include <linux/memblock.h> #include <linux/mm_types.h> +#include <linux/pgalloc.h> +#include <linux/pgtable.h> #include <linux/preempt.h> #include <linux/rbtree.h> #include <linux/rwsem.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/spinlock.h> -#include <linux/pgtable.h> #include <asm/cacheflush.h> #include <asm/efi.h> #include <asm/mmu.h> -#include <asm/pgalloc.h> #if defined(CONFIG_PTDUMP_DEBUGFS) || defined(CONFIG_ARM_PTDUMP_DEBUGFS) #include <asm/ptdump.h> diff --git a/drivers/firmware/efi/cper-arm.c b/drivers/firmware/efi/cper-arm.c index f0a63d09d3c4..76542a53e202 100644 --- a/drivers/firmware/efi/cper-arm.c +++ b/drivers/firmware/efi/cper-arm.c @@ -93,15 +93,11 @@ static void cper_print_arm_err_info(const char *pfx, u32 type, bool proc_context_corrupt, corrected, precise_pc, restartable_pc; bool time_out, access_mode; - /* If the type is unknown, bail. */ - if (type > CPER_ARM_MAX_TYPE) - return; - /* * Vendor type errors have error information values that are vendor * specific. 
*/ - if (type == CPER_ARM_VENDOR_ERROR) + if (type & CPER_ARM_VENDOR_ERROR) return; if (error_info & CPER_ARM_ERR_VALID_TRANSACTION_TYPE) { @@ -116,43 +112,38 @@ static void cper_print_arm_err_info(const char *pfx, u32 type, if (error_info & CPER_ARM_ERR_VALID_OPERATION_TYPE) { op_type = ((error_info >> CPER_ARM_ERR_OPERATION_SHIFT) & CPER_ARM_ERR_OPERATION_MASK); - switch (type) { - case CPER_ARM_CACHE_ERROR: + if (type & CPER_ARM_CACHE_ERROR) { if (op_type < ARRAY_SIZE(arm_cache_err_op_strs)) { - printk("%soperation type: %s\n", pfx, + printk("%scache error, operation type: %s\n", pfx, arm_cache_err_op_strs[op_type]); } - break; - case CPER_ARM_TLB_ERROR: + } + if (type & CPER_ARM_TLB_ERROR) { if (op_type < ARRAY_SIZE(arm_tlb_err_op_strs)) { - printk("%soperation type: %s\n", pfx, + printk("%sTLB error, operation type: %s\n", pfx, arm_tlb_err_op_strs[op_type]); } - break; - case CPER_ARM_BUS_ERROR: + } + if (type & CPER_ARM_BUS_ERROR) { if (op_type < ARRAY_SIZE(arm_bus_err_op_strs)) { - printk("%soperation type: %s\n", pfx, + printk("%sbus error, operation type: %s\n", pfx, arm_bus_err_op_strs[op_type]); } - break; } } if (error_info & CPER_ARM_ERR_VALID_LEVEL) { level = ((error_info >> CPER_ARM_ERR_LEVEL_SHIFT) & CPER_ARM_ERR_LEVEL_MASK); - switch (type) { - case CPER_ARM_CACHE_ERROR: + if (type & CPER_ARM_CACHE_ERROR) printk("%scache level: %d\n", pfx, level); - break; - case CPER_ARM_TLB_ERROR: + + if (type & CPER_ARM_TLB_ERROR) printk("%sTLB level: %d\n", pfx, level); - break; - case CPER_ARM_BUS_ERROR: + + if (type & CPER_ARM_BUS_ERROR) printk("%saffinity level at which the bus error occurred: %d\n", pfx, level); - break; - } } if (error_info & CPER_ARM_ERR_VALID_PROC_CONTEXT_CORRUPT) { @@ -240,7 +231,8 @@ void cper_print_proc_arm(const char *pfx, int i, len, max_ctx_type; struct cper_arm_err_info *err_info; struct cper_arm_ctx_info *ctx_info; - char newpfx[64], infopfx[64]; + char newpfx[64], infopfx[ARRAY_SIZE(newpfx) + 1]; + char error_type[120]; printk("%sMIDR: 0x%016llx\n", pfx, proc->midr); @@ -289,9 +281,15 @@ void cper_print_proc_arm(const char *pfx, newpfx); } - printk("%serror_type: %d, %s\n", newpfx, err_info->type, - err_info->type < ARRAY_SIZE(cper_proc_error_type_strs) ? - cper_proc_error_type_strs[err_info->type] : "unknown"); + cper_bits_to_str(error_type, sizeof(error_type), + FIELD_GET(CPER_ARM_ERR_TYPE_MASK, err_info->type), + cper_proc_error_type_strs, + ARRAY_SIZE(cper_proc_error_type_strs)); + + printk("%serror_type: 0x%02x: %s%s\n", newpfx, err_info->type, + error_type, + (err_info->type & ~CPER_ARM_ERR_TYPE_MASK) ? " with reserved bit(s)" : ""); + if (err_info->validation_bits & CPER_ARM_INFO_VALID_ERR_INFO) { printk("%serror_info: 0x%016llx\n", newpfx, err_info->error_info); diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c index 928409199a1a..0232bd040f61 100644 --- a/drivers/firmware/efi/cper.c +++ b/drivers/firmware/efi/cper.c @@ -12,6 +12,7 @@ * Specification version 2.4. 
*/ +#include <linux/bitmap.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/time.h> @@ -69,7 +70,7 @@ const char *cper_severity_str(unsigned int severity) } EXPORT_SYMBOL_GPL(cper_severity_str); -/* +/** * cper_print_bits - print strings for set bits * @pfx: prefix for each line, including log level and prefix string * @bits: bit mask @@ -106,6 +107,65 @@ void cper_print_bits(const char *pfx, unsigned int bits, printk("%s\n", buf); } +/** + * cper_bits_to_str - return a string for set bits + * @buf: buffer to store the output string + * @buf_size: size of the output string buffer + * @bits: bit mask + * @strs: string array, indexed by bit position + * @strs_size: size of the string array: @strs + * + * Add to @buf the bitmask in hexadecimal. Then, for each set bit in @bits, + * add the corresponding string describing the bit in @strs to @buf. + * + * A typical example is:: + * + * const char * const bits[] = { + * "bit 3 name", + * "bit 4 name", + * "bit 5 name", + * }; + * char str[120]; + * unsigned int bitmask = BIT(3) | BIT(5); + * #define MASK GENMASK(5,3) + * + * cper_bits_to_str(str, sizeof(str), FIELD_GET(MASK, bitmask), + * bits, ARRAY_SIZE(bits)); + * + * The above code fills the string ``str`` with ``bit 3 name|bit 5 name``. + * + * Return: number of bytes stored or an error code if lower than zero. + */ +int cper_bits_to_str(char *buf, int buf_size, unsigned long bits, + const char * const strs[], unsigned int strs_size) +{ + int len = buf_size; + char *str = buf; + int i, size; + + *buf = '\0'; + + for_each_set_bit(i, &bits, strs_size) { + if (!(bits & BIT_ULL(i))) + continue; + + if (*buf && len > 0) { + *str = '|'; + len--; + str++; + } + + size = strscpy(str, strs[i], len); + if (size < 0) + return size; + + len -= size; + str += size; + } + return len - buf_size; +} +EXPORT_SYMBOL_GPL(cper_bits_to_str); + static const char * const proc_type_strs[] = { "IA32/X64", "IA64", diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c index 1ce428e2ac8a..a9070d00b833 100644 --- a/drivers/firmware/efi/efi.c +++ b/drivers/firmware/efi/efi.c @@ -74,6 +74,9 @@ struct mm_struct efi_mm = { .page_table_lock = __SPIN_LOCK_UNLOCKED(efi_mm.page_table_lock), .mmlist = LIST_HEAD_INIT(efi_mm.mmlist), .cpu_bitmap = { [BITS_TO_LONGS(NR_CPUS)] = 0}, +#ifdef CONFIG_SCHED_MM_CID + .mm_cid.lock = __RAW_SPIN_LOCK_UNLOCKED(efi_mm.mm_cid.lock), +#endif }; struct workqueue_struct *efi_rts_wq; diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile index 94b05e4451dd..7d15a85d579f 100644 --- a/drivers/firmware/efi/libstub/Makefile +++ b/drivers/firmware/efi/libstub/Makefile @@ -11,12 +11,12 @@ cflags-y := $(KBUILD_CFLAGS) cflags-$(CONFIG_X86_32) := -march=i386 cflags-$(CONFIG_X86_64) := -mcmodel=small -cflags-$(CONFIG_X86) += -m$(BITS) -D__KERNEL__ -std=gnu11 \ +cflags-$(CONFIG_X86) += -m$(BITS) -D__KERNEL__ -std=gnu11 -fms-extensions \ -fPIC -fno-strict-aliasing -mno-red-zone \ -mno-mmx -mno-sse -fshort-wchar \ -Wno-pointer-sign \ $(call cc-disable-warning, address-of-packed-member) \ - $(call cc-disable-warning, gnu) \ + $(if $(CONFIG_CC_IS_CLANG),-Wno-gnu -Wno-microsoft-anon-tag) \ -fno-asynchronous-unwind-tables \ $(CLANG_FLAGS) diff --git a/drivers/firmware/efi/libstub/efi-stub.c b/drivers/firmware/efi/libstub/efi-stub.c index 874f63b4a383..9cb814c5ba1b 100644 --- a/drivers/firmware/efi/libstub/efi-stub.c +++ b/drivers/firmware/efi/libstub/efi-stub.c @@ -56,7 +56,7 @@ static struct screen_info *setup_graphics(void) { struct 
screen_info *si, tmp = {}; - if (efi_setup_gop(&tmp) != EFI_SUCCESS) + if (efi_setup_graphics(&tmp, NULL) != EFI_SUCCESS) return NULL; si = alloc_screen_info(); diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h index f5ba032863a9..b2fb0c3fa721 100644 --- a/drivers/firmware/efi/libstub/efistub.h +++ b/drivers/firmware/efi/libstub/efistub.h @@ -34,6 +34,9 @@ #define EFI_ALLOC_LIMIT ULONG_MAX #endif +struct edid_info; +struct screen_info; + extern bool efi_no5lvl; extern bool efi_nochunk; extern bool efi_nokaslr; @@ -578,6 +581,32 @@ union efi_graphics_output_protocol { } mixed_mode; }; +typedef union efi_edid_discovered_protocol efi_edid_discovered_protocol_t; + +union efi_edid_discovered_protocol { + struct { + u32 size_of_edid; + u8 *edid; + }; + struct { + u32 size_of_edid; + u32 edid; + } mixed_mode; +}; + +typedef union efi_edid_active_protocol efi_edid_active_protocol_t; + +union efi_edid_active_protocol { + struct { + u32 size_of_edid; + u8 *edid; + }; + struct { + u32 size_of_edid; + u32 edid; + } mixed_mode; +}; + typedef union { struct { u32 revision; @@ -1085,7 +1114,7 @@ efi_status_t efi_parse_options(char const *cmdline); void efi_parse_option_graphics(char *option); -efi_status_t efi_setup_gop(struct screen_info *si); +efi_status_t efi_setup_graphics(struct screen_info *si, struct edid_info *edid); efi_status_t handle_cmdline_files(efi_loaded_image_t *image, const efi_char16_t *optstr, diff --git a/drivers/firmware/efi/libstub/gop.c b/drivers/firmware/efi/libstub/gop.c index 3785fb4986b4..72d74436a7a4 100644 --- a/drivers/firmware/efi/libstub/gop.c +++ b/drivers/firmware/efi/libstub/gop.c @@ -12,6 +12,7 @@ #include <linux/string.h> #include <asm/efi.h> #include <asm/setup.h> +#include <video/edid.h> #include "efistub.h" @@ -367,24 +368,31 @@ static void find_bits(u32 mask, u8 *pos, u8 *size) *size = __fls(mask) - *pos + 1; } -static void -setup_pixel_info(struct screen_info *si, u32 pixels_per_scan_line, - efi_pixel_bitmask_t pixel_info, int pixel_format) +static void setup_screen_info(struct screen_info *si, const efi_graphics_output_protocol_t *gop) { - if (pixel_format == PIXEL_BIT_MASK) { - find_bits(pixel_info.red_mask, - &si->red_pos, &si->red_size); - find_bits(pixel_info.green_mask, - &si->green_pos, &si->green_size); - find_bits(pixel_info.blue_mask, - &si->blue_pos, &si->blue_size); - find_bits(pixel_info.reserved_mask, - &si->rsvd_pos, &si->rsvd_size); - si->lfb_depth = si->red_size + si->green_size + - si->blue_size + si->rsvd_size; - si->lfb_linelength = (pixels_per_scan_line * si->lfb_depth) / 8; + const efi_graphics_output_protocol_mode_t *mode = efi_table_attr(gop, mode); + const efi_graphics_output_mode_info_t *info = efi_table_attr(mode, info); + + si->orig_video_isVGA = VIDEO_TYPE_EFI; + + si->lfb_width = info->horizontal_resolution; + si->lfb_height = info->vertical_resolution; + + efi_set_u64_split(efi_table_attr(mode, frame_buffer_base), + &si->lfb_base, &si->ext_lfb_base); + if (si->ext_lfb_base) + si->capabilities |= VIDEO_CAPABILITY_64BIT_BASE; + si->pages = 1; + + if (info->pixel_format == PIXEL_BIT_MASK) { + find_bits(info->pixel_information.red_mask, &si->red_pos, &si->red_size); + find_bits(info->pixel_information.green_mask, &si->green_pos, &si->green_size); + find_bits(info->pixel_information.blue_mask, &si->blue_pos, &si->blue_size); + find_bits(info->pixel_information.reserved_mask, &si->rsvd_pos, &si->rsvd_size); + si->lfb_depth = si->red_size + si->green_size + si->blue_size + si->rsvd_size; + 
si->lfb_linelength = (info->pixels_per_scan_line * si->lfb_depth) / 8; } else { - if (pixel_format == PIXEL_RGB_RESERVED_8BIT_PER_COLOR) { + if (info->pixel_format == PIXEL_RGB_RESERVED_8BIT_PER_COLOR) { si->red_pos = 0; si->blue_pos = 16; } else /* PIXEL_BGR_RESERVED_8BIT_PER_COLOR */ { @@ -394,20 +402,33 @@ setup_pixel_info(struct screen_info *si, u32 pixels_per_scan_line, si->green_pos = 8; si->rsvd_pos = 24; - si->red_size = si->green_size = - si->blue_size = si->rsvd_size = 8; - + si->red_size = 8; + si->green_size = 8; + si->blue_size = 8; + si->rsvd_size = 8; si->lfb_depth = 32; - si->lfb_linelength = pixels_per_scan_line * 4; + si->lfb_linelength = info->pixels_per_scan_line * 4; } + + si->lfb_size = si->lfb_linelength * si->lfb_height; + si->capabilities |= VIDEO_CAPABILITY_SKIP_QUIRKS; } -static efi_graphics_output_protocol_t *find_gop(unsigned long num, - const efi_handle_t handles[]) +static void setup_edid_info(struct edid_info *edid, u32 gop_size_of_edid, u8 *gop_edid) +{ + if (!gop_edid || gop_size_of_edid < 128) + memset(edid->dummy, 0, sizeof(edid->dummy)); + else + memcpy(edid->dummy, gop_edid, min(gop_size_of_edid, sizeof(edid->dummy))); +} + +static efi_handle_t find_handle_with_primary_gop(unsigned long num, const efi_handle_t handles[], + efi_graphics_output_protocol_t **found_gop) { efi_graphics_output_protocol_t *first_gop; - efi_handle_t h; + efi_handle_t h, first_gop_handle; + first_gop_handle = NULL; first_gop = NULL; for_each_efi_handle(h, handles, num) { @@ -442,21 +463,25 @@ static efi_graphics_output_protocol_t *find_gop(unsigned long num, */ status = efi_bs_call(handle_protocol, h, &EFI_CONSOLE_OUT_DEVICE_GUID, &dummy); - if (status == EFI_SUCCESS) - return gop; - - if (!first_gop) + if (status == EFI_SUCCESS) { + if (found_gop) + *found_gop = gop; + return h; + } else if (!first_gop_handle) { + first_gop_handle = h; first_gop = gop; + } } - return first_gop; + if (found_gop) + *found_gop = first_gop; + return first_gop_handle; } -efi_status_t efi_setup_gop(struct screen_info *si) +efi_status_t efi_setup_graphics(struct screen_info *si, struct edid_info *edid) { efi_handle_t *handles __free(efi_pool) = NULL; - efi_graphics_output_protocol_mode_t *mode; - efi_graphics_output_mode_info_t *info; + efi_handle_t handle; efi_graphics_output_protocol_t *gop; efi_status_t status; unsigned long num; @@ -467,35 +492,41 @@ efi_status_t efi_setup_gop(struct screen_info *si) if (status != EFI_SUCCESS) return status; - gop = find_gop(num, handles); - if (!gop) + handle = find_handle_with_primary_gop(num, handles, &gop); + if (!handle) return EFI_NOT_FOUND; /* Change mode if requested */ set_mode(gop); /* EFI framebuffer */ - mode = efi_table_attr(gop, mode); - info = efi_table_attr(mode, info); - - si->orig_video_isVGA = VIDEO_TYPE_EFI; - - si->lfb_width = info->horizontal_resolution; - si->lfb_height = info->vertical_resolution; - - efi_set_u64_split(efi_table_attr(mode, frame_buffer_base), - &si->lfb_base, &si->ext_lfb_base); - if (si->ext_lfb_base) - si->capabilities |= VIDEO_CAPABILITY_64BIT_BASE; - - si->pages = 1; - - setup_pixel_info(si, info->pixels_per_scan_line, - info->pixel_information, info->pixel_format); - - si->lfb_size = si->lfb_linelength * si->lfb_height; + if (si) + setup_screen_info(si, gop); + + /* Display EDID for primary GOP */ + if (edid) { + efi_edid_discovered_protocol_t *discovered_edid; + efi_edid_active_protocol_t *active_edid; + u32 gop_size_of_edid = 0; + u8 *gop_edid = NULL; + + status = efi_bs_call(handle_protocol, handle, 
&EFI_EDID_ACTIVE_PROTOCOL_GUID, + (void **)&active_edid); + if (status == EFI_SUCCESS) { + gop_size_of_edid = active_edid->size_of_edid; + gop_edid = active_edid->edid; + } else { + status = efi_bs_call(handle_protocol, handle, + &EFI_EDID_DISCOVERED_PROTOCOL_GUID, + (void **)&discovered_edid); + if (status == EFI_SUCCESS) { + gop_size_of_edid = discovered_edid->size_of_edid; + gop_edid = discovered_edid->edid; + } + } - si->capabilities |= VIDEO_CAPABILITY_SKIP_QUIRKS; + setup_edid_info(edid, gop_size_of_edid, gop_edid); + } return EFI_SUCCESS; } diff --git a/drivers/firmware/efi/libstub/x86-5lvl.c b/drivers/firmware/efi/libstub/x86-5lvl.c index f1c5fb45d5f7..c00d0ae7ed5d 100644 --- a/drivers/firmware/efi/libstub/x86-5lvl.c +++ b/drivers/firmware/efi/libstub/x86-5lvl.c @@ -66,7 +66,7 @@ void efi_5level_switch(void) bool have_la57 = native_read_cr4() & X86_CR4_LA57; bool need_toggle = want_la57 ^ have_la57; u64 *pgt = (void *)la57_toggle + PAGE_SIZE; - u64 *cr3 = (u64 *)__native_read_cr3(); + pgd_t *cr3 = (pgd_t *)native_read_cr3_pa(); u64 *new_cr3; if (!la57_toggle || !need_toggle) @@ -82,7 +82,7 @@ void efi_5level_switch(void) new_cr3[0] = (u64)cr3 | _PAGE_TABLE_NOENC; } else { /* take the new root table pointer from the current entry #0 */ - new_cr3 = (u64 *)(cr3[0] & PAGE_MASK); + new_cr3 = (u64 *)(native_pgd_val(cr3[0]) & PTE_PFN_MASK); /* copy the new root table if it is not 32-bit addressable */ if ((u64)new_cr3 > U32_MAX) diff --git a/drivers/firmware/efi/libstub/x86-stub.c b/drivers/firmware/efi/libstub/x86-stub.c index 761121a77f9e..cef32e2c82d8 100644 --- a/drivers/firmware/efi/libstub/x86-stub.c +++ b/drivers/firmware/efi/libstub/x86-stub.c @@ -203,6 +203,104 @@ static void retrieve_apple_device_properties(struct boot_params *boot_params) } } +struct smbios_entry_point { + u8 anchor[4]; + u8 ep_checksum; + u8 ep_length; + u8 major_version; + u8 minor_version; + u16 max_size_entry; + u8 ep_rev; + u8 reserved[5]; + + struct __packed { + u8 anchor[5]; + u8 checksum; + u16 st_length; + u32 st_address; + u16 number_of_entries; + u8 bcd_rev; + } intm; +}; + +static bool verify_ep_checksum(const void *ptr, int length) +{ + u8 sum = 0; + + for (int i = 0; i < length; i++) + sum += ((u8 *)ptr)[i]; + + return sum == 0; +} + +static bool verify_ep_integrity(const struct smbios_entry_point *ep) +{ + if (memcmp(ep->anchor, "_SM_", sizeof(ep->anchor)) != 0) + return false; + + if (memcmp(ep->intm.anchor, "_DMI_", sizeof(ep->intm.anchor)) != 0) + return false; + + if (!verify_ep_checksum(ep, ep->ep_length) || + !verify_ep_checksum(&ep->intm, sizeof(ep->intm))) + return false; + + return true; +} + +static const struct efi_smbios_record *search_record(void *table, u32 length, + u8 type) +{ + const u8 *p, *end; + + p = (u8 *)table; + end = p + length; + + while (p + sizeof(struct efi_smbios_record) < end) { + const struct efi_smbios_record *hdr = + (struct efi_smbios_record *)p; + const u8 *next; + + if (hdr->type == type) + return hdr; + + /* Type 127 = End-of-Table */ + if (hdr->type == 0x7F) + return NULL; + + /* Jumping to the unformed section */ + next = p + hdr->length; + + /* Unformed section ends with 0000h */ + while ((next[0] != 0 || next[1] != 0) && next + 1 < end) + next++; + + next += 2; + p = next; + } + + return NULL; +} + +static const struct efi_smbios_record *get_table_record(u8 type) +{ + const struct smbios_entry_point *ep; + + /* + * Locate the legacy 32-bit SMBIOS entrypoint in memory, and parse it + * directly. Needed by some Macs that do not implement the EFI protocol. 
+ */ + ep = get_efi_config_table(SMBIOS_TABLE_GUID); + if (!ep) + return NULL; + + if (!verify_ep_integrity(ep)) + return NULL; + + return search_record((void *)(unsigned long)ep->intm.st_address, + ep->intm.st_length, type); +} + static bool apple_match_product_name(void) { static const char type1_product_matches[][15] = { @@ -218,7 +316,8 @@ static bool apple_match_product_name(void) const struct efi_smbios_type1_record *record; const u8 *product; - record = (struct efi_smbios_type1_record *)efi_get_smbios_record(1); + record = (struct efi_smbios_type1_record *) + (efi_get_smbios_record(1) ?: get_table_record(1)); if (!record) return false; @@ -388,8 +487,9 @@ static void setup_quirks(struct boot_params *boot_params) static void setup_graphics(struct boot_params *boot_params) { struct screen_info *si = memset(&boot_params->screen_info, 0, sizeof(*si)); + struct edid_info *edid = memset(&boot_params->edid_info, 0, sizeof(*edid)); - efi_setup_gop(si); + efi_setup_graphics(si, edid); } static void __noreturn efi_exit(efi_handle_t handle, efi_status_t status) diff --git a/drivers/firmware/efi/memattr.c b/drivers/firmware/efi/memattr.c index c38b1a335590..e727cc5909cb 100644 --- a/drivers/firmware/efi/memattr.c +++ b/drivers/firmware/efi/memattr.c @@ -19,19 +19,19 @@ unsigned long __ro_after_init efi_mem_attr_table = EFI_INVALID_TABLE_ADDR; * Reserve the memory associated with the Memory Attributes configuration * table, if it exists. */ -int __init efi_memattr_init(void) +void __init efi_memattr_init(void) { efi_memory_attributes_table_t *tbl; unsigned long size; if (efi_mem_attr_table == EFI_INVALID_TABLE_ADDR) - return 0; + return; tbl = early_memremap(efi_mem_attr_table, sizeof(*tbl)); if (!tbl) { pr_err("Failed to map EFI Memory Attributes table @ 0x%lx\n", efi_mem_attr_table); - return -ENOMEM; + return; } if (tbl->version > 2) { @@ -61,7 +61,6 @@ int __init efi_memattr_init(void) unmap: early_memunmap(tbl, sizeof(*tbl)); - return 0; } /* diff --git a/drivers/firmware/efi/riscv-runtime.c b/drivers/firmware/efi/riscv-runtime.c index fa71cd898120..66f584a228d0 100644 --- a/drivers/firmware/efi/riscv-runtime.c +++ b/drivers/firmware/efi/riscv-runtime.c @@ -14,18 +14,18 @@ #include <linux/io.h> #include <linux/memblock.h> #include <linux/mm_types.h> +#include <linux/pgalloc.h> +#include <linux/pgtable.h> #include <linux/preempt.h> #include <linux/rbtree.h> #include <linux/rwsem.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/spinlock.h> -#include <linux/pgtable.h> #include <asm/cacheflush.h> #include <asm/efi.h> #include <asm/mmu.h> -#include <asm/pgalloc.h> static bool __init efi_virtmap_init(void) { @@ -36,20 +36,12 @@ static bool __init efi_virtmap_init(void) init_new_context(NULL, &efi_mm); for_each_efi_memory_desc(md) { - phys_addr_t phys = md->phys_addr; - int ret; - if (!(md->attribute & EFI_MEMORY_RUNTIME)) continue; if (md->virt_addr == U64_MAX) return false; - ret = efi_create_mapping(&efi_mm, md); - if (ret) { - pr_warn(" EFI remap %pa: failed to create mapping (%d)\n", - &phys, ret); - return false; - } + efi_create_mapping(&efi_mm, md); } if (efi_memattr_apply_permissions(&efi_mm, efi_set_mapping_permissions)) diff --git a/drivers/firmware/efi/runtime-wrappers.c b/drivers/firmware/efi/runtime-wrappers.c index 708b777857d3..da8d29621644 100644 --- a/drivers/firmware/efi/runtime-wrappers.c +++ b/drivers/firmware/efi/runtime-wrappers.c @@ -202,6 +202,8 @@ void efi_call_virt_check_flags(unsigned long flags, const void *caller) */ static 
DEFINE_SEMAPHORE(efi_runtime_lock, 1); +static struct task_struct *efi_runtime_lock_owner; + /* * Expose the EFI runtime lock to the UV platform */ @@ -219,6 +221,8 @@ static void __nocfi efi_call_rts(struct work_struct *work) efi_status_t status = EFI_NOT_FOUND; unsigned long flags; + efi_runtime_lock_owner = current; + arch_efi_call_virt_setup(); flags = efi_call_virt_save_flags(); @@ -310,6 +314,7 @@ static void __nocfi efi_call_rts(struct work_struct *work) efi_rts_work.status = status; complete(&efi_rts_work.efi_rts_comp); + efi_runtime_lock_owner = NULL; } static efi_status_t __efi_queue_work(enum efi_rts_ids id, @@ -444,8 +449,10 @@ virt_efi_set_variable_nb(efi_char16_t *name, efi_guid_t *vendor, u32 attr, if (down_trylock(&efi_runtime_lock)) return EFI_NOT_READY; + efi_runtime_lock_owner = current; status = efi_call_virt_pointer(efi.runtime, set_variable, name, vendor, attr, data_size, data); + efi_runtime_lock_owner = NULL; up(&efi_runtime_lock); return status; } @@ -481,9 +488,11 @@ virt_efi_query_variable_info_nb(u32 attr, u64 *storage_space, if (down_trylock(&efi_runtime_lock)) return EFI_NOT_READY; + efi_runtime_lock_owner = current; status = efi_call_virt_pointer(efi.runtime, query_variable_info, attr, storage_space, remaining_space, max_variable_size); + efi_runtime_lock_owner = NULL; up(&efi_runtime_lock); return status; } @@ -509,12 +518,13 @@ virt_efi_reset_system(int reset_type, efi_status_t status, return; } + efi_runtime_lock_owner = current; arch_efi_call_virt_setup(); efi_rts_work.efi_rts_id = EFI_RESET_SYSTEM; arch_efi_call_virt(efi.runtime, reset_system, reset_type, status, data_size, data); arch_efi_call_virt_teardown(); - + efi_runtime_lock_owner = NULL; up(&efi_runtime_lock); } @@ -587,3 +597,8 @@ efi_call_acpi_prm_handler(efi_status_t (__efiapi *handler_addr)(u64, void *), } #endif + +void efi_runtime_assert_lock_held(void) +{ + WARN_ON(efi_runtime_lock_owner != current); +} diff --git a/drivers/firmware/efi/stmm/mm_communication.h b/drivers/firmware/efi/stmm/mm_communication.h index 52a1f32cd1eb..06e7663f96dc 100644 --- a/drivers/firmware/efi/stmm/mm_communication.h +++ b/drivers/firmware/efi/stmm/mm_communication.h @@ -32,7 +32,7 @@ /** * struct efi_mm_communicate_header - Header used for SMM variable communication - + * * @header_guid: header use for disambiguation of content * @message_len: length of the message. Does not include the size of the * header @@ -111,7 +111,7 @@ struct efi_mm_communicate_header { /** * struct smm_variable_communicate_header - Used for SMM variable communication - + * * @function: function to call in Smm. * @ret_status: return status * @data: payload @@ -128,7 +128,7 @@ struct smm_variable_communicate_header { /** * struct smm_variable_access - Used to communicate with StMM by * SetVariable and GetVariable. 
- + * * @guid: vendor GUID * @data_size: size of EFI variable data * @name_size: size of EFI name diff --git a/drivers/firmware/imx/imx-scu-irq.c b/drivers/firmware/imx/imx-scu-irq.c index 6125cccc9ba7..a68d38f89254 100644 --- a/drivers/firmware/imx/imx-scu-irq.c +++ b/drivers/firmware/imx/imx-scu-irq.c @@ -203,6 +203,18 @@ int imx_scu_enable_general_irq_channel(struct device *dev) struct mbox_chan *ch; int ret = 0, i = 0; + if (!of_parse_phandle_with_args(dev->of_node, "mboxes", + "#mbox-cells", 0, &spec)) { + i = of_alias_get_id(spec.np, "mu"); + of_node_put(spec.np); + } + + /* use mu1 as general mu irq channel if failed */ + if (i < 0) + i = 1; + + mu_resource_id = IMX_SC_R_MU_0A + i; + ret = imx_scu_get_handle(&imx_sc_irq_ipc_handle); if (ret) return ret; @@ -214,27 +226,16 @@ int imx_scu_enable_general_irq_channel(struct device *dev) cl->dev = dev; cl->rx_callback = imx_scu_irq_callback; + INIT_WORK(&imx_sc_irq_work, imx_scu_irq_work_handler); + /* SCU general IRQ uses general interrupt channel 3 */ ch = mbox_request_channel_byname(cl, "gip3"); if (IS_ERR(ch)) { ret = PTR_ERR(ch); dev_err(dev, "failed to request mbox chan gip3, ret %d\n", ret); - devm_kfree(dev, cl); - return ret; + goto free_cl; } - INIT_WORK(&imx_sc_irq_work, imx_scu_irq_work_handler); - - if (!of_parse_phandle_with_args(dev->of_node, "mboxes", - "#mbox-cells", 0, &spec)) - i = of_alias_get_id(spec.np, "mu"); - - /* use mu1 as general mu irq channel if failed */ - if (i < 0) - i = 1; - - mu_resource_id = IMX_SC_R_MU_0A + i; - /* Create directory under /sysfs/firmware */ wakeup_obj = kobject_create_and_add("scu_wakeup_source", firmware_kobj); if (!wakeup_obj) { @@ -253,7 +254,8 @@ int imx_scu_enable_general_irq_channel(struct device *dev) free_ch: mbox_free_channel(ch); +free_cl: + devm_kfree(dev, cl); return ret; } -EXPORT_SYMBOL(imx_scu_enable_general_irq_channel); diff --git a/drivers/firmware/imx/imx-scu.c b/drivers/firmware/imx/imx-scu.c index 8c28e25ddc8a..67b267a7408a 100644 --- a/drivers/firmware/imx/imx-scu.c +++ b/drivers/firmware/imx/imx-scu.c @@ -73,9 +73,9 @@ static int imx_sc_linux_errmap[IMX_SC_ERR_LAST] = { -EACCES, /* IMX_SC_ERR_NOACCESS */ -EACCES, /* IMX_SC_ERR_LOCKED */ -ERANGE, /* IMX_SC_ERR_UNAVAILABLE */ - -EEXIST, /* IMX_SC_ERR_NOTFOUND */ - -EPERM, /* IMX_SC_ERR_NOPOWER */ - -EPIPE, /* IMX_SC_ERR_IPC */ + -ENOENT, /* IMX_SC_ERR_NOTFOUND */ + -ENODEV, /* IMX_SC_ERR_NOPOWER */ + -ECOMM, /* IMX_SC_ERR_IPC */ -EBUSY, /* IMX_SC_ERR_BUSY */ -EIO, /* IMX_SC_ERR_FAIL */ }; @@ -324,7 +324,9 @@ static int imx_scu_probe(struct platform_device *pdev) } sc_ipc->dev = dev; - mutex_init(&sc_ipc->lock); + ret = devm_mutex_init(dev, &sc_ipc->lock); + if (ret) + return ret; init_completion(&sc_ipc->done); imx_sc_ipc_handle = sc_ipc; @@ -352,6 +354,7 @@ static struct platform_driver imx_scu_driver = { .driver = { .name = "imx-scu", .of_match_table = imx_scu_match, + .suppress_bind_attrs = true, }, .probe = imx_scu_probe, }; diff --git a/drivers/firmware/qcom/qcom_scm.c b/drivers/firmware/qcom/qcom_scm.c index e777b7cb9b12..1a6f85e463e0 100644 --- a/drivers/firmware/qcom/qcom_scm.c +++ b/drivers/firmware/qcom/qcom_scm.c @@ -2018,21 +2018,6 @@ static const struct of_device_id qcom_scm_qseecom_allowlist[] __maybe_unused = { { } }; -static bool qcom_scm_qseecom_machine_is_allowed(void) -{ - struct device_node *np; - bool match; - - np = of_find_node_by_path("/"); - if (!np) - return false; - - match = of_match_node(qcom_scm_qseecom_allowlist, np); - of_node_put(np); - - return match; -} - static void 
qcom_scm_qseecom_free(void *data) { struct platform_device *qseecom_dev = data; @@ -2064,7 +2049,7 @@ static int qcom_scm_qseecom_init(struct qcom_scm *scm) dev_info(scm->dev, "qseecom: found qseecom with version 0x%x\n", version); - if (!qcom_scm_qseecom_machine_is_allowed()) { + if (!of_machine_device_match(qcom_scm_qseecom_allowlist)) { dev_info(scm->dev, "qseecom: untested machine, skipping\n"); return 0; } diff --git a/drivers/firmware/stratix10-rsu.c b/drivers/firmware/stratix10-rsu.c index 1ea39a0a76c7..41da07c445a6 100644 --- a/drivers/firmware/stratix10-rsu.c +++ b/drivers/firmware/stratix10-rsu.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2018-2019, Intel Corporation + * Copyright (C) 2025, Altera Corporation */ #include <linux/arm-smccc.h> @@ -14,11 +15,9 @@ #include <linux/firmware/intel/stratix10-svc-client.h> #include <linux/string.h> #include <linux/sysfs.h> +#include <linux/delay.h> -#define RSU_STATE_MASK GENMASK_ULL(31, 0) -#define RSU_VERSION_MASK GENMASK_ULL(63, 32) -#define RSU_ERROR_LOCATION_MASK GENMASK_ULL(31, 0) -#define RSU_ERROR_DETAIL_MASK GENMASK_ULL(63, 32) +#define RSU_ERASE_SIZE_MASK GENMASK_ULL(63, 32) #define RSU_DCMF0_MASK GENMASK_ULL(31, 0) #define RSU_DCMF1_MASK GENMASK_ULL(63, 32) #define RSU_DCMF2_MASK GENMASK_ULL(31, 0) @@ -35,7 +34,8 @@ #define INVALID_DCMF_STATUS 0xFFFFFFFF #define INVALID_SPT_ADDRESS 0x0 -#define RSU_GET_SPT_CMD 0x5A +#define RSU_RETRY_SLEEP_MS (1U) +#define RSU_ASYNC_MSG_RETRY (3U) #define RSU_GET_SPT_RESP_LEN (4 * sizeof(unsigned int)) typedef void (*rsu_callback)(struct stratix10_svc_client *client, @@ -64,7 +64,6 @@ typedef void (*rsu_callback)(struct stratix10_svc_client *client, * @max_retry: the preset max retry value * @spt0_address: address of spt0 * @spt1_address: address of spt1 - * @get_spt_response_buf: response from sdm for get_spt command */ struct stratix10_rsu_priv { struct stratix10_svc_chan *chan; @@ -99,47 +98,32 @@ struct stratix10_rsu_priv { unsigned long spt0_address; unsigned long spt1_address; - - unsigned int *get_spt_response_buf; }; +typedef void (*rsu_async_callback)(struct device *dev, + struct stratix10_rsu_priv *priv, struct stratix10_svc_cb_data *data); + /** - * rsu_status_callback() - Status callback from Intel Service Layer - * @client: pointer to service client + * rsu_async_status_callback() - Status callback from rsu_async_send() + * @dev: pointer to device object + * @priv: pointer to priv object * @data: pointer to callback data structure * - * Callback from Intel service layer for RSU status request. Status is - * only updated after a system reboot, so a get updated status call is - * made during driver probe. + * Callback from rsu_async_send() to get the system rsu error status. 
*/ -static void rsu_status_callback(struct stratix10_svc_client *client, - struct stratix10_svc_cb_data *data) +static void rsu_async_status_callback(struct device *dev, + struct stratix10_rsu_priv *priv, + struct stratix10_svc_cb_data *data) { - struct stratix10_rsu_priv *priv = client->priv; - struct arm_smccc_res *res = (struct arm_smccc_res *)data->kaddr1; - - if (data->status == BIT(SVC_STATUS_OK)) { - priv->status.version = FIELD_GET(RSU_VERSION_MASK, - res->a2); - priv->status.state = FIELD_GET(RSU_STATE_MASK, res->a2); - priv->status.fail_image = res->a1; - priv->status.current_image = res->a0; - priv->status.error_location = - FIELD_GET(RSU_ERROR_LOCATION_MASK, res->a3); - priv->status.error_details = - FIELD_GET(RSU_ERROR_DETAIL_MASK, res->a3); - } else { - dev_err(client->dev, "COMMAND_RSU_STATUS returned 0x%lX\n", - res->a0); - priv->status.version = 0; - priv->status.state = 0; - priv->status.fail_image = 0; - priv->status.current_image = 0; - priv->status.error_location = 0; - priv->status.error_details = 0; - } - - complete(&priv->completion); + struct arm_smccc_1_2_regs *res = (struct arm_smccc_1_2_regs *)data->kaddr1; + + priv->status.current_image = res->a2; + priv->status.fail_image = res->a3; + priv->status.state = res->a4; + priv->status.version = res->a5; + priv->status.error_location = res->a7; + priv->status.error_details = res->a8; + priv->retry_counter = res->a9; } /** @@ -163,32 +147,6 @@ static void rsu_command_callback(struct stratix10_svc_client *client, complete(&priv->completion); } -/** - * rsu_retry_callback() - Callback from Intel service layer for getting - * the current image's retry counter from the firmware - * @client: pointer to client - * @data: pointer to callback data structure - * - * Callback from Intel service layer for retry counter, which is used by - * user to know how many times the images is still allowed to reload - * itself before giving up and starting RSU fail-over flow. - */ -static void rsu_retry_callback(struct stratix10_svc_client *client, - struct stratix10_svc_cb_data *data) -{ - struct stratix10_rsu_priv *priv = client->priv; - unsigned int *counter = (unsigned int *)data->kaddr1; - - if (data->status == BIT(SVC_STATUS_OK)) - priv->retry_counter = *counter; - else if (data->status == BIT(SVC_STATUS_NO_SUPPORT)) - dev_warn(client->dev, "Secure FW doesn't support retry\n"); - else - dev_err(client->dev, "Failed to get retry counter %lu\n", - BIT(data->status)); - - complete(&priv->completion); -} /** * rsu_max_retry_callback() - Callback from Intel service layer for getting @@ -270,34 +228,19 @@ static void rsu_dcmf_status_callback(struct stratix10_svc_client *client, complete(&priv->completion); } -static void rsu_get_spt_callback(struct stratix10_svc_client *client, - struct stratix10_svc_cb_data *data) +/** + * rsu_async_get_spt_table_callback() - Callback to be used by the rsu_async_send() + * to retrieve the SPT table information. 
+ * @dev: pointer to device object + * @priv: pointer to priv object + * @data: pointer to callback data structure + */ +static void rsu_async_get_spt_table_callback(struct device *dev, + struct stratix10_rsu_priv *priv, + struct stratix10_svc_cb_data *data) { - struct stratix10_rsu_priv *priv = client->priv; - unsigned long *mbox_err = (unsigned long *)data->kaddr1; - unsigned long *resp_len = (unsigned long *)data->kaddr2; - - if (data->status != BIT(SVC_STATUS_OK) || (*mbox_err) || - (*resp_len != RSU_GET_SPT_RESP_LEN)) - goto error; - - priv->spt0_address = priv->get_spt_response_buf[0]; - priv->spt0_address <<= 32; - priv->spt0_address |= priv->get_spt_response_buf[1]; - - priv->spt1_address = priv->get_spt_response_buf[2]; - priv->spt1_address <<= 32; - priv->spt1_address |= priv->get_spt_response_buf[3]; - - goto complete; - -error: - dev_err(client->dev, "failed to get SPTs\n"); - -complete: - stratix10_svc_free_memory(priv->chan, priv->get_spt_response_buf); - priv->get_spt_response_buf = NULL; - complete(&priv->completion); + priv->spt0_address = *((unsigned long *)data->kaddr1); + priv->spt1_address = *((unsigned long *)data->kaddr2); } /** @@ -329,14 +272,6 @@ static int rsu_send_msg(struct stratix10_rsu_priv *priv, if (arg) msg.arg[0] = arg; - if (command == COMMAND_MBOX_SEND_CMD) { - msg.arg[1] = 0; - msg.payload = NULL; - msg.payload_length = 0; - msg.payload_output = priv->get_spt_response_buf; - msg.payload_length_output = RSU_GET_SPT_RESP_LEN; - } - ret = stratix10_svc_send(priv->chan, &msg); if (ret < 0) goto status_done; @@ -362,6 +297,95 @@ status_done: return ret; } +/** + * soc64_async_callback() - Callback from Intel service layer for async requests + * @ptr: pointer to the completion object + */ +static void soc64_async_callback(void *ptr) +{ + if (ptr) + complete(ptr); +} + +/** + * rsu_send_async_msg() - send an async message to Intel service layer + * @dev: pointer to device object + * @priv: pointer to rsu private data + * @command: RSU status or update command + * @arg: the request argument, notify status + * @callback: function pointer for the callback (status or update) + */ +static int rsu_send_async_msg(struct device *dev, struct stratix10_rsu_priv *priv, + enum stratix10_svc_command_code command, + unsigned long arg, + rsu_async_callback callback) +{ + struct stratix10_svc_client_msg msg = {0}; + struct stratix10_svc_cb_data data = {0}; + struct completion completion; + int status, index, ret; + void *handle = NULL; + + msg.command = command; + msg.arg[0] = arg; + + init_completion(&completion); + + for (index = 0; index < RSU_ASYNC_MSG_RETRY; index++) { + status = stratix10_svc_async_send(priv->chan, &msg, + &handle, soc64_async_callback, + &completion); + if (status == 0) + break; + dev_warn(dev, "Failed to send async message\n"); + msleep(RSU_RETRY_SLEEP_MS); + } + + if (status && !handle) { + dev_err(dev, "Failed to send async message\n"); + return -ETIMEDOUT; + } + + ret = wait_for_completion_io_timeout(&completion, RSU_TIMEOUT); + if (ret > 0) + dev_dbg(dev, "Received async interrupt\n"); + else if (ret == 0) + dev_dbg(dev, "Timeout occurred. 
Trying to poll the response\n"); + + for (index = 0; index < RSU_ASYNC_MSG_RETRY; index++) { + status = stratix10_svc_async_poll(priv->chan, handle, &data); + if (status == -EAGAIN) { + dev_dbg(dev, "Async message is still in progress\n"); + } else if (status < 0) { + dev_alert(dev, "Failed to poll async message\n"); + ret = -ETIMEDOUT; + } else if (status == 0) { + ret = 0; + break; + } + msleep(RSU_RETRY_SLEEP_MS); + } + + if (ret) { + dev_err(dev, "Failed to get async response\n"); + goto status_done; + } + + if (data.status == 0) { + ret = 0; + if (callback) + callback(dev, priv, &data); + } else { + dev_err(dev, "%s returned 0x%x from SDM\n", __func__, + data.status); + ret = -EFAULT; + } + +status_done: + stratix10_svc_async_done(priv->chan, handle); + return ret; +} + /* * This driver exposes some optional features of the Intel Stratix 10 SoC FPGA. * The sysfs interfaces exposed here are FPGA Remote System Update (RSU) @@ -454,8 +478,7 @@ static ssize_t max_retry_show(struct device *dev, if (!priv) return -ENODEV; - return scnprintf(buf, sizeof(priv->max_retry), - "0x%08x\n", priv->max_retry); + return sysfs_emit(buf, "0x%08x\n", priv->max_retry); } static ssize_t dcmf0_show(struct device *dev, @@ -597,27 +620,20 @@ static ssize_t notify_store(struct device *dev, if (ret) return ret; - ret = rsu_send_msg(priv, COMMAND_RSU_NOTIFY, - status, rsu_command_callback); + ret = rsu_send_async_msg(dev, priv, COMMAND_RSU_NOTIFY, status, NULL); if (ret) { dev_err(dev, "Error, RSU notify returned %i\n", ret); return ret; } /* to get the updated state */ - ret = rsu_send_msg(priv, COMMAND_RSU_STATUS, - 0, rsu_status_callback); + ret = rsu_send_async_msg(dev, priv, COMMAND_RSU_STATUS, 0, + rsu_async_status_callback); if (ret) { dev_err(dev, "Error, getting RSU status %i\n", ret); return ret; } - ret = rsu_send_msg(priv, COMMAND_RSU_RETRY, 0, rsu_retry_callback); - if (ret) { - dev_err(dev, "Error, getting RSU retry %i\n", ret); - return ret; - } - return count; } @@ -632,7 +648,7 @@ static ssize_t spt0_address_show(struct device *dev, if (priv->spt0_address == INVALID_SPT_ADDRESS) return -EIO; - return scnprintf(buf, PAGE_SIZE, "0x%08lx\n", priv->spt0_address); + return sysfs_emit(buf, "0x%08lx\n", priv->spt0_address); } static ssize_t spt1_address_show(struct device *dev, @@ -646,7 +662,7 @@ static ssize_t spt1_address_show(struct device *dev, if (priv->spt1_address == INVALID_SPT_ADDRESS) return -EIO; - return scnprintf(buf, PAGE_SIZE, "0x%08lx\n", priv->spt1_address); + return sysfs_emit(buf, "0x%08lx\n", priv->spt1_address); } static DEVICE_ATTR_RO(current_image); @@ -737,12 +753,19 @@ static int stratix10_rsu_probe(struct platform_device *pdev) return PTR_ERR(priv->chan); } + ret = stratix10_svc_add_async_client(priv->chan, false); + if (ret) { + dev_err(dev, "failed to add async client\n"); + stratix10_svc_free_channel(priv->chan); + return ret; + } + init_completion(&priv->completion); platform_set_drvdata(pdev, priv); /* get the initial state from firmware */ - ret = rsu_send_msg(priv, COMMAND_RSU_STATUS, - 0, rsu_status_callback); + ret = rsu_send_async_msg(dev, priv, COMMAND_RSU_STATUS, 0, + rsu_async_status_callback); if (ret) { dev_err(dev, "Error, getting RSU status %i\n", ret); stratix10_svc_free_channel(priv->chan); @@ -763,12 +786,6 @@ static int stratix10_rsu_probe(struct platform_device *pdev) stratix10_svc_free_channel(priv->chan); } - ret = rsu_send_msg(priv, COMMAND_RSU_RETRY, 0, rsu_retry_callback); - if (ret) { - dev_err(dev, "Error, getting RSU retry %i\n", ret); - 
stratix10_svc_free_channel(priv->chan); - } - ret = rsu_send_msg(priv, COMMAND_RSU_MAX_RETRY, 0, rsu_max_retry_callback); if (ret) { @@ -776,18 +793,12 @@ static int stratix10_rsu_probe(struct platform_device *pdev) stratix10_svc_free_channel(priv->chan); } - priv->get_spt_response_buf = - stratix10_svc_allocate_memory(priv->chan, RSU_GET_SPT_RESP_LEN); - if (IS_ERR(priv->get_spt_response_buf)) { - dev_err(dev, "failed to allocate get spt buffer\n"); - } else { - ret = rsu_send_msg(priv, COMMAND_MBOX_SEND_CMD, - RSU_GET_SPT_CMD, rsu_get_spt_callback); - if (ret) { - dev_err(dev, "Error, getting SPT table %i\n", ret); - stratix10_svc_free_channel(priv->chan); - } + ret = rsu_send_async_msg(dev, priv, COMMAND_RSU_GET_SPT_TABLE, 0, + rsu_async_get_spt_table_callback); + if (ret) { + dev_err(dev, "Error, getting SPT table %i\n", ret); + stratix10_svc_free_channel(priv->chan); } return ret; diff --git a/drivers/firmware/stratix10-svc.c b/drivers/firmware/stratix10-svc.c index e3f990d888d7..515b948ff320 100644 --- a/drivers/firmware/stratix10-svc.c +++ b/drivers/firmware/stratix10-svc.c @@ -1,11 +1,15 @@ // SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2017-2018, Intel Corporation + * Copyright (C) 2025, Altera Corporation */ +#include <linux/atomic.h> #include <linux/completion.h> #include <linux/delay.h> #include <linux/genalloc.h> +#include <linux/hashtable.h> +#include <linux/idr.h> #include <linux/io.h> #include <linux/kfifo.h> #include <linux/kthread.h> @@ -34,7 +38,7 @@ * timeout is set to 30 seconds (30 * 1000) at Intel Stratix10 SoC. */ #define SVC_NUM_DATA_IN_FIFO 32 -#define SVC_NUM_CHANNEL 3 +#define SVC_NUM_CHANNEL 4 #define FPGA_CONFIG_DATA_CLAIM_TIMEOUT_MS 200 #define FPGA_CONFIG_STATUS_TIMEOUT_SEC 30 #define BYTE_TO_WORD_SIZE 4 @@ -43,6 +47,55 @@ #define STRATIX10_RSU "stratix10-rsu" #define INTEL_FCS "intel-fcs" +/* Maximum number of SDM client IDs. */ +#define MAX_SDM_CLIENT_IDS 16 +/* Client ID for SIP Service Version 1. */ +#define SIP_SVC_V1_CLIENT_ID 0x1 +/* Maximum number of SDM job IDs. */ +#define MAX_SDM_JOB_IDS 16 +/* Number of bits used for asynchronous transaction hashing. */ +#define ASYNC_TRX_HASH_BITS 3 +/* + * Total number of transaction IDs, which is a combination of + * client ID and job ID. + */ +#define TOTAL_TRANSACTION_IDS \ + (MAX_SDM_CLIENT_IDS * MAX_SDM_JOB_IDS) + +/* Minimum major version of the ATF for Asynchronous transactions. */ +#define ASYNC_ATF_MINIMUM_MAJOR_VERSION 0x3 +/* Minimum minor version of the ATF for Asynchronous transactions.*/ +#define ASYNC_ATF_MINIMUM_MINOR_VERSION 0x0 + +/* Job ID field in the transaction ID */ +#define STRATIX10_JOB_FIELD GENMASK(3, 0) +/* Client ID field in the transaction ID */ +#define STRATIX10_CLIENT_FIELD GENMASK(7, 4) +/* Transaction ID mask for Stratix10 service layer */ +#define STRATIX10_TRANS_ID_FIELD GENMASK(7, 0) + +/* Macro to extract the job ID from a transaction ID. */ +#define STRATIX10_GET_JOBID(transaction_id) \ + (FIELD_GET(STRATIX10_JOB_FIELD, transaction_id)) +/* Macro to set the job ID in a transaction ID. */ +#define STRATIX10_SET_JOBID(jobid) \ + (FIELD_PREP(STRATIX10_JOB_FIELD, jobid)) +/* Macro to set the client ID in a transaction ID. */ +#define STRATIX10_SET_CLIENTID(clientid) \ + (FIELD_PREP(STRATIX10_CLIENT_FIELD, clientid)) +/* Macro to set a transaction ID using a client ID and a job ID. 
*/ +#define STRATIX10_SET_TRANSACTIONID(clientid, jobid) \ + (STRATIX10_SET_CLIENTID(clientid) | STRATIX10_SET_JOBID(jobid)) +/* Macro to set a transaction ID for SIP SMC Async transactions */ +#define STRATIX10_SIP_SMC_SET_TRANSACTIONID_X1(transaction_id) \ + (FIELD_PREP(STRATIX10_TRANS_ID_FIELD, transaction_id)) + +/* 10-bit mask for extracting the SDM status code */ +#define STRATIX10_SDM_STATUS_MASK GENMASK(9, 0) +/* Macro to get the SDM mailbox error status */ +#define STRATIX10_GET_SDM_STATUS_CODE(status) \ + (FIELD_GET(STRATIX10_SDM_STATUS_MASK, status)) + typedef void (svc_invoke_fn)(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, @@ -52,6 +105,7 @@ struct stratix10_svc_chan; /** * struct stratix10_svc - svc private data * @stratix10_svc_rsu: pointer to stratix10 RSU device + * @intel_svc_fcs: pointer to the FCS device */ struct stratix10_svc { struct platform_device *stratix10_svc_rsu; @@ -63,7 +117,7 @@ struct stratix10_svc { * @sync_complete: state for a completion * @addr: physical address of shared memory block * @size: size of shared memory block - * @invoke_fn: function to issue secure monitor or hypervisor call + * @invoke_fn: service clients to handle secure monitor or hypervisor calls * * This struct is used to save physical address and size of shared memory * block. The shared memory blocked is allocated by secure monitor software @@ -122,6 +176,74 @@ struct stratix10_svc_data { }; /** + * struct stratix10_svc_async_handler - Asynchronous handler for Stratix10 + * service layer + * @transaction_id: Unique identifier for the transaction + * @achan: Pointer to the asynchronous channel structure + * @cb_arg: Argument to be passed to the callback function + * @cb: Callback function to be called upon completion + * @msg: Pointer to the client message structure + * @next: Node in the hash list + * @res: Response structure to store result from the secure firmware + * + * This structure is used to handle asynchronous transactions in the + * Stratix10 service layer. It maintains the necessary information + * for processing and completing asynchronous requests. 
+ */ + +struct stratix10_svc_async_handler { + u8 transaction_id; + struct stratix10_async_chan *achan; + void *cb_arg; + async_callback_t cb; + struct stratix10_svc_client_msg *msg; + struct hlist_node next; + struct arm_smccc_1_2_regs res; +}; + +/** + * struct stratix10_async_chan - Structure representing an asynchronous channel + * @async_client_id: Unique client identifier for the asynchronous operation + * @job_id_pool: Pointer to the job ID pool associated with this channel + */ + +struct stratix10_async_chan { + unsigned long async_client_id; + struct ida job_id_pool; +}; + +/** + * struct stratix10_async_ctrl - Control structure for Stratix10 + * asynchronous operations + * @initialized: Flag indicating whether the control structure has + * been initialized + * @invoke_fn: Function pointer for invoking Stratix10 service calls + * to EL3 secure firmware + * @async_id_pool: Pointer to the ID pool used for asynchronous + * operations + * @common_achan_refcount: Atomic reference count for the common + * asynchronous channel usage + * @common_async_chan: Pointer to the common asynchronous channel + * structure + * @trx_list_lock: Spinlock for protecting the transaction list + * operations + * @trx_list: Hash table for managing asynchronous transactions + */ + +struct stratix10_async_ctrl { + bool initialized; + void (*invoke_fn)(struct stratix10_async_ctrl *actrl, + const struct arm_smccc_1_2_regs *args, + struct arm_smccc_1_2_regs *res); + struct ida async_id_pool; + atomic_t common_achan_refcount; + struct stratix10_async_chan *common_async_chan; + /* spinlock to protect trx_list hash table */ + spinlock_t trx_list_lock; + DECLARE_HASHTABLE(trx_list, ASYNC_TRX_HASH_BITS); +}; + +/** * struct stratix10_svc_controller - service controller * @dev: device * @chans: array of service channels @@ -134,6 +256,8 @@ struct stratix10_svc_data { * @complete_status: state for completion * @svc_fifo_lock: protect access to service message data queue * @invoke_fn: function to issue secure monitor call or hypervisor call + * @svc: manages the list of client svc drivers + * @actrl: async control structure * * This struct is used to create communication channels for service clients, to * handle secure monitor or hypervisor call. @@ -150,6 +274,8 @@ struct stratix10_svc_controller { struct completion complete_status; spinlock_t svc_fifo_lock; svc_invoke_fn *invoke_fn; + struct stratix10_svc *svc; + struct stratix10_async_ctrl actrl; }; /** @@ -158,20 +284,28 @@ struct stratix10_svc_controller { * @scl: pointer to service client which owns the channel * @name: service client name associated with the channel * @lock: protect access to the channel + * @async_chan: reference to asynchronous channel object for this channel * - * This struct is used by service client to communicate with service layer, each - * service client has its own channel created by service controller. + * This struct is used by service client to communicate with service layer. + * Each service client has its own channel created by service controller. 
*/ struct stratix10_svc_chan { struct stratix10_svc_controller *ctrl; struct stratix10_svc_client *scl; char *name; spinlock_t lock; + struct stratix10_async_chan *async_chan; }; static LIST_HEAD(svc_ctrl); static LIST_HEAD(svc_data_mem); +/* + * svc_mem_lock protects access to the svc_data_mem list for + * concurrent multi-client operations + */ +static DEFINE_MUTEX(svc_mem_lock); + /** * svc_pa_to_va() - translate physical address to virtual address * @addr: to be translated physical address @@ -184,6 +318,7 @@ static void *svc_pa_to_va(unsigned long addr) struct stratix10_svc_data_mem *pmem; pr_debug("claim back P-addr=0x%016x\n", (unsigned int)addr); + guard(mutex)(&svc_mem_lock); list_for_each_entry(pmem, &svc_data_mem, node) if (pmem->paddr == addr) return pmem->vaddr; @@ -341,6 +476,8 @@ static void svc_thread_recv_status_ok(struct stratix10_svc_data *p_data, case COMMAND_RSU_MAX_RETRY: case COMMAND_RSU_DCMF_STATUS: case COMMAND_FIRMWARE_VERSION: + case COMMAND_HWMON_READTEMP: + case COMMAND_HWMON_READVOLT: cb_data->status = BIT(SVC_STATUS_OK); cb_data->kaddr1 = &res.a1; break; @@ -525,7 +662,17 @@ static int svc_normal_to_secure_thread(void *data) a1 = (unsigned long)pdata->paddr; a2 = 0; break; - + /* for HWMON */ + case COMMAND_HWMON_READTEMP: + a0 = INTEL_SIP_SMC_HWMON_READTEMP; + a1 = pdata->arg[0]; + a2 = 0; + break; + case COMMAND_HWMON_READVOLT: + a0 = INTEL_SIP_SMC_HWMON_READVOLT; + a1 = pdata->arg[0]; + a2 = 0; + break; /* for polling */ case COMMAND_POLL_SERVICE_STATUS: a0 = INTEL_SIP_SMC_SERVICE_COMPLETED; @@ -923,6 +1070,591 @@ struct stratix10_svc_chan *stratix10_svc_request_channel_byname( EXPORT_SYMBOL_GPL(stratix10_svc_request_channel_byname); /** + * stratix10_svc_add_async_client - Add an asynchronous client to the + * Stratix10 service channel. + * @chan: Pointer to the Stratix10 service channel structure. + * @use_unique_clientid: Boolean flag indicating whether to use a + * unique client ID. + * + * This function adds an asynchronous client to the specified + * Stratix10 service channel. If the `use_unique_clientid` flag is + * set to true, a unique client ID is allocated for the asynchronous + * channel. Otherwise, a common asynchronous channel is used. + * + * Return: 0 on success, or a negative error code on failure: + * -EINVAL if the channel is NULL or the async controller is + * not initialized. + * -EALREADY if the async channel is already allocated. + * -ENOMEM if memory allocation fails. + * Other negative values if ID allocation fails. 
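As a usage sketch (not part of this patch), a client driver would typically pair stratix10_svc_add_async_client() with the existing stratix10_svc_request_channel_byname()/stratix10_svc_free_channel() calls; error handling is abbreviated and the function name is a placeholder.

#include <linux/err.h>
#include <linux/firmware/intel/stratix10-svc-client.h>

/* Minimal sketch: request an RSU channel and attach a shared async client.
 * Assumes the caller already owns a populated struct stratix10_svc_client.
 */
static int example_attach_async(struct stratix10_svc_client *client,
				struct stratix10_svc_chan **out_chan)
{
	struct stratix10_svc_chan *chan;
	int ret;

	chan = stratix10_svc_request_channel_byname(client, SVC_CLIENT_RSU);
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	/* false: share the common async channel rather than a unique client ID */
	ret = stratix10_svc_add_async_client(chan, false);
	if (ret) {
		stratix10_svc_free_channel(chan);
		return ret;
	}

	*out_chan = chan;
	return 0;
}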
+ */ +int stratix10_svc_add_async_client(struct stratix10_svc_chan *chan, + bool use_unique_clientid) +{ + struct stratix10_svc_controller *ctrl; + struct stratix10_async_ctrl *actrl; + struct stratix10_async_chan *achan; + int ret = 0; + + if (!chan) + return -EINVAL; + + ctrl = chan->ctrl; + actrl = &ctrl->actrl; + + if (!actrl->initialized) { + dev_err(ctrl->dev, "Async controller not initialized\n"); + return -EINVAL; + } + + if (chan->async_chan) { + dev_err(ctrl->dev, "async channel already allocated\n"); + return -EALREADY; + } + + if (use_unique_clientid && + atomic_read(&actrl->common_achan_refcount) > 0) { + chan->async_chan = actrl->common_async_chan; + atomic_inc(&actrl->common_achan_refcount); + return 0; + } + + achan = kzalloc(sizeof(*achan), GFP_KERNEL); + if (!achan) + return -ENOMEM; + + ida_init(&achan->job_id_pool); + + ret = ida_alloc_max(&actrl->async_id_pool, MAX_SDM_CLIENT_IDS, + GFP_KERNEL); + if (ret < 0) { + dev_err(ctrl->dev, + "Failed to allocate async client id\n"); + ida_destroy(&achan->job_id_pool); + kfree(achan); + return ret; + } + + achan->async_client_id = ret; + chan->async_chan = achan; + + if (use_unique_clientid && + atomic_read(&actrl->common_achan_refcount) == 0) { + actrl->common_async_chan = achan; + atomic_inc(&actrl->common_achan_refcount); + } + + return 0; +} +EXPORT_SYMBOL_GPL(stratix10_svc_add_async_client); + +/** + * stratix10_svc_remove_async_client - Remove an asynchronous client + * from the Stratix10 service + * channel. + * @chan: Pointer to the Stratix10 service channel structure. + * + * This function removes an asynchronous client associated with the + * given service channel. It checks if the channel and the + * asynchronous channel are valid, and then proceeds to decrement + * the reference count for the common asynchronous channel if + * applicable. If the reference count reaches zero, it destroys the + * job ID pool and deallocates the asynchronous client ID. For + * non-common asynchronous channels, it directly destroys the job ID + * pool, deallocates the asynchronous client ID, and frees the + * memory allocated for the asynchronous channel. + * + * Return: 0 on success, -EINVAL if the channel or asynchronous + * channel is invalid. + */ +int stratix10_svc_remove_async_client(struct stratix10_svc_chan *chan) +{ + struct stratix10_svc_controller *ctrl; + struct stratix10_async_ctrl *actrl; + struct stratix10_async_chan *achan; + + if (!chan) + return -EINVAL; + + ctrl = chan->ctrl; + actrl = &ctrl->actrl; + achan = chan->async_chan; + + if (!achan) { + dev_err(ctrl->dev, "async channel not allocated\n"); + return -EINVAL; + } + + if (achan == actrl->common_async_chan) { + atomic_dec(&actrl->common_achan_refcount); + if (atomic_read(&actrl->common_achan_refcount) == 0) { + ida_destroy(&achan->job_id_pool); + ida_free(&actrl->async_id_pool, + achan->async_client_id); + kfree(achan); + actrl->common_async_chan = NULL; + } + } else { + ida_destroy(&achan->job_id_pool); + ida_free(&actrl->async_id_pool, achan->async_client_id); + kfree(achan); + } + chan->async_chan = NULL; + + return 0; +} +EXPORT_SYMBOL_GPL(stratix10_svc_remove_async_client); + +/** + * stratix10_svc_async_send - Send an asynchronous message to the + * Stratix10 service + * @chan: Pointer to the service channel structure + * @msg: Pointer to the message to be sent + * @handler: Pointer to the handler for the asynchronous message + * used by caller for later reference. 
+ * @cb: Callback function to be called upon completion + * @cb_arg: Argument to be passed to the callback function + * + * This function sends an asynchronous message to the SDM mailbox in + * EL3 secure firmware. It performs various checks and setups, + * including allocating a job ID, setting up the transaction ID and + * packaging it to El3 firmware. The function handles different + * commands by setting up the appropriate arguments for the SMC call. + * If the SMC call is successful, the handler is set up and the + * function returns 0. If the SMC call fails, appropriate error + * handling is performed along with cleanup of resources. + * + * Return: 0 on success, -EINVAL for invalid argument, -ENOMEM if + * memory is not available, -EAGAIN if EL3 firmware is busy, -EBADF + * if the message is rejected by EL3 firmware and -EIO on other + * errors from EL3 firmware. + */ +int stratix10_svc_async_send(struct stratix10_svc_chan *chan, void *msg, + void **handler, async_callback_t cb, void *cb_arg) +{ + struct arm_smccc_1_2_regs args = { 0 }, res = { 0 }; + struct stratix10_svc_async_handler *handle = NULL; + struct stratix10_svc_client_msg *p_msg = + (struct stratix10_svc_client_msg *)msg; + struct stratix10_svc_controller *ctrl; + struct stratix10_async_ctrl *actrl; + struct stratix10_async_chan *achan; + int ret = 0; + + if (!chan || !msg || !handler) + return -EINVAL; + + achan = chan->async_chan; + ctrl = chan->ctrl; + actrl = &ctrl->actrl; + + if (!actrl->initialized) { + dev_err(ctrl->dev, "Async controller not initialized\n"); + return -EINVAL; + } + + if (!achan) { + dev_err(ctrl->dev, "Async channel not allocated\n"); + return -EINVAL; + } + + handle = kzalloc(sizeof(*handle), GFP_KERNEL); + if (!handle) + return -ENOMEM; + + ret = ida_alloc_max(&achan->job_id_pool, MAX_SDM_JOB_IDS, + GFP_KERNEL); + if (ret < 0) { + dev_err(ctrl->dev, "Failed to allocate job id\n"); + kfree(handle); + return -ENOMEM; + } + + handle->transaction_id = + STRATIX10_SET_TRANSACTIONID(achan->async_client_id, ret); + handle->cb = cb; + handle->msg = p_msg; + handle->cb_arg = cb_arg; + handle->achan = achan; + + /*set the transaction jobid in args.a1*/ + args.a1 = + STRATIX10_SIP_SMC_SET_TRANSACTIONID_X1(handle->transaction_id); + + switch (p_msg->command) { + case COMMAND_RSU_GET_SPT_TABLE: + args.a0 = INTEL_SIP_SMC_ASYNC_RSU_GET_SPT; + break; + case COMMAND_RSU_STATUS: + args.a0 = INTEL_SIP_SMC_ASYNC_RSU_GET_ERROR_STATUS; + break; + case COMMAND_RSU_NOTIFY: + args.a0 = INTEL_SIP_SMC_ASYNC_RSU_NOTIFY; + args.a2 = p_msg->arg[0]; + break; + default: + dev_err(ctrl->dev, "Invalid command ,%d\n", p_msg->command); + ret = -EINVAL; + goto deallocate_id; + } + + /** + * There is a chance that during the execution of async_send() + * in one core, an interrupt might be received in another core; + * to mitigate this we are adding the handle to the DB and then + * send the smc call. If the smc call is rejected or busy then + * we will deallocate the handle for the client to retry again. 
+ */ + scoped_guard(spinlock_bh, &actrl->trx_list_lock) { + hash_add(actrl->trx_list, &handle->next, + handle->transaction_id); + } + + actrl->invoke_fn(actrl, &args, &res); + + switch (res.a0) { + case INTEL_SIP_SMC_STATUS_OK: + dev_dbg(ctrl->dev, + "Async message sent with transaction_id 0x%02x\n", + handle->transaction_id); + *handler = handle; + return 0; + case INTEL_SIP_SMC_STATUS_BUSY: + dev_warn(ctrl->dev, "Mailbox is busy, try after some time\n"); + ret = -EAGAIN; + break; + case INTEL_SIP_SMC_STATUS_REJECTED: + dev_err(ctrl->dev, "Async message rejected\n"); + ret = -EBADF; + break; + default: + dev_err(ctrl->dev, + "Failed to send async message ,got status as %ld\n", + res.a0); + ret = -EIO; + } + + scoped_guard(spinlock_bh, &actrl->trx_list_lock) { + hash_del(&handle->next); + } + +deallocate_id: + ida_free(&achan->job_id_pool, + STRATIX10_GET_JOBID(handle->transaction_id)); + kfree(handle); + return ret; +} +EXPORT_SYMBOL_GPL(stratix10_svc_async_send); + +/** + * stratix10_svc_async_prepare_response - Prepare the response data for + * an asynchronous transaction. + * @chan: Pointer to the service channel structure. + * @handle: Pointer to the asynchronous handler structure. + * @data: Pointer to the callback data structure. + * + * This function prepares the response data for an asynchronous transaction. It + * extracts the response data from the SMC response structure and stores it in + * the callback data structure. The function also logs the completion of the + * asynchronous transaction. + * + * Return: 0 on success, -ENOENT if the command is invalid + */ +static int stratix10_svc_async_prepare_response(struct stratix10_svc_chan *chan, + struct stratix10_svc_async_handler *handle, + struct stratix10_svc_cb_data *data) +{ + struct stratix10_svc_client_msg *p_msg = + (struct stratix10_svc_client_msg *)handle->msg; + struct stratix10_svc_controller *ctrl = chan->ctrl; + + data->status = STRATIX10_GET_SDM_STATUS_CODE(handle->res.a1); + + switch (p_msg->command) { + case COMMAND_RSU_NOTIFY: + break; + case COMMAND_RSU_GET_SPT_TABLE: + data->kaddr1 = (void *)&handle->res.a2; + data->kaddr2 = (void *)&handle->res.a3; + break; + case COMMAND_RSU_STATUS: + /* COMMAND_RSU_STATUS has more elements than the cb_data + * can acomodate, so passing the response structure to the + * response function to be handled before done command is + * executed by the client. + */ + data->kaddr1 = (void *)&handle->res; + break; + + default: + dev_alert(ctrl->dev, "Invalid command\n ,%d", p_msg->command); + return -ENOENT; + } + dev_dbg(ctrl->dev, "Async message completed transaction_id 0x%02x\n", + handle->transaction_id); + return 0; +} + +/** + * stratix10_svc_async_poll - Polls the status of an asynchronous + * transaction. + * @chan: Pointer to the service channel structure. + * @tx_handle: Handle to the transaction being polled. + * @data: Pointer to the callback data structure. + * + * This function polls the status of an asynchronous transaction + * identified by the given transaction handle. It ensures that the + * necessary structures are initialized and valid before proceeding + * with the poll operation. The function sets up the necessary + * arguments for the SMC call, invokes the call, and prepares the + * response data if the call is successful. If the call fails, the + * function returns the error mapped to the SVC status error. 
+ * + * Return: 0 on success, -EINVAL if any input parameter is invalid, + * -EAGAIN if the transaction is still in progress, + * -EPERM if the command is invalid, or other negative + * error codes on failure. + */ +int stratix10_svc_async_poll(struct stratix10_svc_chan *chan, + void *tx_handle, + struct stratix10_svc_cb_data *data) +{ + struct stratix10_svc_async_handler *handle; + struct arm_smccc_1_2_regs args = { 0 }; + struct stratix10_svc_controller *ctrl; + struct stratix10_async_ctrl *actrl; + struct stratix10_async_chan *achan; + int ret; + + if (!chan || !tx_handle || !data) + return -EINVAL; + + ctrl = chan->ctrl; + actrl = &ctrl->actrl; + achan = chan->async_chan; + + if (!achan) { + dev_err(ctrl->dev, "Async channel not allocated\n"); + return -EINVAL; + } + + handle = (struct stratix10_svc_async_handler *)tx_handle; + scoped_guard(spinlock_bh, &actrl->trx_list_lock) { + if (!hash_hashed(&handle->next)) { + dev_err(ctrl->dev, "Invalid transaction handler"); + return -EINVAL; + } + } + + args.a0 = INTEL_SIP_SMC_ASYNC_POLL; + args.a1 = + STRATIX10_SIP_SMC_SET_TRANSACTIONID_X1(handle->transaction_id); + + actrl->invoke_fn(actrl, &args, &handle->res); + + /*clear data for response*/ + memset(data, 0, sizeof(*data)); + + if (handle->res.a0 == INTEL_SIP_SMC_STATUS_OK) { + ret = stratix10_svc_async_prepare_response(chan, handle, data); + if (ret) { + dev_err(ctrl->dev, "Error in preparation of response,%d\n", ret); + WARN_ON_ONCE(1); + } + return 0; + } else if (handle->res.a0 == INTEL_SIP_SMC_STATUS_BUSY) { + dev_dbg(ctrl->dev, "async message is still in progress\n"); + return -EAGAIN; + } + + dev_err(ctrl->dev, + "Failed to poll async message ,got status as %ld\n", + handle->res.a0); + return -EINVAL; +} +EXPORT_SYMBOL_GPL(stratix10_svc_async_poll); + +/** + * stratix10_svc_async_done - Completes an asynchronous transaction. + * @chan: Pointer to the service channel structure. + * @tx_handle: Handle to the transaction being completed. + * + * This function completes an asynchronous transaction identified by + * the given transaction handle. It ensures that the necessary + * structures are initialized and valid before proceeding with the + * completion operation. The function deallocates the transaction ID, + * frees the memory allocated for the handler, and removes the handler + * from the transaction list. + * + * Return: 0 on success, -EINVAL if any input parameter is invalid, + * or other negative error codes on failure. 
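Taken together with stratix10_svc_async_send() and stratix10_svc_async_poll() above, a poll-driven caller might drive a transaction roughly as below. This is illustrative only: the retry budget, the RSU notify value, and the assumption that a zero cb_data.status means success are not taken from this patch, and the completion callback is simply omitted.

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/firmware/intel/stratix10-svc-client.h>

/* Sketch of the async lifecycle: send, poll until no longer -EAGAIN, then
 * release the handle with stratix10_svc_async_done(). Assumes 'chan' already
 * has an async client attached via stratix10_svc_add_async_client().
 */
static int example_rsu_notify_async(struct stratix10_svc_chan *chan)
{
	struct stratix10_svc_client_msg msg = {
		.command = COMMAND_RSU_NOTIFY,
		.arg[0] = 0x1,		/* example notify value */
	};
	struct stratix10_svc_cb_data cb_data;
	void *handle;
	int tries, ret;

	ret = stratix10_svc_async_send(chan, &msg, &handle, NULL, NULL);
	if (ret)
		return ret;	/* e.g. -EAGAIN: mailbox busy, caller may retry */

	for (tries = 0; tries < 100; tries++) {
		ret = stratix10_svc_async_poll(chan, handle, &cb_data);
		if (ret != -EAGAIN)
			break;	/* completed (0) or failed (< 0) */
		msleep(10);
	}

	if (!ret && cb_data.status)
		ret = -EIO;	/* assumption: non-zero SDM status is an error */

	stratix10_svc_async_done(chan, handle);
	return ret;
}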
+ */ +int stratix10_svc_async_done(struct stratix10_svc_chan *chan, void *tx_handle) +{ + struct stratix10_svc_async_handler *handle; + struct stratix10_svc_controller *ctrl; + struct stratix10_async_chan *achan; + struct stratix10_async_ctrl *actrl; + + if (!chan || !tx_handle) + return -EINVAL; + + ctrl = chan->ctrl; + achan = chan->async_chan; + actrl = &ctrl->actrl; + + if (!achan) { + dev_err(ctrl->dev, "async channel not allocated\n"); + return -EINVAL; + } + + handle = (struct stratix10_svc_async_handler *)tx_handle; + scoped_guard(spinlock_bh, &actrl->trx_list_lock) { + if (!hash_hashed(&handle->next)) { + dev_err(ctrl->dev, "Invalid transaction handle"); + return -EINVAL; + } + hash_del(&handle->next); + } + ida_free(&achan->job_id_pool, + STRATIX10_GET_JOBID(handle->transaction_id)); + kfree(handle); + return 0; +} +EXPORT_SYMBOL_GPL(stratix10_svc_async_done); + +static inline void stratix10_smc_1_2(struct stratix10_async_ctrl *actrl, + const struct arm_smccc_1_2_regs *args, + struct arm_smccc_1_2_regs *res) +{ + arm_smccc_1_2_smc(args, res); +} + +/** + * stratix10_svc_async_init - Initialize the Stratix10 service + * controller for asynchronous operations. + * @controller: Pointer to the Stratix10 service controller structure. + * + * This function initializes the asynchronous service controller by + * setting up the necessary data structures and initializing the + * transaction list. + * + * Return: 0 on success, -EINVAL if the controller is NULL or already + * initialized, -ENOMEM if memory allocation fails, + * -EADDRINUSE if the client ID is already reserved, or other + * negative error codes on failure. + */ +static int stratix10_svc_async_init(struct stratix10_svc_controller *controller) +{ + struct stratix10_async_ctrl *actrl; + struct arm_smccc_res res; + struct device *dev; + int ret; + + if (!controller) + return -EINVAL; + + actrl = &controller->actrl; + + if (actrl->initialized) + return -EINVAL; + + dev = controller->dev; + + controller->invoke_fn(INTEL_SIP_SMC_SVC_VERSION, 0, 0, 0, 0, 0, 0, 0, &res); + if (res.a0 != INTEL_SIP_SMC_STATUS_OK || + !(res.a1 > ASYNC_ATF_MINIMUM_MAJOR_VERSION || + (res.a1 == ASYNC_ATF_MINIMUM_MAJOR_VERSION && + res.a2 >= ASYNC_ATF_MINIMUM_MINOR_VERSION))) { + dev_err(dev, + "Intel Service Layer Driver: ATF version is not compatible for async operation\n"); + return -EINVAL; + } + + actrl->invoke_fn = stratix10_smc_1_2; + + ida_init(&actrl->async_id_pool); + + /** + * SIP_SVC_V1_CLIENT_ID is used by V1/stratix10_svc_send() clients + * for communicating with SDM synchronously. We need to restrict + * this in V3/stratix10_svc_async_send() usage to distinguish + * between V1 and V3 messages in El3 firmware. + */ + ret = ida_alloc_range(&actrl->async_id_pool, SIP_SVC_V1_CLIENT_ID, + SIP_SVC_V1_CLIENT_ID, GFP_KERNEL); + if (ret < 0) { + dev_err(dev, + "Intel Service Layer Driver: Error on reserving SIP_SVC_V1_CLIENT_ID\n"); + ida_destroy(&actrl->async_id_pool); + actrl->invoke_fn = NULL; + return -EADDRINUSE; + } + + spin_lock_init(&actrl->trx_list_lock); + hash_init(actrl->trx_list); + atomic_set(&actrl->common_achan_refcount, 0); + + actrl->initialized = true; + return 0; +} + +/** + * stratix10_svc_async_exit - Clean up and exit the asynchronous + * service controller + * @ctrl: Pointer to the stratix10_svc_controller structure + * + * This function performs the necessary cleanup for the asynchronous + * service controller. It checks if the controller is valid and if it + * has been initialized. 
It then locks the transaction list and safely + * removes and deallocates each handler in the list. The function also + * removes any asynchronous clients associated with the controller's + * channels and destroys the asynchronous ID pool. Finally, it resets + * the asynchronous ID pool and invoke function pointers to NULL. + * + * Return: 0 on success, -EINVAL if the controller is invalid or not + * initialized. + */ +static int stratix10_svc_async_exit(struct stratix10_svc_controller *ctrl) +{ + struct stratix10_svc_async_handler *handler; + struct stratix10_async_ctrl *actrl; + struct hlist_node *tmp; + int i; + + if (!ctrl) + return -EINVAL; + + actrl = &ctrl->actrl; + + if (!actrl->initialized) + return -EINVAL; + + actrl->initialized = false; + + scoped_guard(spinlock_bh, &actrl->trx_list_lock) { + hash_for_each_safe(actrl->trx_list, i, tmp, handler, next) { + ida_free(&handler->achan->job_id_pool, + STRATIX10_GET_JOBID(handler->transaction_id)); + hash_del(&handler->next); + kfree(handler); + } + } + + for (i = 0; i < SVC_NUM_CHANNEL; i++) { + if (ctrl->chans[i].async_chan) { + stratix10_svc_remove_async_client(&ctrl->chans[i]); + ctrl->chans[i].async_chan = NULL; + } + } + + ida_destroy(&actrl->async_id_pool); + actrl->invoke_fn = NULL; + + return 0; +} + +/** * stratix10_svc_free_channel() - free service channel * @chan: service channel to be freed * @@ -990,6 +1722,7 @@ int stratix10_svc_send(struct stratix10_svc_chan *chan, void *msg) p_data->flag = ct->flags; } } else { + guard(mutex)(&svc_mem_lock); list_for_each_entry(p_mem, &svc_data_mem, node) if (p_mem->vaddr == p_msg->payload) { p_data->paddr = p_mem->paddr; @@ -1072,6 +1805,7 @@ void *stratix10_svc_allocate_memory(struct stratix10_svc_chan *chan, if (!pmem) return ERR_PTR(-ENOMEM); + guard(mutex)(&svc_mem_lock); va = gen_pool_alloc(genpool, s); if (!va) return ERR_PTR(-ENOMEM); @@ -1100,6 +1834,7 @@ EXPORT_SYMBOL_GPL(stratix10_svc_allocate_memory); void stratix10_svc_free_memory(struct stratix10_svc_chan *chan, void *kaddr) { struct stratix10_svc_data_mem *pmem; + guard(mutex)(&svc_mem_lock); list_for_each_entry(pmem, &svc_data_mem, node) if (pmem->vaddr == kaddr) { @@ -1174,11 +1909,18 @@ static int stratix10_svc_drv_probe(struct platform_device *pdev) controller->invoke_fn = invoke_fn; init_completion(&controller->complete_status); + ret = stratix10_svc_async_init(controller); + if (ret) { + dev_dbg(dev, "Intel Service Layer Driver: Error on stratix10_svc_async_init %d\n", + ret); + goto err_destroy_pool; + } + fifo_size = sizeof(struct stratix10_svc_data) * SVC_NUM_DATA_IN_FIFO; ret = kfifo_alloc(&controller->svc_fifo, fifo_size, GFP_KERNEL); if (ret) { dev_err(dev, "failed to allocate FIFO\n"); - goto err_destroy_pool; + goto err_async_exit; } spin_lock_init(&controller->svc_fifo_lock); @@ -1197,6 +1939,11 @@ static int stratix10_svc_drv_probe(struct platform_device *pdev) chans[2].name = SVC_CLIENT_FCS; spin_lock_init(&chans[2].lock); + chans[3].scl = NULL; + chans[3].ctrl = controller; + chans[3].name = SVC_CLIENT_HWMON; + spin_lock_init(&chans[3].lock); + list_add_tail(&controller->node, &svc_ctrl); platform_set_drvdata(pdev, controller); @@ -1206,6 +1953,7 @@ static int stratix10_svc_drv_probe(struct platform_device *pdev) ret = -ENOMEM; goto err_free_kfifo; } + controller->svc = svc; svc->stratix10_svc_rsu = platform_device_alloc(STRATIX10_RSU, 0); if (!svc->stratix10_svc_rsu) { @@ -1237,8 +1985,6 @@ static int stratix10_svc_drv_probe(struct platform_device *pdev) if (ret) goto err_unregister_fcs_dev; - 
dev_set_drvdata(dev, svc); - pr_info("Intel Service Layer Driver Initialized\n"); return 0; @@ -1249,6 +1995,8 @@ err_unregister_rsu_dev: platform_device_unregister(svc->stratix10_svc_rsu); err_free_kfifo: kfifo_free(&controller->svc_fifo); +err_async_exit: + stratix10_svc_async_exit(controller); err_destroy_pool: gen_pool_destroy(genpool); return ret; @@ -1256,8 +2004,10 @@ err_destroy_pool: static void stratix10_svc_drv_remove(struct platform_device *pdev) { - struct stratix10_svc *svc = dev_get_drvdata(&pdev->dev); struct stratix10_svc_controller *ctrl = platform_get_drvdata(pdev); + struct stratix10_svc *svc = ctrl->svc; + + stratix10_svc_async_exit(ctrl); of_platform_depopulate(ctrl->dev); diff --git a/drivers/firmware/ti_sci.c b/drivers/firmware/ti_sci.c index 49fd2ae01055..e027a2bd8f26 100644 --- a/drivers/firmware/ti_sci.c +++ b/drivers/firmware/ti_sci.c @@ -398,6 +398,9 @@ static void ti_sci_put_one_xfer(struct ti_sci_xfers_info *minfo, static inline int ti_sci_do_xfer(struct ti_sci_info *info, struct ti_sci_xfer *xfer) { + struct ti_sci_msg_hdr *hdr = (struct ti_sci_msg_hdr *)xfer->tx_message.buf; + bool response_expected = !!(hdr->flags & (TI_SCI_FLAG_REQ_ACK_ON_PROCESSED | + TI_SCI_FLAG_REQ_ACK_ON_RECEIVED)); int ret; int timeout; struct device *dev = info->dev; @@ -409,12 +412,12 @@ static inline int ti_sci_do_xfer(struct ti_sci_info *info, ret = 0; - if (system_state <= SYSTEM_RUNNING) { + if (response_expected && system_state <= SYSTEM_RUNNING) { /* And we wait for the response. */ timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms); if (!wait_for_completion_timeout(&xfer->done, timeout)) ret = -ETIMEDOUT; - } else { + } else if (response_expected) { /* * If we are !running, we cannot use wait_for_completion_timeout * during noirq phase, so we must manually poll the completion. @@ -1670,6 +1673,9 @@ fail: static int ti_sci_cmd_prepare_sleep(const struct ti_sci_handle *handle, u8 mode, u32 ctx_lo, u32 ctx_hi, u32 debug_flags) { + u32 msg_flags = mode == TISCI_MSG_VALUE_SLEEP_MODE_PARTIAL_IO ? + TI_SCI_FLAG_REQ_GENERIC_NORESPONSE : + TI_SCI_FLAG_REQ_ACK_ON_PROCESSED; struct ti_sci_info *info; struct ti_sci_msg_req_prepare_sleep *req; struct ti_sci_msg_hdr *resp; @@ -1686,7 +1692,7 @@ static int ti_sci_cmd_prepare_sleep(const struct ti_sci_handle *handle, u8 mode, dev = info->dev; xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_PREPARE_SLEEP, - TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, + msg_flags, sizeof(*req), sizeof(*resp)); if (IS_ERR(xfer)) { ret = PTR_ERR(xfer); @@ -1706,11 +1712,12 @@ static int ti_sci_cmd_prepare_sleep(const struct ti_sci_handle *handle, u8 mode, goto fail; } - resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf; - - if (!ti_sci_is_response_ack(resp)) { - dev_err(dev, "Failed to prepare sleep\n"); - ret = -ENODEV; + if (msg_flags == TI_SCI_FLAG_REQ_ACK_ON_PROCESSED) { + resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf; + if (!ti_sci_is_response_ack(resp)) { + dev_err(dev, "Failed to prepare sleep\n"); + ret = -ENODEV; + } } fail: @@ -3664,6 +3671,78 @@ devm_ti_sci_get_resource(const struct ti_sci_handle *handle, struct device *dev, } EXPORT_SYMBOL_GPL(devm_ti_sci_get_resource); +/* + * Iterate all device nodes that have a wakeup-source property and check if one + * of the possible phandles points to a Partial-IO system state. If it + * does resolve the device node to an actual device and check if wakeup is + * enabled. 
+ */ +static bool ti_sci_partial_io_wakeup_enabled(struct ti_sci_info *info) +{ + struct device_node *wakeup_node = NULL; + + for_each_node_with_property(wakeup_node, "wakeup-source") { + struct of_phandle_iterator it; + int err; + + of_for_each_phandle(&it, err, wakeup_node, "wakeup-source", NULL, 0) { + struct platform_device *pdev; + bool may_wakeup; + + /* + * Continue if idle-state-name is not off-wake. Return + * value is the index of the string which should be 0 if + * off-wake is present. + */ + if (of_property_match_string(it.node, "idle-state-name", "off-wake")) + continue; + + pdev = of_find_device_by_node(wakeup_node); + if (!pdev) + continue; + + may_wakeup = device_may_wakeup(&pdev->dev); + put_device(&pdev->dev); + + if (may_wakeup) { + dev_dbg(info->dev, "%pOF identified as wakeup source for Partial-IO\n", + wakeup_node); + of_node_put(it.node); + of_node_put(wakeup_node); + return true; + } + } + } + + return false; +} + +static int ti_sci_sys_off_handler(struct sys_off_data *data) +{ + struct ti_sci_info *info = data->cb_data; + const struct ti_sci_handle *handle = &info->handle; + bool enter_partial_io = ti_sci_partial_io_wakeup_enabled(info); + int ret; + + if (!enter_partial_io) + return NOTIFY_DONE; + + dev_info(info->dev, "Entering Partial-IO because a powered wakeup-enabled device was found.\n"); + + ret = ti_sci_cmd_prepare_sleep(handle, TISCI_MSG_VALUE_SLEEP_MODE_PARTIAL_IO, 0, 0, 0); + if (ret) { + dev_err(info->dev, + "Failed to enter Partial-IO %pe, trying to do an emergency restart\n", + ERR_PTR(ret)); + emergency_restart(); + } + + mdelay(5000); + emergency_restart(); + + return NOTIFY_DONE; +} + static int tisci_reboot_handler(struct sys_off_data *data) { struct ti_sci_info *info = data->cb_data; @@ -3706,7 +3785,7 @@ static int ti_sci_prepare_system_suspend(struct ti_sci_info *info) } } -static int __maybe_unused ti_sci_suspend(struct device *dev) +static int ti_sci_suspend(struct device *dev) { struct ti_sci_info *info = dev_get_drvdata(dev); struct device *cpu_dev, *cpu_dev_max = NULL; @@ -3746,19 +3825,21 @@ static int __maybe_unused ti_sci_suspend(struct device *dev) return 0; } -static int __maybe_unused ti_sci_suspend_noirq(struct device *dev) +static int ti_sci_suspend_noirq(struct device *dev) { struct ti_sci_info *info = dev_get_drvdata(dev); int ret = 0; - ret = ti_sci_cmd_set_io_isolation(&info->handle, TISCI_MSG_VALUE_IO_ENABLE); - if (ret) - return ret; + if (info->fw_caps & MSG_FLAG_CAPS_IO_ISOLATION) { + ret = ti_sci_cmd_set_io_isolation(&info->handle, TISCI_MSG_VALUE_IO_ENABLE); + if (ret) + return ret; + } return 0; } -static int __maybe_unused ti_sci_resume_noirq(struct device *dev) +static int ti_sci_resume_noirq(struct device *dev) { struct ti_sci_info *info = dev_get_drvdata(dev); int ret = 0; @@ -3767,9 +3848,11 @@ static int __maybe_unused ti_sci_resume_noirq(struct device *dev) u8 pin; u8 mode; - ret = ti_sci_cmd_set_io_isolation(&info->handle, TISCI_MSG_VALUE_IO_DISABLE); - if (ret) - return ret; + if (info->fw_caps & MSG_FLAG_CAPS_IO_ISOLATION) { + ret = ti_sci_cmd_set_io_isolation(&info->handle, TISCI_MSG_VALUE_IO_DISABLE); + if (ret) + return ret; + } ret = ti_sci_msg_cmd_lpm_wake_reason(&info->handle, &source, &time, &pin, &mode); /* Do not fail to resume on error as the wake reason is not critical */ @@ -3780,7 +3863,7 @@ static int __maybe_unused ti_sci_resume_noirq(struct device *dev) return 0; } -static void __maybe_unused ti_sci_pm_complete(struct device *dev) +static void ti_sci_pm_complete(struct device *dev) { struct 
ti_sci_info *info = dev_get_drvdata(dev); @@ -3791,12 +3874,10 @@ static void __maybe_unused ti_sci_pm_complete(struct device *dev) } static const struct dev_pm_ops ti_sci_pm_ops = { -#ifdef CONFIG_PM_SLEEP - .suspend = ti_sci_suspend, - .suspend_noirq = ti_sci_suspend_noirq, - .resume_noirq = ti_sci_resume_noirq, - .complete = ti_sci_pm_complete, -#endif + .suspend = pm_sleep_ptr(ti_sci_suspend), + .suspend_noirq = pm_sleep_ptr(ti_sci_suspend_noirq), + .resume_noirq = pm_sleep_ptr(ti_sci_resume_noirq), + .complete = pm_sleep_ptr(ti_sci_pm_complete), }; /* Description for K2G */ @@ -3928,11 +4009,12 @@ static int ti_sci_probe(struct platform_device *pdev) } ti_sci_msg_cmd_query_fw_caps(&info->handle, &info->fw_caps); - dev_dbg(dev, "Detected firmware capabilities: %s%s%s%s\n", + dev_dbg(dev, "Detected firmware capabilities: %s%s%s%s%s\n", info->fw_caps & MSG_FLAG_CAPS_GENERIC ? "Generic" : "", info->fw_caps & MSG_FLAG_CAPS_LPM_PARTIAL_IO ? " Partial-IO" : "", info->fw_caps & MSG_FLAG_CAPS_LPM_DM_MANAGED ? " DM-Managed" : "", - info->fw_caps & MSG_FLAG_CAPS_LPM_ABORT ? " LPM-Abort" : "" + info->fw_caps & MSG_FLAG_CAPS_LPM_ABORT ? " LPM-Abort" : "", + info->fw_caps & MSG_FLAG_CAPS_IO_ISOLATION ? " IO-Isolation" : "" ); ti_sci_setup_ops(info); @@ -3943,6 +4025,19 @@ static int ti_sci_probe(struct platform_device *pdev) goto out; } + if (info->fw_caps & MSG_FLAG_CAPS_LPM_PARTIAL_IO) { + ret = devm_register_sys_off_handler(dev, + SYS_OFF_MODE_POWER_OFF, + SYS_OFF_PRIO_FIRMWARE, + ti_sci_sys_off_handler, + info); + if (ret) { + dev_err(dev, "Failed to register sys_off_handler %pe\n", + ERR_PTR(ret)); + goto out; + } + } + dev_info(dev, "ABI: %d.%d (firmware rev 0x%04x '%s')\n", info->handle.version.abi_major, info->handle.version.abi_minor, info->handle.version.firmware_revision, @@ -3952,7 +4047,13 @@ static int ti_sci_probe(struct platform_device *pdev) list_add_tail(&info->node, &ti_sci_list); mutex_unlock(&ti_sci_list_mutex); - return of_platform_populate(dev->of_node, NULL, NULL, dev); + ret = of_platform_populate(dev->of_node, NULL, NULL, dev); + if (ret) { + dev_err(dev, "platform_populate failed %pe\n", ERR_PTR(ret)); + goto out; + } + return 0; + out: if (!IS_ERR(info->chan_tx)) mbox_free_channel(info->chan_tx); diff --git a/drivers/firmware/ti_sci.h b/drivers/firmware/ti_sci.h index 701c416b2e78..91f234550c43 100644 --- a/drivers/firmware/ti_sci.h +++ b/drivers/firmware/ti_sci.h @@ -149,6 +149,7 @@ struct ti_sci_msg_req_reboot { * MSG_FLAG_CAPS_LPM_PARTIAL_IO: Partial IO in LPM * MSG_FLAG_CAPS_LPM_DM_MANAGED: LPM can be managed by DM * MSG_FLAG_CAPS_LPM_ABORT: Abort entry to LPM + * MSG_FLAG_CAPS_IO_ISOLATION: IO Isolation support * * Response to a generic message with message type TI_SCI_MSG_QUERY_FW_CAPS * providing currently available SOC/firmware capabilities. SoC that don't @@ -160,6 +161,7 @@ struct ti_sci_msg_resp_query_fw_caps { #define MSG_FLAG_CAPS_LPM_PARTIAL_IO TI_SCI_MSG_FLAG(4) #define MSG_FLAG_CAPS_LPM_DM_MANAGED TI_SCI_MSG_FLAG(5) #define MSG_FLAG_CAPS_LPM_ABORT TI_SCI_MSG_FLAG(9) +#define MSG_FLAG_CAPS_IO_ISOLATION TI_SCI_MSG_FLAG(7) #define MSG_MASK_CAPS_LPM GENMASK_ULL(4, 1) u64 fw_caps; } __packed; @@ -595,6 +597,11 @@ struct ti_sci_msg_resp_get_clock_freq { struct ti_sci_msg_req_prepare_sleep { struct ti_sci_msg_hdr hdr; +/* + * When sending prepare_sleep with MODE_PARTIAL_IO no response will be sent, + * no further steps are required. 
+ */ +#define TISCI_MSG_VALUE_SLEEP_MODE_PARTIAL_IO 0x03 #define TISCI_MSG_VALUE_SLEEP_MODE_DM_MANAGED 0xfd u8 mode; u32 ctx_lo; diff --git a/drivers/firmware/xilinx/Makefile b/drivers/firmware/xilinx/Makefile index 875a53703c82..70f8f02f14a3 100644 --- a/drivers/firmware/xilinx/Makefile +++ b/drivers/firmware/xilinx/Makefile @@ -1,5 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 # Makefile for Xilinx firmwares -obj-$(CONFIG_ZYNQMP_FIRMWARE) += zynqmp.o +obj-$(CONFIG_ZYNQMP_FIRMWARE) += zynqmp.o zynqmp-ufs.o obj-$(CONFIG_ZYNQMP_FIRMWARE_DEBUG) += zynqmp-debug.o diff --git a/drivers/firmware/xilinx/zynqmp-debug.c b/drivers/firmware/xilinx/zynqmp-debug.c index 22853ae0efdf..36efb827f3da 100644 --- a/drivers/firmware/xilinx/zynqmp-debug.c +++ b/drivers/firmware/xilinx/zynqmp-debug.c @@ -3,6 +3,7 @@ * Xilinx Zynq MPSoC Firmware layer for debugfs APIs * * Copyright (C) 2014-2018 Xilinx, Inc. + * Copyright (C) 2022 - 2025 Advanced Micro Devices, Inc. * * Michal Simek <michal.simek@amd.com> * Davorin Mista <davorin.mista@aggios.com> @@ -38,6 +39,7 @@ static struct pm_api_info pm_api_list[] = { PM_API(PM_RELEASE_NODE), PM_API(PM_SET_REQUIREMENT), PM_API(PM_GET_API_VERSION), + PM_API(PM_GET_NODE_STATUS), PM_API(PM_REGISTER_NOTIFIER), PM_API(PM_RESET_ASSERT), PM_API(PM_RESET_GET_STATUS), @@ -167,6 +169,17 @@ static int process_api_request(u32 pm_id, u64 *pm_api_arg, u32 *pm_api_ret) pm_api_arg[3] ? pm_api_arg[3] : ZYNQMP_PM_REQUEST_ACK_BLOCKING); break; + case PM_GET_NODE_STATUS: + ret = zynqmp_pm_get_node_status(pm_api_arg[0], + &pm_api_ret[0], + &pm_api_ret[1], + &pm_api_ret[2]); + if (!ret) + sprintf(debugfs_buf, + "GET_NODE_STATUS:\n\tNodeId: %llu\n\tStatus: %u\n\tRequirements: %u\n\tUsage: %u\n", + pm_api_arg[0], pm_api_ret[0], + pm_api_ret[1], pm_api_ret[2]); + break; case PM_REGISTER_NOTIFIER: ret = zynqmp_pm_register_notifier(pm_api_arg[0], pm_api_arg[1] ? diff --git a/drivers/firmware/xilinx/zynqmp-ufs.c b/drivers/firmware/xilinx/zynqmp-ufs.c new file mode 100644 index 000000000000..85da8a822f3a --- /dev/null +++ b/drivers/firmware/xilinx/zynqmp-ufs.c @@ -0,0 +1,118 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Firmware Layer for UFS APIs + * + * Copyright (C) 2025 Advanced Micro Devices, Inc. + */ + +#include <linux/firmware/xlnx-zynqmp.h> +#include <linux/module.h> + +/* Register Node IDs */ +#define PM_REGNODE_PMC_IOU_SLCR 0x30000002 /* PMC IOU SLCR */ +#define PM_REGNODE_EFUSE_CACHE 0x30000003 /* EFUSE Cache */ + +/* Register Offsets for PMC IOU SLCR */ +#define SRAM_CSR_OFFSET 0x104C /* SRAM Control and Status */ +#define TXRX_CFGRDY_OFFSET 0x1054 /* M-PHY TX-RX Config ready */ + +/* Masks for SRAM Control and Status Register */ +#define SRAM_CSR_INIT_DONE_MASK BIT(0) /* SRAM initialization done */ +#define SRAM_CSR_EXT_LD_DONE_MASK BIT(1) /* SRAM External load done */ +#define SRAM_CSR_BYPASS_MASK BIT(2) /* Bypass SRAM interface */ + +/* Mask to check M-PHY TX-RX configuration readiness */ +#define TX_RX_CFG_RDY_MASK GENMASK(3, 0) + +/* Register Offsets for EFUSE Cache */ +#define UFS_CAL_1_OFFSET 0xBE8 /* UFS Calibration Value */ + +/** + * zynqmp_pm_is_mphy_tx_rx_config_ready - check M-PHY TX-RX config readiness + * @is_ready: Store output status (true/false) + * + * Return: Returns 0 on success or error value on failure. 
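For context (not part of this patch), a UFS host controller driver might consume these helpers during PHY bring-up roughly as follows; the ordering and the decision to bypass SRAM are assumptions for illustration, and the remaining helpers referenced here are the ones added further down in this new file.

#include <linux/errno.h>
#include <linux/firmware/xlnx-zynqmp.h>

/* Hypothetical PHY setup step in a UFS host driver. */
static int example_ufs_phy_setup(u32 *calibration)
{
	bool ready = false, sram_done = false;
	int ret;

	ret = zynqmp_pm_is_mphy_tx_rx_config_ready(&ready);
	if (ret)
		return ret;
	if (!ready)
		return -EBUSY;	/* example policy: let the caller retry later */

	ret = zynqmp_pm_is_sram_init_done(&sram_done);
	if (ret)
		return ret;

	if (!sram_done) {
		/* Example policy only: skip the external SRAM load entirely */
		ret = zynqmp_pm_set_sram_bypass();
		if (ret)
			return ret;
	}

	return zynqmp_pm_get_ufs_calibration_values(calibration);
}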
+ */ +int zynqmp_pm_is_mphy_tx_rx_config_ready(bool *is_ready) +{ + u32 regval; + int ret; + + if (!is_ready) + return -EINVAL; + + ret = zynqmp_pm_sec_read_reg(PM_REGNODE_PMC_IOU_SLCR, TXRX_CFGRDY_OFFSET, ®val); + if (ret) + return ret; + + regval &= TX_RX_CFG_RDY_MASK; + if (regval) + *is_ready = true; + else + *is_ready = false; + + return ret; +} +EXPORT_SYMBOL_GPL(zynqmp_pm_is_mphy_tx_rx_config_ready); + +/** + * zynqmp_pm_is_sram_init_done - check SRAM initialization + * @is_done: Store output status (true/false) + * + * Return: Returns 0 on success or error value on failure. + */ +int zynqmp_pm_is_sram_init_done(bool *is_done) +{ + u32 regval; + int ret; + + if (!is_done) + return -EINVAL; + + ret = zynqmp_pm_sec_read_reg(PM_REGNODE_PMC_IOU_SLCR, SRAM_CSR_OFFSET, ®val); + if (ret) + return ret; + + regval &= SRAM_CSR_INIT_DONE_MASK; + if (regval) + *is_done = true; + else + *is_done = false; + + return ret; +} +EXPORT_SYMBOL_GPL(zynqmp_pm_is_sram_init_done); + +/** + * zynqmp_pm_set_sram_bypass - Set SRAM bypass Control + * + * Return: Returns 0 on success or error value on failure. + */ +int zynqmp_pm_set_sram_bypass(void) +{ + u32 sram_csr; + int ret; + + ret = zynqmp_pm_sec_read_reg(PM_REGNODE_PMC_IOU_SLCR, SRAM_CSR_OFFSET, &sram_csr); + if (ret) + return ret; + + sram_csr &= ~SRAM_CSR_EXT_LD_DONE_MASK; + sram_csr |= SRAM_CSR_BYPASS_MASK; + + return zynqmp_pm_sec_mask_write_reg(PM_REGNODE_PMC_IOU_SLCR, SRAM_CSR_OFFSET, + GENMASK(2, 1), sram_csr); +} +EXPORT_SYMBOL_GPL(zynqmp_pm_set_sram_bypass); + +/** + * zynqmp_pm_get_ufs_calibration_values - Read UFS calibration values + * @val: Store the calibration value + * + * Return: Returns 0 on success or error value on failure. + */ +int zynqmp_pm_get_ufs_calibration_values(u32 *val) +{ + return zynqmp_pm_sec_read_reg(PM_REGNODE_EFUSE_CACHE, UFS_CAL_1_OFFSET, val); +} +EXPORT_SYMBOL_GPL(zynqmp_pm_get_ufs_calibration_values); diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c index 02da3e48bc8f..ad811f40e059 100644 --- a/drivers/firmware/xilinx/zynqmp.c +++ b/drivers/firmware/xilinx/zynqmp.c @@ -3,7 +3,7 @@ * Xilinx Zynq MPSoC Firmware layer * * Copyright (C) 2014-2022 Xilinx, Inc. - * Copyright (C) 2022 - 2024, Advanced Micro Devices, Inc. + * Copyright (C) 2022 - 2025 Advanced Micro Devices, Inc. * * Michal Simek <michal.simek@amd.com> * Davorin Mista <davorin.mista@aggios.com> @@ -72,6 +72,15 @@ struct pm_api_feature_data { struct hlist_node hentry; }; +struct platform_fw_data { + /* + * Family code for platform. + */ + const u32 family_code; +}; + +static struct platform_fw_data *active_platform_fw_data; + static const struct mfd_cell firmware_devs[] = { { .name = "zynqmp_power_controller", @@ -464,8 +473,6 @@ int zynqmp_pm_invoke_fn(u32 pm_api_id, u32 *ret_payload, u32 num_args, ...) 
static u32 pm_api_version; static u32 pm_tz_version; -static u32 pm_family_code; -static u32 pm_sub_family_code; int zynqmp_pm_register_sgi(u32 sgi_num, u32 reset) { @@ -532,32 +539,18 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_get_chipid); /** * zynqmp_pm_get_family_info() - Get family info of platform * @family: Returned family code value - * @subfamily: Returned sub-family code value * * Return: Returns status, either success or error+reason */ -int zynqmp_pm_get_family_info(u32 *family, u32 *subfamily) +int zynqmp_pm_get_family_info(u32 *family) { - u32 ret_payload[PAYLOAD_ARG_CNT]; - u32 idcode; - int ret; + if (!active_platform_fw_data) + return -ENODEV; - /* Check is family or sub-family code already received */ - if (pm_family_code && pm_sub_family_code) { - *family = pm_family_code; - *subfamily = pm_sub_family_code; - return 0; - } - - ret = zynqmp_pm_invoke_fn(PM_GET_CHIPID, ret_payload, 0); - if (ret < 0) - return ret; + if (!family) + return -EINVAL; - idcode = ret_payload[1]; - pm_family_code = FIELD_GET(FAMILY_CODE_MASK, idcode); - pm_sub_family_code = FIELD_GET(SUB_FAMILY_CODE_MASK, idcode); - *family = pm_family_code; - *subfamily = pm_sub_family_code; + *family = active_platform_fw_data->family_code; return 0; } @@ -1238,8 +1231,13 @@ int zynqmp_pm_pinctrl_set_config(const u32 pin, const u32 param, u32 value) { int ret; + u32 pm_family_code; + + ret = zynqmp_pm_get_family_info(&pm_family_code); + if (ret) + return ret; - if (pm_family_code == ZYNQMP_FAMILY_CODE && + if (pm_family_code == PM_ZYNQMP_FAMILY_CODE && param == PM_PINCTRL_CONFIG_TRI_STATE) { ret = zynqmp_pm_feature(PM_PINCTRL_CONFIG_PARAM_SET); if (ret < PM_PINCTRL_PARAM_SET_VERSION) { @@ -1414,6 +1412,45 @@ int zynqmp_pm_set_tcm_config(u32 node_id, enum rpu_tcm_comb tcm_mode) EXPORT_SYMBOL_GPL(zynqmp_pm_set_tcm_config); /** + * zynqmp_pm_get_node_status - PM call to request a node's current power state + * @node: ID of the component or sub-system in question + * @status: Current operating state of the requested node + * @requirements: Current requirements asserted on the node, + * used for slave nodes only. 
+ * @usage: Usage information, used for slave nodes only: + * PM_USAGE_NO_MASTER - No master is currently using + * the node + * PM_USAGE_CURRENT_MASTER - Only requesting master is + * currently using the node + * PM_USAGE_OTHER_MASTER - Only other masters are + * currently using the node + * PM_USAGE_BOTH_MASTERS - Both the current and at least + * one other master is currently + * using the node + * + * Return: Returns status, either success or error+reason + */ +int zynqmp_pm_get_node_status(const u32 node, u32 *const status, + u32 *const requirements, u32 *const usage) +{ + u32 ret_payload[PAYLOAD_ARG_CNT]; + int ret; + + if (!status || !requirements || !usage) + return -EINVAL; + + ret = zynqmp_pm_invoke_fn(PM_GET_NODE_STATUS, ret_payload, 1, node); + if (ret_payload[0] == XST_PM_SUCCESS) { + *status = ret_payload[1]; + *requirements = ret_payload[2]; + *usage = ret_payload[3]; + } + + return ret; +} +EXPORT_SYMBOL_GPL(zynqmp_pm_get_node_status); + +/** * zynqmp_pm_force_pwrdwn - PM call to request for another PU or subsystem to * be powered down forcefully * @node: Node ID of the targeted PU or subsystem @@ -1617,6 +1654,52 @@ int zynqmp_pm_get_feature_config(enum pm_feature_config_id id, } /** + * zynqmp_pm_sec_read_reg - PM call to securely read from given offset + * of the node + * @node_id: Node Id of the device + * @offset: Offset to be used (20-bit) + * @ret_value: Output data read from the given offset after + * firmware access policy is successfully enforced + * + * Return: Returns 0 on success or error value on failure + */ +int zynqmp_pm_sec_read_reg(u32 node_id, u32 offset, u32 *ret_value) +{ + u32 ret_payload[PAYLOAD_ARG_CNT]; + u32 count = 1; + int ret; + + if (!ret_value) + return -EINVAL; + + ret = zynqmp_pm_invoke_fn(PM_IOCTL, ret_payload, 4, node_id, IOCTL_READ_REG, + offset, count); + + *ret_value = ret_payload[1]; + + return ret; +} +EXPORT_SYMBOL_GPL(zynqmp_pm_sec_read_reg); + +/** + * zynqmp_pm_sec_mask_write_reg - PM call to securely write to given offset + * of the node + * @node_id: Node Id of the device + * @offset: Offset to be used (20-bit) + * @mask: Mask to be used + * @value: Value to be written + * + * Return: Returns 0 on success or error value on failure + */ +int zynqmp_pm_sec_mask_write_reg(const u32 node_id, const u32 offset, u32 mask, + u32 value) +{ + return zynqmp_pm_invoke_fn(PM_IOCTL, NULL, 5, node_id, IOCTL_MASK_WRITE_REG, + offset, mask, value); +} +EXPORT_SYMBOL_GPL(zynqmp_pm_sec_mask_write_reg); + +/** * zynqmp_pm_set_sd_config - PM call to set value of SD config registers * @node: SD node ID * @config: The config type of SD registers @@ -2007,12 +2090,18 @@ static int zynqmp_firmware_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct zynqmp_devinfo *devinfo; + u32 pm_family_code; int ret; ret = get_set_conduit_method(dev->of_node); if (ret) return ret; + /* Get platform-specific firmware data from device tree match */ + active_platform_fw_data = (struct platform_fw_data *)device_get_match_data(dev); + if (!active_platform_fw_data) + return -EINVAL; + /* Get SiP SVC version number */ ret = zynqmp_pm_get_sip_svc_version(&sip_svc_version); if (ret) @@ -2045,8 +2134,8 @@ static int zynqmp_firmware_probe(struct platform_device *pdev) pr_info("%s Platform Management API v%d.%d\n", __func__, pm_api_version >> 16, pm_api_version & 0xFFFF); - /* Get the Family code and sub family code of platform */ - ret = zynqmp_pm_get_family_info(&pm_family_code, &pm_sub_family_code); + /* Get the Family code of platform */ + ret = 
zynqmp_pm_get_family_info(&pm_family_code); if (ret < 0) return ret; @@ -2073,7 +2162,7 @@ static int zynqmp_firmware_probe(struct platform_device *pdev) zynqmp_pm_api_debugfs_init(); - if (pm_family_code == VERSAL_FAMILY_CODE) { + if (pm_family_code != PM_ZYNQMP_FAMILY_CODE) { em_dev = platform_device_register_data(&pdev->dev, "xlnx_event_manager", -1, NULL, 0); if (IS_ERR(em_dev)) @@ -2113,9 +2202,22 @@ static void zynqmp_firmware_sync_state(struct device *dev) dev_warn(dev, "failed to release power management to firmware\n"); } +static const struct platform_fw_data platform_fw_data_versal = { + .family_code = PM_VERSAL_FAMILY_CODE, +}; + +static const struct platform_fw_data platform_fw_data_versal_net = { + .family_code = PM_VERSAL_NET_FAMILY_CODE, +}; + +static const struct platform_fw_data platform_fw_data_zynqmp = { + .family_code = PM_ZYNQMP_FAMILY_CODE, +}; + static const struct of_device_id zynqmp_firmware_of_match[] = { - {.compatible = "xlnx,zynqmp-firmware"}, - {.compatible = "xlnx,versal-firmware"}, + {.compatible = "xlnx,zynqmp-firmware", .data = &platform_fw_data_zynqmp}, + {.compatible = "xlnx,versal-firmware", .data = &platform_fw_data_versal}, + {.compatible = "xlnx,versal-net-firmware", .data = &platform_fw_data_versal_net}, {}, }; MODULE_DEVICE_TABLE(of, zynqmp_firmware_of_match); |
