Diffstat (limited to 'include/linux')
127 files changed, 4211 insertions, 1517 deletions
diff --git a/include/linux/arch_topology.h b/include/linux/arch_topology.h index 0c2a8b846c20..ebd7f8935f96 100644 --- a/include/linux/arch_topology.h +++ b/include/linux/arch_topology.h @@ -80,6 +80,11 @@ extern struct cpu_topology cpu_topology[NR_CPUS]; #define topology_sibling_cpumask(cpu) (&cpu_topology[cpu].thread_sibling) #define topology_cluster_cpumask(cpu) (&cpu_topology[cpu].cluster_sibling) #define topology_llc_cpumask(cpu) (&cpu_topology[cpu].llc_sibling) + +#ifndef arch_cpu_is_threaded +#define arch_cpu_is_threaded() (0) +#endif + void init_cpu_topology(void); void store_cpu_topology(unsigned int cpuid); const struct cpumask *cpu_coregroup_mask(int cpu); diff --git a/include/linux/ata.h b/include/linux/ata.h index c9013e472aa3..54b416e26995 100644 --- a/include/linux/ata.h +++ b/include/linux/ata.h @@ -29,6 +29,7 @@ enum { ATA_MAX_SECTORS_128 = 128, ATA_MAX_SECTORS = 256, ATA_MAX_SECTORS_1024 = 1024, + ATA_MAX_SECTORS_8191 = 8191, ATA_MAX_SECTORS_LBA48 = 65535,/* avoid count to be 0000h */ ATA_MAX_SECTORS_TAPE = 65535, ATA_MAX_TRIM_RNUM = 64, /* 512-byte payload / (6-byte LBA + 2-byte range per entry) */ diff --git a/include/linux/backlight.h b/include/linux/backlight.h index 10e626db7eee..f29a9ef1052e 100644 --- a/include/linux/backlight.h +++ b/include/linux/backlight.h @@ -10,7 +10,6 @@ #define _LINUX_BACKLIGHT_H #include <linux/device.h> -#include <linux/fb.h> #include <linux/mutex.h> #include <linux/types.h> diff --git a/include/linux/cache_coherency.h b/include/linux/cache_coherency.h new file mode 100644 index 000000000000..cc81c5733e31 --- /dev/null +++ b/include/linux/cache_coherency.h @@ -0,0 +1,61 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Cache coherency maintenance operation device drivers + * + * Copyright Huawei 2025 + */ +#ifndef _LINUX_CACHE_COHERENCY_H_ +#define _LINUX_CACHE_COHERENCY_H_ + +#include <linux/list.h> +#include <linux/kref.h> +#include <linux/types.h> + +struct cc_inval_params { + phys_addr_t addr; + size_t size; +}; + +struct cache_coherency_ops_inst; + +struct cache_coherency_ops { + int (*wbinv)(struct cache_coherency_ops_inst *cci, + struct cc_inval_params *invp); + int (*done)(struct cache_coherency_ops_inst *cci); +}; + +struct cache_coherency_ops_inst { + struct kref kref; + struct list_head node; + const struct cache_coherency_ops *ops; +}; + +int cache_coherency_ops_instance_register(struct cache_coherency_ops_inst *cci); +void cache_coherency_ops_instance_unregister(struct cache_coherency_ops_inst *cci); + +struct cache_coherency_ops_inst * +_cache_coherency_ops_instance_alloc(const struct cache_coherency_ops *ops, + size_t size); +/** + * cache_coherency_ops_instance_alloc - Allocate cache coherency ops instance + * @ops: Cache maintenance operations + * @drv_struct: structure that contains the struct cache_coherency_ops_inst + * @member: Name of the struct cache_coherency_ops_inst member in @drv_struct. + * + * This allocates a driver specific structure and initializes the + * cache_coherency_ops_inst embedded in the drv_struct. Upon success the + * pointer must be freed via cache_coherency_ops_instance_put(). + * + * Returns a &drv_struct * on success, %NULL on error. 
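+ *
+ * An illustrative usage sketch (my_cc_driver and my_cc_ops are hypothetical
+ * names, not part of this interface): embed the ops instance as the first
+ * member of a driver structure, allocate it with this macro, register it,
+ * and drop the reference when done::
+ *
+ *	struct my_cc_driver {
+ *		struct cache_coherency_ops_inst cci;	(must be the first member)
+ *		void __iomem *base;
+ *	};
+ *
+ *	drv = cache_coherency_ops_instance_alloc(&my_cc_ops, struct my_cc_driver, cci);
+ *	if (!drv)
+ *		return -ENOMEM;
+ *	ret = cache_coherency_ops_instance_register(&drv->cci);
+ *	...
+ *	cache_coherency_ops_instance_unregister(&drv->cci);
+ *	cache_coherency_ops_instance_put(&drv->cci);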
+ */ +#define cache_coherency_ops_instance_alloc(ops, drv_struct, member) \ + ({ \ + static_assert(__same_type(struct cache_coherency_ops_inst, \ + ((drv_struct *)NULL)->member)); \ + static_assert(offsetof(drv_struct, member) == 0); \ + (drv_struct *)_cache_coherency_ops_instance_alloc(ops, \ + sizeof(drv_struct)); \ + }) +void cache_coherency_ops_instance_put(struct cache_coherency_ops_inst *cci); + +#endif diff --git a/include/linux/configfs.h b/include/linux/configfs.h index 698520b1bfdb..ef65c75beeaa 100644 --- a/include/linux/configfs.h +++ b/include/linux/configfs.h @@ -64,8 +64,8 @@ extern void config_item_put(struct config_item *); struct config_item_type { struct module *ct_owner; - struct configfs_item_operations *ct_item_ops; - struct configfs_group_operations *ct_group_ops; + const struct configfs_item_operations *ct_item_ops; + const struct configfs_group_operations *ct_group_ops; struct configfs_attribute **ct_attrs; struct configfs_bin_attribute **ct_bin_attrs; }; diff --git a/include/linux/cper.h b/include/linux/cper.h index 0ed60a91eca9..5b1236d8c65b 100644 --- a/include/linux/cper.h +++ b/include/linux/cper.h @@ -297,11 +297,11 @@ enum { #define CPER_ARM_INFO_FLAGS_PROPAGATED BIT(2) #define CPER_ARM_INFO_FLAGS_OVERFLOW BIT(3) -#define CPER_ARM_CACHE_ERROR 0 -#define CPER_ARM_TLB_ERROR 1 -#define CPER_ARM_BUS_ERROR 2 -#define CPER_ARM_VENDOR_ERROR 3 -#define CPER_ARM_MAX_TYPE CPER_ARM_VENDOR_ERROR +#define CPER_ARM_ERR_TYPE_MASK GENMASK(4,1) +#define CPER_ARM_CACHE_ERROR BIT(1) +#define CPER_ARM_TLB_ERROR BIT(2) +#define CPER_ARM_BUS_ERROR BIT(3) +#define CPER_ARM_VENDOR_ERROR BIT(4) #define CPER_ARM_ERR_VALID_TRANSACTION_TYPE BIT(0) #define CPER_ARM_ERR_VALID_OPERATION_TYPE BIT(1) @@ -588,6 +588,8 @@ const char *cper_mem_err_type_str(unsigned int); const char *cper_mem_err_status_str(u64 status); void cper_print_bits(const char *prefix, unsigned int bits, const char * const strs[], unsigned int strs_size); +int cper_bits_to_str(char *buf, int buf_size, unsigned long bits, + const char * const strs[], unsigned int strs_size); void cper_mem_err_pack(const struct cper_sec_mem_err *, struct cper_mem_err_compact *); const char *cper_mem_err_unpack(struct trace_seq *, diff --git a/include/linux/damon.h b/include/linux/damon.h index cae8c613c5fc..3813373a9200 100644 --- a/include/linux/damon.h +++ b/include/linux/damon.h @@ -91,17 +91,23 @@ struct damon_region { * @nr_regions: Number of monitoring target regions of this target. * @regions_list: Head of the monitoring target regions of this target. * @list: List head for siblings. + * @obsolete: Whether the commit destination target is obsolete. * * Each monitoring context could have multiple targets. For example, a context * for virtual memory address spaces could have multiple target processes. The * @pid should be set for appropriate &struct damon_operations including the * virtual address spaces monitoring operations. + * + * @obsolete is used only for damon_commit_targets() source targets, to specify + * the matching destination targets are obsolete. Read damon_commit_targets() + * to see how it is handled. */ struct damon_target { struct pid *pid; unsigned int nr_regions; struct list_head regions_list; struct list_head list; + bool obsolete; }; /** @@ -147,6 +153,8 @@ enum damos_action { * @DAMOS_QUOTA_SOME_MEM_PSI_US: System level some memory PSI in us. * @DAMOS_QUOTA_NODE_MEM_USED_BP: MemUsed ratio of a node. * @DAMOS_QUOTA_NODE_MEM_FREE_BP: MemFree ratio of a node. 
+ * @DAMOS_QUOTA_NODE_MEMCG_USED_BP: MemUsed ratio of a node for a cgroup. + * @DAMOS_QUOTA_NODE_MEMCG_FREE_BP: MemFree ratio of a node for a cgroup. * @NR_DAMOS_QUOTA_GOAL_METRICS: Number of DAMOS quota goal metrics. * * Metrics equal to larger than @NR_DAMOS_QUOTA_GOAL_METRICS are unsupported. @@ -156,6 +164,8 @@ enum damos_quota_goal_metric { DAMOS_QUOTA_SOME_MEM_PSI_US, DAMOS_QUOTA_NODE_MEM_USED_BP, DAMOS_QUOTA_NODE_MEM_FREE_BP, + DAMOS_QUOTA_NODE_MEMCG_USED_BP, + DAMOS_QUOTA_NODE_MEMCG_FREE_BP, NR_DAMOS_QUOTA_GOAL_METRICS, }; @@ -166,6 +176,7 @@ enum damos_quota_goal_metric { * @current_value: Current value of @metric. * @last_psi_total: Last measured total PSI * @nid: Node id. + * @memcg_id: Memcg id. * @list: List head for siblings. * * Data structure for getting the current score of the quota tuning goal. The @@ -176,6 +187,12 @@ enum damos_quota_goal_metric { * If @metric is DAMOS_QUOTA_USER_INPUT, @current_value should be manually * entered by the user, probably inside the kdamond callbacks. Otherwise, * DAMON sets @current_value with self-measured value of @metric. + * + * If @metric is DAMOS_QUOTA_NODE_MEM_{USED,FREE}_BP, @nid represents the node + * id of the target node to account the used/free memory. + * + * If @metric is DAMOS_QUOTA_NODE_MEMCG_{USED,FREE}_BP, @nid and @memcg_id + * represents the node id and the cgroup to account the used memory for. */ struct damos_quota_goal { enum damos_quota_goal_metric metric; @@ -184,7 +201,10 @@ struct damos_quota_goal { /* metric-dependent fields */ union { u64 last_psi_total; - int nid; + struct { + int nid; + unsigned short memcg_id; + }; }; struct list_head list; }; @@ -472,7 +492,7 @@ struct damos_migrate_dests { * @wmarks: Watermarks for automated (in)activation of this scheme. * @migrate_dests: Destination nodes if @action is "migrate_{hot,cold}". * @target_nid: Destination node if @action is "migrate_{hot,cold}". - * @filters: Additional set of &struct damos_filter for &action. + * @core_filters: Additional set of &struct damos_filter for &action. * @ops_filters: ops layer handling &struct damos_filter objects list. * @last_applied: Last @action applied ops-managing entity. * @stat: Statistics of this scheme. @@ -498,7 +518,7 @@ struct damos_migrate_dests { * * Before applying the &action to a memory region, &struct damon_operations * implementation could check pages of the region and skip &action to respect - * &filters + * &core_filters * * The minimum entity that @action can be applied depends on the underlying * &struct damon_operations. 
Since it may not be aligned with the core layer @@ -542,7 +562,7 @@ struct damos { struct damos_migrate_dests migrate_dests; }; }; - struct list_head filters; + struct list_head core_filters; struct list_head ops_filters; void *last_applied; struct damos_stat stat; @@ -851,11 +871,11 @@ static inline unsigned long damon_sz_region(struct damon_region *r) #define damos_for_each_quota_goal_safe(goal, next, quota) \ list_for_each_entry_safe(goal, next, &(quota)->goals, list) -#define damos_for_each_filter(f, scheme) \ - list_for_each_entry(f, &(scheme)->filters, list) +#define damos_for_each_core_filter(f, scheme) \ + list_for_each_entry(f, &(scheme)->core_filters, list) -#define damos_for_each_filter_safe(f, next, scheme) \ - list_for_each_entry_safe(f, next, &(scheme)->filters, list) +#define damos_for_each_core_filter_safe(f, next, scheme) \ + list_for_each_entry_safe(f, next, &(scheme)->core_filters, list) #define damos_for_each_ops_filter(f, scheme) \ list_for_each_entry(f, &(scheme)->ops_filters, list) @@ -947,7 +967,8 @@ int damon_call(struct damon_ctx *ctx, struct damon_call_control *control); int damos_walk(struct damon_ctx *ctx, struct damos_walk_control *control); int damon_set_region_biggest_system_ram_default(struct damon_target *t, - unsigned long *start, unsigned long *end); + unsigned long *start, unsigned long *end, + unsigned long min_sz_region); #endif /* CONFIG_DAMON */ diff --git a/include/linux/dcache.h b/include/linux/dcache.h index c83e02b94389..898c60d21c92 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h @@ -198,7 +198,6 @@ enum dentry_flags { DCACHE_REFERENCED = BIT(6), /* Recently used, don't discard. */ DCACHE_DONTCACHE = BIT(7), /* Purge from memory on final dput() */ DCACHE_CANT_MOUNT = BIT(8), - DCACHE_GENOCIDE = BIT(9), DCACHE_SHRINK_LIST = BIT(10), DCACHE_OP_WEAK_REVALIDATE = BIT(11), /* @@ -225,6 +224,7 @@ enum dentry_flags { DCACHE_PAR_LOOKUP = BIT(24), /* being looked up (with parent locked shared) */ DCACHE_DENTRY_CURSOR = BIT(25), DCACHE_NORCU = BIT(26), /* No RCU delay for freeing */ + DCACHE_PERSISTENT = BIT(27) }; #define DCACHE_MANAGED_DENTRY \ @@ -268,6 +268,8 @@ extern void d_tmpfile(struct file *, struct inode *); extern struct dentry *d_find_alias(struct inode *); extern void d_prune_aliases(struct inode *); +extern void d_dispose_if_unused(struct dentry *, struct list_head *); +extern void shrink_dentry_list(struct list_head *); extern struct dentry *d_find_alias_rcu(struct inode *); @@ -610,5 +612,7 @@ static inline struct dentry *d_next_sibling(const struct dentry *dentry) } void set_default_d_op(struct super_block *, const struct dentry_operations *); +struct dentry *d_make_persistent(struct dentry *, struct inode *); +void d_make_discardable(struct dentry *dentry); #endif /* __LINUX_DCACHE_H */ diff --git a/include/linux/device.h b/include/linux/device.h index b031ff71a5bd..0be95294b6e6 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -281,25 +281,6 @@ int __must_check device_create_bin_file(struct device *dev, void device_remove_bin_file(struct device *dev, const struct bin_attribute *attr); -/** - * devm_alloc_percpu - Resource-managed alloc_percpu - * @dev: Device to allocate per-cpu memory for - * @type: Type to allocate per-cpu memory for - * - * Managed alloc_percpu. Per-cpu memory allocated with this function is - * automatically freed on driver detach. - * - * RETURNS: - * Pointer to allocated memory on success, NULL on failure. 
- */ -#define devm_alloc_percpu(dev, type) \ - ((typeof(type) __percpu *)__devm_alloc_percpu((dev), sizeof(type), \ - __alignof__(type))) - -void __percpu *__devm_alloc_percpu(struct device *dev, size_t size, - size_t align); -void devm_free_percpu(struct device *dev, void __percpu *pdata); - struct device_dma_parameters { /* * a low level driver may set these to teach IOMMU code about diff --git a/include/linux/device/devres.h b/include/linux/device/devres.h index 8c5f57e0d613..9c1e3d643d69 100644 --- a/include/linux/device/devres.h +++ b/include/linux/device/devres.h @@ -9,6 +9,7 @@ #include <linux/stdarg.h> #include <linux/types.h> #include <asm/bug.h> +#include <asm/percpu.h> struct device; struct device_node; @@ -96,6 +97,22 @@ devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt, va_list ap); char * __printf(3, 4) __malloc devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...); +/** + * devm_alloc_percpu - Resource-managed alloc_percpu + * @dev: Device to allocate per-cpu memory for + * @type: Type to allocate per-cpu memory for + * + * Managed alloc_percpu. Per-cpu memory allocated with this function is + * automatically freed on driver detach. + * + * RETURNS: + * Pointer to allocated memory on success, NULL on failure. + */ +#define devm_alloc_percpu(dev, type) \ + ((typeof(type) __percpu *)__devm_alloc_percpu((dev), sizeof(type), __alignof__(type))) + +void __percpu *__devm_alloc_percpu(struct device *dev, size_t size, size_t align); + unsigned long devm_get_free_pages(struct device *dev, gfp_t gfp_mask, unsigned int order); void devm_free_pages(struct device *dev, unsigned long addr); diff --git a/include/linux/dma-buf-mapping.h b/include/linux/dma-buf-mapping.h new file mode 100644 index 000000000000..a3c0ce2d3a42 --- /dev/null +++ b/include/linux/dma-buf-mapping.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * DMA BUF Mapping Helpers + * + */ +#ifndef __DMA_BUF_MAPPING_H__ +#define __DMA_BUF_MAPPING_H__ +#include <linux/dma-buf.h> + +struct sg_table *dma_buf_phys_vec_to_sgt(struct dma_buf_attachment *attach, + struct p2pdma_provider *provider, + struct dma_buf_phys_vec *phys_vec, + size_t nr_ranges, size_t size, + enum dma_data_direction dir); +void dma_buf_free_sgt(struct dma_buf_attachment *attach, struct sg_table *sgt, + enum dma_data_direction dir); +#endif diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h index d58e329ac0e7..0bc492090237 100644 --- a/include/linux/dma-buf.h +++ b/include/linux/dma-buf.h @@ -22,6 +22,7 @@ #include <linux/fs.h> #include <linux/dma-fence.h> #include <linux/wait.h> +#include <linux/pci-p2pdma.h> struct device; struct dma_buf; @@ -531,6 +532,16 @@ struct dma_buf_export_info { }; /** + * struct dma_buf_phys_vec - describe continuous chunk of memory + * @paddr: physical address of that chunk + * @len: Length of this chunk + */ +struct dma_buf_phys_vec { + phys_addr_t paddr; + size_t len; +}; + +/** * DEFINE_DMA_BUF_EXPORT_INFO - helper macro for exporters * @name: export-info name * diff --git a/include/linux/dma-buf/heaps/cma.h b/include/linux/dma-buf/heaps/cma.h new file mode 100644 index 000000000000..e751479e21e7 --- /dev/null +++ b/include/linux/dma-buf/heaps/cma.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef DMA_BUF_HEAP_CMA_H_ +#define DMA_BUF_HEAP_CMA_H_ + +struct cma; + +#ifdef CONFIG_DMABUF_HEAPS_CMA +int dma_heap_cma_register_heap(struct cma *cma); +#else +static inline int dma_heap_cma_register_heap(struct cma *cma) +{ + return 0; +} +#endif // 
CONFIG_DMABUF_HEAPS_CMA + +#endif // DMA_BUF_HEAP_CMA_H_ diff --git a/include/linux/efi.h b/include/linux/efi.h index b23ff8b83219..2a43094e23f7 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h @@ -290,7 +290,7 @@ typedef efi_status_t efi_get_variable_t (efi_char16_t *name, efi_guid_t *vendor, unsigned long *data_size, void *data); typedef efi_status_t efi_get_next_variable_t (unsigned long *name_size, efi_char16_t *name, efi_guid_t *vendor); -typedef efi_status_t efi_set_variable_t (efi_char16_t *name, efi_guid_t *vendor, +typedef efi_status_t efi_set_variable_t (efi_char16_t *name, efi_guid_t *vendor, u32 attr, unsigned long data_size, void *data); typedef efi_status_t efi_get_next_high_mono_count_t (u32 *count); @@ -373,6 +373,8 @@ void efi_native_runtime_setup(void); #define EFI_DEVICE_PATH_TO_TEXT_PROTOCOL_GUID EFI_GUID(0x8b843e20, 0x8132, 0x4852, 0x90, 0xcc, 0x55, 0x1a, 0x4e, 0x4a, 0x7f, 0x1c) #define EFI_DEVICE_PATH_FROM_TEXT_PROTOCOL_GUID EFI_GUID(0x05c99a21, 0xc70f, 0x4ad2, 0x8a, 0x5f, 0x35, 0xdf, 0x33, 0x43, 0xf5, 0x1e) #define EFI_GRAPHICS_OUTPUT_PROTOCOL_GUID EFI_GUID(0x9042a9de, 0x23dc, 0x4a38, 0x96, 0xfb, 0x7a, 0xde, 0xd0, 0x80, 0x51, 0x6a) +#define EFI_EDID_DISCOVERED_PROTOCOL_GUID EFI_GUID(0x1c0c34f6, 0xd380, 0x41fa, 0xa0, 0x49, 0x8a, 0xd0, 0x6c, 0x1a, 0x66, 0xaa) +#define EFI_EDID_ACTIVE_PROTOCOL_GUID EFI_GUID(0xbd8c1056, 0x9f36, 0x44ec, 0x92, 0xa8, 0xa6, 0x33, 0x7f, 0x81, 0x79, 0x86) #define EFI_PCI_IO_PROTOCOL_GUID EFI_GUID(0x4cf5b200, 0x68b8, 0x4ca5, 0x9e, 0xec, 0xb2, 0x3e, 0x3f, 0x50, 0x02, 0x9a) #define EFI_FILE_INFO_ID EFI_GUID(0x09576e92, 0x6d3f, 0x11d2, 0x8e, 0x39, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b) #define EFI_SYSTEM_RESOURCE_TABLE_GUID EFI_GUID(0xb122a263, 0x3661, 0x4f68, 0x99, 0x29, 0x78, 0xf8, 0xb0, 0xd6, 0x21, 0x80) @@ -772,7 +774,7 @@ extern unsigned long efi_mem_attr_table; */ typedef int (*efi_memattr_perm_setter)(struct mm_struct *, efi_memory_desc_t *, bool); -extern int efi_memattr_init(void); +extern void efi_memattr_init(void); extern int efi_memattr_apply_permissions(struct mm_struct *mm, efi_memattr_perm_setter fn); diff --git a/include/linux/ehl_pse_io_aux.h b/include/linux/ehl_pse_io_aux.h new file mode 100644 index 000000000000..afb8587ee5fb --- /dev/null +++ b/include/linux/ehl_pse_io_aux.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Intel Elkhart Lake PSE I/O Auxiliary Device + * + * Copyright (c) 2025 Intel Corporation. + * + * Author: Raag Jadav <raag.jadav@intel.com> + */ + +#ifndef _EHL_PSE_IO_AUX_H_ +#define _EHL_PSE_IO_AUX_H_ + +#include <linux/ioport.h> + +#define EHL_PSE_IO_NAME "ehl_pse_io" +#define EHL_PSE_GPIO_NAME "gpio" +#define EHL_PSE_TIO_NAME "pps_tio" + +struct ehl_pse_io_data { + struct resource mem; + int irq; +}; + +#endif /* _EHL_PSE_IO_AUX_H_ */ diff --git a/include/linux/err.h b/include/linux/err.h index 1d60aa86db53..8c37be0620ab 100644 --- a/include/linux/err.h +++ b/include/linux/err.h @@ -41,6 +41,14 @@ static inline void * __must_check ERR_PTR(long error) return (void *) error; } +/** + * INIT_ERR_PTR - Init a const error pointer. + * @error: A negative error code. + * + * Like ERR_PTR(), but usable to initialize static variables. + */ +#define INIT_ERR_PTR(error) ((void *)(error)) + /* Return the pointer in the percpu address space. 
*/ #define ERR_PTR_PCPU(error) ((void __percpu *)(unsigned long)ERR_PTR(error)) diff --git a/include/linux/firewire.h b/include/linux/firewire.h index 6d208769d456..6143b7d28eac 100644 --- a/include/linux/firewire.h +++ b/include/linux/firewire.h @@ -170,6 +170,20 @@ struct fw_attribute_group { struct attribute *attrs[13]; }; +enum fw_device_quirk { + // See afa1282a35d3 ("firewire: core: check for 1394a compliant IRM, fix inaccessibility of Sony camcorder"). + FW_DEVICE_QUIRK_IRM_IS_1394_1995_ONLY = BIT(0), + + // See a509e43ff338 ("firewire: core: fix unstable I/O with Canon camcorder"). + FW_DEVICE_QUIRK_IRM_IGNORES_BUS_MANAGER = BIT(1), + + // MOTU Audio Express transfers acknowledge packet with 0x10 for pending state. + FW_DEVICE_QUIRK_ACK_PACKET_WITH_INVALID_PENDING_CODE = BIT(2), + + // TASCAM FW-1082/FW-1804/FW-1884 often freezes when receiving S400 packets. + FW_DEVICE_QUIRK_UNSTABLE_AT_S400 = BIT(3), +}; + enum fw_device_state { FW_DEVICE_INITIALIZING, FW_DEVICE_RUNNING, @@ -203,6 +217,9 @@ struct fw_device { struct fw_card *card; struct device device; + // A set of enum fw_device_quirk. + int quirks; + struct mutex client_list_mutex; struct list_head client_list; diff --git a/include/linux/firmware/cirrus/cs_dsp.h b/include/linux/firmware/cirrus/cs_dsp.h index a66eb7624730..0ec1cdc5585d 100644 --- a/include/linux/firmware/cirrus/cs_dsp.h +++ b/include/linux/firmware/cirrus/cs_dsp.h @@ -102,7 +102,7 @@ struct cs_dsp_coeff_ctl { const char *subname; unsigned int subname_len; unsigned int offset; - size_t len; + unsigned int len; unsigned int type; unsigned int flags; unsigned int set:1; @@ -188,8 +188,8 @@ struct cs_dsp { #ifdef CONFIG_DEBUG_FS struct dentry *debugfs_root; - char *wmfw_file_name; - char *bin_file_name; + const char *wmfw_file_name; + const char *bin_file_name; #endif }; diff --git a/include/linux/firmware/cirrus/cs_dsp_test_utils.h b/include/linux/firmware/cirrus/cs_dsp_test_utils.h index ecd821ed8064..1f97764fdfd7 100644 --- a/include/linux/firmware/cirrus/cs_dsp_test_utils.h +++ b/include/linux/firmware/cirrus/cs_dsp_test_utils.h @@ -26,21 +26,21 @@ struct cs_dsp_test { struct cs_dsp_test_local *local; - /* Following members are private */ + /* private: Following members are private */ bool saw_bus_write; }; /** * struct cs_dsp_mock_alg_def - Info for creating a mock algorithm entry. * - * @id Algorithm ID. - * @ver; Algorithm version. - * @xm_base_words XM base address in DSP words. - * @xm_size_words XM size in DSP words. - * @ym_base_words YM base address in DSP words. - * @ym_size_words YM size in DSP words. - * @zm_base_words ZM base address in DSP words. - * @zm_size_words ZM size in DSP words. + * @id: Algorithm ID. + * @ver: Algorithm version. + * @xm_base_words: XM base address in DSP words. + * @xm_size_words: XM size in DSP words. + * @ym_base_words: YM base address in DSP words. + * @ym_size_words: YM size in DSP words. + * @zm_base_words: ZM base address in DSP words. + * @zm_size_words: ZM size in DSP words. */ struct cs_dsp_mock_alg_def { unsigned int id; diff --git a/include/linux/firmware/qcom/qcom_tzmem.h b/include/linux/firmware/qcom/qcom_tzmem.h index 48ac0e5454c7..23173e0c3ddd 100644 --- a/include/linux/firmware/qcom/qcom_tzmem.h +++ b/include/linux/firmware/qcom/qcom_tzmem.h @@ -17,11 +17,20 @@ struct qcom_tzmem_pool; * enum qcom_tzmem_policy - Policy for pool growth. */ enum qcom_tzmem_policy { - /**< Static pool, never grow above initial size. */ + /** + * @QCOM_TZMEM_POLICY_STATIC: Static pool, + * never grow above initial size. 
+ */ QCOM_TZMEM_POLICY_STATIC = 1, - /**< When out of memory, add increment * current size of memory. */ + /** + * @QCOM_TZMEM_POLICY_MULTIPLIER: When out of memory, + * add increment * current size of memory. + */ QCOM_TZMEM_POLICY_MULTIPLIER, - /**< When out of memory add as much as is needed until max_size. */ + /** + * @QCOM_TZMEM_POLICY_ON_DEMAND: When out of memory + * add as much as is needed until max_size. + */ QCOM_TZMEM_POLICY_ON_DEMAND, }; diff --git a/include/linux/firmware/xlnx-zynqmp-ufs.h b/include/linux/firmware/xlnx-zynqmp-ufs.h new file mode 100644 index 000000000000..d3538dd5822a --- /dev/null +++ b/include/linux/firmware/xlnx-zynqmp-ufs.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Firmware layer for UFS APIs. + * + * Copyright (c) 2025 Advanced Micro Devices, Inc. + */ + +#ifndef __FIRMWARE_XLNX_ZYNQMP_UFS_H__ +#define __FIRMWARE_XLNX_ZYNQMP_UFS_H__ + +#if IS_REACHABLE(CONFIG_ZYNQMP_FIRMWARE) +int zynqmp_pm_is_mphy_tx_rx_config_ready(bool *is_ready); +int zynqmp_pm_is_sram_init_done(bool *is_done); +int zynqmp_pm_set_sram_bypass(void); +int zynqmp_pm_get_ufs_calibration_values(u32 *val); +#else +static inline int zynqmp_pm_is_mphy_tx_rx_config_ready(bool *is_ready) +{ + return -ENODEV; +} + +static inline int zynqmp_pm_is_sram_init_done(bool *is_done) +{ + return -ENODEV; +} + +static inline int zynqmp_pm_set_sram_bypass(void) +{ + return -ENODEV; +} + +static inline int zynqmp_pm_get_ufs_calibration_values(u32 *val) +{ + return -ENODEV; +} +#endif + +#endif /* __FIRMWARE_XLNX_ZYNQMP_UFS_H__ */ diff --git a/include/linux/firmware/xlnx-zynqmp.h b/include/linux/firmware/xlnx-zynqmp.h index ae48d619c4e0..15fdbd089bbf 100644 --- a/include/linux/firmware/xlnx-zynqmp.h +++ b/include/linux/firmware/xlnx-zynqmp.h @@ -3,7 +3,7 @@ * Xilinx Zynq MPSoC Firmware layer * * Copyright (C) 2014-2021 Xilinx - * Copyright (C) 2022 - 2024, Advanced Micro Devices, Inc. + * Copyright (C) 2022 - 2025 Advanced Micro Devices, Inc. 
* * Michal Simek <michal.simek@amd.com> * Davorin Mista <davorin.mista@aggios.com> @@ -16,6 +16,7 @@ #include <linux/types.h> #include <linux/err.h> +#include <linux/firmware/xlnx-zynqmp-ufs.h> #define ZYNQMP_PM_VERSION_MAJOR 1 #define ZYNQMP_PM_VERSION_MINOR 0 @@ -51,16 +52,10 @@ #define PM_PINCTRL_PARAM_SET_VERSION 2 -#define ZYNQMP_FAMILY_CODE 0x23 -#define VERSAL_FAMILY_CODE 0x26 - -/* When all subfamily of platform need to support */ -#define ALL_SUB_FAMILY_CODE 0x00 -#define VERSAL_SUB_FAMILY_CODE 0x01 -#define VERSALNET_SUB_FAMILY_CODE 0x03 - -#define FAMILY_CODE_MASK GENMASK(27, 21) -#define SUB_FAMILY_CODE_MASK GENMASK(20, 19) +/* Family codes */ +#define PM_ZYNQMP_FAMILY_CODE 0x1 /* ZynqMP family code */ +#define PM_VERSAL_FAMILY_CODE 0x2 /* Versal family code */ +#define PM_VERSAL_NET_FAMILY_CODE 0x3 /* Versal NET family code */ #define API_ID_MASK GENMASK(7, 0) #define MODULE_ID_MASK GENMASK(11, 8) @@ -164,6 +159,7 @@ enum pm_api_cb_id { enum pm_api_id { PM_API_FEATURES = 0, PM_GET_API_VERSION = 1, + PM_GET_NODE_STATUS = 3, PM_REGISTER_NOTIFIER = 5, PM_FORCE_POWERDOWN = 8, PM_REQUEST_WAKEUP = 10, @@ -241,6 +237,7 @@ enum pm_ioctl_id { IOCTL_GET_FEATURE_CONFIG = 27, /* IOCTL for Secure Read/Write Interface */ IOCTL_READ_REG = 28, + IOCTL_MASK_WRITE_REG = 29, /* Dynamic SD/GEM configuration */ IOCTL_SET_SD_CONFIG = 30, IOCTL_SET_GEM_CONFIG = 31, @@ -564,7 +561,7 @@ int zynqmp_pm_invoke_fw_fn(u32 pm_api_id, u32 *ret_payload, u32 num_args, ...); #if IS_REACHABLE(CONFIG_ZYNQMP_FIRMWARE) int zynqmp_pm_get_api_version(u32 *version); int zynqmp_pm_get_chipid(u32 *idcode, u32 *version); -int zynqmp_pm_get_family_info(u32 *family, u32 *subfamily); +int zynqmp_pm_get_family_info(u32 *family); int zynqmp_pm_query_data(struct zynqmp_pm_query_data qdata, u32 *out); int zynqmp_pm_clock_enable(u32 clock_id); int zynqmp_pm_clock_disable(u32 clock_id); @@ -619,6 +616,9 @@ int zynqmp_pm_feature(const u32 api_id); int zynqmp_pm_is_function_supported(const u32 api_id, const u32 id); int zynqmp_pm_set_feature_config(enum pm_feature_config_id id, u32 value); int zynqmp_pm_get_feature_config(enum pm_feature_config_id id, u32 *payload); +int zynqmp_pm_sec_read_reg(u32 node_id, u32 offset, u32 *ret_value); +int zynqmp_pm_sec_mask_write_reg(const u32 node_id, const u32 offset, + u32 mask, u32 value); int zynqmp_pm_register_sgi(u32 sgi_num, u32 reset); int zynqmp_pm_force_pwrdwn(const u32 target, const enum zynqmp_pm_request_ack ack); @@ -629,6 +629,8 @@ int zynqmp_pm_request_wake(const u32 node, int zynqmp_pm_get_rpu_mode(u32 node_id, enum rpu_oper_mode *rpu_mode); int zynqmp_pm_set_rpu_mode(u32 node_id, enum rpu_oper_mode rpu_mode); int zynqmp_pm_set_tcm_config(u32 node_id, enum rpu_tcm_comb tcm_mode); +int zynqmp_pm_get_node_status(const u32 node, u32 *const status, + u32 *const requirements, u32 *const usage); int zynqmp_pm_set_sd_config(u32 node, enum pm_sd_config_type config, u32 value); int zynqmp_pm_set_gem_config(u32 node, enum pm_gem_config_type config, u32 value); @@ -643,7 +645,7 @@ static inline int zynqmp_pm_get_chipid(u32 *idcode, u32 *version) return -ENODEV; } -static inline int zynqmp_pm_get_family_info(u32 *family, u32 *subfamily) +static inline int zynqmp_pm_get_family_info(u32 *family) { return -ENODEV; } @@ -916,6 +918,17 @@ static inline int zynqmp_pm_request_wake(const u32 node, return -ENODEV; } +static inline int zynqmp_pm_sec_read_reg(u32 node_id, u32 offset, u32 *ret_value) +{ + return -ENODEV; +} + +static inline int zynqmp_pm_sec_mask_write_reg(const u32 node_id, const u32 
offset, + u32 mask, u32 value) +{ + return -ENODEV; +} + static inline int zynqmp_pm_get_rpu_mode(u32 node_id, enum rpu_oper_mode *rpu_mode) { return -ENODEV; @@ -931,6 +944,13 @@ static inline int zynqmp_pm_set_tcm_config(u32 node_id, enum rpu_tcm_comb tcm_mo return -ENODEV; } +static inline int zynqmp_pm_get_node_status(const u32 node, u32 *const status, + u32 *const requirements, + u32 *const usage) +{ + return -ENODEV; +} + static inline int zynqmp_pm_set_sd_config(u32 node, enum pm_sd_config_type config, u32 value) diff --git a/include/linux/fprobe.h b/include/linux/fprobe.h index 7964db96e41a..0a3bcd1718f3 100644 --- a/include/linux/fprobe.h +++ b/include/linux/fprobe.h @@ -7,6 +7,7 @@ #include <linux/ftrace.h> #include <linux/rcupdate.h> #include <linux/refcount.h> +#include <linux/rhashtable.h> #include <linux/slab.h> struct fprobe; @@ -26,7 +27,7 @@ typedef void (*fprobe_exit_cb)(struct fprobe *fp, unsigned long entry_ip, * @fp: The fprobe which owns this. */ struct fprobe_hlist_node { - struct hlist_node hlist; + struct rhlist_head hlist; unsigned long addr; struct fprobe *fp; }; diff --git a/include/linux/fs.h b/include/linux/fs.h index ce25feb06727..04ceeca12a0d 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -2041,14 +2041,14 @@ static inline bool can_mmap_file(struct file *file) return true; } -int __compat_vma_mmap_prepare(const struct file_operations *f_op, +int __compat_vma_mmap(const struct file_operations *f_op, struct file *file, struct vm_area_struct *vma); -int compat_vma_mmap_prepare(struct file *file, struct vm_area_struct *vma); +int compat_vma_mmap(struct file *file, struct vm_area_struct *vma); static inline int vfs_mmap(struct file *file, struct vm_area_struct *vma) { if (file->f_op->mmap_prepare) - return compat_vma_mmap_prepare(file, vma); + return compat_vma_mmap(file, vma); return file->f_op->mmap(file, vma); } @@ -2310,7 +2310,6 @@ void retire_super(struct super_block *sb); void generic_shutdown_super(struct super_block *sb); void kill_block_super(struct super_block *sb); void kill_anon_super(struct super_block *sb); -void kill_litter_super(struct super_block *sb); void deactivate_super(struct super_block *sb); void deactivate_locked_super(struct super_block *sb); int set_anon_super(struct super_block *s, void *data); @@ -3191,6 +3190,8 @@ extern int simple_open(struct inode *inode, struct file *file); extern int simple_link(struct dentry *, struct inode *, struct dentry *); extern int simple_unlink(struct inode *, struct dentry *); extern int simple_rmdir(struct inode *, struct dentry *); +extern void __simple_unlink(struct inode *, struct dentry *); +extern void __simple_rmdir(struct inode *, struct dentry *); void simple_rename_timestamp(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry); extern int simple_rename_exchange(struct inode *old_dir, struct dentry *old_dentry, @@ -3200,6 +3201,8 @@ extern int simple_rename(struct mnt_idmap *, struct inode *, unsigned int); extern void simple_recursive_removal(struct dentry *, void (*callback)(struct dentry *)); +extern void simple_remove_by_name(struct dentry *, const char *, + void (*callback)(struct dentry *)); extern void locked_recursive_removal(struct dentry *, void (*callback)(struct dentry *)); extern int noop_fsync(struct file *, loff_t, loff_t, int); @@ -3229,6 +3232,7 @@ extern int simple_fill_super(struct super_block *, unsigned long, extern int simple_pin_fs(struct file_system_type *, struct vfsmount **mount, int *count); extern void 
simple_release_fs(struct vfsmount **mount, int *count); struct dentry *simple_start_creating(struct dentry *, const char *); +void simple_done_creating(struct dentry *); extern ssize_t simple_read_from_buffer(void __user *to, size_t count, loff_t *ppos, const void *from, size_t available); diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index 015dd1049bea..770f0dc993cc 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -1167,17 +1167,14 @@ static inline void ftrace_init(void) { } */ struct ftrace_graph_ent { unsigned long func; /* Current function */ - int depth; + unsigned long depth; } __packed; /* * Structure that defines an entry function trace with retaddr. - * It's already packed but the attribute "packed" is needed - * to remove extra padding at the end. */ struct fgraph_retaddr_ent { - unsigned long func; /* Current function */ - int depth; + struct ftrace_graph_ent ent; unsigned long retaddr; /* Return address */ } __packed; diff --git a/include/linux/generic_pt/common.h b/include/linux/generic_pt/common.h new file mode 100644 index 000000000000..6a9a1acb5aad --- /dev/null +++ b/include/linux/generic_pt/common.h @@ -0,0 +1,191 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2024-2025, NVIDIA CORPORATION & AFFILIATES + */ +#ifndef __GENERIC_PT_COMMON_H +#define __GENERIC_PT_COMMON_H + +#include <linux/types.h> +#include <linux/build_bug.h> +#include <linux/bits.h> + +/** + * DOC: Generic Radix Page Table + * + * Generic Radix Page Table is a set of functions and helpers to efficiently + * parse radix style page tables typically seen in HW implementations. The + * interface is built to deliver similar code generation as the mm's pte/pmd/etc + * system by fully inlining the exact code required to handle each table level. + * + * Like the mm subsystem each format contributes its parsing implementation + * under common names and the common code implements the required algorithms. + * + * The system is divided into three logical levels: + * + * - The page table format and its manipulation functions + * - Generic helpers to give a consistent API regardless of underlying format + * - An algorithm implementation (e.g. IOMMU/DRM/KVM/MM) + * + * Multiple implementations are supported. The intention is to have the generic + * format code be re-usable for whatever specialized implementation is required. + * The generic code is solely about the format of the radix tree; it does not + * include memory allocation or higher level decisions that are left for the + * implementation. + * + * The generic framework supports a superset of functions across many HW + * implementations: + * + * - Entries comprised of contiguous blocks of IO PTEs for larger page sizes + * - Multi-level tables, up to 6 levels. Runtime selected top level + * - Runtime variable table level size (ARM's concatenated tables) + * - Expandable top level allowing dynamic sizing of table levels + * - Optional leaf entries at any level + * - 32-bit/64-bit virtual and output addresses, using every address bit + * - Dirty tracking + * - Sign extended addressing + */ + +/** + * struct pt_common - struct for all page table implementations + */ +struct pt_common { + /** + * @top_of_table: Encodes the table top pointer and the top level in a + * single value. Must use READ_ONCE/WRITE_ONCE to access it. The lower + * bits of the aligned table pointer are used for the level. + */ + uintptr_t top_of_table; + /** + * @max_oasz_lg2: Maximum number of bits the OA can contain. 
Upper bits + * must be zero. This may be less than what the page table format + * supports, but must not be more. + */ + u8 max_oasz_lg2; + /** + * @max_vasz_lg2: Maximum number of bits the VA can contain. Upper bits + * are 0 or 1 depending on pt_full_va_prefix(). This may be less than + * what the page table format supports, but must not be more. When + * PT_FEAT_DYNAMIC_TOP is set this reflects the maximum VA capability. + */ + u8 max_vasz_lg2; + /** + * @features: Bitmap of `enum pt_features` + */ + unsigned int features; +}; + +/* Encoding parameters for top_of_table */ +enum { + PT_TOP_LEVEL_BITS = 3, + PT_TOP_LEVEL_MASK = GENMASK(PT_TOP_LEVEL_BITS - 1, 0), +}; + +/** + * enum pt_features - Features turned on in the table. Each symbol is a bit + * position. + */ +enum pt_features { + /** + * @PT_FEAT_DMA_INCOHERENT: Cache flush page table memory before + * assuming the HW can read it. Otherwise a SMP release is sufficient + * for HW to read it. + */ + PT_FEAT_DMA_INCOHERENT, + /** + * @PT_FEAT_FULL_VA: The table can span the full VA range from 0 to + * PT_VADDR_MAX. + */ + PT_FEAT_FULL_VA, + /** + * @PT_FEAT_DYNAMIC_TOP: The table's top level can be increased + * dynamically during map. This requires HW support for atomically + * setting both the table top pointer and the starting table level. + */ + PT_FEAT_DYNAMIC_TOP, + /** + * @PT_FEAT_SIGN_EXTEND: The top most bit of the valid VA range sign + * extends up to the full pt_vaddr_t. This divides the page table into + * three VA ranges:: + * + * 0 -> 2^N - 1 Lower + * 2^N -> (MAX - 2^N - 1) Non-Canonical + * MAX - 2^N -> MAX Upper + * + * In this mode pt_common::max_vasz_lg2 includes the sign bit and the + * upper bits that don't fall within the translation are just validated. + * + * If not set there is no sign extension and valid VA goes from 0 to 2^N + * - 1. + */ + PT_FEAT_SIGN_EXTEND, + /** + * @PT_FEAT_FLUSH_RANGE: IOTLB maintenance is done by flushing IOVA + * ranges which will clean out any walk cache or any IOPTE fully + * contained by the range. The optimization objective is to minimize the + * number of flushes even if ranges include IOVA gaps that do not need + * to be flushed. + */ + PT_FEAT_FLUSH_RANGE, + /** + * @PT_FEAT_FLUSH_RANGE_NO_GAPS: Like PT_FEAT_FLUSH_RANGE except that + * the optimization objective is to only flush IOVA that has been + * changed. This mode is suitable for cases like hypervisor shadowing + * where flushing unchanged ranges may cause the hypervisor to reparse + * significant amount of page table. + */ + PT_FEAT_FLUSH_RANGE_NO_GAPS, + /* private: */ + PT_FEAT_FMT_START, +}; + +struct pt_amdv1 { + struct pt_common common; +}; + +enum { + /* + * The memory backing the tables is encrypted. Use __sme_set() to adjust + * the page table pointers in the tree. This only works with + * CONFIG_AMD_MEM_ENCRYPT. + */ + PT_FEAT_AMDV1_ENCRYPT_TABLES = PT_FEAT_FMT_START, + /* + * The PTEs are set to prevent cache incoherent traffic, such as PCI no + * snoop. This is set either at creation time or before the first map + * operation. + */ + PT_FEAT_AMDV1_FORCE_COHERENCE, +}; + +struct pt_vtdss { + struct pt_common common; +}; + +enum { + /* + * The PTEs are set to prevent cache incoherent traffic, such as PCI no + * snoop. This is set either at creation time or before the first map + * operation. + */ + PT_FEAT_VTDSS_FORCE_COHERENCE = PT_FEAT_FMT_START, + /* + * Prevent creating read-only PTEs. Used to work around HW errata + * ERRATA_772415_SPR17. 
+ */ + PT_FEAT_VTDSS_FORCE_WRITEABLE, +}; + +struct pt_x86_64 { + struct pt_common common; +}; + +enum { + /* + * The memory backing the tables is encrypted. Use __sme_set() to adjust + * the page table pointers in the tree. This only works with + * CONFIG_AMD_MEM_ENCRYPT. + */ + PT_FEAT_X86_64_AMD_ENCRYPT_TABLES = PT_FEAT_FMT_START, +}; + +#endif diff --git a/include/linux/generic_pt/iommu.h b/include/linux/generic_pt/iommu.h new file mode 100644 index 000000000000..9eefbb74efd0 --- /dev/null +++ b/include/linux/generic_pt/iommu.h @@ -0,0 +1,293 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2024-2025, NVIDIA CORPORATION & AFFILIATES + */ +#ifndef __GENERIC_PT_IOMMU_H +#define __GENERIC_PT_IOMMU_H + +#include <linux/generic_pt/common.h> +#include <linux/iommu.h> +#include <linux/mm_types.h> + +struct iommu_iotlb_gather; +struct pt_iommu_ops; +struct pt_iommu_driver_ops; +struct iommu_dirty_bitmap; + +/** + * DOC: IOMMU Radix Page Table + * + * The IOMMU implementation of the Generic Page Table provides an ops struct + * that is useful to go with an iommu_domain to serve the DMA API, IOMMUFD and + * the generic map/unmap interface. + * + * This interface uses a caller provided locking approach. The caller must have + * a VA range lock concept that prevents concurrent threads from calling ops on + * the same VA. Generally the range lock must be at least as large as a single + * map call. + */ + +/** + * struct pt_iommu - Base structure for IOMMU page tables + * + * The format-specific struct will include this as the first member. + */ +struct pt_iommu { + /** + * @domain: The core IOMMU domain. The driver should use a union to + * overlay this memory with its previously existing domain struct to + * create an alias. + */ + struct iommu_domain domain; + + /** + * @ops: Function pointers to access the API + */ + const struct pt_iommu_ops *ops; + + /** + * @driver_ops: Function pointers provided by the HW driver to help + * manage HW details like caches. + */ + const struct pt_iommu_driver_ops *driver_ops; + + /** + * @nid: Node ID to use for table memory allocations. The IOMMU driver + * may want to set the NID to the device's NID, if there are multiple + * table walkers. + */ + int nid; + + /** + * @iommu_device: Device pointer used for any DMA cache flushing when + * PT_FEAT_DMA_INCOHERENT. This is the iommu device that created the + * page table which must have dma ops that perform cache flushing. + */ + struct device *iommu_device; +}; + +/** + * struct pt_iommu_info - Details about the IOMMU page table + * + * Returned from pt_iommu_ops->get_info() + */ +struct pt_iommu_info { + /** + * @pgsize_bitmap: A bitmask where each set bit indicates + * a page size that can be natively stored in the page table. + */ + u64 pgsize_bitmap; +}; + +struct pt_iommu_ops { + /** + * @set_dirty: Make the iova write dirty + * @iommu_table: Table to manipulate + * @iova: IO virtual address to start + * + * This is only used by iommufd testing. It makes the iova dirty so that + * read_and_clear_dirty() will see it as dirty. Unlike all the other ops + * this one is safe to call without holding any locking. It may return + * -EAGAIN if there is a race. + */ + int (*set_dirty)(struct pt_iommu *iommu_table, dma_addr_t iova); + + /** + * @get_info: Return the pt_iommu_info structure + * @iommu_table: Table to query + * + * Return some basic static information about the page table. 
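+ *
+ * A minimal illustrative call (local variable names are hypothetical)::
+ *
+ *	struct pt_iommu_info info;
+ *
+ *	iommu_table->ops->get_info(iommu_table, &info);
+ *
+ * After the call, info.pgsize_bitmap reports the page sizes the table can
+ * natively store.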
+ */ + void (*get_info)(struct pt_iommu *iommu_table, + struct pt_iommu_info *info); + + /** + * @deinit: Undo a format specific init operation + * @iommu_table: Table to destroy + * + * Release all of the memory. The caller must have already removed the + * table from all HW access and all caches. + */ + void (*deinit)(struct pt_iommu *iommu_table); +}; + +/** + * struct pt_iommu_driver_ops - HW IOTLB cache flushing operations + * + * The IOMMU driver should implement these using container_of(iommu_table) to + * get to it's iommu_domain derived structure. All ops can be called in atomic + * contexts as they are buried under DMA API calls. + */ +struct pt_iommu_driver_ops { + /** + * @change_top: Update the top of table pointer + * @iommu_table: Table to operate on + * @top_paddr: New CPU physical address of the top pointer + * @top_level: IOMMU PT level of the new top + * + * Called under the get_top_lock() spinlock. The driver must update all + * HW references to this domain with a new top address and + * configuration. On return mappings placed in the new top must be + * reachable by the HW. + * + * top_level encodes the level in IOMMU PT format, level 0 is the + * smallest page size increasing from there. This has to be translated + * to any HW specific format. During this call the new top will not be + * visible to any other API. + * + * This op is only used by PT_FEAT_DYNAMIC_TOP, and is required if + * enabled. + */ + void (*change_top)(struct pt_iommu *iommu_table, phys_addr_t top_paddr, + unsigned int top_level); + + /** + * @get_top_lock: lock to hold when changing the table top + * @iommu_table: Table to operate on + * + * Return a lock to hold when changing the table top page table from + * being stored in HW. The lock will be held prior to calling + * change_top() and released once the top is fully visible. + * + * Typically this would be a lock that protects the iommu_domain's + * attachment list. + * + * This op is only used by PT_FEAT_DYNAMIC_TOP, and is required if + * enabled. + */ + spinlock_t *(*get_top_lock)(struct pt_iommu *iommu_table); +}; + +static inline void pt_iommu_deinit(struct pt_iommu *iommu_table) +{ + /* + * It is safe to call pt_iommu_deinit() before an init, or if init + * fails. The ops pointer will only become non-NULL if deinit needs to be + * run. + */ + if (iommu_table->ops) + iommu_table->ops->deinit(iommu_table); +} + +/** + * struct pt_iommu_cfg - Common configuration values for all formats + */ +struct pt_iommu_cfg { + /** + * @features: Features required. Only these features will be turned on. + * The feature list should reflect what the IOMMU HW is capable of. + */ + unsigned int features; + /** + * @hw_max_vasz_lg2: Maximum VA the IOMMU HW can support. This will + * imply the top level of the table. + */ + u8 hw_max_vasz_lg2; + /** + * @hw_max_oasz_lg2: Maximum OA the IOMMU HW can support. The format + * might select a lower maximum OA. 
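+ *
+ * Illustrative setup of an AMDv1 format table (the field values and the
+ * my_domain table pointer are made up for the example)::
+ *
+ *	struct pt_iommu_amdv1_cfg cfg = {
+ *		.common = {
+ *			.features = BIT(PT_FEAT_DYNAMIC_TOP),
+ *			.hw_max_vasz_lg2 = 48,
+ *			.hw_max_oasz_lg2 = 52,
+ *		},
+ *	};
+ *
+ *	ret = pt_iommu_amdv1_init(&my_domain->amdv1, &cfg, GFP_KERNEL);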
+ */ + u8 hw_max_oasz_lg2; +}; + +/* Generate the exported function signatures from iommu_pt.h */ +#define IOMMU_PROTOTYPES(fmt) \ + phys_addr_t pt_iommu_##fmt##_iova_to_phys(struct iommu_domain *domain, \ + dma_addr_t iova); \ + int pt_iommu_##fmt##_map_pages(struct iommu_domain *domain, \ + unsigned long iova, phys_addr_t paddr, \ + size_t pgsize, size_t pgcount, \ + int prot, gfp_t gfp, size_t *mapped); \ + size_t pt_iommu_##fmt##_unmap_pages( \ + struct iommu_domain *domain, unsigned long iova, \ + size_t pgsize, size_t pgcount, \ + struct iommu_iotlb_gather *iotlb_gather); \ + int pt_iommu_##fmt##_read_and_clear_dirty( \ + struct iommu_domain *domain, unsigned long iova, size_t size, \ + unsigned long flags, struct iommu_dirty_bitmap *dirty); \ + int pt_iommu_##fmt##_init(struct pt_iommu_##fmt *table, \ + const struct pt_iommu_##fmt##_cfg *cfg, \ + gfp_t gfp); \ + void pt_iommu_##fmt##_hw_info(struct pt_iommu_##fmt *table, \ + struct pt_iommu_##fmt##_hw_info *info) +#define IOMMU_FORMAT(fmt, member) \ + struct pt_iommu_##fmt { \ + struct pt_iommu iommu; \ + struct pt_##fmt member; \ + }; \ + IOMMU_PROTOTYPES(fmt) + +/* + * A driver uses IOMMU_PT_DOMAIN_OPS to populate the iommu_domain_ops for the + * iommu_pt + */ +#define IOMMU_PT_DOMAIN_OPS(fmt) \ + .iova_to_phys = &pt_iommu_##fmt##_iova_to_phys, \ + .map_pages = &pt_iommu_##fmt##_map_pages, \ + .unmap_pages = &pt_iommu_##fmt##_unmap_pages +#define IOMMU_PT_DIRTY_OPS(fmt) \ + .read_and_clear_dirty = &pt_iommu_##fmt##_read_and_clear_dirty + +/* + * The driver should setup its domain struct like + * union { + * struct iommu_domain domain; + * struct pt_iommu_xxx xx; + * }; + * PT_IOMMU_CHECK_DOMAIN(struct mock_iommu_domain, xx.iommu, domain); + * + * Which creates an alias between driver_domain.domain and + * driver_domain.xx.iommu.domain. This is to avoid a mass rename of existing + * driver_domain.domain users. 
+ */ +#define PT_IOMMU_CHECK_DOMAIN(s, pt_iommu_memb, domain_memb) \ + static_assert(offsetof(s, pt_iommu_memb.domain) == \ + offsetof(s, domain_memb)) + +struct pt_iommu_amdv1_cfg { + struct pt_iommu_cfg common; + unsigned int starting_level; +}; + +struct pt_iommu_amdv1_hw_info { + u64 host_pt_root; + u8 mode; +}; + +IOMMU_FORMAT(amdv1, amdpt); + +/* amdv1_mock is used by the iommufd selftest */ +#define pt_iommu_amdv1_mock pt_iommu_amdv1 +#define pt_iommu_amdv1_mock_cfg pt_iommu_amdv1_cfg +struct pt_iommu_amdv1_mock_hw_info; +IOMMU_PROTOTYPES(amdv1_mock); + +struct pt_iommu_vtdss_cfg { + struct pt_iommu_cfg common; + /* 4 is a 57 bit 5 level table */ + unsigned int top_level; +}; + +struct pt_iommu_vtdss_hw_info { + u64 ssptptr; + u8 aw; +}; + +IOMMU_FORMAT(vtdss, vtdss_pt); + +struct pt_iommu_x86_64_cfg { + struct pt_iommu_cfg common; + /* 4 is a 57 bit 5 level table */ + unsigned int top_level; +}; + +struct pt_iommu_x86_64_hw_info { + u64 gcr3_pt; + u8 levels; +}; + +IOMMU_FORMAT(x86_64, x86_64_pt); + +#undef IOMMU_PROTOTYPES +#undef IOMMU_FORMAT +#endif diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 623bee335383..b155929af5b1 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -387,7 +387,7 @@ extern void free_pages(unsigned long addr, unsigned int order); #define free_page(addr) free_pages((addr), 0) void page_alloc_init_cpuhp(void); -int decay_pcp_high(struct zone *zone, struct per_cpu_pages *pcp); +bool decay_pcp_high(struct zone *zone, struct per_cpu_pages *pcp); void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp); void drain_all_pages(struct zone *zone); void drain_local_pages(struct zone *zone); diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h index 00df68c51405..cafeb7a40ad1 100644 --- a/include/linux/gpio/consumer.h +++ b/include/linux/gpio/consumer.h @@ -167,10 +167,14 @@ int gpiod_cansleep(const struct gpio_desc *desc); int gpiod_to_irq(const struct gpio_desc *desc); int gpiod_set_consumer_name(struct gpio_desc *desc, const char *name); +bool gpiod_is_shared(const struct gpio_desc *desc); + /* Convert between the old gpio_ and new gpiod_ interfaces */ struct gpio_desc *gpio_to_desc(unsigned gpio); int desc_to_gpio(const struct gpio_desc *desc); +int gpiod_hwgpio(const struct gpio_desc *desc); + struct gpio_desc *fwnode_gpiod_get_index(struct fwnode_handle *fwnode, const char *con_id, int index, enum gpiod_flags flags, @@ -520,6 +524,13 @@ static inline int gpiod_set_consumer_name(struct gpio_desc *desc, return -EINVAL; } +static inline bool gpiod_is_shared(const struct gpio_desc *desc) +{ + /* GPIO can never have been requested */ + WARN_ON(desc); + return false; +} + static inline struct gpio_desc *gpio_to_desc(unsigned gpio) { return NULL; diff --git a/include/linux/gpio/legacy-of-mm-gpiochip.h b/include/linux/gpio/legacy-of-mm-gpiochip.h deleted file mode 100644 index 2e2bd3b19cc3..000000000000 --- a/include/linux/gpio/legacy-of-mm-gpiochip.h +++ /dev/null @@ -1,36 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0+ */ -/* - * OF helpers for the old of_mm_gpio_chip, used on ppc32 and nios2, - * do not use in new code. - * - * Copyright (c) 2007-2008 MontaVista Software, Inc. 
- * - * Author: Anton Vorontsov <avorontsov@ru.mvista.com> - */ - -#ifndef __LINUX_GPIO_LEGACY_OF_MM_GPIO_CHIP_H -#define __LINUX_GPIO_LEGACY_OF_MM_GPIO_CHIP_H - -#include <linux/gpio/driver.h> -#include <linux/of.h> - -/* - * OF GPIO chip for memory mapped banks - */ -struct of_mm_gpio_chip { - struct gpio_chip gc; - void (*save_regs)(struct of_mm_gpio_chip *mm_gc); - void __iomem *regs; -}; - -static inline struct of_mm_gpio_chip *to_of_mm_gpio_chip(struct gpio_chip *gc) -{ - return container_of(gc, struct of_mm_gpio_chip, gc); -} - -extern int of_mm_gpiochip_add_data(struct device_node *np, - struct of_mm_gpio_chip *mm_gc, - void *data); -extern void of_mm_gpiochip_remove(struct of_mm_gpio_chip *mm_gc); - -#endif /* __LINUX_GPIO_LEGACY_OF_MM_GPIO_CHIP_H */ diff --git a/include/linux/gpio/regmap.h b/include/linux/gpio/regmap.h index 87983a5f3681..12d154732ca9 100644 --- a/include/linux/gpio/regmap.h +++ b/include/linux/gpio/regmap.h @@ -50,8 +50,8 @@ struct regmap; * @regmap_irq_chip: (Optional) Pointer on an regmap_irq_chip structure. If * set, a regmap-irq device will be created and the IRQ * domain will be set accordingly. - * @regmap_irq_line (Optional) The IRQ the device uses to signal interrupts. - * @regmap_irq_flags (Optional) The IRQF_ flags to use for the interrupt. + * @regmap_irq_line: (Optional) The IRQ the device uses to signal interrupts. + * @regmap_irq_flags: (Optional) The IRQF_ flags to use for the interrupt. * * The ->reg_mask_xlate translates a given base address and GPIO offset to * register and mask pair. The base address is one of the given register diff --git a/include/linux/hid.h b/include/linux/hid.h index a4ddb94e3ee5..dce862cafbbd 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h @@ -984,6 +984,7 @@ extern void hidinput_hid_event(struct hid_device *, struct hid_field *, struct h extern void hidinput_report_event(struct hid_device *hid, struct hid_report *report); extern int hidinput_connect(struct hid_device *hid, unsigned int force); extern void hidinput_disconnect(struct hid_device *); +void hidinput_reset_resume(struct hid_device *hid); struct hid_field *hid_find_field(struct hid_device *hdev, unsigned int report_type, unsigned int application, unsigned int usage); diff --git a/include/linux/hisi_acc_qm.h b/include/linux/hisi_acc_qm.h index c4690e365ade..ca1ec437a3ca 100644 --- a/include/linux/hisi_acc_qm.h +++ b/include/linux/hisi_acc_qm.h @@ -99,6 +99,9 @@ #define QM_DEV_ALG_MAX_LEN 256 +#define QM_MIG_REGION_SEL 0x100198 +#define QM_MIG_REGION_EN BIT(0) + /* uacce mode of the driver */ #define UACCE_MODE_NOUACCE 0 /* don't use uacce */ #define UACCE_MODE_SVA 1 /* use uacce sva mode */ diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index 11cab07f322a..ae7f21aad0ac 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -364,20 +364,35 @@ unsigned long thp_get_unmapped_area_vmflags(struct file *filp, unsigned long add unsigned long len, unsigned long pgoff, unsigned long flags, vm_flags_t vm_flags); +enum split_type { + SPLIT_TYPE_UNIFORM, + SPLIT_TYPE_NON_UNIFORM, +}; + bool can_split_folio(struct folio *folio, int caller_pins, int *pextra_pins); -int split_huge_page_to_list_to_order(struct page *page, struct list_head *list, +int __split_huge_page_to_list_to_order(struct page *page, struct list_head *list, unsigned int new_order); +int folio_split_unmapped(struct folio *folio, unsigned int new_order); int min_order_for_split(struct folio *folio); int split_folio_to_list(struct folio *folio, struct list_head *list); 
-bool uniform_split_supported(struct folio *folio, unsigned int new_order, - bool warns); -bool non_uniform_split_supported(struct folio *folio, unsigned int new_order, - bool warns); +bool folio_split_supported(struct folio *folio, unsigned int new_order, + enum split_type split_type, bool warns); int folio_split(struct folio *folio, unsigned int new_order, struct page *page, struct list_head *list); -/* - * try_folio_split_to_order - try to split a @folio at @page to @new_order using - * non uniform split. + +static inline int split_huge_page_to_list_to_order(struct page *page, struct list_head *list, + unsigned int new_order) +{ + return __split_huge_page_to_list_to_order(page, list, new_order); +} +static inline int split_huge_page_to_order(struct page *page, unsigned int new_order) +{ + return split_huge_page_to_list_to_order(page, NULL, new_order); +} + +/** + * try_folio_split_to_order() - try to split a @folio at @page to @new_order + * using non uniform split. * @folio: folio to be split * @page: split to @new_order at the given page * @new_order: the target split order @@ -387,14 +402,13 @@ int folio_split(struct folio *folio, unsigned int new_order, struct page *page, * folios are put back to LRU list. Use min_order_for_split() to get the lower * bound of @new_order. * - * Return: 0: split is successful, otherwise split failed. + * Return: 0 - split is successful, otherwise split failed. */ static inline int try_folio_split_to_order(struct folio *folio, struct page *page, unsigned int new_order) { - if (!non_uniform_split_supported(folio, new_order, /* warns= */ false)) - return split_huge_page_to_list_to_order(&folio->page, NULL, - new_order); + if (!folio_split_supported(folio, new_order, SPLIT_TYPE_NON_UNIFORM, /* warns= */ false)) + return split_huge_page_to_order(&folio->page, new_order); return folio_split(folio, new_order, page, NULL); } static inline int split_huge_page(struct page *page) @@ -402,14 +416,43 @@ static inline int split_huge_page(struct page *page) return split_huge_page_to_list_to_order(page, NULL, 0); } void deferred_split_folio(struct folio *folio, bool partially_mapped); +#ifdef CONFIG_MEMCG +void reparent_deferred_split_queue(struct mem_cgroup *memcg); +#endif void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, unsigned long address, bool freeze); +/** + * pmd_is_huge() - Is this PMD either a huge PMD entry or a software leaf entry? + * @pmd: The PMD to check. + * + * A huge PMD entry is a non-empty entry which is present and marked huge or a + * software leaf entry. This check be performed without the appropriate locks + * held, in which case the condition should be rechecked after they are + * acquired. + * + * Returns: true if this PMD is huge, false otherwise. + */ +static inline bool pmd_is_huge(pmd_t pmd) +{ + if (pmd_present(pmd)) { + return pmd_trans_huge(pmd); + } else if (!pmd_none(pmd)) { + /* + * Non-present PMDs must be valid huge non-present entries. We + * cannot assert that here due to header dependency issues. 
+ */ + return true; + } + + return false; +} + #define split_huge_pmd(__vma, __pmd, __address) \ do { \ pmd_t *____pmd = (__pmd); \ - if (is_swap_pmd(*____pmd) || pmd_trans_huge(*____pmd)) \ + if (pmd_is_huge(*____pmd)) \ __split_huge_pmd(__vma, __pmd, __address, \ false); \ } while (0) @@ -447,19 +490,14 @@ void vma_adjust_trans_huge(struct vm_area_struct *vma, unsigned long start, spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma); spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma); -static inline int is_swap_pmd(pmd_t pmd) -{ - return !pmd_none(pmd) && !pmd_present(pmd); -} - /* mmap_lock must be held on entry */ static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma) { - if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd)) + if (pmd_is_huge(*pmd)) return __pmd_trans_huge_lock(pmd, vma); - else - return NULL; + + return NULL; } static inline spinlock_t *pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma) @@ -473,6 +511,8 @@ static inline spinlock_t *pud_trans_huge_lock(pud_t *pud, /** * folio_test_pmd_mappable - Can we map this folio with a PMD? * @folio: The folio to test + * + * Return: true - @folio can be mapped, false - @folio cannot be mapped. */ static inline bool folio_test_pmd_mappable(struct folio *folio) { @@ -481,6 +521,8 @@ static inline bool folio_test_pmd_mappable(struct folio *folio) vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf); +vm_fault_t do_huge_pmd_device_private(struct vm_fault *vmf); + extern struct folio *huge_zero_folio; extern unsigned long huge_zero_pfn; @@ -524,6 +566,8 @@ void split_huge_pmd_locked(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd, bool freeze); bool unmap_huge_pmd_locked(struct vm_area_struct *vma, unsigned long addr, pmd_t *pmdp, struct folio *folio); +void map_anon_folio_pmd_nopf(struct folio *folio, pmd_t *pmd, + struct vm_area_struct *vma, unsigned long haddr); #else /* CONFIG_TRANSPARENT_HUGEPAGE */ @@ -576,6 +620,11 @@ split_huge_page_to_list_to_order(struct page *page, struct list_head *list, VM_WARN_ON_ONCE_PAGE(1, page); return -EINVAL; } +static inline int split_huge_page_to_order(struct page *page, unsigned int new_order) +{ + VM_WARN_ON_ONCE_PAGE(1, page); + return -EINVAL; +} static inline int split_huge_page(struct page *page) { VM_WARN_ON_ONCE_PAGE(1, page); @@ -602,6 +651,7 @@ static inline int try_folio_split_to_order(struct folio *folio, } static inline void deferred_split_folio(struct folio *folio, bool partially_mapped) {} +static inline void reparent_deferred_split_queue(struct mem_cgroup *memcg) {} #define split_huge_pmd(__vma, __pmd, __address) \ do { } while (0) @@ -642,10 +692,6 @@ static inline void vma_adjust_trans_huge(struct vm_area_struct *vma, struct vm_area_struct *next) { } -static inline int is_swap_pmd(pmd_t pmd) -{ - return 0; -} static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma) { @@ -662,6 +708,11 @@ static inline vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf) return 0; } +static inline vm_fault_t do_huge_pmd_device_private(struct vm_fault *vmf) +{ + return 0; +} + static inline bool is_huge_zero_folio(const struct folio *folio) { return false; @@ -682,12 +733,6 @@ static inline void mm_put_huge_zero_folio(struct mm_struct *mm) return; } -static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma, - unsigned long addr, pmd_t *pmd, int flags, struct dev_pagemap **pgmap) -{ - return NULL; -} - static inline bool thp_migration_supported(void) { return 
false; @@ -720,6 +765,11 @@ static inline struct folio *get_persistent_huge_zero_folio(void) { return NULL; } + +static inline bool pmd_is_huge(pmd_t pmd) +{ + return false; +} #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ static inline int split_folio_to_list_to_order(struct folio *folio, diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 8e63e46b8e1f..019a1c5281e4 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -150,8 +150,7 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte, struct folio **foliop); #endif /* CONFIG_USERFAULTFD */ long hugetlb_reserve_pages(struct inode *inode, long from, long to, - struct vm_area_struct *vma, - vm_flags_t vm_flags); + struct vm_area_desc *desc, vm_flags_t vm_flags); long hugetlb_unreserve_pages(struct inode *inode, long start, long end, long freed); bool folio_isolate_hugetlb(struct folio *folio, struct list_head *list); @@ -172,7 +171,7 @@ bool hugetlbfs_pagecache_present(struct hstate *h, struct address_space *hugetlb_folio_mapping_lock_write(struct folio *folio); -extern int sysctl_hugetlb_shm_group; +extern int sysctl_hugetlb_shm_group __read_mostly; extern struct list_head huge_boot_pages[MAX_NUMNODES]; void hugetlb_bootmem_alloc(void); @@ -275,11 +274,10 @@ void hugetlb_vma_lock_release(struct kref *kref); long hugetlb_change_protection(struct vm_area_struct *vma, unsigned long address, unsigned long end, pgprot_t newprot, unsigned long cp_flags); -bool is_hugetlb_entry_migration(pte_t pte); -bool is_hugetlb_entry_hwpoisoned(pte_t pte); void hugetlb_unshare_all_pmds(struct vm_area_struct *vma); void fixup_hugetlb_reservations(struct vm_area_struct *vma); void hugetlb_split(struct vm_area_struct *vma, unsigned long addr); +int hugetlb_vma_lock_alloc(struct vm_area_struct *vma); #else /* !CONFIG_HUGETLB_PAGE */ @@ -466,6 +464,11 @@ static inline void fixup_hugetlb_reservations(struct vm_area_struct *vma) static inline void hugetlb_split(struct vm_area_struct *vma, unsigned long addr) {} +static inline int hugetlb_vma_lock_alloc(struct vm_area_struct *vma) +{ + return 0; +} + #endif /* !CONFIG_HUGETLB_PAGE */ #ifndef pgd_write diff --git a/include/linux/hugetlb_inline.h b/include/linux/hugetlb_inline.h index 0660a03d37d9..a27aa0162918 100644 --- a/include/linux/hugetlb_inline.h +++ b/include/linux/hugetlb_inline.h @@ -2,22 +2,27 @@ #ifndef _LINUX_HUGETLB_INLINE_H #define _LINUX_HUGETLB_INLINE_H -#ifdef CONFIG_HUGETLB_PAGE - #include <linux/mm.h> -static inline bool is_vm_hugetlb_page(struct vm_area_struct *vma) +#ifdef CONFIG_HUGETLB_PAGE + +static inline bool is_vm_hugetlb_flags(vm_flags_t vm_flags) { - return !!(vma->vm_flags & VM_HUGETLB); + return !!(vm_flags & VM_HUGETLB); } #else -static inline bool is_vm_hugetlb_page(struct vm_area_struct *vma) +static inline bool is_vm_hugetlb_flags(vm_flags_t vm_flags) { return false; } #endif +static inline bool is_vm_hugetlb_page(struct vm_area_struct *vma) +{ + return is_vm_hugetlb_flags(vma->vm_flags); +} + #endif diff --git a/include/linux/intel-ish-client-if.h b/include/linux/intel-ish-client-if.h index dfbf7d9d7bb5..2cd4f65aaa37 100644 --- a/include/linux/intel-ish-client-if.h +++ b/include/linux/intel-ish-client-if.h @@ -87,6 +87,8 @@ bool ishtp_wait_resume(struct ishtp_device *dev); ishtp_print_log ishtp_trace_callback(struct ishtp_cl_device *cl_device); /* Get device pointer of PCI device for DMA acces */ struct device *ishtp_get_pci_device(struct ishtp_cl_device *cl_device); +/* Get the ISHTP workqueue */ +struct workqueue_struct *ishtp_get_workqueue(struct 
ishtp_cl_device *cl_device); struct ishtp_cl *ishtp_cl_allocate(struct ishtp_cl_device *cl_device); void ishtp_cl_free(struct ishtp_cl *cl); @@ -107,6 +109,7 @@ struct ishtp_device *ishtp_get_ishtp_device(struct ishtp_cl *cl); void ishtp_set_tx_ring_size(struct ishtp_cl *cl, int size); void ishtp_set_rx_ring_size(struct ishtp_cl *cl, int size); void ishtp_set_connection_state(struct ishtp_cl *cl, int state); +int ishtp_get_connection_state(struct ishtp_cl *cl); void ishtp_cl_set_fw_client_id(struct ishtp_cl *cl, int fw_client_id); void ishtp_put_device(struct ishtp_cl_device *cl_dev); diff --git a/include/linux/io-pgtable.h b/include/linux/io-pgtable.h index 8a823c6f2b4a..7a1516011ccf 100644 --- a/include/linux/io-pgtable.h +++ b/include/linux/io-pgtable.h @@ -15,8 +15,6 @@ enum io_pgtable_fmt { ARM_64_LPAE_S2, ARM_V7S, ARM_MALI_LPAE, - AMD_IOMMU_V1, - AMD_IOMMU_V2, APPLE_DART, APPLE_DART2, IO_PGTABLE_NUM_FMTS, diff --git a/include/linux/iommu.h b/include/linux/iommu.h index c30d12e16473..8c66284a91a8 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -751,7 +751,8 @@ struct iommu_ops { * @free: Release the domain after use. */ struct iommu_domain_ops { - int (*attach_dev)(struct iommu_domain *domain, struct device *dev); + int (*attach_dev)(struct iommu_domain *domain, struct device *dev, + struct iommu_domain *old); int (*set_dev_pasid)(struct iommu_domain *domain, struct device *dev, ioasid_t pasid, struct iommu_domain *old); @@ -1134,7 +1135,9 @@ struct iommu_sva { struct iommu_mm_data { u32 pasid; + struct mm_struct *mm; struct list_head sva_domains; + struct list_head mm_list_elm; }; int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode); @@ -1615,6 +1618,7 @@ struct iommu_sva *iommu_sva_bind_device(struct device *dev, struct mm_struct *mm); void iommu_sva_unbind_device(struct iommu_sva *handle); u32 iommu_sva_get_pasid(struct iommu_sva *handle); +void iommu_sva_invalidate_kva_range(unsigned long start, unsigned long end); #else static inline struct iommu_sva * iommu_sva_bind_device(struct device *dev, struct mm_struct *mm) @@ -1639,6 +1643,7 @@ static inline u32 mm_get_enqcmd_pasid(struct mm_struct *mm) } static inline void mm_pasid_drop(struct mm_struct *mm) {} +static inline void iommu_sva_invalidate_kva_range(unsigned long start, unsigned long end) {} #endif /* CONFIG_IOMMU_SVA */ #ifdef CONFIG_IOMMU_IOPF diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h index 2223f95079ce..d45fa19f9e47 100644 --- a/include/linux/irqchip/arm-gic.h +++ b/include/linux/irqchip/arm-gic.h @@ -86,7 +86,13 @@ #define GICH_HCR_EN (1 << 0) #define GICH_HCR_UIE (1 << 1) +#define GICH_HCR_LRENPIE (1 << 2) #define GICH_HCR_NPIE (1 << 3) +#define GICH_HCR_VGrp0EIE (1 << 4) +#define GICH_HCR_VGrp0DIE (1 << 5) +#define GICH_HCR_VGrp1EIE (1 << 6) +#define GICH_HCR_VGrp1DIE (1 << 7) +#define GICH_HCR_EOICOUNT GENMASK(31, 27) #define GICH_LR_VIRTUALID (0x3ff << 0) #define GICH_LR_PHYSID_CPUID_SHIFT (10) diff --git a/include/linux/irqchip/arm-vgic-info.h b/include/linux/irqchip/arm-vgic-info.h index a470a73a805a..67d9d960273b 100644 --- a/include/linux/irqchip/arm-vgic-info.h +++ b/include/linux/irqchip/arm-vgic-info.h @@ -24,6 +24,8 @@ struct gic_kvm_info { enum gic_type type; /* Virtual CPU interface */ struct resource vcpu; + /* GICv2 GICC VA */ + void __iomem *gicc_base; /* Interrupt number */ unsigned int maint_irq; /* No interrupt mask, no need to use the above field */ diff --git a/include/linux/irqchip/riscv-imsic.h 
b/include/linux/irqchip/riscv-imsic.h index 7494952c5518..7f3ff5c5ea53 100644 --- a/include/linux/irqchip/riscv-imsic.h +++ b/include/linux/irqchip/riscv-imsic.h @@ -10,7 +10,6 @@ #include <linux/bitops.h> #include <linux/device.h> #include <linux/fwnode.h> -#include <asm/csr.h> #define IMSIC_MMIO_PAGE_SHIFT 12 #define IMSIC_MMIO_PAGE_SZ BIT(IMSIC_MMIO_PAGE_SHIFT) @@ -86,7 +85,7 @@ static inline const struct imsic_global_config *imsic_get_global_config(void) #endif -#ifdef CONFIG_ACPI +#if IS_ENABLED(CONFIG_ACPI) && IS_ENABLED(CONFIG_RISCV_IMSIC) int imsic_platform_acpi_probe(struct fwnode_handle *fwnode); struct fwnode_handle *imsic_acpi_get_fwnode(struct device *dev); #else diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h index 0d1927da8055..fdef2c155c27 100644 --- a/include/linux/jiffies.h +++ b/include/linux/jiffies.h @@ -611,4 +611,16 @@ extern unsigned long nsecs_to_jiffies(u64 n); #define TIMESTAMP_SIZE 30 +struct ctl_table; +int proc_dointvec_jiffies(const struct ctl_table *table, int dir, void *buffer, + size_t *lenp, loff_t *ppos); +int proc_dointvec_ms_jiffies_minmax(const struct ctl_table *table, int dir, + void *buffer, size_t *lenp, loff_t *ppos); +int proc_dointvec_userhz_jiffies(const struct ctl_table *table, int dir, + void *buffer, size_t *lenp, loff_t *ppos); +int proc_dointvec_ms_jiffies(const struct ctl_table *table, int dir, void *buffer, + size_t *lenp, loff_t *ppos); +int proc_doulongvec_ms_jiffies_minmax(const struct ctl_table *table, int dir, + void *buffer, size_t *lenp, loff_t *ppos); + #endif diff --git a/include/linux/kasan.h b/include/linux/kasan.h index d12e1a5f5a9a..f335c1d7b61d 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -571,11 +571,27 @@ static inline void kasan_init_hw_tags(void) { } #if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS) void kasan_populate_early_vm_area_shadow(void *start, unsigned long size); -int kasan_populate_vmalloc(unsigned long addr, unsigned long size, gfp_t gfp_mask); -void kasan_release_vmalloc(unsigned long start, unsigned long end, +int __kasan_populate_vmalloc(unsigned long addr, unsigned long size, gfp_t gfp_mask); +static inline int kasan_populate_vmalloc(unsigned long addr, + unsigned long size, gfp_t gfp_mask) +{ + if (kasan_enabled()) + return __kasan_populate_vmalloc(addr, size, gfp_mask); + return 0; +} +void __kasan_release_vmalloc(unsigned long start, unsigned long end, unsigned long free_region_start, unsigned long free_region_end, unsigned long flags); +static inline void kasan_release_vmalloc(unsigned long start, unsigned long end, + unsigned long free_region_start, + unsigned long free_region_end, + unsigned long flags) +{ + if (kasan_enabled()) + return __kasan_release_vmalloc(start, end, free_region_start, + free_region_end, flags); +} #else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */ diff --git a/include/linux/kmsan.h b/include/linux/kmsan.h index f2fd221107bb..7da9fd506b39 100644 --- a/include/linux/kmsan.h +++ b/include/linux/kmsan.h @@ -133,6 +133,7 @@ void kmsan_kfree_large(const void *ptr); * @prot: page protection flags used for vmap. * @pages: array of pages. * @page_shift: page_shift passed to vmap_range_noflush(). + * @gfp_mask: gfp_mask to use internally. * * KMSAN maps shadow and origin pages of @pages into contiguous ranges in * vmalloc metadata address range. 
Returns 0 on success, callers must check @@ -142,7 +143,8 @@ int __must_check kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end, pgprot_t prot, struct page **pages, - unsigned int page_shift); + unsigned int page_shift, + gfp_t gfp_mask); /** * kmsan_vunmap_kernel_range_noflush() - Notify KMSAN about a vunmap. @@ -347,7 +349,7 @@ static inline void kmsan_kfree_large(const void *ptr) static inline int __must_check kmsan_vmap_pages_range_noflush( unsigned long start, unsigned long end, pgprot_t prot, - struct page **pages, unsigned int page_shift) + struct page **pages, unsigned int page_shift, gfp_t gfp_mask) { return 0; } diff --git a/include/linux/ksm.h b/include/linux/ksm.h index 067538fc4d58..c982694c987b 100644 --- a/include/linux/ksm.h +++ b/include/linux/ksm.h @@ -17,7 +17,7 @@ #ifdef CONFIG_KSM int ksm_madvise(struct vm_area_struct *vma, unsigned long start, unsigned long end, int advice, vm_flags_t *vm_flags); -vm_flags_t ksm_vma_flags(const struct mm_struct *mm, const struct file *file, +vm_flags_t ksm_vma_flags(struct mm_struct *mm, const struct file *file, vm_flags_t vm_flags); int ksm_enable_merge_any(struct mm_struct *mm); int ksm_disable_merge_any(struct mm_struct *mm); @@ -103,7 +103,7 @@ bool ksm_process_mergeable(struct mm_struct *mm); #else /* !CONFIG_KSM */ -static inline vm_flags_t ksm_vma_flags(const struct mm_struct *mm, +static inline vm_flags_t ksm_vma_flags(struct mm_struct *mm, const struct file *file, vm_flags_t vm_flags) { return vm_flags; diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 5bd76cf394fa..d93f75b05ae2 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -1557,6 +1557,8 @@ long kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg); long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg); +long kvm_arch_vcpu_unlocked_ioctl(struct file *filp, + unsigned int ioctl, unsigned long arg); vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf); int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext); @@ -2437,18 +2439,6 @@ static inline bool kvm_arch_no_poll(struct kvm_vcpu *vcpu) } #endif /* CONFIG_HAVE_KVM_NO_POLL */ -#ifdef CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL -long kvm_arch_vcpu_async_ioctl(struct file *filp, - unsigned int ioctl, unsigned long arg); -#else -static inline long kvm_arch_vcpu_async_ioctl(struct file *filp, - unsigned int ioctl, - unsigned long arg) -{ - return -ENOIOCTLCMD; -} -#endif /* CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL */ - void kvm_arch_guest_memory_reclaimed(struct kvm *kvm); #ifdef CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE diff --git a/include/linux/leafops.h b/include/linux/leafops.h new file mode 100644 index 000000000000..cfafe7a5e7b1 --- /dev/null +++ b/include/linux/leafops.h @@ -0,0 +1,619 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Describes operations that can be performed on software-defined page table + * leaf entries. These are abstracted from the hardware page table entries + * themselves by the softleaf_t type, see mm_types.h. + */ +#ifndef _LINUX_LEAFOPS_H +#define _LINUX_LEAFOPS_H + +#include <linux/mm_types.h> +#include <linux/swapops.h> +#include <linux/swap.h> + +#ifdef CONFIG_MMU + +/* Temporary until swp_entry_t eliminated. */ +#define LEAF_TYPE_SHIFT SWP_TYPE_SHIFT + +enum softleaf_type { + /* Fundamental types. */ + SOFTLEAF_NONE, + SOFTLEAF_SWAP, + /* Migration types. */ + SOFTLEAF_MIGRATION_READ, + SOFTLEAF_MIGRATION_READ_EXCLUSIVE, + SOFTLEAF_MIGRATION_WRITE, + /* Device types. 
*/ + SOFTLEAF_DEVICE_PRIVATE_READ, + SOFTLEAF_DEVICE_PRIVATE_WRITE, + SOFTLEAF_DEVICE_EXCLUSIVE, + /* H/W poison types. */ + SOFTLEAF_HWPOISON, + /* Marker types. */ + SOFTLEAF_MARKER, +}; + +/** + * softleaf_mk_none() - Create an empty ('none') leaf entry. + * Returns: empty leaf entry. + */ +static inline softleaf_t softleaf_mk_none(void) +{ + return ((softleaf_t) { 0 }); +} + +/** + * softleaf_from_pte() - Obtain a leaf entry from a PTE entry. + * @pte: PTE entry. + * + * If @pte is present (therefore not a leaf entry) the function returns an empty + * leaf entry. Otherwise, it returns a leaf entry. + * + * Returns: Leaf entry. + */ +static inline softleaf_t softleaf_from_pte(pte_t pte) +{ + softleaf_t arch_entry; + + if (pte_present(pte) || pte_none(pte)) + return softleaf_mk_none(); + + pte = pte_swp_clear_flags(pte); + arch_entry = __pte_to_swp_entry(pte); + + /* Temporary until swp_entry_t eliminated. */ + return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry)); +} + +/** + * softleaf_to_pte() - Obtain a PTE entry from a leaf entry. + * @entry: Leaf entry. + * + * This generates an architecture-specific PTE entry that can be utilised to + * encode the metadata the leaf entry encodes. + * + * Returns: Architecture-specific PTE entry encoding leaf entry. + */ +static inline pte_t softleaf_to_pte(softleaf_t entry) +{ + /* Temporary until swp_entry_t eliminated. */ + return swp_entry_to_pte(entry); +} + +#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION +/** + * softleaf_from_pmd() - Obtain a leaf entry from a PMD entry. + * @pmd: PMD entry. + * + * If @pmd is present (therefore not a leaf entry) the function returns an empty + * leaf entry. Otherwise, it returns a leaf entry. + * + * Returns: Leaf entry. + */ +static inline softleaf_t softleaf_from_pmd(pmd_t pmd) +{ + softleaf_t arch_entry; + + if (pmd_present(pmd) || pmd_none(pmd)) + return softleaf_mk_none(); + + if (pmd_swp_soft_dirty(pmd)) + pmd = pmd_swp_clear_soft_dirty(pmd); + if (pmd_swp_uffd_wp(pmd)) + pmd = pmd_swp_clear_uffd_wp(pmd); + arch_entry = __pmd_to_swp_entry(pmd); + + /* Temporary until swp_entry_t eliminated. */ + return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry)); +} + +#else + +static inline softleaf_t softleaf_from_pmd(pmd_t pmd) +{ + return softleaf_mk_none(); +} + +#endif + +/** + * softleaf_is_none() - Is the leaf entry empty? + * @entry: Leaf entry. + * + * Empty entries are typically the result of a 'none' page table leaf entry + * being converted to a leaf entry. + * + * Returns: true if the entry is empty, false otherwise. + */ +static inline bool softleaf_is_none(softleaf_t entry) +{ + return entry.val == 0; +} + +/** + * softleaf_type() - Identify the type of leaf entry. + * @entry: Leaf entry. + * + * Returns: the leaf entry type associated with @entry.
+ */ +static inline enum softleaf_type softleaf_type(softleaf_t entry) +{ + unsigned int type_num; + + if (softleaf_is_none(entry)) + return SOFTLEAF_NONE; + + type_num = entry.val >> LEAF_TYPE_SHIFT; + + if (type_num < MAX_SWAPFILES) + return SOFTLEAF_SWAP; + + switch (type_num) { +#ifdef CONFIG_MIGRATION + case SWP_MIGRATION_READ: + return SOFTLEAF_MIGRATION_READ; + case SWP_MIGRATION_READ_EXCLUSIVE: + return SOFTLEAF_MIGRATION_READ_EXCLUSIVE; + case SWP_MIGRATION_WRITE: + return SOFTLEAF_MIGRATION_WRITE; +#endif +#ifdef CONFIG_DEVICE_PRIVATE + case SWP_DEVICE_WRITE: + return SOFTLEAF_DEVICE_PRIVATE_WRITE; + case SWP_DEVICE_READ: + return SOFTLEAF_DEVICE_PRIVATE_READ; + case SWP_DEVICE_EXCLUSIVE: + return SOFTLEAF_DEVICE_EXCLUSIVE; +#endif +#ifdef CONFIG_MEMORY_FAILURE + case SWP_HWPOISON: + return SOFTLEAF_HWPOISON; +#endif + case SWP_PTE_MARKER: + return SOFTLEAF_MARKER; + } + + /* Unknown entry type. */ + VM_WARN_ON_ONCE(1); + return SOFTLEAF_NONE; +} + +/** + * softleaf_is_swap() - Is this leaf entry a swap entry? + * @entry: Leaf entry. + * + * Returns: true if the leaf entry is a swap entry, otherwise false. + */ +static inline bool softleaf_is_swap(softleaf_t entry) +{ + return softleaf_type(entry) == SOFTLEAF_SWAP; +} + +/** + * softleaf_is_migration_write() - Is this leaf entry a writable migration entry? + * @entry: Leaf entry. + * + * Returns: true if the leaf entry is a writable migration entry, otherwise + * false. + */ +static inline bool softleaf_is_migration_write(softleaf_t entry) +{ + return softleaf_type(entry) == SOFTLEAF_MIGRATION_WRITE; +} + +/** + * softleaf_is_migration_read() - Is this leaf entry a readable migration entry? + * @entry: Leaf entry. + * + * Returns: true if the leaf entry is a readable migration entry, otherwise + * false. + */ +static inline bool softleaf_is_migration_read(softleaf_t entry) +{ + return softleaf_type(entry) == SOFTLEAF_MIGRATION_READ; +} + +/** + * softleaf_is_migration_read_exclusive() - Is this leaf entry an exclusive + * readable migration entry? + * @entry: Leaf entry. + * + * Returns: true if the leaf entry is an exclusive readable migration entry, + * otherwise false. + */ +static inline bool softleaf_is_migration_read_exclusive(softleaf_t entry) +{ + return softleaf_type(entry) == SOFTLEAF_MIGRATION_READ_EXCLUSIVE; +} + +/** + * softleaf_is_migration() - Is this leaf entry a migration entry? + * @entry: Leaf entry. + * + * Returns: true if the leaf entry is a migration entry, otherwise false. + */ +static inline bool softleaf_is_migration(softleaf_t entry) +{ + switch (softleaf_type(entry)) { + case SOFTLEAF_MIGRATION_READ: + case SOFTLEAF_MIGRATION_READ_EXCLUSIVE: + case SOFTLEAF_MIGRATION_WRITE: + return true; + default: + return false; + } +} + +/** + * softleaf_is_device_private_write() - Is this leaf entry a device private + * writable entry? + * @entry: Leaf entry. + * + * Returns: true if the leaf entry is a device private writable entry, otherwise + * false. + */ +static inline bool softleaf_is_device_private_write(softleaf_t entry) +{ + return softleaf_type(entry) == SOFTLEAF_DEVICE_PRIVATE_WRITE; +} + +/** + * softleaf_is_device_private() - Is this leaf entry a device private entry? + * @entry: Leaf entry. + * + * Returns: true if the leaf entry is a device private entry, otherwise false. 
+ */ +static inline bool softleaf_is_device_private(softleaf_t entry) +{ + switch (softleaf_type(entry)) { + case SOFTLEAF_DEVICE_PRIVATE_WRITE: + case SOFTLEAF_DEVICE_PRIVATE_READ: + return true; + default: + return false; + } +} + +/** + * softleaf_is_device_exclusive() - Is this leaf entry a device-exclusive entry? + * @entry: Leaf entry. + * + * Returns: true if the leaf entry is a device-exclusive entry, otherwise false. + */ +static inline bool softleaf_is_device_exclusive(softleaf_t entry) +{ + return softleaf_type(entry) == SOFTLEAF_DEVICE_EXCLUSIVE; +} + +/** + * softleaf_is_hwpoison() - Is this leaf entry a hardware poison entry? + * @entry: Leaf entry. + * + * Returns: true if the leaf entry is a hardware poison entry, otherwise false. + */ +static inline bool softleaf_is_hwpoison(softleaf_t entry) +{ + return softleaf_type(entry) == SOFTLEAF_HWPOISON; +} + +/** + * softleaf_is_marker() - Is this leaf entry a marker? + * @entry: Leaf entry. + * + * Returns: true if the leaf entry is a marker entry, otherwise false. + */ +static inline bool softleaf_is_marker(softleaf_t entry) +{ + return softleaf_type(entry) == SOFTLEAF_MARKER; +} + +/** + * softleaf_to_marker() - Obtain marker associated with leaf entry. + * @entry: Leaf entry, softleaf_is_marker(@entry) must return true. + * + * Returns: Marker associated with the leaf entry. + */ +static inline pte_marker softleaf_to_marker(softleaf_t entry) +{ + VM_WARN_ON_ONCE(!softleaf_is_marker(entry)); + + return swp_offset(entry) & PTE_MARKER_MASK; +} + +/** + * softleaf_has_pfn() - Does this leaf entry encode a valid PFN number? + * @entry: Leaf entry. + * + * A pfn swap entry is a special type of swap entry that always has a pfn stored + * in the swap offset. They can either be used to represent unaddressable device + * memory, to restrict access to a page undergoing migration or to represent a + * pfn which has been hwpoisoned and unmapped. + * + * Returns: true if the leaf entry encodes a PFN, otherwise false. + */ +static inline bool softleaf_has_pfn(softleaf_t entry) +{ + /* Make sure the swp offset can always store the needed fields. */ + BUILD_BUG_ON(SWP_TYPE_SHIFT < SWP_PFN_BITS); + + if (softleaf_is_migration(entry)) + return true; + if (softleaf_is_device_private(entry)) + return true; + if (softleaf_is_device_exclusive(entry)) + return true; + if (softleaf_is_hwpoison(entry)) + return true; + + return false; +} + +/** + * softleaf_to_pfn() - Obtain PFN encoded within leaf entry. + * @entry: Leaf entry, softleaf_has_pfn(@entry) must return true. + * + * Returns: The PFN associated with the leaf entry. + */ +static inline unsigned long softleaf_to_pfn(softleaf_t entry) +{ + VM_WARN_ON_ONCE(!softleaf_has_pfn(entry)); + + /* Temporary until swp_entry_t eliminated. */ + return swp_offset(entry) & SWP_PFN_MASK; +} + +/** + * softleaf_to_page() - Obtains struct page for PFN encoded within leaf entry. + * @entry: Leaf entry, softleaf_has_pfn(@entry) must return true. + * + * Returns: Pointer to the struct page associated with the leaf entry's PFN. + */ +static inline struct page *softleaf_to_page(softleaf_t entry) +{ + struct page *page = pfn_to_page(softleaf_to_pfn(entry)); + + VM_WARN_ON_ONCE(!softleaf_has_pfn(entry)); + /* + * Any use of migration entries may only occur while the + * corresponding page is locked + */ + VM_WARN_ON_ONCE(softleaf_is_migration(entry) && !PageLocked(page)); + + return page; +} + +/** + * softleaf_to_folio() - Obtains struct folio for PFN encoded within leaf entry. 
+ * @entry: Leaf entry, softleaf_has_pfn(@entry) must return true. + * + * Returns: Pointer to the struct folio associated with the leaf entry's PFN. + */ +static inline struct folio *softleaf_to_folio(softleaf_t entry) +{ + struct folio *folio = pfn_folio(softleaf_to_pfn(entry)); + + VM_WARN_ON_ONCE(!softleaf_has_pfn(entry)); + /* + * Any use of migration entries may only occur while the + * corresponding folio is locked. + */ + VM_WARN_ON_ONCE(softleaf_is_migration(entry) && + !folio_test_locked(folio)); + + return folio; +} + +/** + * softleaf_is_poison_marker() - Is this leaf entry a poison marker? + * @entry: Leaf entry. + * + * The poison marker is set via UFFDIO_POISON. Userfaultfd-specific. + * + * Returns: true if the leaf entry is a poison marker, otherwise false. + */ +static inline bool softleaf_is_poison_marker(softleaf_t entry) +{ + if (!softleaf_is_marker(entry)) + return false; + + return softleaf_to_marker(entry) & PTE_MARKER_POISONED; +} + +/** + * softleaf_is_guard_marker() - Is this leaf entry a guard region marker? + * @entry: Leaf entry. + * + * Returns: true if the leaf entry is a guard marker, otherwise false. + */ +static inline bool softleaf_is_guard_marker(softleaf_t entry) +{ + if (!softleaf_is_marker(entry)) + return false; + + return softleaf_to_marker(entry) & PTE_MARKER_GUARD; +} + +/** + * softleaf_is_uffd_wp_marker() - Is this leaf entry a userfaultfd write protect + * marker? + * @entry: Leaf entry. + * + * Userfaultfd-specific. + * + * Returns: true if the leaf entry is a UFFD WP marker, otherwise false. + */ +static inline bool softleaf_is_uffd_wp_marker(softleaf_t entry) +{ + if (!softleaf_is_marker(entry)) + return false; + + return softleaf_to_marker(entry) & PTE_MARKER_UFFD_WP; +} + +#ifdef CONFIG_MIGRATION + +/** + * softleaf_is_migration_young() - Does this migration entry contain an accessed + * bit? + * @entry: Leaf entry. + * + * If the architecture can support storing A/D bits in migration entries, this + * determines whether the accessed (or 'young') bit was set on the migrated page + * table entry. + * + * Returns: true if the entry contains an accessed bit, otherwise false. + */ +static inline bool softleaf_is_migration_young(softleaf_t entry) +{ + VM_WARN_ON_ONCE(!softleaf_is_migration(entry)); + + if (migration_entry_supports_ad()) + return swp_offset(entry) & SWP_MIG_YOUNG; + /* Keep the old behavior of aging page after migration */ + return false; +} + +/** + * softleaf_is_migration_dirty() - Does this migration entry contain a dirty bit? + * @entry: Leaf entry. + * + * If the architecture can support storing A/D bits in migration entries, this + * determines whether the dirty bit was set on the migrated page table entry. + * + * Returns: true if the entry contains a dirty bit, otherwise false. + */ +static inline bool softleaf_is_migration_dirty(softleaf_t entry) +{ + VM_WARN_ON_ONCE(!softleaf_is_migration(entry)); + + if (migration_entry_supports_ad()) + return swp_offset(entry) & SWP_MIG_DIRTY; + /* Keep the old behavior of clean page after migration */ + return false; +} + +#else /* CONFIG_MIGRATION */ + +static inline bool softleaf_is_migration_young(softleaf_t entry) +{ + return false; +} + +static inline bool softleaf_is_migration_dirty(softleaf_t entry) +{ + return false; +} +#endif /* CONFIG_MIGRATION */ + +/** + * pte_is_marker() - Does the PTE entry encode a marker leaf entry? + * @pte: PTE entry. + * + * Returns: true if this PTE is a marker leaf entry, otherwise false.
+ */ +static inline bool pte_is_marker(pte_t pte) +{ + return softleaf_is_marker(softleaf_from_pte(pte)); +} + +/** + * pte_is_uffd_wp_marker() - Does this PTE entry encode a userfaultfd write + * protect marker leaf entry? + * @pte: PTE entry. + * + * Returns: true if this PTE is a UFFD WP marker leaf entry, otherwise false. + */ +static inline bool pte_is_uffd_wp_marker(pte_t pte) +{ + const softleaf_t entry = softleaf_from_pte(pte); + + return softleaf_is_uffd_wp_marker(entry); +} + +/** + * pte_is_uffd_marker() - Does this PTE entry encode a userfault-specific marker + * leaf entry? + * @pte: PTE entry. + * + * It's useful to be able to determine which leaf entries encode UFFD-specific + * markers so we can handle these correctly. + * + * Returns: true if this PTE entry is a UFFD-specific marker, otherwise false. + */ +static inline bool pte_is_uffd_marker(pte_t pte) +{ + const softleaf_t entry = softleaf_from_pte(pte); + + if (!softleaf_is_marker(entry)) + return false; + + /* UFFD WP, poisoned swap entries are UFFD-handled. */ + if (softleaf_is_uffd_wp_marker(entry)) + return true; + if (softleaf_is_poison_marker(entry)) + return true; + + return false; +} + +#if defined(CONFIG_ZONE_DEVICE) && defined(CONFIG_ARCH_ENABLE_THP_MIGRATION) + +/** + * pmd_is_device_private_entry() - Check if PMD contains a device private swap + * entry. + * @pmd: The PMD to check. + * + * Returns true if the PMD contains a swap entry that represents a device private + * page mapping. This is used for zone device private pages that have been + * swapped out but still need special handling during various memory management + * operations. + * + * Return: true if PMD contains device private entry, false otherwise + */ +static inline bool pmd_is_device_private_entry(pmd_t pmd) +{ + return softleaf_is_device_private(softleaf_from_pmd(pmd)); +} + +#else /* CONFIG_ZONE_DEVICE && CONFIG_ARCH_ENABLE_THP_MIGRATION */ + +static inline bool pmd_is_device_private_entry(pmd_t pmd) +{ + return false; +} + +#endif /* CONFIG_ZONE_DEVICE && CONFIG_ARCH_ENABLE_THP_MIGRATION */ + +/** + * pmd_is_migration_entry() - Does this PMD entry encode a migration entry? + * @pmd: PMD entry. + * + * Returns: true if the PMD encodes a migration entry, otherwise false. + */ +static inline bool pmd_is_migration_entry(pmd_t pmd) +{ + return softleaf_is_migration(softleaf_from_pmd(pmd)); +} + +/** + * pmd_is_valid_softleaf() - Is this PMD entry a valid leaf entry? + * @pmd: PMD entry. + * + * PMD leaf entries are valid only if they are device private or migration + * entries. This function asserts that a PMD leaf entry is valid in this + * respect. + * + * Returns: true if the PMD entry is a valid leaf entry, otherwise false. + */ +static inline bool pmd_is_valid_softleaf(pmd_t pmd) +{ + const softleaf_t entry = softleaf_from_pmd(pmd); + + /* Only device private, migration entries valid for PMD.
*/ + return softleaf_is_device_private(entry) || + softleaf_is_migration(entry); +} + +#endif /* CONFIG_MMU */ +#endif /* _LINUX_LEAFOPS_H */ diff --git a/include/linux/libata.h b/include/linux/libata.h index 7a98de1cc995..39534fafa36a 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -75,6 +75,7 @@ enum ata_quirks { __ATA_QUIRK_NO_DMA_LOG, /* Do not use DMA for log read */ __ATA_QUIRK_NOTRIM, /* Do not use TRIM */ __ATA_QUIRK_MAX_SEC_1024, /* Limit max sects to 1024 */ + __ATA_QUIRK_MAX_SEC_8191, /* Limit max sects to 8191 */ __ATA_QUIRK_MAX_TRIM_128M, /* Limit max trim size to 128M */ __ATA_QUIRK_NO_NCQ_ON_ATI, /* Disable NCQ on ATI chipset */ __ATA_QUIRK_NO_LPM_ON_ATI, /* Disable LPM on ATI chipset */ @@ -85,6 +86,45 @@ enum ata_quirks { __ATA_QUIRK_MAX, }; +/* + * Quirk flags: may be set by libata or controller drivers on drives. + * Some quirks may be drive/controller pair dependent. + */ +enum { + ATA_QUIRK_DIAGNOSTIC = (1U << __ATA_QUIRK_DIAGNOSTIC), + ATA_QUIRK_NODMA = (1U << __ATA_QUIRK_NODMA), + ATA_QUIRK_NONCQ = (1U << __ATA_QUIRK_NONCQ), + ATA_QUIRK_MAX_SEC_128 = (1U << __ATA_QUIRK_MAX_SEC_128), + ATA_QUIRK_BROKEN_HPA = (1U << __ATA_QUIRK_BROKEN_HPA), + ATA_QUIRK_DISABLE = (1U << __ATA_QUIRK_DISABLE), + ATA_QUIRK_HPA_SIZE = (1U << __ATA_QUIRK_HPA_SIZE), + ATA_QUIRK_IVB = (1U << __ATA_QUIRK_IVB), + ATA_QUIRK_STUCK_ERR = (1U << __ATA_QUIRK_STUCK_ERR), + ATA_QUIRK_BRIDGE_OK = (1U << __ATA_QUIRK_BRIDGE_OK), + ATA_QUIRK_ATAPI_MOD16_DMA = (1U << __ATA_QUIRK_ATAPI_MOD16_DMA), + ATA_QUIRK_FIRMWARE_WARN = (1U << __ATA_QUIRK_FIRMWARE_WARN), + ATA_QUIRK_1_5_GBPS = (1U << __ATA_QUIRK_1_5_GBPS), + ATA_QUIRK_NOSETXFER = (1U << __ATA_QUIRK_NOSETXFER), + ATA_QUIRK_BROKEN_FPDMA_AA = (1U << __ATA_QUIRK_BROKEN_FPDMA_AA), + ATA_QUIRK_DUMP_ID = (1U << __ATA_QUIRK_DUMP_ID), + ATA_QUIRK_MAX_SEC_LBA48 = (1U << __ATA_QUIRK_MAX_SEC_LBA48), + ATA_QUIRK_ATAPI_DMADIR = (1U << __ATA_QUIRK_ATAPI_DMADIR), + ATA_QUIRK_NO_NCQ_TRIM = (1U << __ATA_QUIRK_NO_NCQ_TRIM), + ATA_QUIRK_NOLPM = (1U << __ATA_QUIRK_NOLPM), + ATA_QUIRK_WD_BROKEN_LPM = (1U << __ATA_QUIRK_WD_BROKEN_LPM), + ATA_QUIRK_ZERO_AFTER_TRIM = (1U << __ATA_QUIRK_ZERO_AFTER_TRIM), + ATA_QUIRK_NO_DMA_LOG = (1U << __ATA_QUIRK_NO_DMA_LOG), + ATA_QUIRK_NOTRIM = (1U << __ATA_QUIRK_NOTRIM), + ATA_QUIRK_MAX_SEC_1024 = (1U << __ATA_QUIRK_MAX_SEC_1024), + ATA_QUIRK_MAX_SEC_8191 = (1U << __ATA_QUIRK_MAX_SEC_8191), + ATA_QUIRK_MAX_TRIM_128M = (1U << __ATA_QUIRK_MAX_TRIM_128M), + ATA_QUIRK_NO_NCQ_ON_ATI = (1U << __ATA_QUIRK_NO_NCQ_ON_ATI), + ATA_QUIRK_NO_LPM_ON_ATI = (1U << __ATA_QUIRK_NO_LPM_ON_ATI), + ATA_QUIRK_NO_ID_DEV_LOG = (1U << __ATA_QUIRK_NO_ID_DEV_LOG), + ATA_QUIRK_NO_LOG_DIR = (1U << __ATA_QUIRK_NO_LOG_DIR), + ATA_QUIRK_NO_FUA = (1U << __ATA_QUIRK_NO_FUA), +}; + enum { /* various global constants */ LIBATA_MAX_PRD = ATA_MAX_PRD / 2, @@ -390,42 +430,6 @@ enum { */ ATA_EH_CMD_TIMEOUT_TABLE_SIZE = 8, - /* - * Quirk flags: may be set by libata or controller drivers on drives. - * Some quirks may be drive/controller pair dependent. 
- */ - ATA_QUIRK_DIAGNOSTIC = (1U << __ATA_QUIRK_DIAGNOSTIC), - ATA_QUIRK_NODMA = (1U << __ATA_QUIRK_NODMA), - ATA_QUIRK_NONCQ = (1U << __ATA_QUIRK_NONCQ), - ATA_QUIRK_MAX_SEC_128 = (1U << __ATA_QUIRK_MAX_SEC_128), - ATA_QUIRK_BROKEN_HPA = (1U << __ATA_QUIRK_BROKEN_HPA), - ATA_QUIRK_DISABLE = (1U << __ATA_QUIRK_DISABLE), - ATA_QUIRK_HPA_SIZE = (1U << __ATA_QUIRK_HPA_SIZE), - ATA_QUIRK_IVB = (1U << __ATA_QUIRK_IVB), - ATA_QUIRK_STUCK_ERR = (1U << __ATA_QUIRK_STUCK_ERR), - ATA_QUIRK_BRIDGE_OK = (1U << __ATA_QUIRK_BRIDGE_OK), - ATA_QUIRK_ATAPI_MOD16_DMA = (1U << __ATA_QUIRK_ATAPI_MOD16_DMA), - ATA_QUIRK_FIRMWARE_WARN = (1U << __ATA_QUIRK_FIRMWARE_WARN), - ATA_QUIRK_1_5_GBPS = (1U << __ATA_QUIRK_1_5_GBPS), - ATA_QUIRK_NOSETXFER = (1U << __ATA_QUIRK_NOSETXFER), - ATA_QUIRK_BROKEN_FPDMA_AA = (1U << __ATA_QUIRK_BROKEN_FPDMA_AA), - ATA_QUIRK_DUMP_ID = (1U << __ATA_QUIRK_DUMP_ID), - ATA_QUIRK_MAX_SEC_LBA48 = (1U << __ATA_QUIRK_MAX_SEC_LBA48), - ATA_QUIRK_ATAPI_DMADIR = (1U << __ATA_QUIRK_ATAPI_DMADIR), - ATA_QUIRK_NO_NCQ_TRIM = (1U << __ATA_QUIRK_NO_NCQ_TRIM), - ATA_QUIRK_NOLPM = (1U << __ATA_QUIRK_NOLPM), - ATA_QUIRK_WD_BROKEN_LPM = (1U << __ATA_QUIRK_WD_BROKEN_LPM), - ATA_QUIRK_ZERO_AFTER_TRIM = (1U << __ATA_QUIRK_ZERO_AFTER_TRIM), - ATA_QUIRK_NO_DMA_LOG = (1U << __ATA_QUIRK_NO_DMA_LOG), - ATA_QUIRK_NOTRIM = (1U << __ATA_QUIRK_NOTRIM), - ATA_QUIRK_MAX_SEC_1024 = (1U << __ATA_QUIRK_MAX_SEC_1024), - ATA_QUIRK_MAX_TRIM_128M = (1U << __ATA_QUIRK_MAX_TRIM_128M), - ATA_QUIRK_NO_NCQ_ON_ATI = (1U << __ATA_QUIRK_NO_NCQ_ON_ATI), - ATA_QUIRK_NO_LPM_ON_ATI = (1U << __ATA_QUIRK_NO_LPM_ON_ATI), - ATA_QUIRK_NO_ID_DEV_LOG = (1U << __ATA_QUIRK_NO_ID_DEV_LOG), - ATA_QUIRK_NO_LOG_DIR = (1U << __ATA_QUIRK_NO_LOG_DIR), - ATA_QUIRK_NO_FUA = (1U << __ATA_QUIRK_NO_FUA), - /* User visible DMA mask for DMA control. DO NOT renumber. */ ATA_DMA_MASK_ATA = (1 << 0), /* DMA on ATA Disk */ ATA_DMA_MASK_ATAPI = (1 << 1), /* DMA on ATAPI */ diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 873e510d6f8d..0651865a4564 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -52,6 +52,7 @@ enum memcg_memory_event { MEMCG_SWAP_HIGH, MEMCG_SWAP_MAX, MEMCG_SWAP_FAIL, + MEMCG_SOCK_THROTTLED, MEMCG_NR_MEMORY_EVENTS, }; @@ -956,17 +957,7 @@ unsigned long lruvec_page_state_local(struct lruvec *lruvec, void mem_cgroup_flush_stats(struct mem_cgroup *memcg); void mem_cgroup_flush_stats_ratelimited(struct mem_cgroup *memcg); -void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val); - -static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx, - int val) -{ - unsigned long flags; - - local_irq_save(flags); - __mod_lruvec_kmem_state(p, idx, val); - local_irq_restore(flags); -} +void mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val); void count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx, unsigned long count); @@ -1001,36 +992,8 @@ static inline void count_memcg_event_mm(struct mm_struct *mm, count_memcg_events_mm(mm, idx, 1); } -static inline void __memcg_memory_event(struct mem_cgroup *memcg, - enum memcg_memory_event event, - bool allow_spinning) -{ - bool swap_event = event == MEMCG_SWAP_HIGH || event == MEMCG_SWAP_MAX || - event == MEMCG_SWAP_FAIL; - - /* For now only MEMCG_MAX can happen with !allow_spinning context. 
*/ - VM_WARN_ON_ONCE(!allow_spinning && event != MEMCG_MAX); - - atomic_long_inc(&memcg->memory_events_local[event]); - if (!swap_event && allow_spinning) - cgroup_file_notify(&memcg->events_local_file); - - do { - atomic_long_inc(&memcg->memory_events[event]); - if (allow_spinning) { - if (swap_event) - cgroup_file_notify(&memcg->swap_events_file); - else - cgroup_file_notify(&memcg->events_file); - } - - if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) - break; - if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_LOCAL_EVENTS) - break; - } while ((memcg = parent_mem_cgroup(memcg)) && - !mem_cgroup_is_root(memcg)); -} +void __memcg_memory_event(struct mem_cgroup *memcg, + enum memcg_memory_event event, bool allow_spinning); static inline void memcg_memory_event(struct mem_cgroup *memcg, enum memcg_memory_event event) @@ -1430,14 +1393,6 @@ static inline void mem_cgroup_flush_stats_ratelimited(struct mem_cgroup *memcg) { } -static inline void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, - int val) -{ - struct page *page = virt_to_head_page(p); - - __mod_node_page_state(page_pgdat(page), idx, val); -} - static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val) { @@ -1497,16 +1452,6 @@ struct slabobj_ext { #endif } __aligned(8); -static inline void __inc_lruvec_kmem_state(void *p, enum node_stat_item idx) -{ - __mod_lruvec_kmem_state(p, idx, 1); -} - -static inline void __dec_lruvec_kmem_state(void *p, enum node_stat_item idx) -{ - __mod_lruvec_kmem_state(p, idx, -1); -} - static inline struct lruvec *parent_lruvec(struct lruvec *lruvec) { struct mem_cgroup *memcg; @@ -1674,6 +1619,11 @@ int alloc_shrinker_info(struct mem_cgroup *memcg); void free_shrinker_info(struct mem_cgroup *memcg); void set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id); void reparent_shrinker_deferred(struct mem_cgroup *memcg); + +static inline int shrinker_id(struct shrinker *shrinker) +{ + return shrinker->id; +} #else #define mem_cgroup_sockets_enabled 0 @@ -1705,6 +1655,11 @@ static inline void set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id) { } + +static inline int shrinker_id(struct shrinker *shrinker) +{ + return -1; +} #endif #ifdef CONFIG_MEMCG @@ -1791,6 +1746,13 @@ static inline void count_objcg_events(struct obj_cgroup *objcg, bool mem_cgroup_node_allowed(struct mem_cgroup *memcg, int nid); +void mem_cgroup_show_protected_memory(struct mem_cgroup *memcg); + +static inline bool memcg_is_dying(struct mem_cgroup *memcg) +{ + return memcg ? 
css_is_dying(&memcg->css) : false; +} + #else static inline bool mem_cgroup_kmem_disabled(void) { @@ -1857,6 +1819,15 @@ static inline bool mem_cgroup_node_allowed(struct mem_cgroup *memcg, int nid) { return true; } + +static inline void mem_cgroup_show_protected_memory(struct mem_cgroup *memcg) +{ +} + +static inline bool memcg_is_dying(struct mem_cgroup *memcg) +{ + return false; +} #endif /* CONFIG_MEMCG */ #if defined(CONFIG_MEMCG) && defined(CONFIG_ZSWAP) diff --git a/include/linux/memory-failure.h b/include/linux/memory-failure.h new file mode 100644 index 000000000000..bc326503d2d2 --- /dev/null +++ b/include/linux/memory-failure.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_MEMORY_FAILURE_H +#define _LINUX_MEMORY_FAILURE_H + +#include <linux/interval_tree.h> + +struct pfn_address_space; + +struct pfn_address_space { + struct interval_tree_node node; + struct address_space *mapping; +}; + +int register_pfn_address_space(struct pfn_address_space *pfn_space); +void unregister_pfn_address_space(struct pfn_address_space *pfn_space); + +#endif /* _LINUX_MEMORY_FAILURE_H */ diff --git a/include/linux/memory.h b/include/linux/memory.h index ba1515160894..faeaa921e55b 100644 --- a/include/linux/memory.h +++ b/include/linux/memory.h @@ -64,9 +64,19 @@ struct memory_group { }; }; +enum memory_block_state { + /* These states are exposed to userspace as text strings in sysfs */ + MEM_ONLINE, /* exposed to userspace */ + MEM_GOING_OFFLINE, /* exposed to userspace */ + MEM_OFFLINE, /* exposed to userspace */ + MEM_GOING_ONLINE, + MEM_CANCEL_ONLINE, + MEM_CANCEL_OFFLINE, +}; + struct memory_block { unsigned long start_section_nr; - unsigned long state; /* serialized by the dev->lock */ + enum memory_block_state state; /* serialized by the dev->lock */ int online_type; /* for passing data to online routine */ int nid; /* NID for this memory block */ /* @@ -89,14 +99,6 @@ int arch_get_memory_phys_device(unsigned long start_pfn); unsigned long memory_block_size_bytes(void); int set_memory_block_size_order(unsigned int order); -/* These states are exposed to userspace as text strings in sysfs */ -#define MEM_ONLINE (1<<0) /* exposed to userspace */ -#define MEM_GOING_OFFLINE (1<<1) /* exposed to userspace */ -#define MEM_OFFLINE (1<<2) /* exposed to userspace */ -#define MEM_GOING_ONLINE (1<<3) -#define MEM_CANCEL_ONLINE (1<<4) -#define MEM_CANCEL_OFFLINE (1<<5) - struct memory_notify { unsigned long start_pfn; unsigned long nr_pages; @@ -130,7 +132,7 @@ static inline int register_memory_notifier(struct notifier_block *nb) static inline void unregister_memory_notifier(struct notifier_block *nb) { } -static inline int memory_notify(unsigned long val, void *v) +static inline int memory_notify(enum memory_block_state state, void *v) { return 0; } @@ -154,7 +156,7 @@ int create_memory_block_devices(unsigned long start, unsigned long size, struct memory_group *group); void remove_memory_block_devices(unsigned long start, unsigned long size); extern void memory_dev_init(void); -extern int memory_notify(unsigned long val, void *v); +extern int memory_notify(enum memory_block_state state, void *v); extern struct memory_block *find_memory_block(unsigned long section_nr); typedef int (*walk_memory_blocks_func_t)(struct memory_block *, void *); extern int walk_memory_blocks(unsigned long start, unsigned long size, diff --git a/include/linux/memregion.h b/include/linux/memregion.h index c01321467789..a55f62cc5266 100644 --- a/include/linux/memregion.h +++ b/include/linux/memregion.h 
@@ -26,8 +26,10 @@ static inline void memregion_free(int id) /** * cpu_cache_invalidate_memregion - drop any CPU cached data for - * memregions described by @res_desc - * @res_desc: one of the IORES_DESC_* types + * memregion + * @start: start physical address of the target memory region. + * @len: length of the target memory region. -1 for all the regions of + * the target type. * * Perform cache maintenance after a memory event / operation that * changes the contents of physical memory in a cache-incoherent manner. @@ -46,7 +48,7 @@ static inline void memregion_free(int id) * the cache maintenance. */ #ifdef CONFIG_ARCH_HAS_CPU_CACHE_INVALIDATE_MEMREGION -int cpu_cache_invalidate_memregion(int res_desc); +int cpu_cache_invalidate_memregion(phys_addr_t start, size_t len); bool cpu_cache_has_invalidate_memregion(void); #else static inline bool cpu_cache_has_invalidate_memregion(void) @@ -54,10 +56,16 @@ static inline bool cpu_cache_has_invalidate_memregion(void) return false; } -static inline int cpu_cache_invalidate_memregion(int res_desc) +static inline int cpu_cache_invalidate_memregion(phys_addr_t start, size_t len) { WARN_ON_ONCE("CPU cache invalidation required"); return -ENXIO; } #endif + +static inline int cpu_cache_invalidate_all(void) +{ + return cpu_cache_invalidate_memregion(0, -1); +} + #endif /* _MEMREGION_H_ */ diff --git a/include/linux/memremap.h b/include/linux/memremap.h index 30c7aecbd245..713ec0435b48 100644 --- a/include/linux/memremap.h +++ b/include/linux/memremap.h @@ -76,11 +76,11 @@ enum memory_type { struct dev_pagemap_ops { /* - * Called once the page refcount reaches 0. The reference count will be + * Called once the folio refcount reaches 0. The reference count will be * reset to one by the core code after the method is called to prepare - * for handing out the page again. + * for handing out the folio again. */ - void (*page_free)(struct page *page); + void (*folio_free)(struct folio *folio); /* * Used for private (un-addressable) device memory only. Must migrate @@ -99,6 +99,13 @@ struct dev_pagemap_ops { */ int (*memory_failure)(struct dev_pagemap *pgmap, unsigned long pfn, unsigned long nr_pages, int mf_flags); + + /* + * Used for private (un-addressable) device memory only. 
+ * This callback is used when a folio is split into + * a smaller folio + */ + void (*folio_split)(struct folio *head, struct folio *tail); }; #define PGMAP_ALTMAP_VALID (1 << 0) @@ -176,6 +183,18 @@ static inline bool folio_is_pci_p2pdma(const struct folio *folio) folio->pgmap->type == MEMORY_DEVICE_PCI_P2PDMA; } +static inline void *folio_zone_device_data(const struct folio *folio) +{ + VM_WARN_ON_FOLIO(!folio_is_device_private(folio), folio); + return folio->page.zone_device_data; +} + +static inline void folio_set_zone_device_data(struct folio *folio, void *data) +{ + VM_WARN_ON_FOLIO(!folio_is_device_private(folio), folio); + folio->page.zone_device_data = data; +} + static inline bool is_pci_p2pdma_page(const struct page *page) { return IS_ENABLED(CONFIG_PCI_P2PDMA) && @@ -205,7 +224,7 @@ static inline bool is_fsdax_page(const struct page *page) } #ifdef CONFIG_ZONE_DEVICE -void zone_device_page_init(struct page *page); +void zone_device_page_init(struct page *page, unsigned int order); void *memremap_pages(struct dev_pagemap *pgmap, int nid); void memunmap_pages(struct dev_pagemap *pgmap); void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap); @@ -214,6 +233,31 @@ struct dev_pagemap *get_dev_pagemap(unsigned long pfn); bool pgmap_pfn_valid(struct dev_pagemap *pgmap, unsigned long pfn); unsigned long memremap_compat_align(void); + +static inline void zone_device_folio_init(struct folio *folio, unsigned int order) +{ + zone_device_page_init(&folio->page, order); + if (order) + folio_set_large_rmappable(folio); +} + +static inline void zone_device_private_split_cb(struct folio *original_folio, + struct folio *new_folio) +{ + if (folio_is_device_private(original_folio)) { + if (!original_folio->pgmap->ops->folio_split) { + if (new_folio) { + new_folio->pgmap = original_folio->pgmap; + new_folio->page.mapping = + original_folio->page.mapping; + } + } else { + original_folio->pgmap->ops->folio_split(original_folio, + new_folio); + } + } +} + #else static inline void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap) @@ -247,6 +291,11 @@ static inline unsigned long memremap_compat_align(void) { return PAGE_SIZE; } + +static inline void zone_device_private_split_cb(struct folio *original_folio, + struct folio *new_folio) +{ +} #endif /* CONFIG_ZONE_DEVICE */ static inline void put_dev_pagemap(struct dev_pagemap *pgmap) diff --git a/include/linux/mfd/macsmc.h b/include/linux/mfd/macsmc.h index 6b13f01a8592..cc09ecce0df7 100644 --- a/include/linux/mfd/macsmc.h +++ b/include/linux/mfd/macsmc.h @@ -41,6 +41,7 @@ typedef u32 smc_key; */ #define SMC_KEY(s) (smc_key)(_SMC_KEY(#s)) #define _SMC_KEY(s) (((s)[0] << 24) | ((s)[1] << 16) | ((s)[2] << 8) | (s)[3]) +#define __SMC_KEY(a, b, c, d) (((u32)(a) << 24) | ((u32)(b) << 16) | ((u32)(c) << 8) | ((u32)(d))) #define APPLE_SMC_READABLE BIT(7) #define APPLE_SMC_WRITABLE BIT(6) @@ -149,7 +150,7 @@ int apple_smc_read(struct apple_smc *smc, smc_key key, void *buf, size_t size); * * Return: Zero on success, negative errno on error */ -int apple_smc_write(struct apple_smc *smc, smc_key key, void *buf, size_t size); +int apple_smc_write(struct apple_smc *smc, smc_key key, const void *buf, size_t size); /** * apple_smc_enter_atomic - Enter atomic mode to be able to use apple_smc_write_atomic @@ -176,7 +177,7 @@ int apple_smc_enter_atomic(struct apple_smc *smc); * * Return: Zero on success, negative errno on error */ -int apple_smc_write_atomic(struct apple_smc *smc, smc_key key, void *buf, size_t size); +int 
apple_smc_write_atomic(struct apple_smc *smc, smc_key key, const void *buf, size_t size); /** * apple_smc_rw - Write and then read using the given SMC key @@ -189,7 +190,7 @@ int apple_smc_write_atomic(struct apple_smc *smc, smc_key key, void *buf, size_t * * Return: Zero on success, negative errno on error */ -int apple_smc_rw(struct apple_smc *smc, smc_key key, void *wbuf, size_t wsize, +int apple_smc_rw(struct apple_smc *smc, smc_key key, const void *wbuf, size_t wsize, void *rbuf, size_t rsize); /** diff --git a/include/linux/mfd/pf1550.h b/include/linux/mfd/pf1550.h new file mode 100644 index 000000000000..7cb2340ff2bd --- /dev/null +++ b/include/linux/mfd/pf1550.h @@ -0,0 +1,273 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Declarations for the PF1550 PMIC + * + * Copyright (C) 2016 Freescale Semiconductor, Inc. + * Robin Gong <yibin.gong@freescale.com> + * + * Portions Copyright (c) 2025 Savoir-faire Linux Inc. + * Samuel Kayode <samuel.kayode@savoirfairelinux.com> + */ + +#ifndef __LINUX_MFD_PF1550_H +#define __LINUX_MFD_PF1550_H + +#include <linux/i2c.h> +#include <linux/regmap.h> + +enum pf1550_pmic_reg { + /* PMIC regulator part */ + PF1550_PMIC_REG_DEVICE_ID = 0x00, + PF1550_PMIC_REG_OTP_FLAVOR = 0x01, + PF1550_PMIC_REG_SILICON_REV = 0x02, + + PF1550_PMIC_REG_INT_CATEGORY = 0x06, + PF1550_PMIC_REG_SW_INT_STAT0 = 0x08, + PF1550_PMIC_REG_SW_INT_MASK0 = 0x09, + PF1550_PMIC_REG_SW_INT_SENSE0 = 0x0a, + PF1550_PMIC_REG_SW_INT_STAT1 = 0x0b, + PF1550_PMIC_REG_SW_INT_MASK1 = 0x0c, + PF1550_PMIC_REG_SW_INT_SENSE1 = 0x0d, + PF1550_PMIC_REG_SW_INT_STAT2 = 0x0e, + PF1550_PMIC_REG_SW_INT_MASK2 = 0x0f, + PF1550_PMIC_REG_SW_INT_SENSE2 = 0x10, + PF1550_PMIC_REG_LDO_INT_STAT0 = 0x18, + PF1550_PMIC_REG_LDO_INT_MASK0 = 0x19, + PF1550_PMIC_REG_LDO_INT_SENSE0 = 0x1a, + PF1550_PMIC_REG_TEMP_INT_STAT0 = 0x20, + PF1550_PMIC_REG_TEMP_INT_MASK0 = 0x21, + PF1550_PMIC_REG_TEMP_INT_SENSE0 = 0x22, + PF1550_PMIC_REG_ONKEY_INT_STAT0 = 0x24, + PF1550_PMIC_REG_ONKEY_INT_MASK0 = 0x25, + PF1550_PMIC_REG_ONKEY_INT_SENSE0 = 0x26, + PF1550_PMIC_REG_MISC_INT_STAT0 = 0x28, + PF1550_PMIC_REG_MISC_INT_MASK0 = 0x29, + PF1550_PMIC_REG_MISC_INT_SENSE0 = 0x2a, + + PF1550_PMIC_REG_COINCELL_CONTROL = 0x30, + + PF1550_PMIC_REG_SW1_VOLT = 0x32, + PF1550_PMIC_REG_SW1_STBY_VOLT = 0x33, + PF1550_PMIC_REG_SW1_SLP_VOLT = 0x34, + PF1550_PMIC_REG_SW1_CTRL = 0x35, + PF1550_PMIC_REG_SW1_CTRL1 = 0x36, + PF1550_PMIC_REG_SW2_VOLT = 0x38, + PF1550_PMIC_REG_SW2_STBY_VOLT = 0x39, + PF1550_PMIC_REG_SW2_SLP_VOLT = 0x3a, + PF1550_PMIC_REG_SW2_CTRL = 0x3b, + PF1550_PMIC_REG_SW2_CTRL1 = 0x3c, + PF1550_PMIC_REG_SW3_VOLT = 0x3e, + PF1550_PMIC_REG_SW3_STBY_VOLT = 0x3f, + PF1550_PMIC_REG_SW3_SLP_VOLT = 0x40, + PF1550_PMIC_REG_SW3_CTRL = 0x41, + PF1550_PMIC_REG_SW3_CTRL1 = 0x42, + PF1550_PMIC_REG_VSNVS_CTRL = 0x48, + PF1550_PMIC_REG_VREFDDR_CTRL = 0x4a, + PF1550_PMIC_REG_LDO1_VOLT = 0x4c, + PF1550_PMIC_REG_LDO1_CTRL = 0x4d, + PF1550_PMIC_REG_LDO2_VOLT = 0x4f, + PF1550_PMIC_REG_LDO2_CTRL = 0x50, + PF1550_PMIC_REG_LDO3_VOLT = 0x52, + PF1550_PMIC_REG_LDO3_CTRL = 0x53, + PF1550_PMIC_REG_PWRCTRL0 = 0x58, + PF1550_PMIC_REG_PWRCTRL1 = 0x59, + PF1550_PMIC_REG_PWRCTRL2 = 0x5a, + PF1550_PMIC_REG_PWRCTRL3 = 0x5b, + PF1550_PMIC_REG_SW1_PWRDN_SEQ = 0x5f, + PF1550_PMIC_REG_SW2_PWRDN_SEQ = 0x60, + PF1550_PMIC_REG_SW3_PWRDN_SEQ = 0x61, + PF1550_PMIC_REG_LDO1_PWRDN_SEQ = 0x62, + PF1550_PMIC_REG_LDO2_PWRDN_SEQ = 0x63, + PF1550_PMIC_REG_LDO3_PWRDN_SEQ = 0x64, + PF1550_PMIC_REG_VREFDDR_PWRDN_SEQ = 0x65, + + PF1550_PMIC_REG_STATE_INFO = 0x67, + PF1550_PMIC_REG_I2C_ADDR = 
0x68, + PF1550_PMIC_REG_IO_DRV0 = 0x69, + PF1550_PMIC_REG_IO_DRV1 = 0x6a, + PF1550_PMIC_REG_RC_16MHZ = 0x6b, + PF1550_PMIC_REG_KEY = 0x6f, + + /* Charger part */ + PF1550_CHARG_REG_CHG_INT = 0x80, + PF1550_CHARG_REG_CHG_INT_MASK = 0x82, + PF1550_CHARG_REG_CHG_INT_OK = 0x84, + PF1550_CHARG_REG_VBUS_SNS = 0x86, + PF1550_CHARG_REG_CHG_SNS = 0x87, + PF1550_CHARG_REG_BATT_SNS = 0x88, + PF1550_CHARG_REG_CHG_OPER = 0x89, + PF1550_CHARG_REG_CHG_TMR = 0x8a, + PF1550_CHARG_REG_CHG_EOC_CNFG = 0x8d, + PF1550_CHARG_REG_CHG_CURR_CNFG = 0x8e, + PF1550_CHARG_REG_BATT_REG = 0x8f, + PF1550_CHARG_REG_BATFET_CNFG = 0x91, + PF1550_CHARG_REG_THM_REG_CNFG = 0x92, + PF1550_CHARG_REG_VBUS_INLIM_CNFG = 0x94, + PF1550_CHARG_REG_VBUS_LIN_DPM = 0x95, + PF1550_CHARG_REG_USB_PHY_LDO_CNFG = 0x96, + PF1550_CHARG_REG_DBNC_DELAY_TIME = 0x98, + PF1550_CHARG_REG_CHG_INT_CNFG = 0x99, + PF1550_CHARG_REG_THM_ADJ_SETTING = 0x9a, + PF1550_CHARG_REG_VBUS2SYS_CNFG = 0x9b, + PF1550_CHARG_REG_LED_PWM = 0x9c, + PF1550_CHARG_REG_FAULT_BATFET_CNFG = 0x9d, + PF1550_CHARG_REG_LED_CNFG = 0x9e, + PF1550_CHARG_REG_CHGR_KEY2 = 0x9f, + + PF1550_TEST_REG_FMRADDR = 0xc4, + PF1550_TEST_REG_FMRDATA = 0xc5, + PF1550_TEST_REG_KEY3 = 0xdf, + + PF1550_PMIC_REG_END = 0xff, +}; + +/* One-Time Programmable(OTP) memory */ +enum pf1550_otp_reg { + PF1550_OTP_SW1_SW2 = 0x1e, + PF1550_OTP_SW2_SW3 = 0x1f, +}; + +#define PF1550_DEVICE_ID 0x7c + +/* Keys for reading OTP */ +#define PF1550_OTP_PMIC_KEY 0x15 +#define PF1550_OTP_CHGR_KEY 0x50 +#define PF1550_OTP_TEST_KEY 0xab + +/* Supported charger modes */ +#define PF1550_CHG_BAT_OFF 1 +#define PF1550_CHG_BAT_ON 2 + +#define PF1550_CHG_PRECHARGE 0 +#define PF1550_CHG_CONSTANT_CURRENT 1 +#define PF1550_CHG_CONSTANT_VOL 2 +#define PF1550_CHG_EOC 3 +#define PF1550_CHG_DONE 4 +#define PF1550_CHG_TIMER_FAULT 6 +#define PF1550_CHG_SUSPEND 7 +#define PF1550_CHG_OFF_INV 8 +#define PF1550_CHG_BAT_OVER 9 +#define PF1550_CHG_OFF_TEMP 10 +#define PF1550_CHG_LINEAR_ONLY 12 +#define PF1550_CHG_SNS_MASK 0xf +#define PF1550_CHG_INT_MASK 0x51 + +#define PF1550_BAT_NO_VBUS 0 +#define PF1550_BAT_LOW_THAN_PRECHARG 1 +#define PF1550_BAT_CHARG_FAIL 2 +#define PF1550_BAT_HIGH_THAN_PRECHARG 4 +#define PF1550_BAT_OVER_VOL 5 +#define PF1550_BAT_NO_DETECT 6 +#define PF1550_BAT_SNS_MASK 0x7 + +#define PF1550_VBUS_UVLO BIT(2) +#define PF1550_VBUS_IN2SYS BIT(3) +#define PF1550_VBUS_OVLO BIT(4) +#define PF1550_VBUS_VALID BIT(5) + +#define PF1550_CHARG_REG_BATT_REG_CHGCV_MASK 0x3f +#define PF1550_CHARG_REG_BATT_REG_VMINSYS_SHIFT 6 +#define PF1550_CHARG_REG_BATT_REG_VMINSYS_MASK GENMASK(7, 6) +#define PF1550_CHARG_REG_THM_REG_CNFG_REGTEMP_SHIFT 2 +#define PF1550_CHARG_REG_THM_REG_CNFG_REGTEMP_MASK GENMASK(3, 2) + +#define PF1550_ONKEY_RST_EN BIT(7) + +/* DVS enable masks */ +#define OTP_SW1_DVS_ENB BIT(1) +#define OTP_SW2_DVS_ENB BIT(3) + +/* Top level interrupt masks */ +#define IRQ_REGULATOR (BIT(1) | BIT(2) | BIT(3) | BIT(4) | BIT(6)) +#define IRQ_ONKEY BIT(5) +#define IRQ_CHG BIT(0) + +/* Regulator interrupt masks */ +#define PMIC_IRQ_SW1_LS BIT(0) +#define PMIC_IRQ_SW2_LS BIT(1) +#define PMIC_IRQ_SW3_LS BIT(2) +#define PMIC_IRQ_SW1_HS BIT(0) +#define PMIC_IRQ_SW2_HS BIT(1) +#define PMIC_IRQ_SW3_HS BIT(2) +#define PMIC_IRQ_LDO1_FAULT BIT(0) +#define PMIC_IRQ_LDO2_FAULT BIT(1) +#define PMIC_IRQ_LDO3_FAULT BIT(2) +#define PMIC_IRQ_TEMP_110 BIT(0) +#define PMIC_IRQ_TEMP_125 BIT(1) + +/* Onkey interrupt masks */ +#define ONKEY_IRQ_PUSHI BIT(0) +#define ONKEY_IRQ_1SI BIT(1) +#define ONKEY_IRQ_2SI BIT(2) +#define ONKEY_IRQ_3SI BIT(3) +#define 
ONKEY_IRQ_4SI BIT(4) +#define ONKEY_IRQ_8SI BIT(5) + +/* Charger interrupt masks */ +#define CHARG_IRQ_BAT2SOCI BIT(1) +#define CHARG_IRQ_BATI BIT(2) +#define CHARG_IRQ_CHGI BIT(3) +#define CHARG_IRQ_VBUSI BIT(5) +#define CHARG_IRQ_DPMI BIT(6) +#define CHARG_IRQ_THMI BIT(7) + +enum pf1550_irq { + PF1550_IRQ_CHG, + PF1550_IRQ_REGULATOR, + PF1550_IRQ_ONKEY, +}; + +enum pf1550_pmic_irq { + PF1550_PMIC_IRQ_SW1_LS, + PF1550_PMIC_IRQ_SW2_LS, + PF1550_PMIC_IRQ_SW3_LS, + PF1550_PMIC_IRQ_SW1_HS, + PF1550_PMIC_IRQ_SW2_HS, + PF1550_PMIC_IRQ_SW3_HS, + PF1550_PMIC_IRQ_LDO1_FAULT, + PF1550_PMIC_IRQ_LDO2_FAULT, + PF1550_PMIC_IRQ_LDO3_FAULT, + PF1550_PMIC_IRQ_TEMP_110, + PF1550_PMIC_IRQ_TEMP_125, +}; + +enum pf1550_onkey_irq { + PF1550_ONKEY_IRQ_PUSHI, + PF1550_ONKEY_IRQ_1SI, + PF1550_ONKEY_IRQ_2SI, + PF1550_ONKEY_IRQ_3SI, + PF1550_ONKEY_IRQ_4SI, + PF1550_ONKEY_IRQ_8SI, +}; + +enum pf1550_charg_irq { + PF1550_CHARG_IRQ_BAT2SOCI, + PF1550_CHARG_IRQ_BATI, + PF1550_CHARG_IRQ_CHGI, + PF1550_CHARG_IRQ_VBUSI, + PF1550_CHARG_IRQ_THMI, +}; + +enum pf1550_regulators { + PF1550_SW1, + PF1550_SW2, + PF1550_SW3, + PF1550_VREFDDR, + PF1550_LDO1, + PF1550_LDO2, + PF1550_LDO3, +}; + +struct pf1550_ddata { + struct regmap_irq_chip_data *irq_data_regulator; + struct regmap_irq_chip_data *irq_data_charger; + struct regmap_irq_chip_data *irq_data_onkey; + struct regmap_irq_chip_data *irq_data; + struct regmap *regmap; + struct device *dev; + bool dvs1_enable; + bool dvs2_enable; + int irq; +}; + +#endif /* __LINUX_MFD_PF1550_H */ diff --git a/include/linux/mfd/samsung/irq.h b/include/linux/mfd/samsung/irq.h index b4805cbd949b..8402a5f8e18a 100644 --- a/include/linux/mfd/samsung/irq.h +++ b/include/linux/mfd/samsung/irq.h @@ -57,6 +57,12 @@ enum s2mpa01_irq { #define S2MPA01_IRQ_B24_TSD_MASK (1 << 4) #define S2MPA01_IRQ_B35_TSD_MASK (1 << 5) +enum s2mpg10_common_irq { + /* Top-level (common) block */ + S2MPG10_COMMON_IRQ_PMIC, + S2MPG10_COMMON_IRQ_UNUSED, +}; + enum s2mpg10_irq { /* PMIC */ S2MPG10_IRQ_PWRONF, diff --git a/include/linux/mfd/wl1273-core.h b/include/linux/mfd/wl1273-core.h deleted file mode 100644 index c28cf76d5c31..000000000000 --- a/include/linux/mfd/wl1273-core.h +++ /dev/null @@ -1,277 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * include/linux/mfd/wl1273-core.h - * - * Some definitions for the wl1273 radio receiver/transmitter chip. - * - * Copyright (C) 2010 Nokia Corporation - * Author: Matti J. 
Aaltonen <matti.j.aaltonen@nokia.com> - */ - -#ifndef WL1273_CORE_H -#define WL1273_CORE_H - -#include <linux/i2c.h> -#include <linux/mfd/core.h> - -#define WL1273_FM_DRIVER_NAME "wl1273-fm" -#define RX71_FM_I2C_ADDR 0x22 - -#define WL1273_STEREO_GET 0 -#define WL1273_RSSI_LVL_GET 1 -#define WL1273_IF_COUNT_GET 2 -#define WL1273_FLAG_GET 3 -#define WL1273_RDS_SYNC_GET 4 -#define WL1273_RDS_DATA_GET 5 -#define WL1273_FREQ_SET 10 -#define WL1273_AF_FREQ_SET 11 -#define WL1273_MOST_MODE_SET 12 -#define WL1273_MOST_BLEND_SET 13 -#define WL1273_DEMPH_MODE_SET 14 -#define WL1273_SEARCH_LVL_SET 15 -#define WL1273_BAND_SET 16 -#define WL1273_MUTE_STATUS_SET 17 -#define WL1273_RDS_PAUSE_LVL_SET 18 -#define WL1273_RDS_PAUSE_DUR_SET 19 -#define WL1273_RDS_MEM_SET 20 -#define WL1273_RDS_BLK_B_SET 21 -#define WL1273_RDS_MSK_B_SET 22 -#define WL1273_RDS_PI_MASK_SET 23 -#define WL1273_RDS_PI_SET 24 -#define WL1273_RDS_SYSTEM_SET 25 -#define WL1273_INT_MASK_SET 26 -#define WL1273_SEARCH_DIR_SET 27 -#define WL1273_VOLUME_SET 28 -#define WL1273_AUDIO_ENABLE 29 -#define WL1273_PCM_MODE_SET 30 -#define WL1273_I2S_MODE_CONFIG_SET 31 -#define WL1273_POWER_SET 32 -#define WL1273_INTX_CONFIG_SET 33 -#define WL1273_PULL_EN_SET 34 -#define WL1273_HILO_SET 35 -#define WL1273_SWITCH2FREF 36 -#define WL1273_FREQ_DRIFT_REPORT 37 - -#define WL1273_PCE_GET 40 -#define WL1273_FIRM_VER_GET 41 -#define WL1273_ASIC_VER_GET 42 -#define WL1273_ASIC_ID_GET 43 -#define WL1273_MAN_ID_GET 44 -#define WL1273_TUNER_MODE_SET 45 -#define WL1273_STOP_SEARCH 46 -#define WL1273_RDS_CNTRL_SET 47 - -#define WL1273_WRITE_HARDWARE_REG 100 -#define WL1273_CODE_DOWNLOAD 101 -#define WL1273_RESET 102 - -#define WL1273_FM_POWER_MODE 254 -#define WL1273_FM_INTERRUPT 255 - -/* Transmitter API */ - -#define WL1273_CHANL_SET 55 -#define WL1273_SCAN_SPACING_SET 56 -#define WL1273_REF_SET 57 -#define WL1273_POWER_ENB_SET 90 -#define WL1273_POWER_ATT_SET 58 -#define WL1273_POWER_LEV_SET 59 -#define WL1273_AUDIO_DEV_SET 60 -#define WL1273_PILOT_DEV_SET 61 -#define WL1273_RDS_DEV_SET 62 -#define WL1273_PUPD_SET 91 -#define WL1273_AUDIO_IO_SET 63 -#define WL1273_PREMPH_SET 64 -#define WL1273_MONO_SET 66 -#define WL1273_MUTE 92 -#define WL1273_MPX_LMT_ENABLE 67 -#define WL1273_PI_SET 93 -#define WL1273_ECC_SET 69 -#define WL1273_PTY 70 -#define WL1273_AF 71 -#define WL1273_DISPLAY_MODE 74 -#define WL1273_RDS_REP_SET 77 -#define WL1273_RDS_CONFIG_DATA_SET 98 -#define WL1273_RDS_DATA_SET 99 -#define WL1273_RDS_DATA_ENB 94 -#define WL1273_TA_SET 78 -#define WL1273_TP_SET 79 -#define WL1273_DI_SET 80 -#define WL1273_MS_SET 81 -#define WL1273_PS_SCROLL_SPEED 82 -#define WL1273_TX_AUDIO_LEVEL_TEST 96 -#define WL1273_TX_AUDIO_LEVEL_TEST_THRESHOLD 73 -#define WL1273_TX_AUDIO_INPUT_LEVEL_RANGE_SET 54 -#define WL1273_RX_ANTENNA_SELECT 87 -#define WL1273_I2C_DEV_ADDR_SET 86 -#define WL1273_REF_ERR_CALIB_PARAM_SET 88 -#define WL1273_REF_ERR_CALIB_PERIODICITY_SET 89 -#define WL1273_SOC_INT_TRIGGER 52 -#define WL1273_SOC_AUDIO_PATH_SET 83 -#define WL1273_SOC_PCMI_OVERRIDE 84 -#define WL1273_SOC_I2S_OVERRIDE 85 -#define WL1273_RSSI_BLOCK_SCAN_FREQ_SET 95 -#define WL1273_RSSI_BLOCK_SCAN_START 97 -#define WL1273_RSSI_BLOCK_SCAN_DATA_GET 5 -#define WL1273_READ_FMANT_TUNE_VALUE 104 - -#define WL1273_RDS_OFF 0 -#define WL1273_RDS_ON 1 -#define WL1273_RDS_RESET 2 - -#define WL1273_AUDIO_DIGITAL 0 -#define WL1273_AUDIO_ANALOG 1 - -#define WL1273_MODE_RX BIT(0) -#define WL1273_MODE_TX BIT(1) -#define WL1273_MODE_OFF BIT(2) -#define WL1273_MODE_SUSPENDED BIT(3) - -#define 
WL1273_RADIO_CHILD BIT(0) -#define WL1273_CODEC_CHILD BIT(1) - -#define WL1273_RX_MONO 1 -#define WL1273_RX_STEREO 0 -#define WL1273_TX_MONO 0 -#define WL1273_TX_STEREO 1 - -#define WL1273_MAX_VOLUME 0xffff -#define WL1273_DEFAULT_VOLUME 0x78b8 - -/* I2S protocol, left channel first, data width 16 bits */ -#define WL1273_PCM_DEF_MODE 0x00 - -/* Rx */ -#define WL1273_AUDIO_ENABLE_I2S BIT(0) -#define WL1273_AUDIO_ENABLE_ANALOG BIT(1) - -/* Tx */ -#define WL1273_AUDIO_IO_SET_ANALOG 0 -#define WL1273_AUDIO_IO_SET_I2S 1 - -#define WL1273_PUPD_SET_OFF 0x00 -#define WL1273_PUPD_SET_ON 0x01 -#define WL1273_PUPD_SET_RETENTION 0x10 - -/* I2S mode */ -#define WL1273_IS2_WIDTH_32 0x0 -#define WL1273_IS2_WIDTH_40 0x1 -#define WL1273_IS2_WIDTH_22_23 0x2 -#define WL1273_IS2_WIDTH_23_22 0x3 -#define WL1273_IS2_WIDTH_48 0x4 -#define WL1273_IS2_WIDTH_50 0x5 -#define WL1273_IS2_WIDTH_60 0x6 -#define WL1273_IS2_WIDTH_64 0x7 -#define WL1273_IS2_WIDTH_80 0x8 -#define WL1273_IS2_WIDTH_96 0x9 -#define WL1273_IS2_WIDTH_128 0xa -#define WL1273_IS2_WIDTH 0xf - -#define WL1273_IS2_FORMAT_STD (0x0 << 4) -#define WL1273_IS2_FORMAT_LEFT (0x1 << 4) -#define WL1273_IS2_FORMAT_RIGHT (0x2 << 4) -#define WL1273_IS2_FORMAT_USER (0x3 << 4) - -#define WL1273_IS2_MASTER (0x0 << 6) -#define WL1273_IS2_SLAVEW (0x1 << 6) - -#define WL1273_IS2_TRI_AFTER_SENDING (0x0 << 7) -#define WL1273_IS2_TRI_ALWAYS_ACTIVE (0x1 << 7) - -#define WL1273_IS2_SDOWS_RR (0x0 << 8) -#define WL1273_IS2_SDOWS_RF (0x1 << 8) -#define WL1273_IS2_SDOWS_FR (0x2 << 8) -#define WL1273_IS2_SDOWS_FF (0x3 << 8) - -#define WL1273_IS2_TRI_OPT (0x0 << 10) -#define WL1273_IS2_TRI_ALWAYS (0x1 << 10) - -#define WL1273_IS2_RATE_48K (0x0 << 12) -#define WL1273_IS2_RATE_44_1K (0x1 << 12) -#define WL1273_IS2_RATE_32K (0x2 << 12) -#define WL1273_IS2_RATE_22_05K (0x4 << 12) -#define WL1273_IS2_RATE_16K (0x5 << 12) -#define WL1273_IS2_RATE_12K (0x8 << 12) -#define WL1273_IS2_RATE_11_025 (0x9 << 12) -#define WL1273_IS2_RATE_8K (0xa << 12) -#define WL1273_IS2_RATE (0xf << 12) - -#define WL1273_I2S_DEF_MODE (WL1273_IS2_WIDTH_32 | \ - WL1273_IS2_FORMAT_STD | \ - WL1273_IS2_MASTER | \ - WL1273_IS2_TRI_AFTER_SENDING | \ - WL1273_IS2_SDOWS_RR | \ - WL1273_IS2_TRI_OPT | \ - WL1273_IS2_RATE_48K) - -#define SCHAR_MIN (-128) -#define SCHAR_MAX 127 - -#define WL1273_FR_EVENT BIT(0) -#define WL1273_BL_EVENT BIT(1) -#define WL1273_RDS_EVENT BIT(2) -#define WL1273_BBLK_EVENT BIT(3) -#define WL1273_LSYNC_EVENT BIT(4) -#define WL1273_LEV_EVENT BIT(5) -#define WL1273_IFFR_EVENT BIT(6) -#define WL1273_PI_EVENT BIT(7) -#define WL1273_PD_EVENT BIT(8) -#define WL1273_STIC_EVENT BIT(9) -#define WL1273_MAL_EVENT BIT(10) -#define WL1273_POW_ENB_EVENT BIT(11) -#define WL1273_SCAN_OVER_EVENT BIT(12) -#define WL1273_ERROR_EVENT BIT(13) - -#define TUNER_MODE_STOP_SEARCH 0 -#define TUNER_MODE_PRESET 1 -#define TUNER_MODE_AUTO_SEEK 2 -#define TUNER_MODE_AF 3 -#define TUNER_MODE_AUTO_SEEK_PI 4 -#define TUNER_MODE_AUTO_SEEK_BULK 5 - -#define RDS_BLOCK_SIZE 3 - -struct wl1273_fm_platform_data { - int (*request_resources) (struct i2c_client *client); - void (*free_resources) (void); - void (*enable) (void); - void (*disable) (void); - - u8 forbidden_modes; - unsigned int children; -}; - -#define WL1273_FM_CORE_CELLS 2 - -#define WL1273_BAND_OTHER 0 -#define WL1273_BAND_JAPAN 1 - -#define WL1273_BAND_JAPAN_LOW 76000 -#define WL1273_BAND_JAPAN_HIGH 90000 -#define WL1273_BAND_OTHER_LOW 87500 -#define WL1273_BAND_OTHER_HIGH 108000 - -#define WL1273_BAND_TX_LOW 76000 -#define WL1273_BAND_TX_HIGH 108000 - -struct 
wl1273_core { - struct mfd_cell cells[WL1273_FM_CORE_CELLS]; - struct wl1273_fm_platform_data *pdata; - - unsigned int mode; - unsigned int i2s_mode; - unsigned int volume; - unsigned int audio_mode; - unsigned int channel_number; - struct mutex lock; /* for serializing fm radio operations */ - - struct i2c_client *client; - - int (*read)(struct wl1273_core *core, u8, u16 *); - int (*write)(struct wl1273_core *core, u8, u16); - int (*write_data)(struct wl1273_core *core, u8 *, u16); - int (*set_audio)(struct wl1273_core *core, unsigned int); - int (*set_volume)(struct wl1273_core *core, unsigned int); -}; - -#endif /* ifndef WL1273_CORE_H */ diff --git a/include/linux/migrate.h b/include/linux/migrate.h index 1f0ac122c3bf..26ca00c325d9 100644 --- a/include/linux/migrate.h +++ b/include/linux/migrate.h @@ -65,7 +65,7 @@ bool isolate_folio_to_list(struct folio *folio, struct list_head *list); int migrate_huge_page_move_mapping(struct address_space *mapping, struct folio *dst, struct folio *src); -void migration_entry_wait_on_locked(swp_entry_t entry, spinlock_t *ptl) +void migration_entry_wait_on_locked(softleaf_t entry, spinlock_t *ptl) __releases(ptl); void folio_migrate_flags(struct folio *newfolio, struct folio *folio); int folio_migrate_mapping(struct address_space *mapping, @@ -125,6 +125,7 @@ static inline int migrate_misplaced_folio(struct folio *folio, int node) #define MIGRATE_PFN_VALID (1UL << 0) #define MIGRATE_PFN_MIGRATE (1UL << 1) #define MIGRATE_PFN_WRITE (1UL << 3) +#define MIGRATE_PFN_COMPOUND (1UL << 4) #define MIGRATE_PFN_SHIFT 6 static inline struct page *migrate_pfn_to_page(unsigned long mpfn) @@ -143,6 +144,7 @@ enum migrate_vma_direction { MIGRATE_VMA_SELECT_SYSTEM = 1 << 0, MIGRATE_VMA_SELECT_DEVICE_PRIVATE = 1 << 1, MIGRATE_VMA_SELECT_DEVICE_COHERENT = 1 << 2, + MIGRATE_VMA_SELECT_COMPOUND = 1 << 3, }; struct migrate_vma { diff --git a/include/linux/mm.h b/include/linux/mm.h index 8dc0a07570cc..7a1819c20643 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -105,6 +105,8 @@ extern int mmap_rnd_compat_bits __read_mostly; # endif #endif +#define INVALID_PHYS_ADDR (~(phys_addr_t)0) + #include <asm/page.h> #include <asm/processor.h> @@ -273,178 +275,235 @@ extern unsigned int kobjsize(const void *objp); * vm_flags in vm_area_struct, see mm_types.h. * When changing, update also include/trace/events/mmflags.h */ -#define VM_NONE 0x00000000 -#define VM_READ 0x00000001 /* currently active flags */ -#define VM_WRITE 0x00000002 -#define VM_EXEC 0x00000004 -#define VM_SHARED 0x00000008 +#define VM_NONE 0x00000000 -/* mprotect() hardcodes VM_MAYREAD >> 4 == VM_READ, and so for r/w/x bits. */ -#define VM_MAYREAD 0x00000010 /* limits for mprotect() etc */ -#define VM_MAYWRITE 0x00000020 -#define VM_MAYEXEC 0x00000040 -#define VM_MAYSHARE 0x00000080 +/** + * typedef vma_flag_t - specifies an individual VMA flag by bit number. + * + * This value is made type safe by sparse to avoid passing invalid flag values + * around. + */ +typedef int __bitwise vma_flag_t; -#define VM_GROWSDOWN 0x00000100 /* general info on the segment */ +#define DECLARE_VMA_BIT(name, bitnum) \ + VMA_ ## name ## _BIT = ((__force vma_flag_t)bitnum) +#define DECLARE_VMA_BIT_ALIAS(name, aliased) \ + VMA_ ## name ## _BIT = (VMA_ ## aliased ## _BIT) +enum { + DECLARE_VMA_BIT(READ, 0), + DECLARE_VMA_BIT(WRITE, 1), + DECLARE_VMA_BIT(EXEC, 2), + DECLARE_VMA_BIT(SHARED, 3), + /* mprotect() hardcodes VM_MAYREAD >> 4 == VM_READ, and so for r/w/x bits. 
*/ + DECLARE_VMA_BIT(MAYREAD, 4), /* limits for mprotect() etc. */ + DECLARE_VMA_BIT(MAYWRITE, 5), + DECLARE_VMA_BIT(MAYEXEC, 6), + DECLARE_VMA_BIT(MAYSHARE, 7), + DECLARE_VMA_BIT(GROWSDOWN, 8), /* general info on the segment */ #ifdef CONFIG_MMU -#define VM_UFFD_MISSING 0x00000200 /* missing pages tracking */ -#else /* CONFIG_MMU */ -#define VM_MAYOVERLAY 0x00000200 /* nommu: R/O MAP_PRIVATE mapping that might overlay a file mapping */ -#define VM_UFFD_MISSING 0 + DECLARE_VMA_BIT(UFFD_MISSING, 9),/* missing pages tracking */ +#else + /* nommu: R/O MAP_PRIVATE mapping that might overlay a file mapping */ + DECLARE_VMA_BIT(MAYOVERLAY, 9), #endif /* CONFIG_MMU */ -#define VM_PFNMAP 0x00000400 /* Page-ranges managed without "struct page", just pure PFN */ -#define VM_UFFD_WP 0x00001000 /* wrprotect pages tracking */ - -#define VM_LOCKED 0x00002000 -#define VM_IO 0x00004000 /* Memory mapped I/O or similar */ - - /* Used by sys_madvise() */ -#define VM_SEQ_READ 0x00008000 /* App will access data sequentially */ -#define VM_RAND_READ 0x00010000 /* App will not benefit from clustered reads */ - -#define VM_DONTCOPY 0x00020000 /* Do not copy this vma on fork */ -#define VM_DONTEXPAND 0x00040000 /* Cannot expand with mremap() */ -#define VM_LOCKONFAULT 0x00080000 /* Lock the pages covered when they are faulted in */ -#define VM_ACCOUNT 0x00100000 /* Is a VM accounted object */ -#define VM_NORESERVE 0x00200000 /* should the VM suppress accounting */ -#define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */ -#define VM_SYNC 0x00800000 /* Synchronous page faults */ -#define VM_ARCH_1 0x01000000 /* Architecture-specific flag */ -#define VM_WIPEONFORK 0x02000000 /* Wipe VMA contents in child. */ -#define VM_DONTDUMP 0x04000000 /* Do not include in the core dump */ - + /* Page-ranges managed without "struct page", just pure PFN */ + DECLARE_VMA_BIT(PFNMAP, 10), + DECLARE_VMA_BIT(MAYBE_GUARD, 11), + DECLARE_VMA_BIT(UFFD_WP, 12), /* wrprotect pages tracking */ + DECLARE_VMA_BIT(LOCKED, 13), + DECLARE_VMA_BIT(IO, 14), /* Memory mapped I/O or similar */ + DECLARE_VMA_BIT(SEQ_READ, 15), /* App will access data sequentially */ + DECLARE_VMA_BIT(RAND_READ, 16), /* App will not benefit from clustered reads */ + DECLARE_VMA_BIT(DONTCOPY, 17), /* Do not copy this vma on fork */ + DECLARE_VMA_BIT(DONTEXPAND, 18),/* Cannot expand with mremap() */ + DECLARE_VMA_BIT(LOCKONFAULT, 19),/* Lock pages covered when faulted in */ + DECLARE_VMA_BIT(ACCOUNT, 20), /* Is a VM accounted object */ + DECLARE_VMA_BIT(NORESERVE, 21), /* should the VM suppress accounting */ + DECLARE_VMA_BIT(HUGETLB, 22), /* Huge TLB Page VM */ + DECLARE_VMA_BIT(SYNC, 23), /* Synchronous page faults */ + DECLARE_VMA_BIT(ARCH_1, 24), /* Architecture-specific flag */ + DECLARE_VMA_BIT(WIPEONFORK, 25),/* Wipe VMA contents in child. */ + DECLARE_VMA_BIT(DONTDUMP, 26), /* Do not include in the core dump */ + DECLARE_VMA_BIT(SOFTDIRTY, 27), /* NOT soft dirty clean area */ + DECLARE_VMA_BIT(MIXEDMAP, 28), /* Can contain struct page and pure PFN pages */ + DECLARE_VMA_BIT(HUGEPAGE, 29), /* MADV_HUGEPAGE marked this vma */ + DECLARE_VMA_BIT(NOHUGEPAGE, 30),/* MADV_NOHUGEPAGE marked this vma */ + DECLARE_VMA_BIT(MERGEABLE, 31), /* KSM may merge identical pages */ + /* These bits are reused, we define specific uses below. 
*/ + DECLARE_VMA_BIT(HIGH_ARCH_0, 32), + DECLARE_VMA_BIT(HIGH_ARCH_1, 33), + DECLARE_VMA_BIT(HIGH_ARCH_2, 34), + DECLARE_VMA_BIT(HIGH_ARCH_3, 35), + DECLARE_VMA_BIT(HIGH_ARCH_4, 36), + DECLARE_VMA_BIT(HIGH_ARCH_5, 37), + DECLARE_VMA_BIT(HIGH_ARCH_6, 38), + /* + * This flag is used to connect VFIO to arch specific KVM code. It + * indicates that the memory under this VMA is safe for use with any + * non-cachable memory type inside KVM. Some VFIO devices, on some + * platforms, are thought to be unsafe and can cause machine crashes + * if KVM does not lock down the memory type. + */ + DECLARE_VMA_BIT(ALLOW_ANY_UNCACHED, 39), +#ifdef CONFIG_PPC32 + DECLARE_VMA_BIT_ALIAS(DROPPABLE, ARCH_1), +#else + DECLARE_VMA_BIT(DROPPABLE, 40), +#endif + DECLARE_VMA_BIT(UFFD_MINOR, 41), + DECLARE_VMA_BIT(SEALED, 42), + /* Flags that reuse flags above. */ + DECLARE_VMA_BIT_ALIAS(PKEY_BIT0, HIGH_ARCH_0), + DECLARE_VMA_BIT_ALIAS(PKEY_BIT1, HIGH_ARCH_1), + DECLARE_VMA_BIT_ALIAS(PKEY_BIT2, HIGH_ARCH_2), + DECLARE_VMA_BIT_ALIAS(PKEY_BIT3, HIGH_ARCH_3), + DECLARE_VMA_BIT_ALIAS(PKEY_BIT4, HIGH_ARCH_4), +#if defined(CONFIG_X86_USER_SHADOW_STACK) + /* + * VM_SHADOW_STACK should not be set with VM_SHARED because of lack of + * support core mm. + * + * These VMAs will get a single end guard page. This helps userspace + * protect itself from attacks. A single page is enough for current + * shadow stack archs (x86). See the comments near alloc_shstk() in + * arch/x86/kernel/shstk.c for more details on the guard size. + */ + DECLARE_VMA_BIT_ALIAS(SHADOW_STACK, HIGH_ARCH_5), +#elif defined(CONFIG_ARM64_GCS) + /* + * arm64's Guarded Control Stack implements similar functionality and + * has similar constraints to shadow stacks. + */ + DECLARE_VMA_BIT_ALIAS(SHADOW_STACK, HIGH_ARCH_6), +#endif + DECLARE_VMA_BIT_ALIAS(SAO, ARCH_1), /* Strong Access Ordering (powerpc) */ + DECLARE_VMA_BIT_ALIAS(GROWSUP, ARCH_1), /* parisc */ + DECLARE_VMA_BIT_ALIAS(SPARC_ADI, ARCH_1), /* sparc64 */ + DECLARE_VMA_BIT_ALIAS(ARM64_BTI, ARCH_1), /* arm64 */ + DECLARE_VMA_BIT_ALIAS(ARCH_CLEAR, ARCH_1), /* sparc64, arm64 */ + DECLARE_VMA_BIT_ALIAS(MAPPED_COPY, ARCH_1), /* !CONFIG_MMU */ + DECLARE_VMA_BIT_ALIAS(MTE, HIGH_ARCH_4), /* arm64 */ + DECLARE_VMA_BIT_ALIAS(MTE_ALLOWED, HIGH_ARCH_5),/* arm64 */ +#ifdef CONFIG_STACK_GROWSUP + DECLARE_VMA_BIT_ALIAS(STACK, GROWSUP), + DECLARE_VMA_BIT_ALIAS(STACK_EARLY, GROWSDOWN), +#else + DECLARE_VMA_BIT_ALIAS(STACK, GROWSDOWN), +#endif +}; +#undef DECLARE_VMA_BIT +#undef DECLARE_VMA_BIT_ALIAS + +#define INIT_VM_FLAG(name) BIT((__force int) VMA_ ## name ## _BIT) +#define VM_READ INIT_VM_FLAG(READ) +#define VM_WRITE INIT_VM_FLAG(WRITE) +#define VM_EXEC INIT_VM_FLAG(EXEC) +#define VM_SHARED INIT_VM_FLAG(SHARED) +#define VM_MAYREAD INIT_VM_FLAG(MAYREAD) +#define VM_MAYWRITE INIT_VM_FLAG(MAYWRITE) +#define VM_MAYEXEC INIT_VM_FLAG(MAYEXEC) +#define VM_MAYSHARE INIT_VM_FLAG(MAYSHARE) +#define VM_GROWSDOWN INIT_VM_FLAG(GROWSDOWN) +#ifdef CONFIG_MMU +#define VM_UFFD_MISSING INIT_VM_FLAG(UFFD_MISSING) +#else +#define VM_UFFD_MISSING VM_NONE +#define VM_MAYOVERLAY INIT_VM_FLAG(MAYOVERLAY) +#endif +#define VM_PFNMAP INIT_VM_FLAG(PFNMAP) +#define VM_MAYBE_GUARD INIT_VM_FLAG(MAYBE_GUARD) +#define VM_UFFD_WP INIT_VM_FLAG(UFFD_WP) +#define VM_LOCKED INIT_VM_FLAG(LOCKED) +#define VM_IO INIT_VM_FLAG(IO) +#define VM_SEQ_READ INIT_VM_FLAG(SEQ_READ) +#define VM_RAND_READ INIT_VM_FLAG(RAND_READ) +#define VM_DONTCOPY INIT_VM_FLAG(DONTCOPY) +#define VM_DONTEXPAND INIT_VM_FLAG(DONTEXPAND) +#define VM_LOCKONFAULT 
INIT_VM_FLAG(LOCKONFAULT) +#define VM_ACCOUNT INIT_VM_FLAG(ACCOUNT) +#define VM_NORESERVE INIT_VM_FLAG(NORESERVE) +#define VM_HUGETLB INIT_VM_FLAG(HUGETLB) +#define VM_SYNC INIT_VM_FLAG(SYNC) +#define VM_ARCH_1 INIT_VM_FLAG(ARCH_1) +#define VM_WIPEONFORK INIT_VM_FLAG(WIPEONFORK) +#define VM_DONTDUMP INIT_VM_FLAG(DONTDUMP) #ifdef CONFIG_MEM_SOFT_DIRTY -# define VM_SOFTDIRTY 0x08000000 /* Not soft dirty clean area */ +#define VM_SOFTDIRTY INIT_VM_FLAG(SOFTDIRTY) #else -# define VM_SOFTDIRTY 0 +#define VM_SOFTDIRTY VM_NONE +#endif +#define VM_MIXEDMAP INIT_VM_FLAG(MIXEDMAP) +#define VM_HUGEPAGE INIT_VM_FLAG(HUGEPAGE) +#define VM_NOHUGEPAGE INIT_VM_FLAG(NOHUGEPAGE) +#define VM_MERGEABLE INIT_VM_FLAG(MERGEABLE) +#define VM_STACK INIT_VM_FLAG(STACK) +#ifdef CONFIG_STACK_GROWS_UP +#define VM_STACK_EARLY INIT_VM_FLAG(STACK_EARLY) +#else +#define VM_STACK_EARLY VM_NONE #endif - -#define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */ -#define VM_HUGEPAGE 0x20000000 /* MADV_HUGEPAGE marked this vma */ -#define VM_NOHUGEPAGE 0x40000000 /* MADV_NOHUGEPAGE marked this vma */ -#define VM_MERGEABLE BIT(31) /* KSM may merge identical pages */ - -#ifdef CONFIG_ARCH_USES_HIGH_VMA_FLAGS -#define VM_HIGH_ARCH_BIT_0 32 /* bit only usable on 64-bit architectures */ -#define VM_HIGH_ARCH_BIT_1 33 /* bit only usable on 64-bit architectures */ -#define VM_HIGH_ARCH_BIT_2 34 /* bit only usable on 64-bit architectures */ -#define VM_HIGH_ARCH_BIT_3 35 /* bit only usable on 64-bit architectures */ -#define VM_HIGH_ARCH_BIT_4 36 /* bit only usable on 64-bit architectures */ -#define VM_HIGH_ARCH_BIT_5 37 /* bit only usable on 64-bit architectures */ -#define VM_HIGH_ARCH_BIT_6 38 /* bit only usable on 64-bit architectures */ -#define VM_HIGH_ARCH_0 BIT(VM_HIGH_ARCH_BIT_0) -#define VM_HIGH_ARCH_1 BIT(VM_HIGH_ARCH_BIT_1) -#define VM_HIGH_ARCH_2 BIT(VM_HIGH_ARCH_BIT_2) -#define VM_HIGH_ARCH_3 BIT(VM_HIGH_ARCH_BIT_3) -#define VM_HIGH_ARCH_4 BIT(VM_HIGH_ARCH_BIT_4) -#define VM_HIGH_ARCH_5 BIT(VM_HIGH_ARCH_BIT_5) -#define VM_HIGH_ARCH_6 BIT(VM_HIGH_ARCH_BIT_6) -#endif /* CONFIG_ARCH_USES_HIGH_VMA_FLAGS */ - #ifdef CONFIG_ARCH_HAS_PKEYS -# define VM_PKEY_SHIFT VM_HIGH_ARCH_BIT_0 -# define VM_PKEY_BIT0 VM_HIGH_ARCH_0 -# define VM_PKEY_BIT1 VM_HIGH_ARCH_1 -# define VM_PKEY_BIT2 VM_HIGH_ARCH_2 +#define VM_PKEY_SHIFT ((__force int)VMA_HIGH_ARCH_0_BIT) +/* Despite the naming, these are FLAGS not bits. */ +#define VM_PKEY_BIT0 INIT_VM_FLAG(PKEY_BIT0) +#define VM_PKEY_BIT1 INIT_VM_FLAG(PKEY_BIT1) +#define VM_PKEY_BIT2 INIT_VM_FLAG(PKEY_BIT2) #if CONFIG_ARCH_PKEY_BITS > 3 -# define VM_PKEY_BIT3 VM_HIGH_ARCH_3 +#define VM_PKEY_BIT3 INIT_VM_FLAG(PKEY_BIT3) #else -# define VM_PKEY_BIT3 0 -#endif +#define VM_PKEY_BIT3 VM_NONE +#endif /* CONFIG_ARCH_PKEY_BITS > 3 */ #if CONFIG_ARCH_PKEY_BITS > 4 -# define VM_PKEY_BIT4 VM_HIGH_ARCH_4 +#define VM_PKEY_BIT4 INIT_VM_FLAG(PKEY_BIT4) #else -# define VM_PKEY_BIT4 0 -#endif +#define VM_PKEY_BIT4 VM_NONE +#endif /* CONFIG_ARCH_PKEY_BITS > 4 */ #endif /* CONFIG_ARCH_HAS_PKEYS */ - -#ifdef CONFIG_X86_USER_SHADOW_STACK -/* - * VM_SHADOW_STACK should not be set with VM_SHARED because of lack of - * support core mm. - * - * These VMAs will get a single end guard page. This helps userspace protect - * itself from attacks. A single page is enough for current shadow stack archs - * (x86). See the comments near alloc_shstk() in arch/x86/kernel/shstk.c - * for more details on the guard size. 
- */ -# define VM_SHADOW_STACK VM_HIGH_ARCH_5 -#endif - -#if defined(CONFIG_ARM64_GCS) -/* - * arm64's Guarded Control Stack implements similar functionality and - * has similar constraints to shadow stacks. - */ -# define VM_SHADOW_STACK VM_HIGH_ARCH_6 -#endif - -#ifndef VM_SHADOW_STACK -# define VM_SHADOW_STACK VM_NONE +#if defined(CONFIG_X86_USER_SHADOW_STACK) || defined(CONFIG_ARM64_GCS) +#define VM_SHADOW_STACK INIT_VM_FLAG(SHADOW_STACK) +#else +#define VM_SHADOW_STACK VM_NONE #endif - #if defined(CONFIG_PPC64) -# define VM_SAO VM_ARCH_1 /* Strong Access Ordering (powerpc) */ +#define VM_SAO INIT_VM_FLAG(SAO) #elif defined(CONFIG_PARISC) -# define VM_GROWSUP VM_ARCH_1 +#define VM_GROWSUP INIT_VM_FLAG(GROWSUP) #elif defined(CONFIG_SPARC64) -# define VM_SPARC_ADI VM_ARCH_1 /* Uses ADI tag for access control */ -# define VM_ARCH_CLEAR VM_SPARC_ADI +#define VM_SPARC_ADI INIT_VM_FLAG(SPARC_ADI) +#define VM_ARCH_CLEAR INIT_VM_FLAG(ARCH_CLEAR) #elif defined(CONFIG_ARM64) -# define VM_ARM64_BTI VM_ARCH_1 /* BTI guarded page, a.k.a. GP bit */ -# define VM_ARCH_CLEAR VM_ARM64_BTI +#define VM_ARM64_BTI INIT_VM_FLAG(ARM64_BTI) +#define VM_ARCH_CLEAR INIT_VM_FLAG(ARCH_CLEAR) #elif !defined(CONFIG_MMU) -# define VM_MAPPED_COPY VM_ARCH_1 /* T if mapped copy of data (nommu mmap) */ -#endif - -#if defined(CONFIG_ARM64_MTE) -# define VM_MTE VM_HIGH_ARCH_4 /* Use Tagged memory for access control */ -# define VM_MTE_ALLOWED VM_HIGH_ARCH_5 /* Tagged memory permitted */ -#else -# define VM_MTE VM_NONE -# define VM_MTE_ALLOWED VM_NONE +#define VM_MAPPED_COPY INIT_VM_FLAG(MAPPED_COPY) #endif - #ifndef VM_GROWSUP -# define VM_GROWSUP VM_NONE +#define VM_GROWSUP VM_NONE +#endif +#ifdef CONFIG_ARM64_MTE +#define VM_MTE INIT_VM_FLAG(MTE) +#define VM_MTE_ALLOWED INIT_VM_FLAG(MTE_ALLOWED) +#else +#define VM_MTE VM_NONE +#define VM_MTE_ALLOWED VM_NONE #endif - #ifdef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR -# define VM_UFFD_MINOR_BIT 41 -# define VM_UFFD_MINOR BIT(VM_UFFD_MINOR_BIT) /* UFFD minor faults */ -#else /* !CONFIG_HAVE_ARCH_USERFAULTFD_MINOR */ -# define VM_UFFD_MINOR VM_NONE -#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_MINOR */ - -/* - * This flag is used to connect VFIO to arch specific KVM code. It - * indicates that the memory under this VMA is safe for use with any - * non-cachable memory type inside KVM. Some VFIO devices, on some - * platforms, are thought to be unsafe and can cause machine crashes - * if KVM does not lock down the memory type. 
- */ -#ifdef CONFIG_64BIT -#define VM_ALLOW_ANY_UNCACHED_BIT 39 -#define VM_ALLOW_ANY_UNCACHED BIT(VM_ALLOW_ANY_UNCACHED_BIT) +#define VM_UFFD_MINOR INIT_VM_FLAG(UFFD_MINOR) #else -#define VM_ALLOW_ANY_UNCACHED VM_NONE +#define VM_UFFD_MINOR VM_NONE #endif - #ifdef CONFIG_64BIT -#define VM_DROPPABLE_BIT 40 -#define VM_DROPPABLE BIT(VM_DROPPABLE_BIT) -#elif defined(CONFIG_PPC32) -#define VM_DROPPABLE VM_ARCH_1 +#define VM_ALLOW_ANY_UNCACHED INIT_VM_FLAG(ALLOW_ANY_UNCACHED) +#define VM_SEALED INIT_VM_FLAG(SEALED) #else -#define VM_DROPPABLE VM_NONE +#define VM_ALLOW_ANY_UNCACHED VM_NONE +#define VM_SEALED VM_NONE #endif - -#ifdef CONFIG_64BIT -#define VM_SEALED_BIT 42 -#define VM_SEALED BIT(VM_SEALED_BIT) +#if defined(CONFIG_64BIT) || defined(CONFIG_PPC32) +#define VM_DROPPABLE INIT_VM_FLAG(DROPPABLE) #else -#define VM_SEALED VM_NONE +#define VM_DROPPABLE VM_NONE #endif /* Bits set in the VMA until the stack is in its final location */ @@ -470,12 +529,10 @@ extern unsigned int kobjsize(const void *objp); #define VM_STARTGAP_FLAGS (VM_GROWSDOWN | VM_SHADOW_STACK) -#ifdef CONFIG_STACK_GROWSUP -#define VM_STACK VM_GROWSUP -#define VM_STACK_EARLY VM_GROWSDOWN +#ifdef CONFIG_MSEAL_SYSTEM_MAPPINGS +#define VM_SEALED_SYSMAP VM_SEALED #else -#define VM_STACK VM_GROWSDOWN -#define VM_STACK_EARLY 0 +#define VM_SEALED_SYSMAP VM_NONE #endif #define VM_STACK_FLAGS (VM_STACK | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT) @@ -483,12 +540,26 @@ extern unsigned int kobjsize(const void *objp); /* VMA basic access permission flags */ #define VM_ACCESS_FLAGS (VM_READ | VM_WRITE | VM_EXEC) - /* * Special vmas that are non-mergable, non-mlock()able. */ #define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP) +/* + * Physically remapped pages are special. Tell the + * rest of the world about it: + * VM_IO tells people not to look at these pages + * (accesses can have side effects). + * VM_PFNMAP tells the core MM that the base pages are just + * raw PFN mappings, and do not have a "struct page" associated + * with them. + * VM_DONTEXPAND + * Disable vma merging and expanding with mremap(). + * VM_DONTDUMP + * Omit vma from core dump, even when VM_IO turned off. + */ +#define VM_REMAP_FLAGS (VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP) + /* This mask prevents VMA from being scanned with khugepaged */ #define VM_NO_KHUGEPAGED (VM_SPECIAL | VM_HUGETLB) @@ -498,13 +569,69 @@ extern unsigned int kobjsize(const void *objp); /* This mask represents all the VMA flag bits used by mlock */ #define VM_LOCKED_MASK (VM_LOCKED | VM_LOCKONFAULT) +/* These flags can be updated atomically via VMA/mmap read lock. */ +#define VM_ATOMIC_SET_ALLOWED VM_MAYBE_GUARD + /* Arch-specific flags to clear when updating VM flags on protection change */ #ifndef VM_ARCH_CLEAR -# define VM_ARCH_CLEAR VM_NONE +#define VM_ARCH_CLEAR VM_NONE #endif #define VM_FLAGS_CLEAR (ARCH_VM_PKEY_FLAGS | VM_ARCH_CLEAR) /* + * Flags which should be 'sticky' on merge - that is, flags which, when one VMA + * possesses it but the other does not, the merged VMA should nonetheless have + * applied to it: + * + * VM_SOFTDIRTY - if a VMA is marked soft-dirty, that is has not had its + * references cleared via /proc/$pid/clear_refs, any merged VMA + * should be considered soft-dirty also as it operates at a VMA + * granularity. 
+ * + * VM_MAYBE_GUARD - If a VMA may have guard regions in place it implies that + * mapped page tables may contain metadata not described by the + * VMA and thus any merged VMA may also contain this metadata, + * and thus we must make this flag sticky. + */ +#define VM_STICKY (VM_SOFTDIRTY | VM_MAYBE_GUARD) + +/* + * VMA flags we ignore for the purposes of merge, i.e. one VMA possessing one + * of these flags and the other not does not preclude a merge. + * + * VM_STICKY - When merging VMAs, VMA flags must match, unless they are + * 'sticky'. If any sticky flags exist in either VMA, we simply + * set all of them on the merged VMA. + */ +#define VM_IGNORE_MERGE VM_STICKY + +/* + * Flags which should result in page tables being copied on fork. These are + * flags which indicate that the VMA maps page tables which cannot be + * reconstituted upon page fault, so necessitate page table copying upon fork: + * + * VM_PFNMAP / VM_MIXEDMAP - These contain kernel-mapped data which cannot be + * reasonably reconstructed on page fault. + * + * VM_UFFD_WP - Encodes metadata about an installed uffd + * write protect handler, which cannot be + * reconstructed on page fault. + * + * We always copy pgtables when dst_vma has uffd-wp + * enabled even if it's file-backed + * (e.g. shmem). Because when uffd-wp is enabled, + * pgtable contains uffd-wp protection information, + * that's something we can't retrieve from page cache, + * and skip copying will lose those info. + * + * VM_MAYBE_GUARD - Could contain page guard region markers which + * by design are a property of the page tables + * only and thus cannot be reconstructed on page + * fault. + */ +#define VM_COPY_ON_FORK (VM_PFNMAP | VM_MIXEDMAP | VM_UFFD_WP | VM_MAYBE_GUARD) + +/* * mapping from the currently active vm_flags protection bits (the * low four bits) to a page protection mask.. */ @@ -783,7 +910,9 @@ static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm) static inline void vm_flags_init(struct vm_area_struct *vma, vm_flags_t flags) { - ACCESS_PRIVATE(vma, __vm_flags) = flags; + VM_WARN_ON_ONCE(!pgtable_supports_soft_dirty() && (flags & VM_SOFTDIRTY)); + vma_flags_clear_all(&vma->flags); + vma_flags_overwrite_word(&vma->flags, flags); } /* @@ -794,6 +923,7 @@ static inline void vm_flags_init(struct vm_area_struct *vma, static inline void vm_flags_reset(struct vm_area_struct *vma, vm_flags_t flags) { + VM_WARN_ON_ONCE(!pgtable_supports_soft_dirty() && (flags & VM_SOFTDIRTY)); vma_assert_write_locked(vma); vm_flags_init(vma, flags); } @@ -802,21 +932,33 @@ static inline void vm_flags_reset_once(struct vm_area_struct *vma, vm_flags_t flags) { vma_assert_write_locked(vma); - WRITE_ONCE(ACCESS_PRIVATE(vma, __vm_flags), flags); + /* + * If VMA flags exist beyond the first system word, also clear these. It + * is assumed the write once behaviour is required only for the first + * system word.
+ */ + if (NUM_VMA_FLAG_BITS > BITS_PER_LONG) { + unsigned long *bitmap = ACCESS_PRIVATE(&vma->flags, __vma_flags); + + bitmap_zero(&bitmap[1], NUM_VMA_FLAG_BITS - BITS_PER_LONG); + } + + vma_flags_overwrite_word_once(&vma->flags, flags); } static inline void vm_flags_set(struct vm_area_struct *vma, vm_flags_t flags) { vma_start_write(vma); - ACCESS_PRIVATE(vma, __vm_flags) |= flags; + vma_flags_set_word(&vma->flags, flags); } static inline void vm_flags_clear(struct vm_area_struct *vma, vm_flags_t flags) { + VM_WARN_ON_ONCE(!pgtable_supports_soft_dirty() && (flags & VM_SOFTDIRTY)); vma_start_write(vma); - ACCESS_PRIVATE(vma, __vm_flags) &= ~flags; + vma_flags_clear_word(&vma->flags, flags); } /* @@ -840,6 +982,51 @@ static inline void vm_flags_mod(struct vm_area_struct *vma, __vm_flags_mod(vma, set, clear); } +static inline bool __vma_flag_atomic_valid(struct vm_area_struct *vma, + vma_flag_t bit) +{ + const vm_flags_t mask = BIT((__force int)bit); + + /* Only specific flags are permitted */ + if (WARN_ON_ONCE(!(mask & VM_ATOMIC_SET_ALLOWED))) + return false; + + return true; +} + +/* + * Set VMA flag atomically. Requires only VMA/mmap read lock. Only specific + * valid flags are allowed to do this. + */ +static inline void vma_flag_set_atomic(struct vm_area_struct *vma, + vma_flag_t bit) +{ + unsigned long *bitmap = ACCESS_PRIVATE(&vma->flags, __vma_flags); + + /* mmap read lock/VMA read lock must be held. */ + if (!rwsem_is_locked(&vma->vm_mm->mmap_lock)) + vma_assert_locked(vma); + + if (__vma_flag_atomic_valid(vma, bit)) + set_bit((__force int)bit, bitmap); +} + +/* + * Test for VMA flag atomically. Requires no locks. Only specific valid flags + * are allowed to do this. + * + * This is necessarily racey, so callers must ensure that serialisation is + * achieved through some other means, or that races are permissible. + */ +static inline bool vma_flag_test_atomic(struct vm_area_struct *vma, + vma_flag_t bit) +{ + if (__vma_flag_atomic_valid(vma, bit)) + return test_bit((__force int)bit, &vma->vm_flags); + + return false; +} + static inline void vma_set_anonymous(struct vm_area_struct *vma) { vma->vm_ops = NULL; @@ -2438,7 +2625,7 @@ static inline void zap_vma_pages(struct vm_area_struct *vma) } void unmap_vmas(struct mmu_gather *tlb, struct ma_state *mas, struct vm_area_struct *start_vma, unsigned long start, - unsigned long end, unsigned long tree_end, bool mm_wr_locked); + unsigned long end, unsigned long tree_end); struct mmu_notifier_range; @@ -2922,6 +3109,7 @@ static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long a #endif /* CONFIG_MMU */ enum pt_flags { + PT_kernel = PG_referenced, PT_reserved = PG_reserved, /* High bits are used for zone/node/section */ }; @@ -2948,6 +3136,46 @@ static inline bool pagetable_is_reserved(struct ptdesc *pt) } /** + * ptdesc_set_kernel - Mark a ptdesc used to map the kernel + * @ptdesc: The ptdesc to be marked + * + * Kernel page tables often need special handling. Set a flag so that + * the handling code knows this ptdesc will not be used for userspace. + */ +static inline void ptdesc_set_kernel(struct ptdesc *ptdesc) +{ + set_bit(PT_kernel, &ptdesc->pt_flags.f); +} + +/** + * ptdesc_clear_kernel - Mark a ptdesc as no longer used to map the kernel + * @ptdesc: The ptdesc to be unmarked + * + * Use when the ptdesc is no longer used to map the kernel and no longer + * needs special handling. 
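Not part of the patch: a minimal usage sketch of the atomic VMA flag helpers added above (vma_flag_set_atomic()/vma_flag_test_atomic()), assuming a caller that already holds the mmap or per-VMA read lock; the wrapper function below is illustrative only.

/* Illustrative only: mark a VMA as possibly containing guard regions while
 * holding only a read lock, then test the bit locklessly elsewhere.
 */
static void example_mark_maybe_guard(struct vm_area_struct *vma)
{
	/* Only bits in VM_ATOMIC_SET_ALLOWED (currently VM_MAYBE_GUARD) are valid. */
	vma_flag_set_atomic(vma, VMA_MAYBE_GUARD_BIT);

	/* Racy by design; serialisation is the caller's responsibility. */
	if (vma_flag_test_atomic(vma, VMA_MAYBE_GUARD_BIT))
		pr_debug("vma %px may contain guard regions\n", vma);
}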
+ */ +static inline void ptdesc_clear_kernel(struct ptdesc *ptdesc) +{ + /* + * Note: the 'PG_referenced' bit does not strictly need to be + * cleared before freeing the page. But this is nice for + * symmetry. + */ + clear_bit(PT_kernel, &ptdesc->pt_flags.f); +} + +/** + * ptdesc_test_kernel - Check if a ptdesc is used to map the kernel + * @ptdesc: The ptdesc being tested + * + * Call to tell if the ptdesc used to map the kernel. + */ +static inline bool ptdesc_test_kernel(const struct ptdesc *ptdesc) +{ + return test_bit(PT_kernel, &ptdesc->pt_flags.f); +} + +/** * pagetable_alloc - Allocate pagetables * @gfp: GFP flags * @order: desired pagetable order @@ -2965,6 +3193,21 @@ static inline struct ptdesc *pagetable_alloc_noprof(gfp_t gfp, unsigned int orde } #define pagetable_alloc(...) alloc_hooks(pagetable_alloc_noprof(__VA_ARGS__)) +static inline void __pagetable_free(struct ptdesc *pt) +{ + struct page *page = ptdesc_page(pt); + + __free_pages(page, compound_order(page)); +} + +#ifdef CONFIG_ASYNC_KERNEL_PGTABLE_FREE +void pagetable_free_kernel(struct ptdesc *pt); +#else +static inline void pagetable_free_kernel(struct ptdesc *pt) +{ + __pagetable_free(pt); +} +#endif /** * pagetable_free - Free pagetables * @pt: The page table descriptor @@ -2974,9 +3217,12 @@ static inline struct ptdesc *pagetable_alloc_noprof(gfp_t gfp, unsigned int orde */ static inline void pagetable_free(struct ptdesc *pt) { - struct page *page = ptdesc_page(pt); - - __free_pages(page, compound_order(page)); + if (ptdesc_test_kernel(pt)) { + ptdesc_clear_kernel(pt); + pagetable_free_kernel(pt); + } else { + __pagetable_free(pt); + } } #if defined(CONFIG_SPLIT_PTE_PTLOCKS) @@ -3560,6 +3806,90 @@ static inline unsigned long vma_pages(const struct vm_area_struct *vma) return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; } +static inline unsigned long vma_desc_size(const struct vm_area_desc *desc) +{ + return desc->end - desc->start; +} + +static inline unsigned long vma_desc_pages(const struct vm_area_desc *desc) +{ + return vma_desc_size(desc) >> PAGE_SHIFT; +} + +/** + * mmap_action_remap - helper for mmap_prepare hook to specify that a pure PFN + * remap is required. + * @desc: The VMA descriptor for the VMA requiring remap. + * @start: The virtual address to start the remap from, must be within the VMA. + * @start_pfn: The first PFN in the range to remap. + * @size: The size of the range to remap, in bytes, at most spanning to the end + * of the VMA. + */ +static inline void mmap_action_remap(struct vm_area_desc *desc, + unsigned long start, + unsigned long start_pfn, + unsigned long size) +{ + struct mmap_action *action = &desc->action; + + /* [start, start + size) must be within the VMA. */ + WARN_ON_ONCE(start < desc->start || start >= desc->end); + WARN_ON_ONCE(start + size > desc->end); + + action->type = MMAP_REMAP_PFN; + action->remap.start = start; + action->remap.start_pfn = start_pfn; + action->remap.size = size; + action->remap.pgprot = desc->page_prot; +} + +/** + * mmap_action_remap_full - helper for mmap_prepare hook to specify that the + * entirety of a VMA should be PFN remapped. + * @desc: The VMA descriptor for the VMA requiring remap. + * @start_pfn: The first PFN in the range to remap. + */ +static inline void mmap_action_remap_full(struct vm_area_desc *desc, + unsigned long start_pfn) +{ + mmap_action_remap(desc, desc->start, start_pfn, vma_desc_size(desc)); +} + +/** + * mmap_action_ioremap - helper for mmap_prepare hook to specify that a pure PFN + * I/O remap is required. 
+ * @desc: The VMA descriptor for the VMA requiring remap. + * @start: The virtual address to start the remap from, must be within the VMA. + * @start_pfn: The first PFN in the range to remap. + * @size: The size of the range to remap, in bytes, at most spanning to the end + * of the VMA. + */ +static inline void mmap_action_ioremap(struct vm_area_desc *desc, + unsigned long start, + unsigned long start_pfn, + unsigned long size) +{ + mmap_action_remap(desc, start, start_pfn, size); + desc->action.type = MMAP_IO_REMAP_PFN; +} + +/** + * mmap_action_ioremap_full - helper for mmap_prepare hook to specify that the + * entirety of a VMA should be PFN I/O remapped. + * @desc: The VMA descriptor for the VMA requiring remap. + * @start_pfn: The first PFN in the range to remap. + */ +static inline void mmap_action_ioremap_full(struct vm_area_desc *desc, + unsigned long start_pfn) +{ + mmap_action_ioremap(desc, desc->start, start_pfn, vma_desc_size(desc)); +} + +void mmap_action_prepare(struct mmap_action *action, + struct vm_area_desc *desc); +int mmap_action_complete(struct mmap_action *action, + struct vm_area_struct *vma); + /* Look up the first VMA which exactly match the interval vm_start ... vm_end */ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm, unsigned long vm_start, unsigned long vm_end) @@ -3601,10 +3931,9 @@ unsigned long change_prot_numa(struct vm_area_struct *vma, struct vm_area_struct *find_extend_vma_locked(struct mm_struct *, unsigned long addr); -int remap_pfn_range(struct vm_area_struct *, unsigned long addr, - unsigned long pfn, unsigned long size, pgprot_t); -int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr, - unsigned long pfn, unsigned long size, pgprot_t prot); +int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr, + unsigned long pfn, unsigned long size, pgprot_t pgprot); + int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *); int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr, struct page **pages, unsigned long *num); @@ -3637,15 +3966,24 @@ static inline vm_fault_t vmf_insert_page(struct vm_area_struct *vma, return VM_FAULT_NOPAGE; } -#ifndef io_remap_pfn_range -static inline int io_remap_pfn_range(struct vm_area_struct *vma, - unsigned long addr, unsigned long pfn, - unsigned long size, pgprot_t prot) +#ifndef io_remap_pfn_range_pfn +static inline unsigned long io_remap_pfn_range_pfn(unsigned long pfn, + unsigned long size) { - return remap_pfn_range(vma, addr, pfn, size, pgprot_decrypted(prot)); + return pfn; } #endif +static inline int io_remap_pfn_range(struct vm_area_struct *vma, + unsigned long addr, unsigned long orig_pfn, + unsigned long size, pgprot_t orig_prot) +{ + const unsigned long pfn = io_remap_pfn_range_pfn(orig_pfn, size); + const pgprot_t prot = pgprot_decrypted(orig_prot); + + return remap_pfn_range(vma, addr, pfn, size, prot); +} + static inline vm_fault_t vmf_error(int err) { if (err == -ENOMEM) @@ -4094,6 +4432,7 @@ enum mf_action_page_type { MF_MSG_DAX, MF_MSG_UNSPLIT_THP, MF_MSG_ALREADY_POISONED, + MF_MSG_PFN_MAP, MF_MSG_UNKNOWN, }; @@ -4222,16 +4561,6 @@ int arch_get_shadow_stack_status(struct task_struct *t, unsigned long __user *st int arch_set_shadow_stack_status(struct task_struct *t, unsigned long status); int arch_lock_shadow_stack_status(struct task_struct *t, unsigned long status); - -/* - * mseal of userspace process's system mappings. 
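Not part of the patch: a hedged sketch of how a driver's ->mmap_prepare hook might use the mmap_action helpers above to request a deferred PFN remap instead of remapping the range itself; struct exdev and its fields are hypothetical.

struct exdev {
	phys_addr_t bar_phys;		/* hypothetical MMIO base */
	resource_size_t bar_size;	/* hypothetical MMIO window size */
};

static int exdev_mmap_prepare(struct vm_area_desc *desc)
{
	struct exdev *edev = desc->vm_file->private_data;

	if (vma_desc_size(desc) > edev->bar_size)
		return -EINVAL;

	/* Defer the remap; core mm performs it once the VMA is established. */
	mmap_action_ioremap_full(desc, PHYS_PFN(edev->bar_phys));
	return 0;
}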
- */ -#ifdef CONFIG_MSEAL_SYSTEM_MAPPINGS -#define VM_SEALED_SYSMAP VM_SEALED -#else -#define VM_SEALED_SYSMAP VM_NONE -#endif - /* * DMA mapping IDs for page_pool * diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h index f6a2b2d20016..fa2d6ba811b5 100644 --- a/include/linux/mm_inline.h +++ b/include/linux/mm_inline.h @@ -8,7 +8,7 @@ #include <linux/swap.h> #include <linux/string.h> #include <linux/userfaultfd_k.h> -#include <linux/swapops.h> +#include <linux/leafops.h> /** * folio_is_file_lru - Should the folio be on a file LRU or anon LRU? @@ -44,7 +44,7 @@ static __always_inline void __update_lru_size(struct lruvec *lruvec, lockdep_assert_held(&lruvec->lru_lock); WARN_ON_ONCE(nr_pages != (int)nr_pages); - __mod_lruvec_state(lruvec, NR_LRU_BASE + lru, nr_pages); + mod_lruvec_state(lruvec, NR_LRU_BASE + lru, nr_pages); __mod_zone_page_state(&pgdat->node_zones[zid], NR_ZONE_LRU_BASE + lru, nr_pages); } @@ -541,9 +541,9 @@ static inline bool mm_tlb_flush_nested(const struct mm_struct *mm) * The caller should insert a new pte created with make_pte_marker(). */ static inline pte_marker copy_pte_marker( - swp_entry_t entry, struct vm_area_struct *dst_vma) + softleaf_t entry, struct vm_area_struct *dst_vma) { - pte_marker srcm = pte_marker_get(entry); + const pte_marker srcm = softleaf_to_marker(entry); /* Always copy error entries. */ pte_marker dstm = srcm & (PTE_MARKER_POISONED | PTE_MARKER_GUARD); @@ -553,7 +553,6 @@ static inline pte_marker copy_pte_marker( return dstm; } -#endif /* * If this pte is wr-protected by uffd-wp in any form, arm the special pte to @@ -571,9 +570,11 @@ static inline bool pte_install_uffd_wp_if_needed(struct vm_area_struct *vma, unsigned long addr, pte_t *pte, pte_t pteval) { -#ifdef CONFIG_PTE_MARKER_UFFD_WP bool arm_uffd_pte = false; + if (!uffd_supports_wp_marker()) + return false; + /* The current status of the pte should be "cleared" before calling */ WARN_ON_ONCE(!pte_none(ptep_get(pte))); @@ -602,7 +603,7 @@ pte_install_uffd_wp_if_needed(struct vm_area_struct *vma, unsigned long addr, make_pte_marker(PTE_MARKER_UFFD_WP)); return true; } -#endif + return false; } @@ -616,6 +617,7 @@ static inline bool vma_has_recency(const struct vm_area_struct *vma) return true; } +#endif /** * num_pages_contiguous() - determine the number of contiguous pages diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 3b7d05e7169c..9f6de068295d 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -286,6 +286,31 @@ typedef struct { unsigned long val; } swp_entry_t; +/** + * typedef softleaf_t - Describes a page table software leaf entry, abstracted + * from its architecture-specific encoding. + * + * Page table leaf entries are those which do not reference any descendent page + * tables but rather either reference a data page, are an empty (or 'none' + * entry), or contain a non-present entry. + * + * If referencing another page table or a data page then the page table entry is + * pertinent to hardware - that is it tells the hardware how to decode the page + * table entry. + * + * Otherwise it is a software-defined leaf page table entry, which this type + * describes. See leafops.h and specifically @softleaf_type for a list of all + * possible kinds of software leaf entry. + * + * A softleaf_t entry is abstracted from the hardware page table entry, so is + * not architecture-specific. + * + * NOTE: While we transition from the confusing swp_entry_t type used for this + * purpose, we simply alias this type. 
This will be removed once the + * transition is complete. + */ +typedef swp_entry_t softleaf_t; + #if defined(CONFIG_MEMCG) || defined(CONFIG_SLAB_OBJ_EXT) /* We have some extra room after the refcount in tail pages. */ #define NR_PAGES_IN_LARGE_FOLIO @@ -774,6 +799,65 @@ struct pfnmap_track_ctx { }; #endif +/* What action should be taken after an .mmap_prepare call is complete? */ +enum mmap_action_type { + MMAP_NOTHING, /* Mapping is complete, no further action. */ + MMAP_REMAP_PFN, /* Remap PFN range. */ + MMAP_IO_REMAP_PFN, /* I/O remap PFN range. */ +}; + +/* + * Describes an action an mmap_prepare hook can instruct to be taken to complete + * the mapping of a VMA. Specified in vm_area_desc. + */ +struct mmap_action { + union { + /* Remap range. */ + struct { + unsigned long start; + unsigned long start_pfn; + unsigned long size; + pgprot_t pgprot; + } remap; + }; + enum mmap_action_type type; + + /* + * If specified, this hook is invoked after the selected action has been + * successfully completed. Note that the VMA write lock still held. + * + * The absolute minimum ought to be done here. + * + * Returns 0 on success, or an error code. + */ + int (*success_hook)(const struct vm_area_struct *vma); + + /* + * If specified, this hook is invoked when an error occurred when + * attempting the selection action. + * + * The hook can return an error code in order to filter the error, but + * it is not valid to clear the error here. + */ + int (*error_hook)(int err); + + /* + * This should be set in rare instances where the operation required + * that the rmap should not be able to access the VMA until + * completely set up. + */ + bool hide_from_rmap_until_complete :1; +}; + +/* + * Opaque type representing current VMA (vm_area_struct) flag state. Must be + * accessed via vma_flags_xxx() helper functions. + */ +#define NUM_VMA_FLAG_BITS BITS_PER_LONG +typedef struct { + DECLARE_BITMAP(__vma_flags, NUM_VMA_FLAG_BITS); +} __private vma_flags_t; + /* * Describes a VMA that is about to be mmap()'ed. Drivers may choose to * manipulate mutable fields which will cause those fields to be updated in the @@ -791,12 +875,18 @@ struct vm_area_desc { /* Mutable fields. Populated with initial state. */ pgoff_t pgoff; struct file *vm_file; - vm_flags_t vm_flags; + union { + vm_flags_t vm_flags; + vma_flags_t vma_flags; + }; pgprot_t page_prot; /* Write-only fields. */ const struct vm_operations_struct *vm_ops; void *private_data; + + /* Take further action? */ + struct mmap_action action; }; /* @@ -833,10 +923,12 @@ struct vm_area_struct { /* * Flags, see mm.h. * To modify use vm_flags_{init|reset|set|clear|mod} functions. + * Preferably, use vma_flags_xxx() functions. */ union { + /* Temporary while VMA flags are being converted. */ const vm_flags_t vm_flags; - vm_flags_t __private __vm_flags; + vma_flags_t flags; }; #ifdef CONFIG_PER_VMA_LOCK @@ -917,6 +1009,52 @@ struct vm_area_struct { #endif } __randomize_layout; +/* Clears all bits in the VMA flags bitmap, non-atomically. */ +static inline void vma_flags_clear_all(vma_flags_t *flags) +{ + bitmap_zero(ACCESS_PRIVATE(flags, __vma_flags), NUM_VMA_FLAG_BITS); +} + +/* + * Copy value to the first system word of VMA flags, non-atomically. + * + * IMPORTANT: This does not overwrite bytes past the first system word. The + * caller must account for this. 
+ */ +static inline void vma_flags_overwrite_word(vma_flags_t *flags, unsigned long value) +{ + *ACCESS_PRIVATE(flags, __vma_flags) = value; +} + +/* + * Copy value to the first system word of VMA flags ONCE, non-atomically. + * + * IMPORTANT: This does not overwrite bytes past the first system word. The + * caller must account for this. + */ +static inline void vma_flags_overwrite_word_once(vma_flags_t *flags, unsigned long value) +{ + unsigned long *bitmap = ACCESS_PRIVATE(flags, __vma_flags); + + WRITE_ONCE(*bitmap, value); +} + +/* Update the first system word of VMA flags setting bits, non-atomically. */ +static inline void vma_flags_set_word(vma_flags_t *flags, unsigned long value) +{ + unsigned long *bitmap = ACCESS_PRIVATE(flags, __vma_flags); + + *bitmap |= value; +} + +/* Update the first system word of VMA flags clearing bits, non-atomically. */ +static inline void vma_flags_clear_word(vma_flags_t *flags, unsigned long value) +{ + unsigned long *bitmap = ACCESS_PRIVATE(flags, __vma_flags); + + *bitmap &= ~value; +} + #ifdef CONFIG_NUMA #define vma_policy(vma) ((vma)->vm_policy) #else @@ -1194,15 +1332,13 @@ struct mm_struct { unsigned long cpu_bitmap[]; }; -/* Set the first system word of mm flags, non-atomically. */ -static inline void __mm_flags_set_word(struct mm_struct *mm, unsigned long value) +/* Copy value to the first system word of mm flags, non-atomically. */ +static inline void __mm_flags_overwrite_word(struct mm_struct *mm, unsigned long value) { - unsigned long *bitmap = ACCESS_PRIVATE(&mm->flags, __mm_flags); - - bitmap_copy(bitmap, &value, BITS_PER_LONG); + *ACCESS_PRIVATE(&mm->flags, __mm_flags) = value; } -/* Obtain a read-only view of the bitmap. */ +/* Obtain a read-only view of the mm flags bitmap. */ static inline const unsigned long *__mm_flags_get_bitmap(const struct mm_struct *mm) { return (const unsigned long *)ACCESS_PRIVATE(&mm->flags, __mm_flags); @@ -1211,9 +1347,7 @@ static inline const unsigned long *__mm_flags_get_bitmap(const struct mm_struct /* Read the first system word of mm flags, non-atomically. */ static inline unsigned long __mm_flags_get_word(const struct mm_struct *mm) { - const unsigned long *bitmap = __mm_flags_get_bitmap(mm); - - return bitmap_read(bitmap, 0, BITS_PER_LONG); + return *__mm_flags_get_bitmap(mm); } /* diff --git a/include/linux/mmap_lock.h b/include/linux/mmap_lock.h index 2c9fffa58714..d53f72dba7fe 100644 --- a/include/linux/mmap_lock.h +++ b/include/linux/mmap_lock.h @@ -130,7 +130,7 @@ static inline bool is_vma_writer_only(int refcnt) * a detached vma happens only in vma_mark_detached() and is a rare * case, therefore most of the time there will be no unnecessary wakeup. */ - return refcnt & VMA_LOCK_OFFSET && refcnt <= VMA_LOCK_OFFSET + 1; + return (refcnt & VMA_LOCK_OFFSET) && refcnt <= VMA_LOCK_OFFSET + 1; } static inline void vma_refcount_put(struct vm_area_struct *vma) @@ -183,7 +183,7 @@ static inline void vma_end_read(struct vm_area_struct *vma) } /* WARNING! 
Can only be used if mmap_lock is expected to be write-locked */ -static bool __is_vma_write_locked(struct vm_area_struct *vma, unsigned int *mm_lock_seq) +static inline bool __is_vma_write_locked(struct vm_area_struct *vma, unsigned int *mm_lock_seq) { mmap_assert_write_locked(vma->vm_mm); @@ -195,7 +195,8 @@ static bool __is_vma_write_locked(struct vm_area_struct *vma, unsigned int *mm_l return (vma->vm_lock_seq == *mm_lock_seq); } -void __vma_start_write(struct vm_area_struct *vma, unsigned int mm_lock_seq); +int __vma_start_write(struct vm_area_struct *vma, unsigned int mm_lock_seq, + int state); /* * Begin writing to a VMA. @@ -209,7 +210,30 @@ static inline void vma_start_write(struct vm_area_struct *vma) if (__is_vma_write_locked(vma, &mm_lock_seq)) return; - __vma_start_write(vma, mm_lock_seq); + __vma_start_write(vma, mm_lock_seq, TASK_UNINTERRUPTIBLE); +} + +/** + * vma_start_write_killable - Begin writing to a VMA. + * @vma: The VMA we are going to modify. + * + * Exclude concurrent readers under the per-VMA lock until the currently + * write-locked mmap_lock is dropped or downgraded. + * + * Context: May sleep while waiting for readers to drop the vma read lock. + * Caller must already hold the mmap_lock for write. + * + * Return: 0 for a successful acquisition. -EINTR if a fatal signal was + * received. + */ +static inline __must_check +int vma_start_write_killable(struct vm_area_struct *vma) +{ + unsigned int mm_lock_seq; + + if (__is_vma_write_locked(vma, &mm_lock_seq)) + return 0; + return __vma_start_write(vma, mm_lock_seq, TASK_KILLABLE); } static inline void vma_assert_write_locked(struct vm_area_struct *vma) @@ -281,11 +305,10 @@ static inline bool mmap_lock_speculate_retry(struct mm_struct *mm, unsigned int return true; } static inline void vma_lock_init(struct vm_area_struct *vma, bool reset_refcnt) {} -static inline struct vm_area_struct *vma_start_read(struct mm_struct *mm, - struct vm_area_struct *vma) - { return NULL; } static inline void vma_end_read(struct vm_area_struct *vma) {} static inline void vma_start_write(struct vm_area_struct *vma) {} +static inline __must_check +int vma_start_write_killable(struct vm_area_struct *vma) { return 0; } static inline void vma_assert_write_locked(struct vm_area_struct *vma) { mmap_assert_write_locked(vma->vm_mm); } static inline void vma_assert_attached(struct vm_area_struct *vma) {} diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h index ddcdf23d731c..e9e964c20e53 100644 --- a/include/linux/mmc/card.h +++ b/include/linux/mmc/card.h @@ -182,7 +182,6 @@ struct sd_switch_caps { #define SD_SET_CURRENT_LIMIT_400 1 #define SD_SET_CURRENT_LIMIT_600 2 #define SD_SET_CURRENT_LIMIT_800 3 -#define SD_SET_CURRENT_NO_CHANGE (-1) #define SD_MAX_CURRENT_200 (1 << SD_SET_CURRENT_LIMIT_200) #define SD_MAX_CURRENT_400 (1 << SD_SET_CURRENT_LIMIT_400) diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 7fb7331c5725..4398e027f450 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -1060,10 +1060,6 @@ struct zone { } ____cacheline_internodealigned_in_smp; enum pgdat_flags { - PGDAT_DIRTY, /* reclaim scanning has recently found - * many dirty file pages at the tail - * of the LRU. 
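Not part of the patch: a small sketch of the vma_start_write_killable() variant added to mmap_lock.h above, assuming the caller already holds mmap_lock for write; the surrounding function and the flag it sets are illustrative only.

static int example_vma_update(struct vm_area_struct *vma)
{
	int err;

	err = vma_start_write_killable(vma);
	if (err)
		return err;	/* -EINTR: fatal signal while waiting for readers */

	vm_flags_set(vma, VM_DONTDUMP);
	return 0;
}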
- */ PGDAT_WRITEBACK, /* reclaim scanning has recently found * many pages under writeback */ diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h index 6077972e8b45..24eb5a88a5c5 100644 --- a/include/linux/mod_devicetable.h +++ b/include/linux/mod_devicetable.h @@ -867,7 +867,7 @@ struct mhi_device_id { kernel_ulong_t driver_data; }; -#define AUXILIARY_NAME_SIZE 32 +#define AUXILIARY_NAME_SIZE 40 #define AUXILIARY_MODULE_PREFIX "auxiliary:" struct auxiliary_device_id { diff --git a/include/linux/mtd/spear_smi.h b/include/linux/mtd/spear_smi.h index 581603ac1277..871634862627 100644 --- a/include/linux/mtd/spear_smi.h +++ b/include/linux/mtd/spear_smi.h @@ -31,12 +31,12 @@ * struct spear_smi_flash_info - platform structure for passing flash * information * - * name: name of the serial nor flash for identification - * mem_base: the memory base on which the flash is mapped - * size: size of the flash in bytes - * partitions: parition details - * nr_partitions: number of partitions - * fast_mode: whether flash supports fast mode + * @name: name of the serial nor flash for identification + * @mem_base: the memory base on which the flash is mapped + * @size: size of the flash in bytes + * @partitions: parition details + * @nr_partitions: number of partitions + * @fast_mode: whether flash supports fast mode */ struct spear_smi_flash_info { @@ -51,9 +51,10 @@ struct spear_smi_flash_info { /** * struct spear_smi_plat_data - platform structure for configuring smi * - * clk_rate: clk rate at which SMI must operate - * num_flashes: number of flashes present on board - * board_flash_info: specific details of each flash present on board + * @clk_rate: clk rate at which SMI must operate + * @num_flashes: number of flashes present on board + * @board_flash_info: specific details of each flash present on board + * @np: array of DT node pointers for all possible flash chip devices */ struct spear_smi_plat_data { unsigned long clk_rate; diff --git a/include/linux/mtd/spinand.h b/include/linux/mtd/spinand.h index 927c10d78769..ce76f5c632e1 100644 --- a/include/linux/mtd/spinand.h +++ b/include/linux/mtd/spinand.h @@ -354,6 +354,7 @@ struct spinand_manufacturer { /* SPI NAND manufacturers */ extern const struct spinand_manufacturer alliancememory_spinand_manufacturer; extern const struct spinand_manufacturer ato_spinand_manufacturer; +extern const struct spinand_manufacturer esmt_8c_spinand_manufacturer; extern const struct spinand_manufacturer esmt_c8_spinand_manufacturer; extern const struct spinand_manufacturer fmsh_spinand_manufacturer; extern const struct spinand_manufacturer foresee_spinand_manufacturer; diff --git a/include/linux/node.h b/include/linux/node.h index 866e3323f1fd..0269b064ba65 100644 --- a/include/linux/node.h +++ b/include/linux/node.h @@ -132,8 +132,6 @@ static inline void register_memory_blocks_under_nodes(void) } #endif -extern void unregister_node(struct node *node); - struct node_notify { int nid; }; @@ -176,8 +174,8 @@ static inline int hotplug_node_notifier(notifier_fn_t fn, int pri) #ifdef CONFIG_NUMA extern void node_dev_init(void); /* Core of the node registration - only memory hotplug should use this */ -extern int register_one_node(int nid); -extern void unregister_one_node(int nid); +int register_node(int nid); +void unregister_node(int nid); extern int register_cpu_under_node(unsigned int cpu, unsigned int nid); extern int unregister_cpu_under_node(unsigned int cpu, unsigned int nid); extern void unregister_memory_block_under_nodes(struct 
memory_block *mem_blk); @@ -189,11 +187,11 @@ extern int register_memory_node_under_compute_node(unsigned int mem_nid, static inline void node_dev_init(void) { } -static inline int register_one_node(int nid) +static inline int register_node(int nid) { return 0; } -static inline int unregister_one_node(int nid) +static inline int unregister_node(int nid) { return 0; } diff --git a/include/linux/of.h b/include/linux/of.h index 121a288ca92d..01bb3affcd49 100644 --- a/include/linux/of.h +++ b/include/linux/of.h @@ -407,6 +407,8 @@ extern int of_alias_get_id(const struct device_node *np, const char *stem); extern int of_alias_get_highest_id(const char *stem); bool of_machine_compatible_match(const char *const *compats); +bool of_machine_device_match(const struct of_device_id *matches); +const void *of_machine_get_match_data(const struct of_device_id *matches); /** * of_machine_is_compatible - Test root of device tree for a given compatible value @@ -855,6 +857,17 @@ static inline bool of_machine_compatible_match(const char *const *compats) return false; } +static inline bool of_machine_device_match(const struct of_device_id *matches) +{ + return false; +} + +static inline const void * +of_machine_get_match_data(const struct of_device_id *matches) +{ + return NULL; +} + static inline bool of_console_check(const struct device_node *dn, const char *name, int index) { return false; diff --git a/include/linux/of_fdt.h b/include/linux/of_fdt.h index b8d6c0c20876..51dadbaa3d63 100644 --- a/include/linux/of_fdt.h +++ b/include/linux/of_fdt.h @@ -55,6 +55,15 @@ extern int of_get_flat_dt_subnode_by_name(unsigned long node, const char *uname); extern const void *of_get_flat_dt_prop(unsigned long node, const char *name, int *size); + +extern const __be32 *of_flat_dt_get_addr_size_prop(unsigned long node, + const char *name, + int *entries); +extern bool of_flat_dt_get_addr_size(unsigned long node, const char *name, + u64 *addr, u64 *size); +extern void of_flat_dt_read_addr_size(const __be32 *prop, int entry_index, + u64 *addr, u64 *size); + extern int of_flat_dt_is_compatible(unsigned long node, const char *name); extern unsigned long of_get_flat_dt_root(void); extern uint32_t of_get_flat_dt_phandle(unsigned long node); diff --git a/include/linux/overflow.h b/include/linux/overflow.h index 725f95f7e416..736f633b2d5f 100644 --- a/include/linux/overflow.h +++ b/include/linux/overflow.h @@ -459,6 +459,18 @@ static inline size_t __must_check size_sub(size_t minuend, size_t subtrahend) struct_size((type *)NULL, member, count) /** + * struct_offset() - Calculate the offset of a member within a struct + * @p: Pointer to the struct + * @member: Name of the member to get the offset of + * + * Calculates the offset of a particular @member of the structure pointed + * to by @p. + * + * Return: number of bytes to the location of @member. + */ +#define struct_offset(p, member) (offsetof(typeof(*(p)), member)) + +/** * __DEFINE_FLEX() - helper macro for DEFINE_FLEX() family. 
* Enables caller macro to pass arbitrary trailing expressions * diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index e601a3144f28..31a848485ad9 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -651,9 +651,11 @@ static inline void *detach_page_private(struct page *page) } #ifdef CONFIG_NUMA -struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int order); +struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int order, + struct mempolicy *policy); #else -static inline struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int order) +static inline struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int order, + struct mempolicy *policy) { return folio_alloc_noprof(gfp, order); } @@ -664,7 +666,7 @@ static inline struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int o static inline struct page *__page_cache_alloc(gfp_t gfp) { - return &filemap_alloc_folio(gfp, 0)->page; + return &filemap_alloc_folio(gfp, 0, NULL)->page; } static inline gfp_t readahead_gfp_mask(struct address_space *x) @@ -750,11 +752,17 @@ static inline fgf_t fgf_set_order(size_t size) } void *filemap_get_entry(struct address_space *mapping, pgoff_t index); -struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index, - fgf_t fgp_flags, gfp_t gfp); +struct folio *__filemap_get_folio_mpol(struct address_space *mapping, + pgoff_t index, fgf_t fgf_flags, gfp_t gfp, struct mempolicy *policy); struct page *pagecache_get_page(struct address_space *mapping, pgoff_t index, fgf_t fgp_flags, gfp_t gfp); +static inline struct folio *__filemap_get_folio(struct address_space *mapping, + pgoff_t index, fgf_t fgf_flags, gfp_t gfp) +{ + return __filemap_get_folio_mpol(mapping, index, fgf_flags, gfp, NULL); +} + /** * write_begin_get_folio - Get folio for write_begin with flags. * @iocb: The kiocb passed from write_begin (may be NULL). 
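A minimal caller sketch for the mempolicy-aware page-cache lookup introduced in the pagemap.h hunk above. __filemap_get_folio_mpol() and its parameter order are taken from that hunk; the helper name get_folio_with_policy(), the FGP_LOCK | FGP_CREAT flag choice and the use of mapping_gfp_mask() are illustrative assumptions, not part of the patch. Existing callers are unaffected because the new __filemap_get_folio() wrapper simply forwards a NULL policy.

	/*
	 * Illustrative only: find-or-create a folio at @index, honouring an
	 * explicit NUMA @policy when a new folio must be allocated.  Passing
	 * a NULL @policy keeps the task/default policy, which is exactly what
	 * the __filemap_get_folio() wrapper does for existing callers.
	 */
	#include <linux/pagemap.h>
	#include <linux/mempolicy.h>

	static struct folio *get_folio_with_policy(struct address_space *mapping,
						   pgoff_t index,
						   struct mempolicy *policy)
	{
		/* Returns a locked folio, or ERR_PTR() on failure as before. */
		return __filemap_get_folio_mpol(mapping, index,
						FGP_LOCK | FGP_CREAT,
						mapping_gfp_mask(mapping),
						policy);
	}

The design point of the change is that the policy only influences allocation of a not-yet-present folio; lookups of existing folios behave as they did before the patch.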
diff --git a/include/linux/pci-epf.h b/include/linux/pci-epf.h index 2e85504ba2ba..48f68c4dcfa5 100644 --- a/include/linux/pci-epf.h +++ b/include/linux/pci-epf.h @@ -115,8 +115,8 @@ struct pci_epf_driver { * @phys_addr: physical address that should be mapped to the BAR * @addr: virtual address corresponding to the @phys_addr * @size: the size of the address space present in BAR - * @aligned_size: the size actually allocated to accommodate the iATU alignment - * requirement + * @mem_size: the size actually allocated to accommodate the iATU alignment + * requirement * @barno: BAR number * @flags: flags that are set for the BAR */ @@ -124,7 +124,7 @@ struct pci_epf_bar { dma_addr_t phys_addr; void *addr; size_t size; - size_t aligned_size; + size_t mem_size; enum pci_barno barno; int flags; }; @@ -242,6 +242,12 @@ void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar, void pci_epf_free_space(struct pci_epf *epf, void *addr, enum pci_barno bar, enum pci_epc_interface_type type); +int pci_epf_assign_bar_space(struct pci_epf *epf, size_t size, + enum pci_barno bar, + const struct pci_epc_features *epc_features, + enum pci_epc_interface_type type, + dma_addr_t bar_addr); + int pci_epf_align_inbound_addr(struct pci_epf *epf, enum pci_barno bar, u64 addr, dma_addr_t *base, size_t *off); int pci_epf_bind(struct pci_epf *epf); diff --git a/include/linux/pci-p2pdma.h b/include/linux/pci-p2pdma.h index 951f81a38f3a..517e121d2598 100644 --- a/include/linux/pci-p2pdma.h +++ b/include/linux/pci-p2pdma.h @@ -16,7 +16,58 @@ struct block_device; struct scatterlist; +/** + * struct p2pdma_provider + * + * A p2pdma provider is a range of MMIO address space available to the CPU. + */ +struct p2pdma_provider { + struct device *owner; + u64 bus_offset; +}; + +enum pci_p2pdma_map_type { + /* + * PCI_P2PDMA_MAP_UNKNOWN: Used internally as an initial state before + * the mapping type has been calculated. Exported routines for the API + * will never return this value. + */ + PCI_P2PDMA_MAP_UNKNOWN = 0, + + /* + * Not a PCI P2PDMA transfer. + */ + PCI_P2PDMA_MAP_NONE, + + /* + * PCI_P2PDMA_MAP_NOT_SUPPORTED: Indicates the transaction will + * traverse the host bridge and the host bridge is not in the + * allowlist. DMA Mapping routines should return an error when + * this is returned. + */ + PCI_P2PDMA_MAP_NOT_SUPPORTED, + + /* + * PCI_P2PDMA_MAP_BUS_ADDR: Indicates that two devices can talk to + * each other directly through a PCI switch and the transaction will + * not traverse the host bridge. Such a mapping should program + * the DMA engine with PCI bus addresses. + */ + PCI_P2PDMA_MAP_BUS_ADDR, + + /* + * PCI_P2PDMA_MAP_THRU_HOST_BRIDGE: Indicates two devices can talk + * to each other, but the transaction traverses a host bridge on the + * allowlist. In this case, a normal mapping either with CPU physical + * addresses (in the case of dma-direct) or IOVA addresses (in the + * case of IOMMUs) should be used to program the DMA engine. 
+ */ + PCI_P2PDMA_MAP_THRU_HOST_BRIDGE, +}; + #ifdef CONFIG_PCI_P2PDMA +int pcim_p2pdma_init(struct pci_dev *pdev); +struct p2pdma_provider *pcim_p2pdma_provider(struct pci_dev *pdev, int bar); int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size, u64 offset); int pci_p2pdma_distance_many(struct pci_dev *provider, struct device **clients, @@ -33,7 +84,18 @@ int pci_p2pdma_enable_store(const char *page, struct pci_dev **p2p_dev, bool *use_p2pdma); ssize_t pci_p2pdma_enable_show(char *page, struct pci_dev *p2p_dev, bool use_p2pdma); +enum pci_p2pdma_map_type pci_p2pdma_map_type(struct p2pdma_provider *provider, + struct device *dev); #else /* CONFIG_PCI_P2PDMA */ +static inline int pcim_p2pdma_init(struct pci_dev *pdev) +{ + return -EOPNOTSUPP; +} +static inline struct p2pdma_provider *pcim_p2pdma_provider(struct pci_dev *pdev, + int bar) +{ + return NULL; +} static inline int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size, u64 offset) { @@ -85,6 +147,11 @@ static inline ssize_t pci_p2pdma_enable_show(char *page, { return sprintf(page, "none\n"); } +static inline enum pci_p2pdma_map_type +pci_p2pdma_map_type(struct p2pdma_provider *provider, struct device *dev) +{ + return PCI_P2PDMA_MAP_NOT_SUPPORTED; +} #endif /* CONFIG_PCI_P2PDMA */ @@ -99,51 +166,12 @@ static inline struct pci_dev *pci_p2pmem_find(struct device *client) return pci_p2pmem_find_many(&client, 1); } -enum pci_p2pdma_map_type { - /* - * PCI_P2PDMA_MAP_UNKNOWN: Used internally as an initial state before - * the mapping type has been calculated. Exported routines for the API - * will never return this value. - */ - PCI_P2PDMA_MAP_UNKNOWN = 0, - - /* - * Not a PCI P2PDMA transfer. - */ - PCI_P2PDMA_MAP_NONE, - - /* - * PCI_P2PDMA_MAP_NOT_SUPPORTED: Indicates the transaction will - * traverse the host bridge and the host bridge is not in the - * allowlist. DMA Mapping routines should return an error when - * this is returned. - */ - PCI_P2PDMA_MAP_NOT_SUPPORTED, - - /* - * PCI_P2PDMA_MAP_BUS_ADDR: Indicates that two devices can talk to - * each other directly through a PCI switch and the transaction will - * not traverse the host bridge. Such a mapping should program - * the DMA engine with PCI bus addresses. - */ - PCI_P2PDMA_MAP_BUS_ADDR, - - /* - * PCI_P2PDMA_MAP_THRU_HOST_BRIDGE: Indicates two devices can talk - * to each other, but the transaction traverses a host bridge on the - * allowlist. In this case, a normal mapping either with CPU physical - * addresses (in the case of dma-direct) or IOVA addresses (in the - * case of IOMMUs) should be used to program the DMA engine. - */ - PCI_P2PDMA_MAP_THRU_HOST_BRIDGE, -}; - struct pci_p2pdma_map_state { - struct dev_pagemap *pgmap; + struct p2pdma_provider *mem; enum pci_p2pdma_map_type map; - u64 bus_off; }; + /* helper for pci_p2pdma_state(), do not use directly */ void __pci_p2pdma_update_state(struct pci_p2pdma_map_state *state, struct device *dev, struct page *page); @@ -162,8 +190,7 @@ pci_p2pdma_state(struct pci_p2pdma_map_state *state, struct device *dev, struct page *page) { if (IS_ENABLED(CONFIG_PCI_P2PDMA) && is_pci_p2pdma_page(page)) { - if (state->pgmap != page_pgmap(page)) - __pci_p2pdma_update_state(state, dev, page); + __pci_p2pdma_update_state(state, dev, page); return state->map; } return PCI_P2PDMA_MAP_NONE; @@ -172,16 +199,15 @@ pci_p2pdma_state(struct pci_p2pdma_map_state *state, struct device *dev, /** * pci_p2pdma_bus_addr_map - Translate a physical address to a bus address * for a PCI_P2PDMA_MAP_BUS_ADDR transfer. 
- * @state: P2P state structure + * @provider: P2P provider structure * @paddr: physical address to map * * Map a physically contiguous PCI_P2PDMA_MAP_BUS_ADDR transfer. */ static inline dma_addr_t -pci_p2pdma_bus_addr_map(struct pci_p2pdma_map_state *state, phys_addr_t paddr) +pci_p2pdma_bus_addr_map(struct p2pdma_provider *provider, phys_addr_t paddr) { - WARN_ON_ONCE(state->map != PCI_P2PDMA_MAP_BUS_ADDR); - return paddr + state->bus_off; + return paddr + provider->bus_offset; } #endif /* _LINUX_PCI_P2P_H */ diff --git a/include/linux/pci.h b/include/linux/pci.h index bf97d49c23cf..b16127c6a7b4 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -502,6 +502,8 @@ struct pci_dev { #ifdef CONFIG_PCIE_PTM u16 ptm_cap; /* PTM Capability */ unsigned int ptm_root:1; + unsigned int ptm_responder:1; + unsigned int ptm_requester:1; unsigned int ptm_enabled:1; u8 ptm_granularity; #endif @@ -648,6 +650,7 @@ struct pci_host_bridge *pci_alloc_host_bridge(size_t priv); struct pci_host_bridge *devm_pci_alloc_host_bridge(struct device *dev, size_t priv); void pci_free_host_bridge(struct pci_host_bridge *bridge); +struct device *pci_get_host_bridge_device(struct pci_dev *dev); struct pci_host_bridge *pci_find_host_bridge(struct pci_bus *bus); void pci_set_host_bridge_release(struct pci_host_bridge *bridge, @@ -831,6 +834,7 @@ struct pci_ops { void __iomem *(*map_bus)(struct pci_bus *bus, unsigned int devfn, int where); int (*read)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val); int (*write)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val); + int (*assert_perst)(struct pci_bus *bus, bool assert); }; /* @@ -1421,16 +1425,16 @@ void pcibios_reset_secondary_bus(struct pci_dev *dev); void pci_update_resource(struct pci_dev *dev, int resno); int __must_check pci_assign_resource(struct pci_dev *dev, int i); int pci_release_resource(struct pci_dev *dev, int resno); -static inline int pci_rebar_bytes_to_size(u64 bytes) -{ - bytes = roundup_pow_of_two(bytes); - /* Return BAR size as defined in the resizable BAR specification */ - return max(ilog2(bytes), 20) - 20; -} +/* Resizable BAR related routines */ +int pci_rebar_bytes_to_size(u64 bytes); +resource_size_t pci_rebar_size_to_bytes(int size); +u64 pci_rebar_get_possible_sizes(struct pci_dev *pdev, int bar); +bool pci_rebar_size_supported(struct pci_dev *pdev, int bar, int size); +int pci_rebar_get_max_size(struct pci_dev *pdev, int bar); +int __must_check pci_resize_resource(struct pci_dev *dev, int i, int size, + int exclude_bars); -u32 pci_rebar_get_possible_sizes(struct pci_dev *pdev, int bar); -int __must_check pci_resize_resource(struct pci_dev *dev, int i, int size); int pci_select_bars(struct pci_dev *dev, unsigned long flags); bool pci_device_is_present(struct pci_dev *pdev); void pci_ignore_hotplug(struct pci_dev *dev); @@ -1958,10 +1962,17 @@ DEFINE_GUARD(pci_dev, struct pci_dev *, pci_dev_lock(_T), pci_dev_unlock(_T)) */ #ifdef CONFIG_PCI_DOMAINS extern int pci_domains_supported; +int pci_bus_find_emul_domain_nr(u32 hint, u32 min, u32 max); +void pci_bus_release_emul_domain_nr(int domain_nr); #else enum { pci_domains_supported = 0 }; static inline int pci_domain_nr(struct pci_bus *bus) { return 0; } static inline int pci_proc_domain(struct pci_bus *bus) { return 0; } +static inline int pci_bus_find_emul_domain_nr(u32 hint, u32 min, u32 max) +{ + return 0; +} +static inline void pci_bus_release_emul_domain_nr(int domain_nr) { } #endif /* CONFIG_PCI_DOMAINS */ /* diff --git 
a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 92ffc4373f6d..a9a089566b7c 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -3075,6 +3075,7 @@ #define PCI_DEVICE_ID_INTEL_5100_22 0x65f6 #define PCI_DEVICE_ID_INTEL_IOAT_SCNB 0x65ff #define PCI_DEVICE_ID_INTEL_HDA_FCL 0x67a8 +#define PCI_DEVICE_ID_INTEL_HDA_NVL_S 0x6e50 #define PCI_DEVICE_ID_INTEL_82371SB_0 0x7000 #define PCI_DEVICE_ID_INTEL_82371SB_1 0x7010 #define PCI_DEVICE_ID_INTEL_82371SB_2 0x7020 diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h index ee3148ef87f6..652f287c1ef6 100644 --- a/include/linux/pgtable.h +++ b/include/linux/pgtable.h @@ -1557,6 +1557,18 @@ static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot) #define arch_start_context_switch(prev) do {} while (0) #endif +/* + * Some platforms can customize the PTE soft-dirty bit making it unavailable + * even if the architecture provides the resource. + * Adding this API allows architectures to add their own checks for the + * devices on which the kernel is running. + * Note: When overriding it, please make sure the CONFIG_MEM_SOFT_DIRTY + * is part of this macro. + */ +#ifndef pgtable_supports_soft_dirty +#define pgtable_supports_soft_dirty() IS_ENABLED(CONFIG_MEM_SOFT_DIRTY) +#endif + #ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY #ifndef CONFIG_ARCH_ENABLE_THP_MIGRATION static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd) diff --git a/include/linux/platform_data/lp855x.h b/include/linux/platform_data/lp855x.h index ab222dd05bbc..3b4a891acefe 100644 --- a/include/linux/platform_data/lp855x.h +++ b/include/linux/platform_data/lp855x.h @@ -124,12 +124,12 @@ struct lp855x_rom_data { }; /** - * struct lp855x_platform_data + * struct lp855x_platform_data - lp855 platform-specific data * @name : Backlight driver name. If it is not defined, default name is set. * @device_control : value of DEVICE CONTROL register * @initial_brightness : initial value of backlight brightness * @period_ns : platform specific pwm period value. unit is nano. - Only valid when mode is PWM_BASED. + * Only valid when mode is PWM_BASED. * @size_program : total size of lp855x_rom_data * @rom_data : list of new eeprom/eprom registers */ diff --git a/include/linux/platform_data/spi-davinci.h b/include/linux/platform_data/spi-davinci.h deleted file mode 100644 index 2cb5cc70fd9d..000000000000 --- a/include/linux/platform_data/spi-davinci.h +++ /dev/null @@ -1,73 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * Copyright 2009 Texas Instruments. - */ - -#ifndef __ARCH_ARM_DAVINCI_SPI_H -#define __ARCH_ARM_DAVINCI_SPI_H - -#include <linux/platform_data/edma.h> - -#define SPI_INTERN_CS 0xFF - -enum { - SPI_VERSION_1, /* For DM355/DM365/DM6467 */ - SPI_VERSION_2, /* For DA8xx */ -}; - -/** - * davinci_spi_platform_data - Platform data for SPI master device on DaVinci - * - * @version: version of the SPI IP. Different DaVinci devices have slightly - * varying versions of the same IP. - * @num_chipselect: number of chipselects supported by this SPI master - * @intr_line: interrupt line used to connect the SPI IP to the ARM interrupt - * controller withn the SoC. Possible values are 0 and 1. - * @cshold_bug: set this to true if the SPI controller on your chip requires - * a write to CSHOLD bit in between transfers (like in DM355). - * @dma_event_q: DMA event queue to use if SPI_IO_TYPE_DMA is used for any - * device on the bus. 
- */ -struct davinci_spi_platform_data { - u8 version; - u8 num_chipselect; - u8 intr_line; - u8 prescaler_limit; - bool cshold_bug; - enum dma_event_q dma_event_q; -}; - -/** - * davinci_spi_config - Per-chip-select configuration for SPI slave devices - * - * @wdelay: amount of delay between transmissions. Measured in number of - * SPI module clocks. - * @odd_parity: polarity of parity flag at the end of transmit data stream. - * 0 - odd parity, 1 - even parity. - * @parity_enable: enable transmission of parity at end of each transmit - * data stream. - * @io_type: type of IO transfer. Choose between polled, interrupt and DMA. - * @timer_disable: disable chip-select timers (setup and hold) - * @c2tdelay: chip-select setup time. Measured in number of SPI module clocks. - * @t2cdelay: chip-select hold time. Measured in number of SPI module clocks. - * @t2edelay: transmit data finished to SPI ENAn pin inactive time. Measured - * in number of SPI clocks. - * @c2edelay: chip-select active to SPI ENAn signal active time. Measured in - * number of SPI clocks. - */ -struct davinci_spi_config { - u8 wdelay; - u8 odd_parity; - u8 parity_enable; -#define SPI_IO_TYPE_INTR 0 -#define SPI_IO_TYPE_POLL 1 -#define SPI_IO_TYPE_DMA 2 - u8 io_type; - u8 timer_disable; - u8 c2tdelay; - u8 t2cdelay; - u8 t2edelay; - u8 c2edelay; -}; - -#endif /* __ARCH_ARM_DAVINCI_SPI_H */ diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h index 93c945331f39..813da101b5bf 100644 --- a/include/linux/platform_device.h +++ b/include/linux/platform_device.h @@ -80,7 +80,7 @@ static inline void __iomem * devm_platform_get_and_ioremap_resource(struct platform_device *pdev, unsigned int index, struct resource **res) { - return ERR_PTR(-EINVAL); + return IOMEM_ERR_PTR(-EINVAL); } @@ -88,14 +88,14 @@ static inline void __iomem * devm_platform_ioremap_resource(struct platform_device *pdev, unsigned int index) { - return ERR_PTR(-EINVAL); + return IOMEM_ERR_PTR(-EINVAL); } static inline void __iomem * devm_platform_ioremap_resource_byname(struct platform_device *pdev, const char *name) { - return ERR_PTR(-EINVAL); + return IOMEM_ERR_PTR(-EINVAL); } #endif diff --git a/include/linux/pm.h b/include/linux/pm.h index 7f69f739f613..98a899858ece 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h @@ -688,6 +688,7 @@ struct dev_pm_info { bool smart_suspend:1; /* Owned by the PM core */ bool must_resume:1; /* Owned by the PM core */ bool may_skip_resume:1; /* Set by subsystems */ + bool out_band_wakeup:1; bool strict_midlayer:1; #else bool should_wakeup:1; diff --git a/include/linux/pm_wakeup.h b/include/linux/pm_wakeup.h index c838b4a30f87..41e8f344a205 100644 --- a/include/linux/pm_wakeup.h +++ b/include/linux/pm_wakeup.h @@ -94,6 +94,16 @@ static inline void device_set_wakeup_path(struct device *dev) dev->power.wakeup_path = true; } +static inline void device_set_out_band_wakeup(struct device *dev) +{ + dev->power.out_band_wakeup = true; +} + +static inline bool device_out_band_wakeup(struct device *dev) +{ + return dev->power.out_band_wakeup; +} + /* drivers/base/power/wakeup.c */ extern struct wakeup_source *wakeup_source_register(struct device *dev, const char *name); @@ -162,6 +172,13 @@ static inline bool device_wakeup_path(struct device *dev) static inline void device_set_wakeup_path(struct device *dev) {} +static inline void device_set_out_band_wakeup(struct device *dev) {} + +static inline bool device_out_band_wakeup(struct device *dev) +{ + return false; +} + static inline void __pm_stay_awake(struct 
wakeup_source *ws) {} static inline void pm_stay_awake(struct device *dev) {} diff --git a/include/linux/power/max77705_charger.h b/include/linux/power/max77705_charger.h index 6653abfdf747..b3950ce0625e 100644 --- a/include/linux/power/max77705_charger.h +++ b/include/linux/power/max77705_charger.h @@ -123,6 +123,8 @@ #define MAX77705_DISABLE_SKIP 1 #define MAX77705_AUTO_SKIP 0 +#define AICL_WORK_DELAY_MS 100 + /* uA */ #define MAX77705_CURRENT_CHGIN_STEP 25000 #define MAX77705_CURRENT_CHG_STEP 50000 diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h index f139377f4b31..19d1c5e5f335 100644 --- a/include/linux/proc_fs.h +++ b/include/linux/proc_fs.h @@ -66,8 +66,6 @@ enum proc_pidonly { struct proc_fs_info { struct pid_namespace *pid_ns; - struct dentry *proc_self; /* For /proc/self */ - struct dentry *proc_thread_self; /* For /proc/thread-self */ kgid_t pid_gid; enum proc_hidepid hide_pid; enum proc_pidonly pidonly; diff --git a/include/linux/property.h b/include/linux/property.h index 50b26589dd70..272bfbdea7bf 100644 --- a/include/linux/property.h +++ b/include/linux/property.h @@ -355,19 +355,26 @@ struct software_node; /** * struct software_node_ref_args - Reference property with additional arguments - * @node: Reference to a software node + * @swnode: Reference to a software node + * @fwnode: Alternative reference to a firmware node handle * @nargs: Number of elements in @args array * @args: Integer arguments */ struct software_node_ref_args { - const struct software_node *node; + const struct software_node *swnode; + struct fwnode_handle *fwnode; unsigned int nargs; u64 args[NR_FWNODE_REFERENCE_ARGS]; }; #define SOFTWARE_NODE_REFERENCE(_ref_, ...) \ (const struct software_node_ref_args) { \ - .node = _ref_, \ + .swnode = _Generic(_ref_, \ + const struct software_node *: _ref_, \ + default: NULL), \ + .fwnode = _Generic(_ref_, \ + struct fwnode_handle *: _ref_, \ + default: NULL), \ .nargs = COUNT_ARGS(__VA_ARGS__), \ .args = { __VA_ARGS__ }, \ } diff --git a/include/linux/psp-sev.h b/include/linux/psp-sev.h index e0dbcb4b4fd9..abcdee256c65 100644 --- a/include/linux/psp-sev.h +++ b/include/linux/psp-sev.h @@ -14,6 +14,39 @@ #include <uapi/linux/psp-sev.h> +/* As defined by SEV API, under "Guest Policy". */ +#define SEV_POLICY_MASK_NODBG BIT(0) +#define SEV_POLICY_MASK_NOKS BIT(1) +#define SEV_POLICY_MASK_ES BIT(2) +#define SEV_POLICY_MASK_NOSEND BIT(3) +#define SEV_POLICY_MASK_DOMAIN BIT(4) +#define SEV_POLICY_MASK_SEV BIT(5) +#define SEV_POLICY_MASK_API_MAJOR GENMASK(23, 16) +#define SEV_POLICY_MASK_API_MINOR GENMASK(31, 24) + +/* As defined by SEV-SNP Firmware ABI, under "Guest Policy". 
*/ +#define SNP_POLICY_MASK_API_MINOR GENMASK_ULL(7, 0) +#define SNP_POLICY_MASK_API_MAJOR GENMASK_ULL(15, 8) +#define SNP_POLICY_MASK_SMT BIT_ULL(16) +#define SNP_POLICY_MASK_RSVD_MBO BIT_ULL(17) +#define SNP_POLICY_MASK_MIGRATE_MA BIT_ULL(18) +#define SNP_POLICY_MASK_DEBUG BIT_ULL(19) +#define SNP_POLICY_MASK_SINGLE_SOCKET BIT_ULL(20) +#define SNP_POLICY_MASK_CXL_ALLOW BIT_ULL(21) +#define SNP_POLICY_MASK_MEM_AES_256_XTS BIT_ULL(22) +#define SNP_POLICY_MASK_RAPL_DIS BIT_ULL(23) +#define SNP_POLICY_MASK_CIPHERTEXT_HIDING_DRAM BIT_ULL(24) +#define SNP_POLICY_MASK_PAGE_SWAP_DISABLE BIT_ULL(25) + +/* Base SEV-SNP policy bitmask for minimum supported SEV firmware version */ +#define SNP_POLICY_MASK_BASE (SNP_POLICY_MASK_API_MINOR | \ + SNP_POLICY_MASK_API_MAJOR | \ + SNP_POLICY_MASK_SMT | \ + SNP_POLICY_MASK_RSVD_MBO | \ + SNP_POLICY_MASK_MIGRATE_MA | \ + SNP_POLICY_MASK_DEBUG | \ + SNP_POLICY_MASK_SINGLE_SOCKET) + #define SEV_FW_BLOB_MAX_SIZE 0x4000 /* 16KB */ /** @@ -849,7 +882,10 @@ struct snp_feature_info { u32 edx; } __packed; +#define SNP_RAPL_DISABLE_SUPPORTED BIT(2) #define SNP_CIPHER_TEXT_HIDING_SUPPORTED BIT(3) +#define SNP_AES_256_XTS_POLICY_SUPPORTED BIT(4) +#define SNP_CXL_ALLOW_POLICY_SUPPORTED BIT(5) #ifdef CONFIG_CRYPTO_DEV_SP_PSP @@ -995,6 +1031,7 @@ void *snp_alloc_firmware_page(gfp_t mask); void snp_free_firmware_page(void *addr); void sev_platform_shutdown(void); bool sev_is_snp_ciphertext_hiding_supported(void); +u64 sev_get_snp_policy_bits(void); #else /* !CONFIG_CRYPTO_DEV_SP_PSP */ diff --git a/include/linux/pwm.h b/include/linux/pwm.h index 549ac4aaad59..b11ae91723f8 100644 --- a/include/linux/pwm.h +++ b/include/linux/pwm.h @@ -488,6 +488,12 @@ int __pwmchip_add(struct pwm_chip *chip, struct module *owner); #define pwmchip_add(chip) __pwmchip_add(chip, THIS_MODULE) void pwmchip_remove(struct pwm_chip *chip); +/* + * For FFI wrapper use only: + * The Rust PWM abstraction needs this to properly free the pwm_chip. + */ +void pwmchip_release(struct device *dev); + int __devm_pwmchip_add(struct device *dev, struct pwm_chip *chip, struct module *owner); #define devm_pwmchip_add(dev, chip) __devm_pwmchip_add(dev, chip, THIS_MODULE) @@ -611,39 +617,6 @@ devm_fwnode_pwm_get(struct device *dev, struct fwnode_handle *fwnode, } #endif -static inline void pwm_apply_args(struct pwm_device *pwm) -{ - struct pwm_state state = { }; - - /* - * PWM users calling pwm_apply_args() expect to have a fresh config - * where the polarity and period are set according to pwm_args info. - * The problem is, polarity can only be changed when the PWM is - * disabled. - * - * PWM drivers supporting hardware readout may declare the PWM device - * as enabled, and prevent polarity setting, which changes from the - * existing behavior, where all PWM devices are declared as disabled - * at startup (even if they are actually enabled), thus authorizing - * polarity setting. - * - * To fulfill this requirement, we apply a new state which disables - * the PWM device and set the reference period and polarity config. - * - * Note that PWM users requiring a smooth handover between the - * bootloader and the kernel (like critical regulators controlled by - * PWM devices) will have to switch to the atomic API and avoid calling - * pwm_apply_args(). 
- */ - - state.enabled = false; - state.polarity = pwm->args.polarity; - state.period = pwm->args.period; - state.usage_power = false; - - pwm_apply_might_sleep(pwm, &state); -} - struct pwm_lookup { struct list_head list; const char *provider; diff --git a/include/linux/ras.h b/include/linux/ras.h index a64182bc72ad..468941bfe855 100644 --- a/include/linux/ras.h +++ b/include/linux/ras.h @@ -24,8 +24,7 @@ int __init parse_cec_param(char *str); void log_non_standard_event(const guid_t *sec_type, const guid_t *fru_id, const char *fru_text, const u8 sev, const u8 *err, const u32 len); -void log_arm_hw_error(struct cper_sec_proc_arm *err); - +void log_arm_hw_error(struct cper_sec_proc_arm *err, const u8 sev); #else static inline void log_non_standard_event(const guid_t *sec_type, @@ -33,7 +32,7 @@ log_non_standard_event(const guid_t *sec_type, const u8 sev, const u8 *err, const u32 len) { return; } static inline void -log_arm_hw_error(struct cper_sec_proc_arm *err) { return; } +log_arm_hw_error(struct cper_sec_proc_arm *err, const u8 sev) { return; } #endif struct atl_err { @@ -53,4 +52,15 @@ static inline unsigned long amd_convert_umc_mca_addr_to_sys_addr(struct atl_err *err) { return -EINVAL; } #endif /* CONFIG_AMD_ATL */ +#if defined(CONFIG_ARM) || defined(CONFIG_ARM64) +#include <asm/smp_plat.h> +/* + * Include ARM-specific SMP header which provides a function mapping mpidr to + * CPU logical index. + */ +#define GET_LOGICAL_INDEX(mpidr) get_logical_index(mpidr & MPIDR_HWID_BITMASK) +#else +#define GET_LOGICAL_INDEX(mpidr) -EINVAL +#endif /* CONFIG_ARM || CONFIG_ARM64 */ + #endif /* __RAS_H__ */ diff --git a/include/linux/regmap.h b/include/linux/regmap.h index 55343795644b..b0b9be750d93 100644 --- a/include/linux/regmap.h +++ b/include/linux/regmap.h @@ -55,18 +55,23 @@ struct sdw_slave; #define REGMAP_DOWNSHIFT(s) (s) /* - * The supported cache types, the default is no cache. Any new caches - * should usually use the maple tree cache unless they specifically - * require that there are never any allocations at runtime and can't - * provide defaults in which case they should use the flat cache. The - * rbtree cache *may* have some performance advantage for very low end - * systems that make heavy use of cache syncs but is mainly legacy. + * The supported cache types, the default is no cache. Any new caches should + * usually use the maple tree cache unless they specifically require that there + * are never any allocations at runtime in which case they should use the sparse + * flat cache. The rbtree cache *may* have some performance advantage for very + * low end systems that make heavy use of cache syncs but is mainly legacy. + * These caches are sparse and entries will be initialized from hardware if no + * default has been provided. + * The non-sparse flat cache is provided for compatibility with existing users + * and will zero-initialize cache entries for which no defaults are provided. + * New users should use the sparse flat cache. 
*/ enum regcache_type { REGCACHE_NONE, REGCACHE_RBTREE, REGCACHE_FLAT, REGCACHE_MAPLE, + REGCACHE_FLAT_S, }; /** @@ -676,7 +681,7 @@ struct regmap *__regmap_init_sdw(struct sdw_slave *sdw, const struct regmap_config *config, struct lock_class_key *lock_key, const char *lock_name); -struct regmap *__regmap_init_sdw_mbq(struct sdw_slave *sdw, +struct regmap *__regmap_init_sdw_mbq(struct device *dev, struct sdw_slave *sdw, const struct regmap_config *config, const struct regmap_sdw_mbq_cfg *mbq_config, struct lock_class_key *lock_key, @@ -738,7 +743,7 @@ struct regmap *__devm_regmap_init_sdw(struct sdw_slave *sdw, const struct regmap_config *config, struct lock_class_key *lock_key, const char *lock_name); -struct regmap *__devm_regmap_init_sdw_mbq(struct sdw_slave *sdw, +struct regmap *__devm_regmap_init_sdw_mbq(struct device *dev, struct sdw_slave *sdw, const struct regmap_config *config, const struct regmap_sdw_mbq_cfg *mbq_config, struct lock_class_key *lock_key, @@ -970,7 +975,7 @@ bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg); */ #define regmap_init_sdw_mbq(sdw, config) \ __regmap_lockdep_wrapper(__regmap_init_sdw_mbq, #config, \ - sdw, config, NULL) + &sdw->dev, sdw, config, NULL) /** * regmap_init_sdw_mbq_cfg() - Initialise MBQ SDW register map with config @@ -983,9 +988,9 @@ bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg); * to a struct regmap. The regmap will be automatically freed by the * device management code. */ -#define regmap_init_sdw_mbq_cfg(sdw, config, mbq_config) \ +#define regmap_init_sdw_mbq_cfg(dev, sdw, config, mbq_config) \ __regmap_lockdep_wrapper(__regmap_init_sdw_mbq, #config, \ - sdw, config, mbq_config) + dev, sdw, config, mbq_config) /** * regmap_init_spi_avmm() - Initialize register map for Intel SPI Slave @@ -1198,12 +1203,13 @@ bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg); */ #define devm_regmap_init_sdw_mbq(sdw, config) \ __regmap_lockdep_wrapper(__devm_regmap_init_sdw_mbq, #config, \ - sdw, config, NULL) + &sdw->dev, sdw, config, NULL) /** * devm_regmap_init_sdw_mbq_cfg() - Initialise managed MBQ SDW register map with config * - * @sdw: Device that will be interacted with + * @dev: Device that will be interacted with + * @sdw: SoundWire Device that will be interacted with * @config: Configuration for register map * @mbq_config: Properties for the MBQ registers * @@ -1211,9 +1217,9 @@ bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg); * to a struct regmap. The regmap will be automatically freed by the * device management code. 
*/ -#define devm_regmap_init_sdw_mbq_cfg(sdw, config, mbq_config) \ - __regmap_lockdep_wrapper(__devm_regmap_init_sdw_mbq, \ - #config, sdw, config, mbq_config) +#define devm_regmap_init_sdw_mbq_cfg(dev, sdw, config, mbq_config) \ + __regmap_lockdep_wrapper(__devm_regmap_init_sdw_mbq, \ + #config, dev, sdw, config, mbq_config) /** * devm_regmap_init_slimbus() - Initialise managed register map diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h index 4a216fdba354..978cf593b662 100644 --- a/include/linux/regulator/driver.h +++ b/include/linux/regulator/driver.h @@ -658,6 +658,9 @@ struct regulator_dev { spinlock_t err_lock; int pw_requested_mW; + + /* regulator notification forwarding */ + struct notifier_block supply_fwd_nb; }; /* diff --git a/include/linux/regulator/mt6363-regulator.h b/include/linux/regulator/mt6363-regulator.h new file mode 100644 index 000000000000..60761f01d3ad --- /dev/null +++ b/include/linux/regulator/mt6363-regulator.h @@ -0,0 +1,330 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2024 MediaTek Inc. + * Copyright (c) 2025 Collabora Ltd + */ + +#include <linux/bits.h> + +#ifndef __LINUX_REGULATOR_MT6363_H +#define __LINUX_REGULATOR_MT6363_H + +/* Register */ +#define MT6363_TOP_TRAP 0x6 +#define MT6363_TOP_TMA_KEY_L 0x36e +#define MT6363_RG_BUCK0_EN_ADDR 0x210 +#define MT6363_RG_BUCK_VS2_EN_BIT 0 +#define MT6363_RG_BUCK_VBUCK1_EN_BIT 1 +#define MT6363_RG_BUCK_VBUCK2_EN_BIT 2 +#define MT6363_RG_BUCK_VBUCK3_EN_BIT 3 +#define MT6363_RG_BUCK_VBUCK4_EN_BIT 4 +#define MT6363_RG_BUCK_VBUCK5_EN_BIT 5 +#define MT6363_RG_BUCK_VBUCK6_EN_BIT 6 +#define MT6363_RG_BUCK_VBUCK7_EN_BIT 7 +#define MT6363_RG_BUCK1_EN_ADDR 0x213 +#define MT6363_RG_BUCK_VS1_EN_BIT 0 +#define MT6363_RG_BUCK_VS3_EN_BIT 1 +#define MT6363_RG_LDO_VSRAM_DIGRF_EN_BIT 4 +#define MT6363_RG_LDO_VSRAM_MDFE_EN_BIT 5 +#define MT6363_RG_LDO_VSRAM_MODEM_EN_BIT 6 +#define MT6363_RG_BUCK0_LP_ADDR 0x216 +#define MT6363_RG_BUCK_VS2_LP_BIT 0 +#define MT6363_RG_BUCK_VBUCK1_LP_BIT 1 +#define MT6363_RG_BUCK_VBUCK2_LP_BIT 2 +#define MT6363_RG_BUCK_VBUCK3_LP_BIT 3 +#define MT6363_RG_BUCK_VBUCK4_LP_BIT 4 +#define MT6363_RG_BUCK_VBUCK5_LP_BIT 5 +#define MT6363_RG_BUCK_VBUCK6_LP_BIT 6 +#define MT6363_RG_BUCK_VBUCK7_LP_BIT 7 +#define MT6363_RG_BUCK1_LP_ADDR 0x219 +#define MT6363_RG_BUCK_VS1_LP_BIT 0 +#define MT6363_RG_BUCK_VS3_LP_BIT 1 +#define MT6363_RG_LDO_VSRAM_DIGRF_LP_BIT 4 +#define MT6363_RG_LDO_VSRAM_MDFE_LP_BIT 5 +#define MT6363_RG_LDO_VSRAM_MODEM_LP_BIT 6 +#define MT6363_RG_BUCK_VS2_VOSEL_ADDR 0x21c +#define MT6363_RG_BUCK_VS2_VOSEL_MASK GENMASK(7, 0) +#define MT6363_RG_BUCK_VBUCK1_VOSEL_ADDR 0x21d +#define MT6363_RG_BUCK_VBUCK1_VOSEL_MASK GENMASK(7, 0) +#define MT6363_RG_BUCK_VBUCK2_VOSEL_ADDR 0x21e +#define MT6363_RG_BUCK_VBUCK2_VOSEL_MASK GENMASK(7, 0) +#define MT6363_RG_BUCK_VBUCK3_VOSEL_ADDR 0x21f +#define MT6363_RG_BUCK_VBUCK3_VOSEL_MASK GENMASK(7, 0) +#define MT6363_RG_BUCK_VBUCK4_VOSEL_ADDR 0x220 +#define MT6363_RG_BUCK_VBUCK4_VOSEL_MASK GENMASK(7, 0) +#define MT6363_RG_BUCK_VBUCK5_VOSEL_ADDR 0x221 +#define MT6363_RG_BUCK_VBUCK5_VOSEL_MASK GENMASK(7, 0) +#define MT6363_RG_BUCK_VBUCK6_VOSEL_ADDR 0x222 +#define MT6363_RG_BUCK_VBUCK6_VOSEL_MASK GENMASK(7, 0) +#define MT6363_RG_BUCK_VBUCK7_VOSEL_ADDR 0x223 +#define MT6363_RG_BUCK_VBUCK7_VOSEL_MASK GENMASK(7, 0) +#define MT6363_RG_BUCK_VS1_VOSEL_ADDR 0x224 +#define MT6363_RG_BUCK_VS1_VOSEL_MASK GENMASK(7, 0) +#define MT6363_RG_BUCK_VS3_VOSEL_ADDR 0x225 +#define MT6363_RG_BUCK_VS3_VOSEL_MASK GENMASK(7, 0) +#define 
MT6363_RG_LDO_VSRAM_DIGRF_VOSEL_ADDR 0x228 +#define MT6363_RG_LDO_VSRAM_DIGRF_VOSEL_MASK GENMASK(6, 0) +#define MT6363_RG_LDO_VSRAM_MDFE_VOSEL_ADDR 0x229 +#define MT6363_RG_LDO_VSRAM_MDFE_VOSEL_MASK GENMASK(6, 0) +#define MT6363_RG_LDO_VSRAM_MODEM_VOSEL_ADDR 0x22a +#define MT6363_RG_LDO_VSRAM_MODEM_VOSEL_MASK GENMASK(6, 0) +#define MT6363_BUCK_TOP_KEY_PROT_LO 0x13fa +#define MT6363_BUCK_VS2_WDTDBG_VOSEL_ADDR 0x13fc +#define MT6363_BUCK_VBUCK1_WDTDBG_VOSEL_ADDR 0x13fd +#define MT6363_BUCK_VBUCK2_WDTDBG_VOSEL_ADDR 0x13fe +#define MT6363_BUCK_VBUCK3_WDTDBG_VOSEL_ADDR 0x13ff +#define MT6363_BUCK_VBUCK4_WDTDBG_VOSEL_ADDR 0x1400 +#define MT6363_BUCK_VBUCK5_WDTDBG_VOSEL_ADDR 0x1401 +#define MT6363_BUCK_VBUCK6_WDTDBG_VOSEL_ADDR 0x1402 +#define MT6363_BUCK_VBUCK7_WDTDBG_VOSEL_ADDR 0x1403 +#define MT6363_BUCK_VS1_WDTDBG_VOSEL_ADDR 0x1404 +#define MT6363_BUCK_VS3_WDTDBG_VOSEL_ADDR 0x1405 +#define MT6363_RG_BUCK_EFUSE_RSV1 0x1417 +#define MT6363_RG_BUCK_EFUSE_RSV1_MASK GENMASK(7, 4) +#define MT6363_BUCK_VS2_OP_EN_0 0x145d +#define MT6363_BUCK_VS2_HW_LP_MODE 0x1468 +#define MT6363_BUCK_VBUCK1_OP_EN_0 0x14dd +#define MT6363_BUCK_VBUCK1_HW_LP_MODE 0x14e8 +#define MT6363_RG_BUCK_VBUCK1_SSHUB_EN_ADDR 0x14ea +#define MT6363_RG_BUCK_VBUCK1_SSHUB_VOSEL_ADDR 0x14eb +#define MT6363_RG_BUCK_VBUCK1_SSHUB_VOSEL_MASK GENMASK(7, 0) +#define MT6363_BUCK_VBUCK2_OP_EN_0 0x155d +#define MT6363_BUCK_VBUCK2_HW_LP_MODE 0x1568 +#define MT6363_RG_BUCK_VBUCK2_SSHUB_EN_ADDR 0x156a +#define MT6363_RG_BUCK_VBUCK2_SSHUB_VOSEL_ADDR 0x156b +#define MT6363_RG_BUCK_VBUCK2_SSHUB_VOSEL_MASK GENMASK(7, 0) +#define MT6363_BUCK_VBUCK3_OP_EN_0 0x15dd +#define MT6363_BUCK_VBUCK3_HW_LP_MODE 0x15e8 +#define MT6363_BUCK_VBUCK4_OP_EN_0 0x165d +#define MT6363_BUCK_VBUCK4_HW_LP_MODE 0x1668 +#define MT6363_RG_BUCK_VBUCK4_SSHUB_EN_ADDR 0x166a +#define MT6363_RG_BUCK_VBUCK4_SSHUB_VOSEL_ADDR 0x166b +#define MT6363_RG_BUCK_VBUCK4_SSHUB_VOSEL_MASK GENMASK(7, 0) +#define MT6363_BUCK_VBUCK5_OP_EN_0 0x16dd +#define MT6363_BUCK_VBUCK5_HW_LP_MODE 0x16e8 +#define MT6363_BUCK_VBUCK6_OP_EN_0 0x175d +#define MT6363_BUCK_VBUCK6_HW_LP_MODE 0x1768 +#define MT6363_BUCK_VBUCK7_OP_EN_0 0x17dd +#define MT6363_BUCK_VBUCK7_HW_LP_MODE 0x17e8 +#define MT6363_BUCK_VS1_OP_EN_0 0x185d +#define MT6363_BUCK_VS1_HW_LP_MODE 0x1868 +#define MT6363_BUCK_VS3_OP_EN_0 0x18dd +#define MT6363_BUCK_VS3_HW_LP_MODE 0x18e8 +#define MT6363_RG_VS1_FCCM_ADDR 0x1964 +#define MT6363_RG_VS1_FCCM_BIT 0 +#define MT6363_RG_VS3_FCCM_ADDR 0x1973 +#define MT6363_RG_VS3_FCCM_BIT 0 +#define MT6363_RG_BUCK0_FCCM_ADDR 0x1a02 +#define MT6363_RG_VBUCK1_FCCM_BIT 0 +#define MT6363_RG_VBUCK2_FCCM_BIT 1 +#define MT6363_RG_VBUCK3_FCCM_BIT 2 +#define MT6363_RG_VS2_FCCM_BIT 3 +#define MT6363_RG_BUCK0_1_FCCM_ADDR 0x1a82 +#define MT6363_RG_VBUCK4_FCCM_BIT 0 +#define MT6363_RG_VBUCK5_FCCM_BIT 1 +#define MT6363_RG_VBUCK6_FCCM_BIT 2 +#define MT6363_RG_VBUCK7_FCCM_BIT 3 +#define MT6363_RG_VCN13_VOSEL_ADDR 0x1b0f +#define MT6363_RG_VCN13_VOSEL_MASK GENMASK(3, 0) +#define MT6363_RG_VEMC_VOSEL_ADDR 0x1b10 +#define MT6363_RG_VEMC_VOSEL_MASK GENMASK(3, 0) +#define MT6363_RG_VEMC_VOSEL_1_MASK GENMASK(7, 4) +#define MT6363_RG_LDO_VSRAM_CPUB_VOSEL_ADDR 0x1b14 +#define MT6363_RG_LDO_VSRAM_CPUB_VOSEL_MASK GENMASK(6, 0) +#define MT6363_RG_LDO_VSRAM_CPUM_VOSEL_ADDR 0x1b15 +#define MT6363_RG_LDO_VSRAM_CPUM_VOSEL_MASK GENMASK(6, 0) +#define MT6363_RG_LDO_VSRAM_CPUL_VOSEL_ADDR 0x1b16 +#define MT6363_RG_LDO_VSRAM_CPUL_VOSEL_MASK GENMASK(6, 0) +#define MT6363_RG_LDO_VSRAM_APU_VOSEL_ADDR 0x1b17 +#define MT6363_RG_LDO_VSRAM_APU_VOSEL_MASK 
GENMASK(6, 0) +#define MT6363_RG_VEMC_VOCAL_ADDR 0x1b1b +#define MT6363_RG_VEMC_VOCAL_MASK GENMASK(3, 0) +#define MT6363_RG_LDO_VCN15_ADDR 0x1b57 +#define MT6363_RG_LDO_VCN15_EN_BIT 0 +#define MT6363_RG_LDO_VCN15_LP_BIT 1 +#define MT6363_LDO_VCN15_HW_LP_MODE 0x1b5b +#define MT6363_LDO_VCN15_OP_EN0 0x1b5c +#define MT6363_RG_LDO_VRF09_ADDR 0x1b65 +#define MT6363_RG_LDO_VRF09_EN_BIT 0 +#define MT6363_RG_LDO_VRF09_LP_BIT 1 +#define MT6363_LDO_VRF09_HW_LP_MODE 0x1b69 +#define MT6363_LDO_VRF09_OP_EN0 0x1b6a +#define MT6363_RG_LDO_VRF12_ADDR 0x1b73 +#define MT6363_RG_LDO_VRF12_EN_BIT 0 +#define MT6363_RG_LDO_VRF12_LP_BIT 1 +#define MT6363_LDO_VRF12_HW_LP_MODE 0x1b77 +#define MT6363_LDO_VRF12_OP_EN0 0x1b78 +#define MT6363_RG_LDO_VRF13_ADDR 0x1b81 +#define MT6363_RG_LDO_VRF13_EN_BIT 0 +#define MT6363_RG_LDO_VRF13_LP_BIT 1 +#define MT6363_LDO_VRF13_HW_LP_MODE 0x1b85 +#define MT6363_LDO_VRF13_OP_EN0 0x1b86 +#define MT6363_RG_LDO_VRF18_ADDR 0x1b8f +#define MT6363_RG_LDO_VRF18_EN_BIT 0 +#define MT6363_RG_LDO_VRF18_LP_BIT 1 +#define MT6363_LDO_VRF18_HW_LP_MODE 0x1b93 +#define MT6363_LDO_VRF18_OP_EN0 0x1b94 +#define MT6363_RG_LDO_VRFIO18_ADDR 0x1b9d +#define MT6363_RG_LDO_VRFIO18_EN_BIT 0 +#define MT6363_RG_LDO_VRFIO18_LP_BIT 1 +#define MT6363_LDO_VRFIO18_HW_LP_MODE 0x1ba1 +#define MT6363_LDO_VRFIO18_OP_EN0 0x1ba2 +#define MT6363_RG_LDO_VTREF18_ADDR 0x1bd7 +#define MT6363_RG_LDO_VTREF18_EN_BIT 0 +#define MT6363_RG_LDO_VTREF18_LP_BIT 1 +#define MT6363_LDO_VTREF18_HW_LP_MODE 0x1bdb +#define MT6363_LDO_VTREF18_OP_EN0 0x1bdc +#define MT6363_RG_LDO_VAUX18_ADDR 0x1be5 +#define MT6363_RG_LDO_VAUX18_EN_BIT 0 +#define MT6363_RG_LDO_VAUX18_LP_BIT 1 +#define MT6363_LDO_VAUX18_HW_LP_MODE 0x1be9 +#define MT6363_LDO_VAUX18_OP_EN0 0x1bea +#define MT6363_RG_LDO_VEMC_ADDR 0x1bf3 +#define MT6363_RG_LDO_VEMC_EN_BIT 0 +#define MT6363_RG_LDO_VEMC_LP_BIT 1 +#define MT6363_LDO_VEMC_HW_LP_MODE 0x1bf7 +#define MT6363_LDO_VEMC_OP_EN0 0x1bf8 +#define MT6363_RG_LDO_VUFS12_ADDR 0x1c01 +#define MT6363_RG_LDO_VUFS12_EN_BIT 0 +#define MT6363_RG_LDO_VUFS12_LP_BIT 1 +#define MT6363_LDO_VUFS12_HW_LP_MODE 0x1c05 +#define MT6363_LDO_VUFS12_OP_EN0 0x1c06 +#define MT6363_RG_LDO_VUFS18_ADDR 0x1c0f +#define MT6363_RG_LDO_VUFS18_EN_BIT 0 +#define MT6363_RG_LDO_VUFS18_LP_BIT 1 +#define MT6363_LDO_VUFS18_HW_LP_MODE 0x1c13 +#define MT6363_LDO_VUFS18_OP_EN0 0x1c14 +#define MT6363_RG_LDO_VIO18_ADDR 0x1c1d +#define MT6363_RG_LDO_VIO18_EN_BIT 0 +#define MT6363_RG_LDO_VIO18_LP_BIT 1 +#define MT6363_LDO_VIO18_HW_LP_MODE 0x1c21 +#define MT6363_LDO_VIO18_OP_EN0 0x1c22 +#define MT6363_RG_LDO_VIO075_ADDR 0x1c57 +#define MT6363_RG_LDO_VIO075_EN_BIT 0 +#define MT6363_RG_LDO_VIO075_LP_BIT 1 +#define MT6363_LDO_VIO075_HW_LP_MODE 0x1c5b +#define MT6363_LDO_VIO075_OP_EN0 0x1c5c +#define MT6363_RG_LDO_VA12_1_ADDR 0x1c65 +#define MT6363_RG_LDO_VA12_1_EN_BIT 0 +#define MT6363_RG_LDO_VA12_1_LP_BIT 1 +#define MT6363_LDO_VA12_1_HW_LP_MODE 0x1c69 +#define MT6363_LDO_VA12_1_OP_EN0 0x1c6a +#define MT6363_RG_LDO_VA12_2_ADDR 0x1c73 +#define MT6363_RG_LDO_VA12_2_EN_BIT 0 +#define MT6363_RG_LDO_VA12_2_LP_BIT 1 +#define MT6363_LDO_VA12_2_HW_LP_MODE 0x1c77 +#define MT6363_LDO_VA12_2_OP_EN0 0x1c78 +#define MT6363_RG_LDO_VA15_ADDR 0x1c81 +#define MT6363_RG_LDO_VA15_EN_BIT 0 +#define MT6363_RG_LDO_VA15_LP_BIT 1 +#define MT6363_LDO_VA15_HW_LP_MODE 0x1c85 +#define MT6363_LDO_VA15_OP_EN0 0x1c86 +#define MT6363_RG_LDO_VM18_ADDR 0x1c8f +#define MT6363_RG_LDO_VM18_EN_BIT 0 +#define MT6363_RG_LDO_VM18_LP_BIT 1 +#define MT6363_LDO_VM18_HW_LP_MODE 0x1c93 +#define MT6363_LDO_VM18_OP_EN0 
0x1c94 +#define MT6363_RG_LDO_VCN13_ADDR 0x1cd7 +#define MT6363_RG_LDO_VCN13_EN_BIT 0 +#define MT6363_RG_LDO_VCN13_LP_BIT 1 +#define MT6363_LDO_VCN13_HW_LP_MODE 0x1cdb +#define MT6363_LDO_VCN13_OP_EN0 0x1ce4 +#define MT6363_LDO_VSRAM_DIGRF_HW_LP_MODE 0x1cf1 +#define MT6363_LDO_VSRAM_DIGRF_OP_EN0 0x1cfa +#define MT6363_LDO_VSRAM_MDFE_HW_LP_MODE 0x1d5b +#define MT6363_LDO_VSRAM_MDFE_OP_EN0 0x1d64 +#define MT6363_LDO_VSRAM_MODEM_HW_LP_MODE 0x1d76 +#define MT6363_LDO_VSRAM_MODEM_OP_EN0 0x1d7f +#define MT6363_RG_LDO_VSRAM_CPUB_ADDR 0x1dd7 +#define MT6363_RG_LDO_VSRAM_CPUB_EN_BIT 0 +#define MT6363_RG_LDO_VSRAM_CPUB_LP_BIT 1 +#define MT6363_LDO_VSRAM_CPUB_HW_LP_MODE 0x1ddb +#define MT6363_LDO_VSRAM_CPUB_OP_EN0 0x1de4 +#define MT6363_RG_LDO_VSRAM_CPUM_ADDR 0x1ded +#define MT6363_RG_LDO_VSRAM_CPUM_EN_BIT 0 +#define MT6363_RG_LDO_VSRAM_CPUM_LP_BIT 1 +#define MT6363_LDO_VSRAM_CPUM_HW_LP_MODE 0x1df1 +#define MT6363_LDO_VSRAM_CPUM_OP_EN0 0x1dfa +#define MT6363_RG_LDO_VSRAM_CPUL_ADDR 0x1e57 +#define MT6363_RG_LDO_VSRAM_CPUL_EN_BIT 0 +#define MT6363_RG_LDO_VSRAM_CPUL_LP_BIT 1 +#define MT6363_LDO_VSRAM_CPUL_HW_LP_MODE 0x1e5b +#define MT6363_LDO_VSRAM_CPUL_OP_EN0 0x1e64 +#define MT6363_RG_LDO_VSRAM_APU_ADDR 0x1e6d +#define MT6363_RG_LDO_VSRAM_APU_EN_BIT 0 +#define MT6363_RG_LDO_VSRAM_APU_LP_BIT 1 +#define MT6363_LDO_VSRAM_APU_HW_LP_MODE 0x1e71 +#define MT6363_LDO_VSRAM_APU_OP_EN0 0x1e7a +#define MT6363_RG_VTREF18_VOCAL_ADDR 0x1ed8 +#define MT6363_RG_VTREF18_VOCAL_MASK GENMASK(3, 0) +#define MT6363_RG_VTREF18_VOSEL_ADDR 0x1ed9 +#define MT6363_RG_VTREF18_VOSEL_MASK GENMASK(3, 0) +#define MT6363_RG_VAUX18_VOCAL_ADDR 0x1edc +#define MT6363_RG_VAUX18_VOCAL_MASK GENMASK(3, 0) +#define MT6363_RG_VAUX18_VOSEL_ADDR 0x1edd +#define MT6363_RG_VAUX18_VOSEL_MASK GENMASK(3, 0) +#define MT6363_RG_VCN15_VOCAL_ADDR 0x1ee3 +#define MT6363_RG_VCN15_VOCAL_MASK GENMASK(3, 0) +#define MT6363_RG_VCN15_VOSEL_ADDR 0x1ee4 +#define MT6363_RG_VCN15_VOSEL_MASK GENMASK(3, 0) +#define MT6363_RG_VUFS18_VOCAL_ADDR 0x1ee7 +#define MT6363_RG_VUFS18_VOCAL_MASK GENMASK(3, 0) +#define MT6363_RG_VUFS18_VOSEL_ADDR 0x1ee8 +#define MT6363_RG_VUFS18_VOSEL_MASK GENMASK(3, 0) +#define MT6363_RG_VIO18_VOCAL_ADDR 0x1eeb +#define MT6363_RG_VIO18_VOCAL_MASK GENMASK(3, 0) +#define MT6363_RG_VIO18_VOSEL_ADDR 0x1eec +#define MT6363_RG_VIO18_VOSEL_MASK GENMASK(3, 0) +#define MT6363_RG_VM18_VOCAL_ADDR 0x1eef +#define MT6363_RG_VM18_VOCAL_MASK GENMASK(3, 0) +#define MT6363_RG_VM18_VOSEL_ADDR 0x1ef0 +#define MT6363_RG_VM18_VOSEL_MASK GENMASK(3, 0) +#define MT6363_RG_VA15_VOCAL_ADDR 0x1ef3 +#define MT6363_RG_VA15_VOCAL_MASK GENMASK(3, 0) +#define MT6363_RG_VA15_VOSEL_ADDR 0x1ef4 +#define MT6363_RG_VA15_VOSEL_MASK GENMASK(3, 0) +#define MT6363_RG_VRF18_VOCAL_ADDR 0x1ef7 +#define MT6363_RG_VRF18_VOCAL_MASK GENMASK(3, 0) +#define MT6363_RG_VRF18_VOSEL_ADDR 0x1ef8 +#define MT6363_RG_VRF18_VOSEL_MASK GENMASK(3, 0) +#define MT6363_RG_VRFIO18_VOCAL_ADDR 0x1efb +#define MT6363_RG_VRFIO18_VOCAL_MASK GENMASK(3, 0) +#define MT6363_RG_VRFIO18_VOSEL_ADDR 0x1efc +#define MT6363_RG_VRFIO18_VOSEL_MASK GENMASK(3, 0) +#define MT6363_RG_VIO075_VOCFG_ADDR 0x1f01 +#define MT6363_RG_VIO075_VOCAL_ADDR MT6363_RG_VIO075_VOCFG_ADDR +#define MT6363_RG_VIO075_VOCAL_MASK GENMASK(3, 0) +#define MT6363_RG_VIO075_VOSEL_ADDR MT6363_RG_VIO075_VOCFG_ADDR +#define MT6363_RG_VIO075_VOSEL_MASK GENMASK(6, 4) +#define MT6363_RG_VCN13_VOCAL_ADDR 0x1f58 +#define MT6363_RG_VCN13_VOCAL_MASK GENMASK(3, 0) +#define MT6363_RG_VUFS12_VOCAL_ADDR 0x1f61 +#define MT6363_RG_VUFS12_VOCAL_MASK GENMASK(3, 0) 
+#define MT6363_RG_VUFS12_VOSEL_ADDR 0x1f62 +#define MT6363_RG_VUFS12_VOSEL_MASK GENMASK(3, 0) +#define MT6363_RG_VA12_1_VOCAL_ADDR 0x1f65 +#define MT6363_RG_VA12_1_VOCAL_MASK GENMASK(3, 0) +#define MT6363_RG_VA12_1_VOSEL_ADDR 0x1f66 +#define MT6363_RG_VA12_1_VOSEL_MASK GENMASK(3, 0) +#define MT6363_RG_VA12_2_VOCAL_ADDR 0x1f69 +#define MT6363_RG_VA12_2_VOCAL_MASK GENMASK(3, 0) +#define MT6363_RG_VA12_2_VOSEL_ADDR 0x1f6a +#define MT6363_RG_VA12_2_VOSEL_MASK GENMASK(3, 0) +#define MT6363_RG_VRF12_VOCAL_ADDR 0x1f6d +#define MT6363_RG_VRF12_VOCAL_MASK GENMASK(3, 0) +#define MT6363_RG_VRF12_VOSEL_ADDR 0x1f6e +#define MT6363_RG_VRF12_VOSEL_MASK GENMASK(3, 0) +#define MT6363_RG_VRF13_VOCAL_ADDR 0x1f71 +#define MT6363_RG_VRF13_VOCAL_MASK GENMASK(3, 0) +#define MT6363_RG_VRF13_VOSEL_ADDR 0x1f72 +#define MT6363_RG_VRF13_VOSEL_MASK GENMASK(3, 0) +#define MT6363_RG_VRF09_VOCAL_ADDR 0x1f78 +#define MT6363_RG_VRF09_VOCAL_MASK GENMASK(3, 0) +#define MT6363_RG_VRF09_VOSEL_ADDR 0x1f79 +#define MT6363_RG_VRF09_VOSEL_MASK GENMASK(3, 0) +#define MT6363_ISINK_EN_CTRL0 0x21db +#define MT6363_ISINK_CTRL0_MASK GENMASK(7, 0) +#define MT6363_ISINK_EN_CTRL1 0x21dc +#define MT6363_ISINK_CTRL1_MASK GENMASK(7, 4) + +#endif /* __LINUX_REGULATOR_MT6363_H */ diff --git a/include/linux/regulator/pca9450.h b/include/linux/regulator/pca9450.h index 85b4fecc10d8..0df8b3c48082 100644 --- a/include/linux/regulator/pca9450.h +++ b/include/linux/regulator/pca9450.h @@ -223,12 +223,44 @@ enum { #define IRQ_THERM_105 0x02 #define IRQ_THERM_125 0x01 +/* PCA9450_REG_PWRCTRL bits */ +#define T_ON_DEB_MASK 0xC0 +#define T_ON_DEB_120US (0 << 6) +#define T_ON_DEB_20MS (1 << 6) +#define T_ON_DEB_100MS (2 << 6) +#define T_ON_DEB_750MS (3 << 6) +#define T_OFF_DEB_MASK 0x20 +#define T_OFF_DEB_120US (0 << 5) +#define T_OFF_DEB_2MS (1 << 5) +#define T_ON_STEP_MASK 0x18 +#define T_ON_STEP_1MS (0 << 3) +#define T_ON_STEP_2MS (1 << 3) +#define T_ON_STEP_4MS (2 << 3) +#define T_ON_STEP_8MS (3 << 3) +#define T_OFF_STEP_MASK 0x06 +#define T_OFF_STEP_2MS (0 << 1) +#define T_OFF_STEP_4MS (1 << 1) +#define T_OFF_STEP_8MS (2 << 1) +#define T_OFF_STEP_16MS (3 << 1) +#define T_RESTART_MASK 0x01 +#define T_RESTART_250MS 0 +#define T_RESTART_500MS 1 + /* PCA9450_REG_RESET_CTRL bits */ #define WDOG_B_CFG_MASK 0xC0 #define WDOG_B_CFG_NONE 0x00 #define WDOG_B_CFG_WARM 0x40 #define WDOG_B_CFG_COLD_LDO12 0x80 #define WDOG_B_CFG_COLD 0xC0 +#define T_PMIC_RST_DEB_MASK 0x07 +#define T_PMIC_RST_DEB_10MS 0x00 +#define T_PMIC_RST_DEB_50MS 0x01 +#define T_PMIC_RST_DEB_100MS 0x02 +#define T_PMIC_RST_DEB_500MS 0x03 +#define T_PMIC_RST_DEB_1S 0x04 +#define T_PMIC_RST_DEB_2S 0x05 +#define T_PMIC_RST_DEB_4S 0x06 +#define T_PMIC_RST_DEB_8S 0x07 /* PCA9450_REG_CONFIG2 bits */ #define I2C_LT_MASK 0x03 diff --git a/include/linux/reset-controller.h b/include/linux/reset-controller.h index 357df16ede32..46514cb1b9e0 100644 --- a/include/linux/reset-controller.h +++ b/include/linux/reset-controller.h @@ -27,31 +27,6 @@ struct device_node; struct of_phandle_args; /** - * struct reset_control_lookup - represents a single lookup entry - * - * @list: internal list of all reset lookup entries - * @provider: name of the reset controller device controlling this reset line - * @index: ID of the reset controller in the reset controller device - * @dev_id: name of the device associated with this reset line - * @con_id: name of the reset line (can be NULL) - */ -struct reset_control_lookup { - struct list_head list; - const char *provider; - unsigned int index; - const char *dev_id; - const 
char *con_id; -}; - -#define RESET_LOOKUP(_provider, _index, _dev_id, _con_id) \ - { \ - .provider = _provider, \ - .index = _index, \ - .dev_id = _dev_id, \ - .con_id = _con_id, \ - } - -/** * struct reset_controller_dev - reset controller entity that might * provide multiple reset controls * @ops: a pointer to device specific struct reset_control_ops @@ -90,9 +65,6 @@ void reset_controller_unregister(struct reset_controller_dev *rcdev); struct device; int devm_reset_controller_register(struct device *dev, struct reset_controller_dev *rcdev); - -void reset_controller_add_lookup(struct reset_control_lookup *lookup, - unsigned int num_entries); #else static inline int reset_controller_register(struct reset_controller_dev *rcdev) { @@ -108,11 +80,6 @@ static inline int devm_reset_controller_register(struct device *dev, { return 0; } - -static inline void reset_controller_add_lookup(struct reset_control_lookup *lookup, - unsigned int num_entries) -{ -} #endif #endif diff --git a/include/linux/reset.h b/include/linux/reset.h index 840d75d172f6..44f9e3415f92 100644 --- a/include/linux/reset.h +++ b/include/linux/reset.h @@ -2,6 +2,7 @@ #ifndef _LINUX_RESET_H_ #define _LINUX_RESET_H_ +#include <linux/bits.h> #include <linux/err.h> #include <linux/errno.h> #include <linux/types.h> diff --git a/include/linux/rv.h b/include/linux/rv.h index 9520aab34bcb..92fd467547e7 100644 --- a/include/linux/rv.h +++ b/include/linux/rv.h @@ -88,7 +88,7 @@ union rv_task_monitor { struct rv_reactor { const char *name; const char *description; - __printf(1, 2) void (*react)(const char *msg, ...); + __printf(1, 0) void (*react)(const char *msg, va_list args); struct list_head list; }; #endif @@ -102,7 +102,7 @@ struct rv_monitor { void (*reset)(void); #ifdef CONFIG_RV_REACTORS struct rv_reactor *reactor; - __printf(1, 2) void (*react)(const char *msg, ...); + __printf(1, 0) void (*react)(const char *msg, va_list args); #endif struct list_head list; struct rv_monitor *parent; @@ -116,13 +116,14 @@ int rv_get_task_monitor_slot(void); void rv_put_task_monitor_slot(int slot); #ifdef CONFIG_RV_REACTORS -bool rv_reacting_on(void); int rv_unregister_reactor(struct rv_reactor *reactor); int rv_register_reactor(struct rv_reactor *reactor); +__printf(2, 3) +void rv_react(struct rv_monitor *monitor, const char *msg, ...); #else -static inline bool rv_reacting_on(void) +__printf(2, 3) +static inline void rv_react(struct rv_monitor *monitor, const char *msg, ...) 
{ - return false; } #endif /* CONFIG_RV_REACTORS */ diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h index 0232d983b715..0e1d73955fa5 100644 --- a/include/linux/sched/mm.h +++ b/include/linux/sched/mm.h @@ -189,12 +189,11 @@ arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags, vm_flags_t); -unsigned long mm_get_unmapped_area(struct mm_struct *mm, struct file *filp, - unsigned long addr, unsigned long len, - unsigned long pgoff, unsigned long flags); +unsigned long mm_get_unmapped_area(struct file *filp, unsigned long addr, + unsigned long len, unsigned long pgoff, + unsigned long flags); -unsigned long mm_get_unmapped_area_vmflags(struct mm_struct *mm, - struct file *filp, +unsigned long mm_get_unmapped_area_vmflags(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, @@ -318,6 +317,9 @@ static inline void might_alloc(gfp_t gfp_mask) fs_reclaim_acquire(gfp_mask); fs_reclaim_release(gfp_mask); + if (current->flags & PF_MEMALLOC) + return; + might_sleep_if(gfpflags_allow_blocking(gfp_mask)); } diff --git a/include/linux/security.h b/include/linux/security.h index eb36451ce41f..83a646d72f6f 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -2257,8 +2257,6 @@ static inline void securityfs_remove(struct dentry *dentry) #endif -#define securityfs_recursive_remove securityfs_remove - #ifdef CONFIG_BPF_SYSCALL union bpf_attr; struct bpf_map; diff --git a/include/linux/seq_buf.h b/include/linux/seq_buf.h index 52791e070506..9f2839e73f8a 100644 --- a/include/linux/seq_buf.h +++ b/include/linux/seq_buf.h @@ -149,6 +149,23 @@ static inline void seq_buf_commit(struct seq_buf *s, int num) } } +/** + * seq_buf_pop - pop off the last written character + * @s: the seq_buf handle + * + * Removes the last written character to the seq_buf @s. + * + * Returns the last character or -1 if it is empty. 
+ */ +static inline int seq_buf_pop(struct seq_buf *s) +{ + if (!s->len) + return -1; + + s->len--; + return (unsigned int)s->buffer[s->len]; +} + extern __printf(2, 3) int seq_buf_printf(struct seq_buf *s, const char *fmt, ...); extern __printf(2, 0) diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h index 774efe592a9a..5e4b3c1ae5c2 100644 --- a/include/linux/shmem_fs.h +++ b/include/linux/shmem_fs.h @@ -94,7 +94,8 @@ extern struct file *shmem_kernel_file_setup(const char *name, loff_t size, unsigned long flags); extern struct file *shmem_file_setup_with_mnt(struct vfsmount *mnt, const char *name, loff_t size, unsigned long flags); -extern int shmem_zero_setup(struct vm_area_struct *); +int shmem_zero_setup(struct vm_area_struct *vma); +int shmem_zero_setup_desc(struct vm_area_desc *desc); extern unsigned long shmem_get_unmapped_area(struct file *, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags); extern int shmem_lock(struct file *file, int lock, struct ucounts *ucounts); @@ -135,11 +136,16 @@ static inline bool shmem_hpage_pmd_enabled(void) #ifdef CONFIG_SHMEM extern unsigned long shmem_swap_usage(struct vm_area_struct *vma); +extern void shmem_uncharge(struct inode *inode, long pages); #else static inline unsigned long shmem_swap_usage(struct vm_area_struct *vma) { return 0; } + +static inline void shmem_uncharge(struct inode *inode, long pages) +{ +} #endif extern unsigned long shmem_partial_swap_usage(struct address_space *mapping, pgoff_t start, pgoff_t end); @@ -193,7 +199,6 @@ static inline pgoff_t shmem_fallocend(struct inode *inode, pgoff_t eof) } extern bool shmem_charge(struct inode *inode, long pages); -extern void shmem_uncharge(struct inode *inode, long pages); #ifdef CONFIG_USERFAULTFD #ifdef CONFIG_SHMEM diff --git a/include/linux/sizes.h b/include/linux/sizes.h index 49039494076f..f1f1a055b047 100644 --- a/include/linux/sizes.h +++ b/include/linux/sizes.h @@ -67,5 +67,6 @@ #define SZ_16T _AC(0x100000000000, ULL) #define SZ_32T _AC(0x200000000000, ULL) #define SZ_64T _AC(0x400000000000, ULL) +#define SZ_128T _AC(0x800000000000, ULL) #endif /* __LINUX_SIZES_H__ */ diff --git a/include/linux/smp.h b/include/linux/smp.h index 18e9c918325e..91d0ecf3b8d3 100644 --- a/include/linux/smp.h +++ b/include/linux/smp.h @@ -168,6 +168,7 @@ int smp_call_function_any(const struct cpumask *mask, void kick_all_cpus_sync(void); void wake_up_all_idle_cpus(void); +bool cpus_peek_for_pending_ipi(const struct cpumask *mask); /* * Generic and arch helpers @@ -216,6 +217,10 @@ smp_call_function_any(const struct cpumask *mask, smp_call_func_t func, static inline void kick_all_cpus_sync(void) { } static inline void wake_up_all_idle_cpus(void) { } +static inline bool cpus_peek_for_pending_ipi(const struct cpumask *mask) +{ + return false; +} #define setup_max_cpus 0 diff --git a/include/linux/soc/qcom/llcc-qcom.h b/include/linux/soc/qcom/llcc-qcom.h index 7a69210a250c..0287f9182c4d 100644 --- a/include/linux/soc/qcom/llcc-qcom.h +++ b/include/linux/soc/qcom/llcc-qcom.h @@ -74,7 +74,14 @@ #define LLCC_CAMSRTIP 73 #define LLCC_CAMRTRF 74 #define LLCC_CAMSRTRF 75 +#define LLCC_VIDEO_APV 83 +#define LLCC_COMPUTE1 87 +#define LLCC_CPUSS_OPP 88 #define LLCC_CPUSSMPAM 89 +#define LLCC_CAM_IPE_STROV 92 +#define LLCC_CAM_OFE_STROV 93 +#define LLCC_CPUSS_HEU 94 +#define LLCC_MDM_PNG_FIXED 100 /** * struct llcc_slice_desc - Cache slice descriptor diff --git a/include/linux/soc/qcom/socinfo.h b/include/linux/soc/qcom/socinfo.h index 608950443eee..ba823a0013c5 
100644 --- a/include/linux/soc/qcom/socinfo.h +++ b/include/linux/soc/qcom/socinfo.h @@ -82,6 +82,10 @@ struct socinfo { __le32 num_func_clusters; __le32 boot_cluster; __le32 boot_core; + /* Version 20 */ + __le32 raw_package_type; + /* Version 21, 22, 23 */ + __le32 reserve1[4]; }; /* Internal feature codes */ diff --git a/include/linux/soc/qcom/ubwc.h b/include/linux/soc/qcom/ubwc.h index 1ed8b1b16bc9..0a4edfe3d96d 100644 --- a/include/linux/soc/qcom/ubwc.h +++ b/include/linux/soc/qcom/ubwc.h @@ -52,6 +52,7 @@ struct qcom_ubwc_cfg_data { #define UBWC_4_0 0x40000000 #define UBWC_4_3 0x40030000 #define UBWC_5_0 0x50000000 +#define UBWC_6_0 0x60000000 #if IS_ENABLED(CONFIG_QCOM_UBWC_CONFIG) const struct qcom_ubwc_cfg_data *qcom_ubwc_config_get_data(void); diff --git a/include/linux/soc/samsung/exynos-regs-pmu.h b/include/linux/soc/samsung/exynos-regs-pmu.h index 71e0c09a49eb..532c6c2d1195 100644 --- a/include/linux/soc/samsung/exynos-regs-pmu.h +++ b/include/linux/soc/samsung/exynos-regs-pmu.h @@ -672,14 +672,341 @@ /* For Tensor GS101 */ /* PMU ALIVE */ -#define GS101_SYSIP_DAT0 (0x810) -#define GS101_CPU0_INFORM (0x860) -#define GS101_CPU_INFORM(cpu) \ - (GS101_CPU0_INFORM + (cpu*4)) -#define GS101_SYSTEM_CONFIGURATION (0x3A00) -#define GS101_EINT_WAKEUP_MASK (0x3A80) -#define GS101_PHY_CTRL_USB20 (0x3EB0) -#define GS101_PHY_CTRL_USBDP (0x3EB4) +#define GS101_OM_STAT 0x0000 +#define GS101_VERSION 0x0004 +#define GS101_PORESET_CHECK 0x0008 +#define GS101_OTP_STATUS 0x000c +#define GS101_SYSTEM_INFO 0x0010 +#define GS101_IDLE_IP(n) (0x03e0 + ((n) & 3) * 4) +#define GS101_IDLE_IP_MASK(n) (0x03f0 + ((n) & 3) * 4) +#define GS101_SLC_CH_OFFSET(ch) (0x0400 + ((ch) & 3) * 0x10) +#define GS101_DATARAM_STATE_SLC_CH(ch) (GS101_SLC_CH_OFFSET(ch) + 0x00) +#define GS101_TAGRAM_STATE_SLC_CH(ch) (GS101_SLC_CH_OFFSET(ch) + 0x04) +#define GS101_LRURAM_STATE_SLC_CH(ch) (GS101_SLC_CH_OFFSET(ch) + 0x08) +#define GS101_PPMPURAM_STATE_SLC_CH(ch) (GS101_SLC_CH_OFFSET(ch) + 0x0c) +#define GS101_DATARAM_INFORM_SCL_CH(ch) (GS101_SLC_CH_OFFSET(ch) + 0x40) +#define GS101_TAGRAM_INFORM_SCL_CH(ch) (GS101_SLC_CH_OFFSET(ch) + 0x44) +#define GS101_LRURAM_INFORM_SCL_CH(ch) (GS101_SLC_CH_OFFSET(ch) + 0x48) +#define GS101_PPMPURAM_INFORM_SCL_CH(ch) (GS101_SLC_CH_OFFSET(ch) + 0x4c) +#define GS101_INFORM0 0x0800 +#define GS101_INFORM1 0x0804 +#define GS101_INFORM2 0x0808 +#define GS101_INFORM3 0x080c +#define GS101_SYSIP_DAT(n) (0x0810 + ((n) & 3) * 4) +#define GS101_PWR_HOLD_HW_TRIP 0x0820 +#define GS101_PWR_HOLD_SW_TRIP 0x0824 +#define GS101_GSA_INFORM(n) (0x0830 + ((n) & 1) * 4) +#define GS101_INFORM4 0x0840 +#define GS101_INFORM5 0x0844 +#define GS101_INFORM6 0x0848 +#define GS101_INFORM7 0x084c +#define GS101_INFORM8 0x0850 +#define GS101_INFORM9 0x0854 +#define GS101_INFORM10 0x0858 +#define GS101_INFORM11 0x085c +#define GS101_CPU_INFORM(cpu) (0x0860 + ((cpu) & 7) * 4) +#define GS101_IROM_INFORM 0x0880 +#define GS101_IROM_CPU_INFORM(cpu) (0x0890 + ((cpu) & 7) * 4) +#define GS101_PMU_SPARE(n) (0x0900 + ((n) & 3) * 4) +#define GS101_IROM_DATA_REG(n) (0x0980 + ((n) & 3) * 4) +#define GS101_IROM_PWRMODE 0x0990 +#define GS101_DREX_CALIBRATION(n) (0x09a0 + ((n) & 7) * 4) + +#define GS101_CLUSTER0_OFFSET 0x1000 +#define GS101_CLUSTER1_OFFSET 0x1300 +#define GS101_CLUSTER2_OFFSET 0x1500 +#define GS101_CLUSTER_CPU_OFFSET(cl, cpu) ((cl) + ((cpu) * 0x80)) +#define GS101_CLUSTER_CPU_CONFIGURATION(cl, cpu) \ + (GS101_CLUSTER_CPU_OFFSET(cl, cpu) + 0x00) +#define GS101_CLUSTER_CPU_STATUS(cl, cpu) \ + (GS101_CLUSTER_CPU_OFFSET(cl, cpu) 
+ 0x04) +#define GS101_CLUSTER_CPU_STATES(cl, cpu) \ + (GS101_CLUSTER_CPU_OFFSET(cl, cpu) + 0x08) +#define GS101_CLUSTER_CPU_OPTION(cl, cpu) \ + (GS101_CLUSTER_CPU_OFFSET(cl, cpu) + 0x0c) +#define GS101_CLUSTER_CPU_OUT(cl, cpu) \ + (GS101_CLUSTER_CPU_OFFSET(cl, cpu) + 0x20) +#define GS101_CLUSTER_CPU_IN(cl, cpu) \ + (GS101_CLUSTER_CPU_OFFSET(cl, cpu) + 0x24) +#define GS101_CLUSTER_CPU_INT_IN(cl, cpu) \ + (GS101_CLUSTER_CPU_OFFSET(cl, cpu) + 0x40) +#define GS101_CLUSTER_CPU_INT_EN(cl, cpu) \ + (GS101_CLUSTER_CPU_OFFSET(cl, cpu) + 0x44) +#define GS101_CLUSTER_CPU_INT_TYPE(cl, cpu) \ + (GS101_CLUSTER_CPU_OFFSET(cl, cpu) + 0x48) +#define GS101_CLUSTER_CPU_INT_DIR(cl, cpu) \ + (GS101_CLUSTER_CPU_OFFSET(cl, cpu) + 0x4c) + +#define GS101_CLUSTER_NONCPU_OFFSET(cl) (0x1200 + ((cl) * 0x200)) +#define GS101_CLUSTER_NONCPU_CONFIGURATION(cl) \ + (GS101_CLUSTER_NONCPU_OFFSET(cl) + 0x00) +#define GS101_CLUSTER_NONCPU_STATUS(cl) \ + (GS101_CLUSTER_NONCPU_OFFSET(cl) + 0x04) +#define GS101_CLUSTER_NONCPU_STATES(cl) \ + (GS101_CLUSTER_NONCPU_OFFSET(cl) + 0x08) +#define GS101_CLUSTER_NONCPU_OPTION(cl) \ + (GS101_CLUSTER_NONCPU_OFFSET(cl) + 0x0c) +#define GS101_CLUSTER_NONCPU_OUT(cl) \ + (GS101_CLUSTER_NONCPU_OFFSET(cl) + 0x20) +#define GS101_CLUSTER_NONCPU_IN(cl) \ + (GS101_CLUSTER_NONCPU_OFFSET(cl) + 0x24) +#define GS101_CLUSTER_NONCPU_INT_IN(cl) \ + (GS101_CLUSTER_NONCPU_OFFSET(cl) + 0x40) +#define GS101_CLUSTER_NONCPU_INT_EN(cl) \ + (GS101_CLUSTER_NONCPU_OFFSET(cl) + 0x44) +#define GS101_CLUSTER_NONCPU_INT_TYPE(cl) \ + (GS101_CLUSTER_NONCPU_OFFSET(cl) + 0x48) +#define GS101_CLUSTER_NONCPU_INT_DIR(cl) \ + (GS101_CLUSTER_NONCPU_OFFSET(cl) + 0x4c) +#define GS101_CLUSTER_NONCPU_DUALRAIL_CTRL_OUT(cl) \ + (GS101_CLUSTER_NONCPU_OFFSET(cl) + 0x60) +#define GS101_CLUSTER_NONCPU_DUALRAIL_POS_OUT(cl) \ + (GS101_CLUSTER_NONCPU_OFFSET(cl) + 0x64) +#define GS101_CLUSTER_NONCPU_DUALRAIL_CTRL_IN(cl) \ + (GS101_CLUSTER_NONCPU_OFFSET(cl) + 0x6c) +#define GS101_CLUSTER0_NONCPU_DSU_PCH \ + (GS101_CLUSTER_NONCPU_OFFSET(0) + 0x80) + +#define GS101_SUBBBLK_OFFSET_ALIVE 0x1800 +#define GS101_SUBBBLK_OFFSET_AOC 0x1880 +#define GS101_SUBBBLK_OFFSET_APM 0x1900 +#define GS101_SUBBBLK_OFFSET_CMU 0x1980 +#define GS101_SUBBBLK_OFFSET_BUS0 0x1a00 +#define GS101_SUBBBLK_OFFSET_BUS1 0x1a80 +#define GS101_SUBBBLK_OFFSET_BUS2 0x1b00 +#define GS101_SUBBBLK_OFFSET_CORE 0x1b80 +#define GS101_SUBBBLK_OFFSET_EH 0x1c00 +#define GS101_SUBBBLK_OFFSET_CPUCL0 0x1c80 +#define GS101_SUBBBLK_OFFSET_CPUCL1 0x1d00 +#define GS101_SUBBBLK_OFFSET_CPUCL2 0x1d80 +#define GS101_SUBBBLK_OFFSET_G3D 0x1e00 +#define GS101_SUBBBLK_OFFSET_EMBEDDED_CPUCL0 0x1e80 +#define GS101_SUBBBLK_OFFSET_EMBEDDED_G3D 0x2000 +#define GS101_SUBBBLK_OFFSET_HSI0 0x2080 +#define GS101_SUBBBLK_OFFSET_HSI1 0x2100 +#define GS101_SUBBBLK_OFFSET_HSI2 0x2180 +#define GS101_SUBBBLK_OFFSET_DPU 0x2200 +#define GS101_SUBBBLK_OFFSET_DISP 0x2280 +#define GS101_SUBBBLK_OFFSET_G2D 0x2300 +#define GS101_SUBBBLK_OFFSET_MFC 0x2380 +#define GS101_SUBBBLK_OFFSET_CSIS 0x2400 +#define GS101_SUBBBLK_OFFSET_PDP 0x2480 +#define GS101_SUBBBLK_OFFSET_DNS 0x2500 +#define GS101_SUBBBLK_OFFSET_G3AA 0x2580 +#define GS101_SUBBBLK_OFFSET_IPP 0x2600 +#define GS101_SUBBBLK_OFFSET_ITP 0x2680 +#define GS101_SUBBBLK_OFFSET_MCSC 0x2700 +#define GS101_SUBBBLK_OFFSET_GDC 0x2780 +#define GS101_SUBBBLK_OFFSET_TNR 0x2800 +#define GS101_SUBBBLK_OFFSET_BO 0x2880 +#define GS101_SUBBBLK_OFFSET_TPU 0x2900 +#define GS101_SUBBBLK_OFFSET_MIF0 0x2980 +#define GS101_SUBBBLK_OFFSET_MIF1 0x2a00 +#define GS101_SUBBBLK_OFFSET_MIF2 0x2a80 
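/*
 * Illustrative usage sketch (not part of the patch above): the per-core
 * macros compose a cluster base with a core index, so a driver holding a
 * regmap for the PMU ALIVE block could read a core's power status as below.
 * The function name, the pmu_alive regmap handle and the "cluster 0"
 * assumption are hypothetical; only the GS101_* macros come from this header.
 */
#include <linux/regmap.h>

static int gs101_cluster0_core_status(struct regmap *pmu_alive,
				      unsigned int cpu, unsigned int *val)
{
	return regmap_read(pmu_alive,
			   GS101_CLUSTER_CPU_STATUS(GS101_CLUSTER0_OFFSET, cpu),
			   val);
}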
+#define GS101_SUBBBLK_OFFSET_MIF3 0x2b00 +#define GS101_SUBBBLK_OFFSET_MISC 0x2b80 +#define GS101_SUBBBLK_OFFSET_PERIC0 0x2c00 +#define GS101_SUBBBLK_OFFSET_PERIC1 0x2c80 +#define GS101_SUBBBLK_OFFSET_S2D 0x2d00 +#define GS101_SUBBLK_CONFIGURATION(blk) ((blk) + 0x00) +#define GS101_SUBBLK_STATUS(blk) ((blk) + 0x04) +#define GS101_SUBBLK_STATES(blk) ((blk) + 0x08) +#define GS101_SUBBLK_OPTION(blk) ((blk) + 0x0c) +#define GS101_SUBBLK_CTRL(blk) ((blk) + 0x10) +#define GS101_SUBBLK_OUT(blk) ((blk) + 0x20) +#define GS101_SUBBLK_IN(blk) ((blk) + 0x24) +#define GS101_SUBBLK_INT_IN(blk) ((blk) + 0x40) +#define GS101_SUBBLK_INT_EN(blk) ((blk) + 0x44) +#define GS101_SUBBLK_INT_TYPE(blk) ((blk) + 0x48) +#define GS101_SUBBLK_INT_DIR(blk) ((blk) + 0x4c) +#define GS101_SUBBLK_MEMORY_OUT(blk) ((blk) + 0x60) +#define GS101_SUBBLK_MEMORY_IN(blk) ((blk) + 0x64) + +#define GS101_SUBBBLK_CPU_OFFSET_APM 0x3000 +#define GS101_SUBBBLK_CPU_OFFSET_DBGCORE 0x3080 +#define GS101_SUBBBLK_CPU_OFFSET_SSS 0x3100 +#define GS101_SUBBLK_CPU_CONFIGURATION(blk) ((blk) + 0x00) +#define GS101_SUBBLK_CPU_STATUS(blk) ((blk) + 0x04) +#define GS101_SUBBLK_CPU_STATES(blk) ((blk) + 0x08) +#define GS101_SUBBLK_CPU_OPTION(blk) ((blk) + 0x0c) +#define GS101_SUBBLK_CPU_OUT(blk) ((blk) + 0x20) +#define GS101_SUBBLK_CPU_IN(blk) ((blk) + 0x24) +#define GS101_SUBBLK_CPU_INT_IN(blk) ((blk) + 0x40) +#define GS101_SUBBLK_CPU_INT_EN(blk) ((blk) + 0x44) +#define GS101_SUBBLK_CPU_INT_TYPE(blk) ((blk) + 0x48) +#define GS101_SUBBLK_CPU_INT_DIR(blk) ((blk) + 0x4c) + +#define GS101_MIF_CONFIGURATION 0x3800 +#define GS101_MIF_STATUS 0x3804 +#define GS101_MIF_STATES 0x3808 +#define GS101_MIF_OPTION 0x380c +#define GS101_MIF_CTRL 0x3810 +#define GS101_MIF_OUT 0x3820 +#define GS101_MIF_IN 0x3824 +#define GS101_MIF_INT_IN 0x3840 +#define GS101_MIF_INT_EN 0x3844 +#define GS101_MIF_INT_TYPE 0x3848 +#define GS101_MIF_INT_DIR 0x384c +#define GS101_TOP_CONFIGURATION 0x3900 +#define GS101_TOP_STATUS 0x3904 +#define GS101_TOP_STATES 0x3908 +#define GS101_TOP_OPTION 0x390c +#define GS101_TOP_OUT 0x3920 +#define GS101_TOP_IN 0x3924 +#define GS101_TOP_INT_IN 0x3940 +#define GS101_TOP_INT_EN 0x3944 +#define GS101_TOP_INT_TYPE 0x3948 +#define GS101_TOP_INT_DIR 0x394c +#define GS101_WAKEUP_STAT 0x3950 +#define GS101_WAKEUP2_STAT 0x3954 +#define GS101_WAKEUP2_INT_IN 0x3960 +#define GS101_WAKEUP2_INT_EN 0x3964 +#define GS101_WAKEUP2_INT_TYPE 0x3968 +#define GS101_WAKEUP2_INT_DIR 0x396c +#define GS101_SYSTEM_CONFIGURATION 0x3a00 +#define GS101_SYSTEM_STATUS 0x3a04 +#define GS101_SYSTEM_STATES 0x3a08 +#define GS101_SYSTEM_OPTION 0x3a0c +#define GS101_SYSTEM_CTRL 0x3a10 +#define GS101_SPARE_CTRL 0x3a14 +#define GS101_USER_DEFINED_OUT 0x3a18 +#define GS101_SYSTEM_OUT 0x3a20 +#define GS101_SYSTEM_IN 0x3a24 +#define GS101_SYSTEM_INT_IN 0x3a40 +#define GS101_SYSTEM_INT_EN 0x3a44 +#define GS101_SYSTEM_INT_TYPE 0x3a48 +#define GS101_SYSTEM_INT_DIR 0x3a4c +#define GS101_EINT_INT_IN 0x3a50 +#define GS101_EINT_INT_EN 0x3a54 +#define GS101_EINT_INT_TYPE 0x3a58 +#define GS101_EINT_INT_DIR 0x3a5c +#define GS101_EINT2_INT_IN 0x3a60 +#define GS101_EINT2_INT_EN 0x3a64 +#define GS101_EINT2_INT_TYPE 0x3a68 +#define GS101_EINT2_INT_DIR 0x3a6c +#define GS101_EINT3_INT_IN 0x3a70 +#define GS101_EINT3_INT_EN 0x3a74 +#define GS101_EINT3_INT_TYPE 0x3a78 +#define GS101_EINT3_INT_DIR 0x3a7c +#define GS101_EINT_WAKEUP_MASK 0x3a80 +#define GS101_EINT_WAKEUP_MASK2 0x3a84 +#define GS101_EINT_WAKEUP_MASK3 0x3a88 +#define GS101_USER_DEFINED_INT_IN 0x3a90 +#define GS101_USER_DEFINED_INT_EN 0x3a94 
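/*
 * Illustrative usage sketch (not part of the patch above): the sub-block
 * accessors take a block base, so a table of bases can drive a generic
 * status dump. The table contents, function name and pmu_alive regmap are
 * hypothetical; only the GS101_* macros come from this header.
 */
#include <linux/kernel.h>
#include <linux/regmap.h>

static const unsigned int gs101_example_blocks[] = {
	GS101_SUBBBLK_OFFSET_HSI0,
	GS101_SUBBBLK_OFFSET_HSI1,
	GS101_SUBBBLK_OFFSET_MIF0,
};

static void gs101_dump_block_status(struct regmap *pmu_alive)
{
	unsigned int i, val;

	for (i = 0; i < ARRAY_SIZE(gs101_example_blocks); i++)
		if (!regmap_read(pmu_alive,
				 GS101_SUBBLK_STATUS(gs101_example_blocks[i]),
				 &val))
			pr_info("block %#x status: %#x\n",
				gs101_example_blocks[i], val);
}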
+#define GS101_USER_DEFINED_INT_TYPE 0x3a98 +#define GS101_USER_DEFINED_INT_DIR 0x3a9c +#define GS101_SCAN2DRAM_INT_IN 0x3aa0 +#define GS101_SCAN2DRAM_INT_EN 0x3aa4 +#define GS101_SCAN2DRAM_INT_TYPE 0x3aa8 +#define GS101_SCAN2DRAM_INT_DIR 0x3aac +#define GS101_HCU_START 0x3ab0 +#define GS101_CUSTOM_OUT 0x3ac0 +#define GS101_CUSTOM_IN 0x3ac4 +#define GS101_CUSTOM_INT_IN 0x3ad0 +#define GS101_CUSTOM_INT_EN 0x3ad4 +#define GS101_CUSTOM_INT_TYPE 0x3ad8 +#define GS101_CUSTOM_INT_DIR 0x3adc +#define GS101_ACK_LAST_CPU 0x3afc +#define GS101_HCU_R(n) (0x3b00 + ((n) & 3) * 4) +#define GS101_HCU_SP 0x3b14 +#define GS101_HCU_PC 0x3b18 +#define GS101_PMU_RAM_CTRL 0x3b20 +#define GS101_APM_HCU_CTRL 0x3b24 +#define GS101_APM_NMI_ENABLE 0x3b30 +#define GS101_DBGCORE_NMI_ENABLE 0x3b34 +#define GS101_HCU_NMI_ENABLE 0x3b38 +#define GS101_PWR_HOLD_WDT_ENABLE 0x3b3c +#define GS101_NMI_SRC_IN 0x3b40 +#define GS101_RST_STAT 0x3b44 +#define GS101_RST_STAT_PMU 0x3b48 +#define GS101_HPM_INT_IN 0x3b60 +#define GS101_HPM_INT_EN 0x3b64 +#define GS101_HPM_INT_TYPE 0x3b68 +#define GS101_HPM_INT_DIR 0x3b6c +#define GS101_S2D_AUTH 0x3b70 +#define GS101_BOOT_STAT 0x3b74 +#define GS101_PMLINK_OUT 0x3c00 +#define GS101_PMLINK_AOC_OUT 0x3c04 +#define GS101_PMLINK_AOC_CTRL 0x3c08 +#define GS101_TCXO_BUF_CTRL 0x3c10 +#define GS101_ADD_CTRL 0x3c14 +#define GS101_HCU_TIMEOUT_RESET 0x3c20 +#define GS101_HCU_TIMEOUT_SCAN2DRAM 0x3c24 +#define GS101_TIMER(n) (0x3c80 + ((n) & 3) * 4) +#define GS101_PPC_MIF(n) (0x3c90 + ((n) & 3) * 4) +#define GS101_PPC_CORE 0x3ca0 +#define GS101_PPC_EH 0x3ca4 +#define GS101_PPC_CPUCL1_0 0x3ca8 +#define GS101_PPC_CPUCL1_1 0x3cac +#define GS101_EXT_REGULATOR_MIF_DURATION 0x3cb0 +#define GS101_EXT_REGULATOR_TOP_DURATION 0x3cb4 +#define GS101_EXT_REGULATOR_CPUCL2_DURATION 0x3cb8 +#define GS101_EXT_REGULATOR_CPUCL1_DURATION 0x3cbc +#define GS101_EXT_REGULATOR_G3D_DURATION 0x3cc0 +#define GS101_EXT_REGULATOR_TPU_DURATION 0x3cc4 +#define GS101_TCXO_DURATION 0x3cc8 +#define GS101_BURNIN_CTRL 0x3cd0 +#define GS101_JTAG_DBG_DET 0x3cd4 +#define GS101_MMC_CONWKUP_CTRL 0x3cd8 +#define GS101_USBDPPHY0_USBDP_WAKEUP 0x3cdc +#define GS101_TMU_TOP_TRIP 0x3ce0 +#define GS101_TMU_SUB_TRIP 0x3ce4 +#define GS101_MEMORY_CEN 0x3d00 +#define GS101_MEMORY_PGEN 0x3d04 +#define GS101_MEMORY_RET 0x3d08 +#define GS101_MEMORY_PGEN_FEEDBACK 0x3d0c +#define GS101_MEMORY_SMX 0x3d10 +#define GS101_MEMORY_SMX_FEEDBACK 0x3d14 +#define GS101_SLC_PCH_CHANNEL 0x3d20 +#define GS101_SLC_PCH_CB 0x3d24 +#define GS101_FORCE_NOMC 0x3d3c +#define GS101_FORCE_BOOST 0x3d4c +#define GS101_PMLINK_SLC_REQ 0x3d50 +#define GS101_PMLINK_SLC_ACK 0x3d54 +#define GS101_PMLINK_SLC_BUSY 0x3d58 +#define GS101_BOOTSYNC_OUT 0x3d80 +#define GS101_BOOTSYNC_IN 0x3d84 +#define GS101_SCAN_READY_OUT 0x3d88 +#define GS101_SCAN_READY_IN 0x3d8c +#define GS101_GSA_RESTORE 0x3d90 +#define GS101_ALIVE_OTP_LATCH 0x3d94 +#define GS101_DEBUG_OVERRIDE 0x3d98 +#define GS101_WDT_OPTION 0x3d9c +#define GS101_AOC_WDT_CFG 0x3da0 +#define GS101_CTRL_SECJTAG_ALIVE 0x3da4 +#define GS101_CTRL_DIV_PLL_ALV_DIVLOW 0x3e00 +#define GS101_CTRL_MUX_CLK_APM_REFSRC_AUTORESTORE 0x3e04 +#define GS101_CTRL_MUX_CLK_APM_REFSRC 0x3e08 +#define GS101_CTRL_MUX_CLK_APM_REF 0x3e0c +#define GS101_CTRL_MUX_PLL_ALV_DIV4 0x3e10 +#define GS101_CTRL_PLL_ALV_DIV4 0x3e14 +#define GS101_CTRL_OSCCLK_APMGSA 0x3e18 +#define GS101_CTRL_BLK_AOC_CLKS 0x3e1c +#define GS101_CTRL_PLL_ALV_LOCK 0x3e20 +#define GS101_CTRL_CLKDIV__CLKRTC 0x3e24 +#define GS101_CTRL_SOC32K 0x3e30 +#define GS101_CTRL_STM_PMU 0x3e34 +#define 
GS101_CTRL_PMU_DEBUG 0x3e38 +#define GS101_CTRL_DEBUG_UART 0x3e3c +#define GS101_CTRL_TCK 0x3e40 +#define GS101_CTRL_SBU_SW_EN 0x3e44 +#define GS101_PAD_CTRL_CLKOUT0 0x3e80 +#define GS101_PAD_CTRL_CLKOUT1 0x3e84 +#define GS101_PAD_CTRL_APM_24MOUT_0 0x3e88 +#define GS101_PAD_CTRL_APM_24MOUT_1 0x3e8c +#define GS101_PAD_CTRL_IO_FORCE_RETENTION 0x3e90 +#define GS101_PAD_CTRL_APACTIVE_n 0x3e94 +#define GS101_PAD_CTRL_TCXO_ON 0x3e98 +#define GS101_PAD_CTRL_PWR_HOLD 0x3e9c +#define GS101_PAD_CTRL_RESETO_n 0x3ea0 +#define GS101_PAD_CTRL_WRESETO_n 0x3ea4 +#define GS101_PHY_CTRL_USB20 0x3eb0 +#define GS101_PHY_CTRL_USBDP 0x3eb4 +#define GS101_PHY_CTRL_MIPI_DCPHY_M4M4 0x3eb8 +#define GS101_PHY_CTRL_MIPI_DCPHY_S4S4S4S4 0x3ebc +#define GS101_PHY_CTRL_PCIE_GEN4_0 0x3ec0 +#define GS101_PHY_CTRL_PCIE_GEN4_1 0x3ec4 +#define GS101_PHY_CTRL_UFS 0x3ec8 /* PMU INTR GEN */ #define GS101_GRP1_INTR_BID_UPEND (0x0108) diff --git a/include/linux/soundwire/sdw_registers.h b/include/linux/soundwire/sdw_registers.h index 0a5939285583..cae8a0a5a9b0 100644 --- a/include/linux/soundwire/sdw_registers.h +++ b/include/linux/soundwire/sdw_registers.h @@ -355,4 +355,6 @@ /* Check the reserved and fixed bits in address */ #define SDW_SDCA_VALID_CTL(reg) (((reg) & (GENMASK(31, 25) | BIT(18) | BIT(13))) == BIT(30)) +#define SDW_SDCA_MAX_REGISTER 0x47FFFFFF + #endif /* __SDW_REGISTERS_H */ diff --git a/include/linux/spi/offload/types.h b/include/linux/spi/offload/types.h index 6f7892347871..cd61f8adb7a5 100644 --- a/include/linux/spi/offload/types.h +++ b/include/linux/spi/offload/types.h @@ -57,8 +57,17 @@ enum spi_offload_trigger_type { SPI_OFFLOAD_TRIGGER_PERIODIC, }; +/** + * spi_offload_trigger_periodic - configuration parameters for periodic triggers + * @frequency_hz: The rate that the trigger should fire in Hz. + * @offset_ns: A delay in nanoseconds between when this trigger fires + * compared to another trigger. This requires specialized hardware + * that supports such synchronization with a delay between two or + * more triggers. Set to 0 when not needed. + */ struct spi_offload_trigger_periodic { u64 frequency_hz; + u64 offset_ns; }; struct spi_offload_trigger_config { diff --git a/include/linux/string.h b/include/linux/string.h index fdd3442c6bcb..1b564c36d721 100644 --- a/include/linux/string.h +++ b/include/linux/string.h @@ -371,6 +371,10 @@ static inline void memzero_explicit(void *s, size_t count) * kbasename - return the last part of a pathname. * * @path: path to extract the filename from. + * + * Returns: + * Pointer to the filename portion inside @path. If no '/' exists, + * returns @path unchanged. */ static inline const char *kbasename(const char *path) { @@ -556,10 +560,32 @@ static __always_inline size_t str_has_prefix(const char *str, const char *prefix * strstarts - does @str start with @prefix? * @str: string to examine * @prefix: prefix to look for. + * + * Returns: + * True if @str begins with @prefix. False in all other cases. */ static inline bool strstarts(const char *str, const char *prefix) { return strncmp(str, prefix, strlen(prefix)) == 0; } +/** + * strends - Check if a string ends with another string. + * @str: NULL-terminated string to check against @suffix + * @suffix: NULL-terminated string defining the suffix to look for in @str + * + * Returns: + * True if @str ends with @suffix. False in all other cases. 
+ */ +static inline bool __attribute__((nonnull(1, 2))) +strends(const char *str, const char *suffix) +{ + unsigned int str_len = strlen(str), suffix_len = strlen(suffix); + + if (str_len < suffix_len) + return false; + + return !(strcmp(str + str_len - suffix_len, suffix)); +} + #endif /* _LINUX_STRING_H_ */ diff --git a/include/linux/swap.h b/include/linux/swap.h index e818fbade1e2..38ca3df68716 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -301,16 +301,7 @@ struct swap_info_struct { struct work_struct discard_work; /* discard worker */ struct work_struct reclaim_work; /* reclaim worker */ struct list_head discard_clusters; /* discard clusters list */ - struct plist_node avail_lists[]; /* - * entries in swap_avail_heads, one - * entry per node. - * Must be last as the number of the - * array is nr_node_ids, which is not - * a fixed value so have to allocate - * dynamically. - * And it has to be an array so that - * plist_for_each_* can work. - */ + struct plist_node avail_list; /* entry in swap_avail_head */ }; static inline swp_entry_t page_swap_entry(struct page *page) @@ -462,7 +453,7 @@ static inline long get_nr_swap_pages(void) } extern void si_swapinfo(struct sysinfo *); -int folio_alloc_swap(struct folio *folio, gfp_t gfp_mask); +int folio_alloc_swap(struct folio *folio); bool folio_free_swap(struct folio *folio); void put_swap_folio(struct folio *folio, swp_entry_t entry); extern swp_entry_t get_swap_page_of_type(int); @@ -560,7 +551,7 @@ static inline int swp_swapcount(swp_entry_t entry) return 0; } -static inline int folio_alloc_swap(struct folio *folio, gfp_t gfp_mask) +static inline int folio_alloc_swap(struct folio *folio) { return -EINVAL; } diff --git a/include/linux/swapops.h b/include/linux/swapops.h index 64ea151a7ae3..8cfc966eae48 100644 --- a/include/linux/swapops.h +++ b/include/linux/swapops.h @@ -28,7 +28,7 @@ #define SWP_OFFSET_MASK ((1UL << SWP_TYPE_SHIFT) - 1) /* - * Definitions only for PFN swap entries (see is_pfn_swap_entry()). To + * Definitions only for PFN swap entries (see leafeant_has_pfn()). To * store PFN, we only need SWP_PFN_BITS bits. Each of the pfn swap entries * can use the extra bits to store other information besides PFN. */ @@ -66,8 +66,6 @@ #define SWP_MIG_YOUNG BIT(SWP_MIG_YOUNG_BIT) #define SWP_MIG_DIRTY BIT(SWP_MIG_DIRTY_BIT) -static inline bool is_pfn_swap_entry(swp_entry_t entry); - /* Clear all flags but only keep swp_entry_t related information */ static inline pte_t pte_swp_clear_flags(pte_t pte) { @@ -110,36 +108,6 @@ static inline pgoff_t swp_offset(swp_entry_t entry) } /* - * This should only be called upon a pfn swap entry to get the PFN stored - * in the swap entry. Please refers to is_pfn_swap_entry() for definition - * of pfn swap entry. - */ -static inline unsigned long swp_offset_pfn(swp_entry_t entry) -{ - VM_BUG_ON(!is_pfn_swap_entry(entry)); - return swp_offset(entry) & SWP_PFN_MASK; -} - -/* check whether a pte points to a swap entry */ -static inline int is_swap_pte(pte_t pte) -{ - return !pte_none(pte) && !pte_present(pte); -} - -/* - * Convert the arch-dependent pte representation of a swp_entry_t into an - * arch-independent swp_entry_t. - */ -static inline swp_entry_t pte_to_swp_entry(pte_t pte) -{ - swp_entry_t arch_entry; - - pte = pte_swp_clear_flags(pte); - arch_entry = __pte_to_swp_entry(pte); - return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry)); -} - -/* * Convert the arch-independent representation of a swp_entry_t into the * arch-dependent pte representation. 
*/ @@ -175,27 +143,11 @@ static inline swp_entry_t make_writable_device_private_entry(pgoff_t offset) return swp_entry(SWP_DEVICE_WRITE, offset); } -static inline bool is_device_private_entry(swp_entry_t entry) -{ - int type = swp_type(entry); - return type == SWP_DEVICE_READ || type == SWP_DEVICE_WRITE; -} - -static inline bool is_writable_device_private_entry(swp_entry_t entry) -{ - return unlikely(swp_type(entry) == SWP_DEVICE_WRITE); -} - static inline swp_entry_t make_device_exclusive_entry(pgoff_t offset) { return swp_entry(SWP_DEVICE_EXCLUSIVE, offset); } -static inline bool is_device_exclusive_entry(swp_entry_t entry) -{ - return swp_type(entry) == SWP_DEVICE_EXCLUSIVE; -} - #else /* CONFIG_DEVICE_PRIVATE */ static inline swp_entry_t make_readable_device_private_entry(pgoff_t offset) { @@ -207,50 +159,14 @@ static inline swp_entry_t make_writable_device_private_entry(pgoff_t offset) return swp_entry(0, 0); } -static inline bool is_device_private_entry(swp_entry_t entry) -{ - return false; -} - -static inline bool is_writable_device_private_entry(swp_entry_t entry) -{ - return false; -} - static inline swp_entry_t make_device_exclusive_entry(pgoff_t offset) { return swp_entry(0, 0); } -static inline bool is_device_exclusive_entry(swp_entry_t entry) -{ - return false; -} - #endif /* CONFIG_DEVICE_PRIVATE */ #ifdef CONFIG_MIGRATION -static inline int is_migration_entry(swp_entry_t entry) -{ - return unlikely(swp_type(entry) == SWP_MIGRATION_READ || - swp_type(entry) == SWP_MIGRATION_READ_EXCLUSIVE || - swp_type(entry) == SWP_MIGRATION_WRITE); -} - -static inline int is_writable_migration_entry(swp_entry_t entry) -{ - return unlikely(swp_type(entry) == SWP_MIGRATION_WRITE); -} - -static inline int is_readable_migration_entry(swp_entry_t entry) -{ - return unlikely(swp_type(entry) == SWP_MIGRATION_READ); -} - -static inline int is_readable_exclusive_migration_entry(swp_entry_t entry) -{ - return unlikely(swp_type(entry) == SWP_MIGRATION_READ_EXCLUSIVE); -} static inline swp_entry_t make_readable_migration_entry(pgoff_t offset) { @@ -289,14 +205,6 @@ static inline swp_entry_t make_migration_entry_young(swp_entry_t entry) return entry; } -static inline bool is_migration_entry_young(swp_entry_t entry) -{ - if (migration_entry_supports_ad()) - return swp_offset(entry) & SWP_MIG_YOUNG; - /* Keep the old behavior of aging page after migration */ - return false; -} - static inline swp_entry_t make_migration_entry_dirty(swp_entry_t entry) { if (migration_entry_supports_ad()) @@ -305,14 +213,6 @@ static inline swp_entry_t make_migration_entry_dirty(swp_entry_t entry) return entry; } -static inline bool is_migration_entry_dirty(swp_entry_t entry) -{ - if (migration_entry_supports_ad()) - return swp_offset(entry) & SWP_MIG_DIRTY; - /* Keep the old behavior of clean page after migration */ - return false; -} - extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd, unsigned long address); extern void migration_entry_wait_huge(struct vm_area_struct *vma, unsigned long addr, pte_t *pte); @@ -332,43 +232,21 @@ static inline swp_entry_t make_writable_migration_entry(pgoff_t offset) return swp_entry(0, 0); } -static inline int is_migration_entry(swp_entry_t swp) -{ - return 0; -} - static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd, unsigned long address) { } static inline void migration_entry_wait_huge(struct vm_area_struct *vma, unsigned long addr, pte_t *pte) { } -static inline int is_writable_migration_entry(swp_entry_t entry) -{ - return 0; -} -static inline int 
is_readable_migration_entry(swp_entry_t entry) -{ - return 0; -} static inline swp_entry_t make_migration_entry_young(swp_entry_t entry) { return entry; } -static inline bool is_migration_entry_young(swp_entry_t entry) -{ - return false; -} - static inline swp_entry_t make_migration_entry_dirty(swp_entry_t entry) { return entry; } -static inline bool is_migration_entry_dirty(swp_entry_t entry) -{ - return false; -} #endif /* CONFIG_MIGRATION */ #ifdef CONFIG_MEMORY_FAILURE @@ -426,21 +304,6 @@ static inline swp_entry_t make_pte_marker_entry(pte_marker marker) return swp_entry(SWP_PTE_MARKER, marker); } -static inline bool is_pte_marker_entry(swp_entry_t entry) -{ - return swp_type(entry) == SWP_PTE_MARKER; -} - -static inline pte_marker pte_marker_get(swp_entry_t entry) -{ - return swp_offset(entry) & PTE_MARKER_MASK; -} - -static inline bool is_pte_marker(pte_t pte) -{ - return is_swap_pte(pte) && is_pte_marker_entry(pte_to_swp_entry(pte)); -} - static inline pte_t make_pte_marker(pte_marker marker) { return swp_entry_to_pte(make_pte_marker_entry(marker)); @@ -451,83 +314,11 @@ static inline swp_entry_t make_poisoned_swp_entry(void) return make_pte_marker_entry(PTE_MARKER_POISONED); } -static inline int is_poisoned_swp_entry(swp_entry_t entry) -{ - return is_pte_marker_entry(entry) && - (pte_marker_get(entry) & PTE_MARKER_POISONED); - -} - static inline swp_entry_t make_guard_swp_entry(void) { return make_pte_marker_entry(PTE_MARKER_GUARD); } -static inline int is_guard_swp_entry(swp_entry_t entry) -{ - return is_pte_marker_entry(entry) && - (pte_marker_get(entry) & PTE_MARKER_GUARD); -} - -/* - * This is a special version to check pte_none() just to cover the case when - * the pte is a pte marker. It existed because in many cases the pte marker - * should be seen as a none pte; it's just that we have stored some information - * onto the none pte so it becomes not-none any more. - * - * It should be used when the pte is file-backed, ram-based and backing - * userspace pages, like shmem. It is not needed upon pgtables that do not - * support pte markers at all. For example, it's not needed on anonymous - * memory, kernel-only memory (including when the system is during-boot), - * non-ram based generic file-system. It's fine to be used even there, but the - * extra pte marker check will be pure overhead. - */ -static inline int pte_none_mostly(pte_t pte) -{ - return pte_none(pte) || is_pte_marker(pte); -} - -static inline struct page *pfn_swap_entry_to_page(swp_entry_t entry) -{ - struct page *p = pfn_to_page(swp_offset_pfn(entry)); - - /* - * Any use of migration entries may only occur while the - * corresponding page is locked - */ - BUG_ON(is_migration_entry(entry) && !PageLocked(p)); - - return p; -} - -static inline struct folio *pfn_swap_entry_folio(swp_entry_t entry) -{ - struct folio *folio = pfn_folio(swp_offset_pfn(entry)); - - /* - * Any use of migration entries may only occur while the - * corresponding folio is locked - */ - BUG_ON(is_migration_entry(entry) && !folio_test_locked(folio)); - - return folio; -} - -/* - * A pfn swap entry is a special type of swap entry that always has a pfn stored - * in the swap offset. They can either be used to represent unaddressable device - * memory, to restrict access to a page undergoing migration or to represent a - * pfn which has been hwpoisoned and unmapped. 
- */ -static inline bool is_pfn_swap_entry(swp_entry_t entry) -{ - /* Make sure the swp offset can always store the needed fields */ - BUILD_BUG_ON(SWP_TYPE_SHIFT < SWP_PFN_BITS); - - return is_migration_entry(entry) || is_device_private_entry(entry) || - is_device_exclusive_entry(entry) || is_hwpoison_entry(entry); -} - struct page_vma_mapped_walk; #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION @@ -539,18 +330,6 @@ extern void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, extern void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd); -static inline swp_entry_t pmd_to_swp_entry(pmd_t pmd) -{ - swp_entry_t arch_entry; - - if (pmd_swp_soft_dirty(pmd)) - pmd = pmd_swp_clear_soft_dirty(pmd); - if (pmd_swp_uffd_wp(pmd)) - pmd = pmd_swp_clear_uffd_wp(pmd); - arch_entry = __pmd_to_swp_entry(pmd); - return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry)); -} - static inline pmd_t swp_entry_to_pmd(swp_entry_t entry) { swp_entry_t arch_entry; @@ -559,10 +338,6 @@ static inline pmd_t swp_entry_to_pmd(swp_entry_t entry) return __swp_entry_to_pmd(arch_entry); } -static inline int is_pmd_migration_entry(pmd_t pmd) -{ - return is_swap_pmd(pmd) && is_migration_entry(pmd_to_swp_entry(pmd)); -} #else /* CONFIG_ARCH_ENABLE_THP_MIGRATION */ static inline int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw, struct page *page) @@ -578,26 +353,12 @@ static inline void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, static inline void pmd_migration_entry_wait(struct mm_struct *m, pmd_t *p) { } -static inline swp_entry_t pmd_to_swp_entry(pmd_t pmd) -{ - return swp_entry(0, 0); -} - static inline pmd_t swp_entry_to_pmd(swp_entry_t entry) { return __pmd(0); } -static inline int is_pmd_migration_entry(pmd_t pmd) -{ - return 0; -} #endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */ -static inline int non_swap_entry(swp_entry_t entry) -{ - return swp_type(entry) >= MAX_SWAPFILES; -} - #endif /* CONFIG_MMU */ #endif /* _LINUX_SWAPOPS_H */ diff --git a/include/linux/syscore_ops.h b/include/linux/syscore_ops.h index ae4d48e4c970..ac6d71be5c38 100644 --- a/include/linux/syscore_ops.h +++ b/include/linux/syscore_ops.h @@ -11,14 +11,19 @@ #include <linux/list.h> struct syscore_ops { + int (*suspend)(void *data); + void (*resume)(void *data); + void (*shutdown)(void *data); +}; + +struct syscore { struct list_head node; - int (*suspend)(void); - void (*resume)(void); - void (*shutdown)(void); + const struct syscore_ops *ops; + void *data; }; -extern void register_syscore_ops(struct syscore_ops *ops); -extern void unregister_syscore_ops(struct syscore_ops *ops); +extern void register_syscore(struct syscore *syscore); +extern void unregister_syscore(struct syscore *syscore); #ifdef CONFIG_PM_SLEEP extern int syscore_suspend(void); extern void syscore_resume(void); diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h index 92e9146b1104..288fe0055cd5 100644 --- a/include/linux/sysctl.h +++ b/include/linux/sysctl.h @@ -59,6 +59,121 @@ extern const int sysctl_vals[]; #define SYSCTL_LONG_ONE ((void *)&sysctl_long_vals[1]) #define SYSCTL_LONG_MAX ((void *)&sysctl_long_vals[2]) +#define SYSCTL_CONV_IDENTITY(val) (val) +/** + * + * "dir" originates from read_iter (dir = 0) or write_iter (dir = 1) + * in the file_operations struct at proc/proc_sysctl.c. Its value means + * one of two things for sysctl: + * 1. SYSCTL_USER_TO_KERN(dir) Writing to an internal kernel variable from user + * space (dir > 0) + * 2. 
SYSCTL_KERN_TO_USER(dir) Writing to a user space buffer from a kernel + * variable (dir == 0). + */ +#define SYSCTL_USER_TO_KERN(dir) (!!(dir)) +#define SYSCTL_KERN_TO_USER(dir) (!dir) + +#define SYSCTL_USER_TO_KERN_INT_CONV(name, u_ptr_op) \ +int sysctl_user_to_kern_int_conv##name(const bool *negp, \ + const unsigned long *u_ptr,\ + int *k_ptr) \ +{ \ + unsigned long u = u_ptr_op(*u_ptr); \ + if (*negp) { \ + if (u > (unsigned long) INT_MAX + 1) \ + return -EINVAL; \ + WRITE_ONCE(*k_ptr, -u); \ + } else { \ + if (u > (unsigned long) INT_MAX) \ + return -EINVAL; \ + WRITE_ONCE(*k_ptr, u); \ + } \ + return 0; \ +} + +#define SYSCTL_KERN_TO_USER_INT_CONV(name, k_ptr_op) \ +int sysctl_kern_to_user_int_conv##name(bool *negp, \ + unsigned long *u_ptr, \ + const int *k_ptr) \ +{ \ + int val = READ_ONCE(*k_ptr); \ + if (val < 0) { \ + *negp = true; \ + *u_ptr = -k_ptr_op((unsigned long)val); \ + } else { \ + *negp = false; \ + *u_ptr = k_ptr_op((unsigned long)val); \ + } \ + return 0; \ +} + +/** + * To range check on a converted value, use a temp k_ptr + * When checking range, value should be within (tbl->extra1, tbl->extra2) + */ +#define SYSCTL_INT_CONV_CUSTOM(name, user_to_kern, kern_to_user, \ + k_ptr_range_check) \ +int do_proc_int_conv##name(bool *negp, unsigned long *u_ptr, int *k_ptr,\ + int dir, const struct ctl_table *tbl) \ +{ \ + if (SYSCTL_KERN_TO_USER(dir)) \ + return kern_to_user(negp, u_ptr, k_ptr); \ + \ + if (k_ptr_range_check) { \ + int tmp_k, ret; \ + if (!tbl) \ + return -EINVAL; \ + ret = user_to_kern(negp, u_ptr, &tmp_k); \ + if (ret) \ + return ret; \ + if ((tbl->extra1 && *(int *)tbl->extra1 > tmp_k) || \ + (tbl->extra2 && *(int *)tbl->extra2 < tmp_k)) \ + return -EINVAL; \ + WRITE_ONCE(*k_ptr, tmp_k); \ + } else \ + return user_to_kern(negp, u_ptr, k_ptr); \ + return 0; \ +} + +#define SYSCTL_USER_TO_KERN_UINT_CONV(name, u_ptr_op) \ +int sysctl_user_to_kern_uint_conv##name(const unsigned long *u_ptr,\ + unsigned int *k_ptr) \ +{ \ + unsigned long u = u_ptr_op(*u_ptr); \ + if (u > UINT_MAX) \ + return -EINVAL; \ + WRITE_ONCE(*k_ptr, u); \ + return 0; \ +} + +#define SYSCTL_UINT_CONV_CUSTOM(name, user_to_kern, kern_to_user, \ + k_ptr_range_check) \ +int do_proc_uint_conv##name(unsigned long *u_ptr, unsigned int *k_ptr, \ + int dir, const struct ctl_table *tbl) \ +{ \ + if (SYSCTL_KERN_TO_USER(dir)) \ + return kern_to_user(u_ptr, k_ptr); \ + \ + if (k_ptr_range_check) { \ + unsigned int tmp_k; \ + int ret; \ + if (!tbl) \ + return -EINVAL; \ + ret = user_to_kern(u_ptr, &tmp_k); \ + if (ret) \ + return ret; \ + if ((tbl->extra1 && \ + *(unsigned int *)tbl->extra1 > tmp_k) || \ + (tbl->extra2 && \ + *(unsigned int *)tbl->extra2 < tmp_k)) \ + return -ERANGE; \ + WRITE_ONCE(*k_ptr, tmp_k); \ + } else \ + return user_to_kern(u_ptr, k_ptr); \ + return 0; \ +} + + extern const unsigned long sysctl_long_vals[]; typedef int proc_handler(const struct ctl_table *ctl, int write, void *buffer, @@ -68,25 +183,30 @@ int proc_dostring(const struct ctl_table *, int, void *, size_t *, loff_t *); int proc_dobool(const struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos); int proc_dointvec(const struct ctl_table *, int, void *, size_t *, loff_t *); +int proc_dointvec_minmax(const struct ctl_table *table, int dir, void *buffer, + size_t *lenp, loff_t *ppos); +int proc_dointvec_conv(const struct ctl_table *table, int dir, void *buffer, + size_t *lenp, loff_t *ppos, + int (*conv)(bool *negp, unsigned long *u_ptr, int *k_ptr, + int dir, const struct ctl_table *table)); 
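/*
 * Illustrative usage sketch (not part of the patch above): one plausible way
 * a caller could stamp out a millisecond<->jiffies integer handler from the
 * conversion macros and hand it to proc_dointvec_conv(), roughly covering
 * what the removed proc_dointvec_ms_jiffies() used to do. Every name below
 * other than the macros and proc_dointvec_conv() is hypothetical.
 */
#include <linux/jiffies.h>
#include <linux/sysctl.h>

static SYSCTL_USER_TO_KERN_INT_CONV(_ms, msecs_to_jiffies)
static SYSCTL_KERN_TO_USER_INT_CONV(_ms, jiffies_to_msecs)
static SYSCTL_INT_CONV_CUSTOM(_ms, sysctl_user_to_kern_int_conv_ms,
			      sysctl_kern_to_user_int_conv_ms, false)

static int example_dointvec_ms_jiffies(const struct ctl_table *table, int dir,
					void *buffer, size_t *lenp, loff_t *ppos)
{
	return proc_dointvec_conv(table, dir, buffer, lenp, ppos,
				  do_proc_int_conv_ms);
}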
int proc_douintvec(const struct ctl_table *, int, void *, size_t *, loff_t *); -int proc_dointvec_minmax(const struct ctl_table *, int, void *, size_t *, loff_t *); int proc_douintvec_minmax(const struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos); +int proc_douintvec_conv(const struct ctl_table *table, int write, void *buffer, + size_t *lenp, loff_t *ppos, + int (*conv)(unsigned long *lvalp, unsigned int *valp, + int write, const struct ctl_table *table)); + int proc_dou8vec_minmax(const struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos); -int proc_dointvec_jiffies(const struct ctl_table *, int, void *, size_t *, loff_t *); -int proc_dointvec_ms_jiffies_minmax(const struct ctl_table *table, int write, - void *buffer, size_t *lenp, loff_t *ppos); -int proc_dointvec_userhz_jiffies(const struct ctl_table *, int, void *, size_t *, - loff_t *); -int proc_dointvec_ms_jiffies(const struct ctl_table *, int, void *, size_t *, - loff_t *); int proc_doulongvec_minmax(const struct ctl_table *, int, void *, size_t *, loff_t *); -int proc_doulongvec_ms_jiffies_minmax(const struct ctl_table *table, int, void *, - size_t *, loff_t *); +int proc_doulongvec_minmax_conv(const struct ctl_table *table, int dir, + void *buffer, size_t *lenp, loff_t *ppos, + unsigned long convmul, unsigned long convdiv); int proc_do_large_bitmap(const struct ctl_table *, int, void *, size_t *, loff_t *); int proc_do_static_key(const struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos); +int sysctl_kern_to_user_uint_conv(unsigned long *u_ptr, const unsigned int *k_ptr); /* * Register a set of sysctl names by calling register_sysctl @@ -156,6 +276,10 @@ struct ctl_node { * @nreg: When nreg drops to 0 the ctl_table_header will be unregistered. * @rcu: Delays the freeing of the inode. Introduced with "unfuck proc_sysctl ->d_compare()" * + * @type: Enumeration to differentiate between ctl target types + * @type.SYSCTL_TABLE_TYPE_DEFAULT: ctl target with no special considerations + * @type.SYSCTL_TABLE_TYPE_PERMANENTLY_EMPTY: Identifies a permanently empty dir + * target to serve as a mount point */ struct ctl_table_header { union { @@ -175,13 +299,6 @@ struct ctl_table_header { struct ctl_dir *parent; struct ctl_node *node; struct hlist_head inodes; /* head for proc_inode->sysctl_inodes */ - /** - * enum type - Enumeration to differentiate between ctl target types - * @SYSCTL_TABLE_TYPE_DEFAULT: ctl target with no special considerations - * @SYSCTL_TABLE_TYPE_PERMANENTLY_EMPTY: Used to identify a permanently - * empty directory target to serve - * as mount point. - */ enum { SYSCTL_TABLE_TYPE_DEFAULT, SYSCTL_TABLE_TYPE_PERMANENTLY_EMPTY, @@ -235,12 +352,6 @@ extern struct ctl_table_header *register_sysctl_mount_point(const char *path); void do_sysctl_args(void); bool sysctl_is_alias(char *param); -int do_proc_douintvec(const struct ctl_table *table, int write, - void *buffer, size_t *lenp, loff_t *ppos, - int (*conv)(unsigned long *lvalp, - unsigned int *valp, - int write, void *data), - void *data); extern int unaligned_enabled; extern int no_unaligned_warning; diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h index 9a25a2911652..c33a96b7391a 100644 --- a/include/linux/sysfs.h +++ b/include/linux/sysfs.h @@ -58,6 +58,12 @@ do { \ #define sysfs_attr_init(attr) do {} while (0) #endif +#ifdef CONFIG_CFI +#define __SYSFS_FUNCTION_ALTERNATIVE(MEMBERS...) struct { MEMBERS } +#else +#define __SYSFS_FUNCTION_ALTERNATIVE(MEMBERS...) 
union { MEMBERS } +#endif + /** * struct attribute_group - data structure used to declare an attribute group. * @name: Optional: Attribute group name @@ -98,14 +104,21 @@ do { \ */ struct attribute_group { const char *name; - umode_t (*is_visible)(struct kobject *, - struct attribute *, int); + __SYSFS_FUNCTION_ALTERNATIVE( + umode_t (*is_visible)(struct kobject *, + struct attribute *, int); + umode_t (*is_visible_const)(struct kobject *, + const struct attribute *, int); + ); umode_t (*is_bin_visible)(struct kobject *, const struct bin_attribute *, int); size_t (*bin_size)(struct kobject *, const struct bin_attribute *, int); - struct attribute **attrs; + union { + struct attribute **attrs; + const struct attribute *const *attrs_const; + }; const struct bin_attribute *const *bin_attrs; }; @@ -238,28 +251,20 @@ struct attribute_group { .store = _store, \ } -#define __ATTR_RO(_name) { \ - .attr = { .name = __stringify(_name), .mode = 0444 }, \ - .show = _name##_show, \ -} - #define __ATTR_RO_MODE(_name, _mode) { \ .attr = { .name = __stringify(_name), \ .mode = VERIFY_OCTAL_PERMISSIONS(_mode) }, \ .show = _name##_show, \ } -#define __ATTR_RW_MODE(_name, _mode) { \ - .attr = { .name = __stringify(_name), \ - .mode = VERIFY_OCTAL_PERMISSIONS(_mode) }, \ - .show = _name##_show, \ - .store = _name##_store, \ -} +#define __ATTR_RO(_name) \ + __ATTR_RO_MODE(_name, 0444) -#define __ATTR_WO(_name) { \ - .attr = { .name = __stringify(_name), .mode = 0200 }, \ - .store = _name##_store, \ -} +#define __ATTR_RW_MODE(_name, _mode) \ + __ATTR(_name, _mode, _name##_show, _name##_store) + +#define __ATTR_WO(_name) \ + __ATTR(_name, 0200, NULL, _name##_store) #define __ATTR_RW(_name) __ATTR(_name, 0644, _name##_show, _name##_store) @@ -284,7 +289,12 @@ static const struct attribute_group *_name##_groups[] = { \ #define ATTRIBUTE_GROUPS(_name) \ static const struct attribute_group _name##_group = { \ - .attrs = _name##_attrs, \ + .attrs = _Generic(_name##_attrs, \ + struct attribute **: \ + _name##_attrs, \ + const struct attribute *const *: \ + (void *)_name##_attrs \ + ), \ }; \ __ATTRIBUTE_GROUPS(_name) diff --git a/include/linux/tpm.h b/include/linux/tpm.h index b15360ff78d7..202da079d500 100644 --- a/include/linux/tpm.h +++ b/include/linux/tpm.h @@ -26,7 +26,9 @@ #include <crypto/aes.h> #define TPM_DIGEST_SIZE 20 /* Max TPM v1.2 PCR size */ -#define TPM_MAX_DIGEST_SIZE SHA512_DIGEST_SIZE + +#define TPM2_MAX_DIGEST_SIZE SHA512_DIGEST_SIZE +#define TPM2_MAX_PCR_BANKS 8 struct tpm_chip; struct trusted_key_payload; @@ -68,7 +70,7 @@ enum tpm2_curves { struct tpm_digest { u16 alg_id; - u8 digest[TPM_MAX_DIGEST_SIZE]; + u8 digest[TPM2_MAX_DIGEST_SIZE]; } __packed; struct tpm_bank_info { @@ -189,7 +191,7 @@ struct tpm_chip { unsigned int groups_cnt; u32 nr_allocated_banks; - struct tpm_bank_info *allocated_banks; + struct tpm_bank_info allocated_banks[TPM2_MAX_PCR_BANKS]; #ifdef CONFIG_ACPI acpi_handle acpi_dev_handle; char ppi_version[TPM_PPI_VERSION_LEN + 1]; @@ -454,8 +456,10 @@ static inline ssize_t tpm_ret_to_err(ssize_t ret) return 0; case TPM2_RC_SESSION_MEMORY: return -ENOMEM; + case TPM2_RC_HASH: + return -EINVAL; default: - return -EFAULT; + return -EPERM; } } @@ -525,41 +529,18 @@ static inline struct tpm2_auth *tpm2_chip_auth(struct tpm_chip *chip) #endif } -void tpm_buf_append_name(struct tpm_chip *chip, struct tpm_buf *buf, - u32 handle, u8 *name); +int tpm_buf_append_name(struct tpm_chip *chip, struct tpm_buf *buf, + u32 handle, u8 *name); void tpm_buf_append_hmac_session(struct tpm_chip *chip, 
struct tpm_buf *buf, u8 attributes, u8 *passphrase, int passphraselen); void tpm_buf_append_auth(struct tpm_chip *chip, struct tpm_buf *buf, - u8 attributes, u8 *passphrase, int passphraselen); -static inline void tpm_buf_append_hmac_session_opt(struct tpm_chip *chip, - struct tpm_buf *buf, - u8 attributes, - u8 *passphrase, - int passphraselen) -{ - struct tpm_header *head; - int offset; - - if (tpm2_chip_auth(chip)) { - tpm_buf_append_hmac_session(chip, buf, attributes, passphrase, passphraselen); - } else { - offset = buf->handles * 4 + TPM_HEADER_SIZE; - head = (struct tpm_header *)buf->data; - - /* - * If the only sessions are optional, the command tag must change to - * TPM2_ST_NO_SESSIONS. - */ - if (tpm_buf_length(buf) == offset) - head->tag = cpu_to_be16(TPM2_ST_NO_SESSIONS); - } -} + u8 *passphrase, int passphraselen); #ifdef CONFIG_TCG_TPM2_HMAC int tpm2_start_auth_session(struct tpm_chip *chip); -void tpm_buf_fill_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf); +int tpm_buf_fill_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf); int tpm_buf_check_hmac_response(struct tpm_chip *chip, struct tpm_buf *buf, int rc); void tpm2_end_auth_session(struct tpm_chip *chip); @@ -573,10 +554,13 @@ static inline int tpm2_start_auth_session(struct tpm_chip *chip) static inline void tpm2_end_auth_session(struct tpm_chip *chip) { } -static inline void tpm_buf_fill_hmac_session(struct tpm_chip *chip, - struct tpm_buf *buf) + +static inline int tpm_buf_fill_hmac_session(struct tpm_chip *chip, + struct tpm_buf *buf) { + return 0; } + static inline int tpm_buf_check_hmac_response(struct tpm_chip *chip, struct tpm_buf *buf, int rc) diff --git a/include/linux/trace_seq.h b/include/linux/trace_seq.h index 557780fe1c77..4a0b8c172d27 100644 --- a/include/linux/trace_seq.h +++ b/include/linux/trace_seq.h @@ -80,6 +80,19 @@ static inline bool trace_seq_has_overflowed(struct trace_seq *s) return s->full || seq_buf_has_overflowed(&s->seq); } +/** + * trace_seq_pop - pop off the last written character + * @s: trace sequence descriptor + * + * Removes the last written character to the trace_seq @s. + * + * Returns the last character or -1 if it is empty. + */ +static inline int trace_seq_pop(struct trace_seq *s) +{ + return seq_buf_pop(&s->seq); +} + /* * Currently only defined when tracing is enabled. */ diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h index 826ce3f8e1f8..8a56f3278b1b 100644 --- a/include/linux/tracepoint.h +++ b/include/linux/tracepoint.h @@ -222,6 +222,15 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p) } /* + * When a tracepoint is used, it's name is added to the __tracepoint_check + * section. This section is only used at build time to make sure all + * defined tracepoints are used. It is discarded after the build. + */ +# define TRACEPOINT_CHECK(name) \ + static const char __used __section("__tracepoint_check") \ + __trace_check_##name[] = #name; + +/* * Make sure the alignment of the structure in the __tracepoints section will * not add unwanted padding between the beginning of the section and the * structure. Force alignment to the same alignment as the section start. 
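/*
 * Illustrative usage sketch (not part of the patch above): trace_seq_pop()
 * (backed by the new seq_buf_pop()) is handy for trimming a separator once a
 * list has been emitted. The event-print helper below is hypothetical.
 */
#include <linux/bits.h>
#include <linux/trace_seq.h>

static void example_print_flag_list(struct trace_seq *s, unsigned long flags)
{
	bool printed = false;
	int i;

	for (i = 0; i < BITS_PER_LONG; i++) {
		if (flags & BIT(i)) {
			trace_seq_printf(s, "F%d|", i);
			printed = true;
		}
	}

	/* drop the trailing '|' left behind by the loop */
	if (printed)
		trace_seq_pop(s);
}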
@@ -270,6 +279,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p) __DECLARE_TRACE_COMMON(name, PARAMS(proto), PARAMS(args), PARAMS(data_proto)) \ static inline void __do_trace_##name(proto) \ { \ + TRACEPOINT_CHECK(name) \ if (cond) { \ guard(preempt_notrace)(); \ __DO_TRACE_CALL(name, TP_ARGS(args)); \ @@ -289,6 +299,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p) __DECLARE_TRACE_COMMON(name, PARAMS(proto), PARAMS(args), PARAMS(data_proto)) \ static inline void __do_trace_##name(proto) \ { \ + TRACEPOINT_CHECK(name) \ guard(rcu_tasks_trace)(); \ __DO_TRACE_CALL(name, TP_ARGS(args)); \ } \ @@ -371,10 +382,12 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p) __DEFINE_TRACE_EXT(_name, NULL, PARAMS(_proto), PARAMS(_args)); #define EXPORT_TRACEPOINT_SYMBOL_GPL(name) \ + TRACEPOINT_CHECK(name) \ EXPORT_SYMBOL_GPL(__tracepoint_##name); \ EXPORT_SYMBOL_GPL(__traceiter_##name); \ EXPORT_STATIC_CALL_GPL(tp_func_##name) #define EXPORT_TRACEPOINT_SYMBOL(name) \ + TRACEPOINT_CHECK(name) \ EXPORT_SYMBOL(__tracepoint_##name); \ EXPORT_SYMBOL(__traceiter_##name); \ EXPORT_STATIC_CALL(tp_func_##name) diff --git a/include/linux/usb/chipidea.h b/include/linux/usb/chipidea.h index e17ebeee24e3..c6451191d2de 100644 --- a/include/linux/usb/chipidea.h +++ b/include/linux/usb/chipidea.h @@ -66,6 +66,7 @@ struct ci_hdrc_platform_data { #define CI_HDRC_HAS_PORTSC_PEC_MISSED BIT(17) #define CI_HDRC_FORCE_VBUS_ACTIVE_ALWAYS BIT(18) #define CI_HDRC_HAS_SHORT_PKT_LIMIT BIT(19) +#define CI_HDRC_OUT_BAND_WAKEUP BIT(20) enum usb_dr_mode dr_mode; #define CI_HDRC_CONTROLLER_RESET_EVENT 0 #define CI_HDRC_CONTROLLER_STOPPED_EVENT 1 diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h index c0e716aec26a..fd5f42765497 100644 --- a/include/linux/userfaultfd_k.h +++ b/include/linux/userfaultfd_k.h @@ -16,7 +16,7 @@ #include <linux/fcntl.h> #include <linux/mm.h> #include <linux/swap.h> -#include <linux/swapops.h> +#include <linux/leafops.h> #include <asm-generic/pgtable_uffd.h> #include <linux/hugetlb_inline.h> @@ -228,15 +228,14 @@ static inline bool vma_can_userfault(struct vm_area_struct *vma, if (wp_async && (vm_flags == VM_UFFD_WP)) return true; -#ifndef CONFIG_PTE_MARKER_UFFD_WP /* * If user requested uffd-wp but not enabled pte markers for * uffd-wp, then shmem & hugetlbfs are not supported but only * anonymous. */ - if ((vm_flags & VM_UFFD_WP) && !vma_is_anonymous(vma)) + if (!uffd_supports_wp_marker() && (vm_flags & VM_UFFD_WP) && + !vma_is_anonymous(vma)) return false; -#endif /* By default, allow any of anon|shmem|hugetlb */ return vma_is_anonymous(vma) || is_vm_hugetlb_page(vma) || @@ -291,6 +290,43 @@ void userfaultfd_release_new(struct userfaultfd_ctx *ctx); void userfaultfd_release_all(struct mm_struct *mm, struct userfaultfd_ctx *ctx); +static inline bool userfaultfd_wp_use_markers(struct vm_area_struct *vma) +{ + /* Only wr-protect mode uses pte markers */ + if (!userfaultfd_wp(vma)) + return false; + + /* File-based uffd-wp always need markers */ + if (!vma_is_anonymous(vma)) + return true; + + /* + * Anonymous uffd-wp only needs the markers if WP_UNPOPULATED + * enabled (to apply markers on zero pages). + */ + return userfaultfd_wp_unpopulated(vma); +} + +/* + * Returns true if this is a swap pte and was uffd-wp wr-protected in either + * forms (pte marker or a normal swap pte), false otherwise. 
+ */ +static inline bool pte_swp_uffd_wp_any(pte_t pte) +{ + if (!uffd_supports_wp_marker()) + return false; + + if (pte_present(pte)) + return false; + + if (pte_swp_uffd_wp(pte)) + return true; + + if (pte_is_uffd_wp_marker(pte)) + return true; + + return false; +} #else /* CONFIG_USERFAULTFD */ /* mm helpers */ @@ -415,49 +451,9 @@ static inline bool vma_has_uffd_without_event_remap(struct vm_area_struct *vma) return false; } -#endif /* CONFIG_USERFAULTFD */ - static inline bool userfaultfd_wp_use_markers(struct vm_area_struct *vma) { - /* Only wr-protect mode uses pte markers */ - if (!userfaultfd_wp(vma)) - return false; - - /* File-based uffd-wp always need markers */ - if (!vma_is_anonymous(vma)) - return true; - - /* - * Anonymous uffd-wp only needs the markers if WP_UNPOPULATED - * enabled (to apply markers on zero pages). - */ - return userfaultfd_wp_unpopulated(vma); -} - -static inline bool pte_marker_entry_uffd_wp(swp_entry_t entry) -{ -#ifdef CONFIG_PTE_MARKER_UFFD_WP - return is_pte_marker_entry(entry) && - (pte_marker_get(entry) & PTE_MARKER_UFFD_WP); -#else - return false; -#endif -} - -static inline bool pte_marker_uffd_wp(pte_t pte) -{ -#ifdef CONFIG_PTE_MARKER_UFFD_WP - swp_entry_t entry; - - if (!is_swap_pte(pte)) - return false; - - entry = pte_to_swp_entry(pte); - - return pte_marker_entry_uffd_wp(entry); -#else return false; -#endif } /* @@ -466,17 +462,7 @@ static inline bool pte_marker_uffd_wp(pte_t pte) */ static inline bool pte_swp_uffd_wp_any(pte_t pte) { -#ifdef CONFIG_PTE_MARKER_UFFD_WP - if (!is_swap_pte(pte)) - return false; - - if (pte_swp_uffd_wp(pte)) - return true; - - if (pte_marker_uffd_wp(pte)) - return true; -#endif return false; } - +#endif /* CONFIG_USERFAULTFD */ #endif /* _LINUX_USERFAULTFD_K_H */ diff --git a/include/linux/vfio.h b/include/linux/vfio.h index eb563f538dee..e90859956514 100644 --- a/include/linux/vfio.h +++ b/include/linux/vfio.h @@ -21,6 +21,7 @@ struct kvm; struct iommufd_ctx; struct iommufd_device; struct iommufd_access; +struct vfio_info_cap; /* * VFIO devices can be placed in a set, this allows all devices to share this @@ -132,6 +133,9 @@ struct vfio_device_ops { size_t count, loff_t *size); long (*ioctl)(struct vfio_device *vdev, unsigned int cmd, unsigned long arg); + int (*get_region_info_caps)(struct vfio_device *vdev, + struct vfio_region_info *info, + struct vfio_info_cap *caps); int (*mmap)(struct vfio_device *vdev, struct vm_area_struct *vma); void (*request)(struct vfio_device *vdev, unsigned int count); int (*match)(struct vfio_device *vdev, char *buf); @@ -297,6 +301,8 @@ static inline void vfio_put_device(struct vfio_device *device) int vfio_register_group_dev(struct vfio_device *device); int vfio_register_emulated_iommu_dev(struct vfio_device *device); void vfio_unregister_group_dev(struct vfio_device *device); +bool vfio_device_try_get_registration(struct vfio_device *device); +void vfio_device_put_registration(struct vfio_device *device); int vfio_assign_device_set(struct vfio_device *device, void *set_id); unsigned int vfio_device_set_open_count(struct vfio_device_set *dev_set); diff --git a/include/linux/vfio_pci_core.h b/include/linux/vfio_pci_core.h index f541044e42a2..706877f998ff 100644 --- a/include/linux/vfio_pci_core.h +++ b/include/linux/vfio_pci_core.h @@ -12,6 +12,7 @@ #include <linux/pci.h> #include <linux/vfio.h> #include <linux/irqbypass.h> +#include <linux/rcupdate.h> #include <linux/types.h> #include <linux/uuid.h> #include <linux/notifier.h> @@ -26,6 +27,14 @@ struct vfio_pci_core_device; 
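/*
 * Illustrative usage sketch (not part of the patch above): with
 * pte_swp_uffd_wp_any() now built on uffd_supports_wp_marker(), a page-table
 * walker can test a non-present pte for prior uffd-wp protection without
 * open-coding the marker cases. The wrapper below is hypothetical.
 */
#include <linux/userfaultfd_k.h>

static bool example_pte_was_uffd_wp(pte_t pte)
{
	if (pte_present(pte))
		return pte_uffd_wp(pte);

	/* swap ptes and uffd-wp pte markers are both handled here */
	return pte_swp_uffd_wp_any(pte);
}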
struct vfio_pci_region; +struct p2pdma_provider; +struct dma_buf_phys_vec; +struct dma_buf_attachment; + +struct vfio_pci_eventfd { + struct eventfd_ctx *ctx; + struct rcu_head rcu; +}; struct vfio_pci_regops { ssize_t (*rw)(struct vfio_pci_core_device *vdev, char __user *buf, @@ -49,9 +58,48 @@ struct vfio_pci_region { u32 flags; }; +struct vfio_pci_device_ops { + int (*get_dmabuf_phys)(struct vfio_pci_core_device *vdev, + struct p2pdma_provider **provider, + unsigned int region_index, + struct dma_buf_phys_vec *phys_vec, + struct vfio_region_dma_range *dma_ranges, + size_t nr_ranges); +}; + +#if IS_ENABLED(CONFIG_VFIO_PCI_DMABUF) +int vfio_pci_core_fill_phys_vec(struct dma_buf_phys_vec *phys_vec, + struct vfio_region_dma_range *dma_ranges, + size_t nr_ranges, phys_addr_t start, + phys_addr_t len); +int vfio_pci_core_get_dmabuf_phys(struct vfio_pci_core_device *vdev, + struct p2pdma_provider **provider, + unsigned int region_index, + struct dma_buf_phys_vec *phys_vec, + struct vfio_region_dma_range *dma_ranges, + size_t nr_ranges); +#else +static inline int +vfio_pci_core_fill_phys_vec(struct dma_buf_phys_vec *phys_vec, + struct vfio_region_dma_range *dma_ranges, + size_t nr_ranges, phys_addr_t start, + phys_addr_t len) +{ + return -EINVAL; +} +static inline int vfio_pci_core_get_dmabuf_phys( + struct vfio_pci_core_device *vdev, struct p2pdma_provider **provider, + unsigned int region_index, struct dma_buf_phys_vec *phys_vec, + struct vfio_region_dma_range *dma_ranges, size_t nr_ranges) +{ + return -EOPNOTSUPP; +} +#endif + struct vfio_pci_core_device { struct vfio_device vdev; struct pci_dev *pdev; + const struct vfio_pci_device_ops *pci_ops; void __iomem *barmap[PCI_STD_NUM_BARS]; bool bar_mmap_supported[PCI_STD_NUM_BARS]; u8 *pci_config_map; @@ -83,8 +131,8 @@ struct vfio_pci_core_device { struct pci_saved_state *pci_saved_state; struct pci_saved_state *pm_save; int ioeventfds_nr; - struct eventfd_ctx *err_trigger; - struct eventfd_ctx *req_trigger; + struct vfio_pci_eventfd __rcu *err_trigger; + struct vfio_pci_eventfd __rcu *req_trigger; struct eventfd_ctx *pm_wake_eventfd_ctx; struct list_head dummy_resources_list; struct mutex ioeventfds_lock; @@ -94,6 +142,7 @@ struct vfio_pci_core_device { struct vfio_pci_core_device *sriov_pf_core_dev; struct notifier_block nb; struct rw_semaphore memory_lock; + struct list_head dmabufs; }; /* Will be exported for vfio pci drivers usage */ @@ -115,10 +164,16 @@ long vfio_pci_core_ioctl(struct vfio_device *core_vdev, unsigned int cmd, unsigned long arg); int vfio_pci_core_ioctl_feature(struct vfio_device *device, u32 flags, void __user *arg, size_t argsz); +int vfio_pci_ioctl_get_region_info(struct vfio_device *core_vdev, + struct vfio_region_info *info, + struct vfio_info_cap *caps); ssize_t vfio_pci_core_read(struct vfio_device *core_vdev, char __user *buf, size_t count, loff_t *ppos); ssize_t vfio_pci_core_write(struct vfio_device *core_vdev, const char __user *buf, size_t count, loff_t *ppos); +vm_fault_t vfio_pci_vmf_insert_pfn(struct vfio_pci_core_device *vdev, + struct vm_fault *vmf, unsigned long pfn, + unsigned int order); int vfio_pci_core_mmap(struct vfio_device *core_vdev, struct vm_area_struct *vma); void vfio_pci_core_request(struct vfio_device *core_vdev, unsigned int count); int vfio_pci_core_match(struct vfio_device *core_vdev, char *buf); @@ -134,6 +189,7 @@ ssize_t vfio_pci_core_do_io_rw(struct vfio_pci_core_device *vdev, bool test_mem, void __iomem *io, char __user *buf, loff_t off, size_t count, size_t x_start, size_t x_end, 
bool iswrite); +bool __vfio_pci_memory_enabled(struct vfio_pci_core_device *vdev); bool vfio_pci_core_range_intersect_range(loff_t buf_start, size_t buf_cnt, loff_t reg_start, size_t reg_cnt, loff_t *buf_offset, @@ -161,4 +217,17 @@ VFIO_IOREAD_DECLARATION(32) VFIO_IOREAD_DECLARATION(64) #endif +static inline bool is_aligned_for_order(struct vm_area_struct *vma, + unsigned long addr, + unsigned long pfn, + unsigned int order) +{ + return !(order && (addr < vma->vm_start || + addr + (PAGE_SIZE << order) > vma->vm_end || + !IS_ALIGNED(pfn, 1 << order))); +} + +int vfio_pci_dma_buf_iommufd_map(struct dma_buf_attachment *attachment, + struct dma_buf_phys_vec *phys); + #endif /* VFIO_PCI_CORE_H */ diff --git a/include/linux/virtio.h b/include/linux/virtio.h index 96c66126c074..132a474e5914 100644 --- a/include/linux/virtio.h +++ b/include/linux/virtio.h @@ -177,7 +177,7 @@ struct virtio_device { union virtio_map vmap; #ifdef CONFIG_VIRTIO_DEBUG struct dentry *debugfs_dir; - u64 debugfs_filter_features[VIRTIO_FEATURES_DWORDS]; + u64 debugfs_filter_features[VIRTIO_FEATURES_U64S]; #endif }; diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h index 16001e9f9b39..69f84ea85d71 100644 --- a/include/linux/virtio_config.h +++ b/include/linux/virtio_config.h @@ -24,7 +24,7 @@ typedef void vq_callback_t(struct virtqueue *); * a virtqueue unused by the driver. * @callback: A callback to invoke on a used buffer notification. * NULL for a virtqueue that does not need a callback. - * @ctx: A flag to indicate to maintain an extra context per virtqueue. + * @ctx: whether to maintain an extra context per virtqueue. */ struct virtqueue_info { const char *name; @@ -80,13 +80,13 @@ struct virtqueue_info { * Returns the first 64 feature bits. * @get_extended_features: * vdev: the virtio_device - * Returns the first VIRTIO_FEATURES_MAX feature bits (all we currently + * Returns the first VIRTIO_FEATURES_BITS feature bits (all we currently * need). * @finalize_features: confirm what device features we'll be using. * vdev: the virtio_device * This sends the driver feature bits to the device: it can change * the dev->feature bits if it wants. - * Note that despite the name this can be called any number of + * Note that despite the name this can be called any number of * times. 
* Returns 0 on success or error status * @bus_name: return the bus name associated with the device (optional) @@ -141,8 +141,8 @@ struct virtio_config_ops { /** * struct virtio_map_ops - operations for mapping buffer for a virtio device - * Note: For transport that has its own mapping logic it must - * implements all of the operations + * Note: For a transport that has its own mapping logic it must + * implement all of the operations * @map_page: map a buffer to the device * map: metadata for performing mapping * page: the page that will be mapped by the device @@ -150,7 +150,7 @@ struct virtio_config_ops { * size: the buffer size * dir: mapping direction * attrs: mapping attributes - * Returns: the mapped address + * Returns the mapped address * @unmap_page: unmap a buffer from the device * map: device specific mapping map * map_handle: the mapped address @@ -172,23 +172,23 @@ struct virtio_config_ops { * size: the size of the buffer * map_handle: the mapping address to sync * gfp: allocation flag (GFP_XXX) - * Returns: virtual address of the allocated buffer + * Returns virtual address of the allocated buffer * @free: free a coherent buffer mapping * map: metadata for performing mapping * size: the size of the buffer * vaddr: virtual address of the buffer - * map_handle: the mapping address to sync + * map_handle: the mapping address that needs to be freed * attrs: unmapping attributes * @need_sync: if the buffer needs synchronization * map: metadata for performing mapping * map_handle: the mapped address - * Returns: whether the buffer needs synchronization + * Returns whether the buffer needs synchronization * @mapping_error: if the mapping address is error * map: metadata for performing mapping * map_handle: the mapped address * @max_mapping_size: get the maximum buffer size that can be mapped * map: metadata for performing mapping - * Returns: the maximum buffer size that can be mapped + * Returns the maximum buffer size that can be mapped */ struct virtio_map_ops { dma_addr_t (*map_page)(union virtio_map map, struct page *page, @@ -362,7 +362,7 @@ void virtio_device_ready(struct virtio_device *dev) * specific set_status() method. * * A well behaved device will only notify a virtqueue after - * DRIVER_OK, this means the device should "see" the coherenct + * DRIVER_OK, this means the device should "see" the coherent * memory write that set vq->broken as false which is done by * the driver when it sees DRIVER_OK, then the following * driver's vring_interrupt() will see vq->broken as false so @@ -384,7 +384,7 @@ const char *virtio_bus_name(struct virtio_device *vdev) * @vq: the virtqueue * @cpu_mask: the cpu mask * - * Pay attention the function are best-effort: the affinity hint may not be set + * Note that this function is best-effort: the affinity hint may not be set * due to config support, irq type and sharing. 
* */ diff --git a/include/linux/virtio_features.h b/include/linux/virtio_features.h index f748f2f87de8..ea2ad8717882 100644 --- a/include/linux/virtio_features.h +++ b/include/linux/virtio_features.h @@ -4,15 +4,16 @@ #include <linux/bits.h> -#define VIRTIO_FEATURES_DWORDS 2 -#define VIRTIO_FEATURES_MAX (VIRTIO_FEATURES_DWORDS * 64) -#define VIRTIO_FEATURES_WORDS (VIRTIO_FEATURES_DWORDS * 2) +#define VIRTIO_FEATURES_U64S 2 +#define VIRTIO_FEATURES_BITS (VIRTIO_FEATURES_U64S * 64) + #define VIRTIO_BIT(b) BIT_ULL((b) & 0x3f) -#define VIRTIO_DWORD(b) ((b) >> 6) +#define VIRTIO_U64(b) ((b) >> 6) + #define VIRTIO_DECLARE_FEATURES(name) \ union { \ u64 name; \ - u64 name##_array[VIRTIO_FEATURES_DWORDS];\ + u64 name##_array[VIRTIO_FEATURES_U64S];\ } static inline bool virtio_features_chk_bit(unsigned int bit) @@ -22,9 +23,9 @@ static inline bool virtio_features_chk_bit(unsigned int bit) * Don't care returning the correct value: the build * will fail before any bad features access */ - BUILD_BUG_ON(bit >= VIRTIO_FEATURES_MAX); + BUILD_BUG_ON(bit >= VIRTIO_FEATURES_BITS); } else { - if (WARN_ON_ONCE(bit >= VIRTIO_FEATURES_MAX)) + if (WARN_ON_ONCE(bit >= VIRTIO_FEATURES_BITS)) return false; } return true; @@ -34,26 +35,26 @@ static inline bool virtio_features_test_bit(const u64 *features, unsigned int bit) { return virtio_features_chk_bit(bit) && - !!(features[VIRTIO_DWORD(bit)] & VIRTIO_BIT(bit)); + !!(features[VIRTIO_U64(bit)] & VIRTIO_BIT(bit)); } static inline void virtio_features_set_bit(u64 *features, unsigned int bit) { if (virtio_features_chk_bit(bit)) - features[VIRTIO_DWORD(bit)] |= VIRTIO_BIT(bit); + features[VIRTIO_U64(bit)] |= VIRTIO_BIT(bit); } static inline void virtio_features_clear_bit(u64 *features, unsigned int bit) { if (virtio_features_chk_bit(bit)) - features[VIRTIO_DWORD(bit)] &= ~VIRTIO_BIT(bit); + features[VIRTIO_U64(bit)] &= ~VIRTIO_BIT(bit); } static inline void virtio_features_zero(u64 *features) { - memset(features, 0, sizeof(features[0]) * VIRTIO_FEATURES_DWORDS); + memset(features, 0, sizeof(features[0]) * VIRTIO_FEATURES_U64S); } static inline void virtio_features_from_u64(u64 *features, u64 from) @@ -66,7 +67,7 @@ static inline bool virtio_features_equal(const u64 *f1, const u64 *f2) { int i; - for (i = 0; i < VIRTIO_FEATURES_DWORDS; ++i) + for (i = 0; i < VIRTIO_FEATURES_U64S; ++i) if (f1[i] != f2[i]) return false; return true; @@ -74,14 +75,14 @@ static inline bool virtio_features_equal(const u64 *f1, const u64 *f2) static inline void virtio_features_copy(u64 *to, const u64 *from) { - memcpy(to, from, sizeof(to[0]) * VIRTIO_FEATURES_DWORDS); + memcpy(to, from, sizeof(to[0]) * VIRTIO_FEATURES_U64S); } static inline void virtio_features_andnot(u64 *to, const u64 *f1, const u64 *f2) { int i; - for (i = 0; i < VIRTIO_FEATURES_DWORDS; i++) + for (i = 0; i < VIRTIO_FEATURES_U64S; i++) to[i] = f1[i] & ~f2[i]; } diff --git a/include/linux/virtio_pci_modern.h b/include/linux/virtio_pci_modern.h index 48bc12d1045b..9a3f2fc53bd6 100644 --- a/include/linux/virtio_pci_modern.h +++ b/include/linux/virtio_pci_modern.h @@ -107,7 +107,7 @@ void vp_modern_set_extended_features(struct virtio_pci_modern_device *mdev, static inline u64 vp_modern_get_features(struct virtio_pci_modern_device *mdev) { - u64 features_array[VIRTIO_FEATURES_DWORDS]; + u64 features_array[VIRTIO_FEATURES_U64S]; vp_modern_get_extended_features(mdev, features_array); return features_array[0]; @@ -116,11 +116,11 @@ vp_modern_get_features(struct virtio_pci_modern_device *mdev) static inline u64 
vp_modern_get_driver_features(struct virtio_pci_modern_device *mdev) { - u64 features_array[VIRTIO_FEATURES_DWORDS]; + u64 features_array[VIRTIO_FEATURES_U64S]; int i; vp_modern_get_driver_extended_features(mdev, features_array); - for (i = 1; i < VIRTIO_FEATURES_DWORDS; ++i) + for (i = 1; i < VIRTIO_FEATURES_U64S; ++i) WARN_ON_ONCE(features_array[i]); return features_array[0]; } @@ -128,7 +128,7 @@ vp_modern_get_driver_features(struct virtio_pci_modern_device *mdev) static inline void vp_modern_set_features(struct virtio_pci_modern_device *mdev, u64 features) { - u64 features_array[VIRTIO_FEATURES_DWORDS]; + u64 features_array[VIRTIO_FEATURES_U64S]; virtio_features_from_u64(features_array, features); vp_modern_set_extended_features(mdev, features_array); diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index eb54b7b3202f..e8e94f90d686 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h @@ -50,7 +50,11 @@ struct iov_iter; /* in uio.h */ #endif struct vm_struct { - struct vm_struct *next; + union { + struct vm_struct *next; /* Early registration of vm_areas. */ + struct llist_node llnode; /* Asynchronous freeing on error paths. */ + }; + void *addr; unsigned long size; unsigned long flags; @@ -328,4 +332,6 @@ bool vmalloc_dump_obj(void *object); static inline bool vmalloc_dump_obj(void *object) { return false; } #endif +unsigned int memalloc_apply_gfp_scope(gfp_t gfp_mask); +void memalloc_restore_scope(unsigned int flags); #endif /* _LINUX_VMALLOC_H */ diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h index c287998908bf..3398a345bda8 100644 --- a/include/linux/vmstat.h +++ b/include/linux/vmstat.h @@ -520,32 +520,12 @@ static inline const char *vm_event_name(enum vm_event_item item) #ifdef CONFIG_MEMCG -void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx, +void mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx, int val); -static inline void mod_lruvec_state(struct lruvec *lruvec, - enum node_stat_item idx, int val) -{ - unsigned long flags; - - local_irq_save(flags); - __mod_lruvec_state(lruvec, idx, val); - local_irq_restore(flags); -} - -void __lruvec_stat_mod_folio(struct folio *folio, +void lruvec_stat_mod_folio(struct folio *folio, enum node_stat_item idx, int val); -static inline void lruvec_stat_mod_folio(struct folio *folio, - enum node_stat_item idx, int val) -{ - unsigned long flags; - - local_irq_save(flags); - __lruvec_stat_mod_folio(folio, idx, val); - local_irq_restore(flags); -} - static inline void mod_lruvec_page_state(struct page *page, enum node_stat_item idx, int val) { @@ -554,24 +534,12 @@ static inline void mod_lruvec_page_state(struct page *page, #else -static inline void __mod_lruvec_state(struct lruvec *lruvec, - enum node_stat_item idx, int val) -{ - __mod_node_page_state(lruvec_pgdat(lruvec), idx, val); -} - static inline void mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx, int val) { mod_node_page_state(lruvec_pgdat(lruvec), idx, val); } -static inline void __lruvec_stat_mod_folio(struct folio *folio, - enum node_stat_item idx, int val) -{ - __mod_node_page_state(folio_pgdat(folio), idx, val); -} - static inline void lruvec_stat_mod_folio(struct folio *folio, enum node_stat_item idx, int val) { @@ -586,18 +554,6 @@ static inline void mod_lruvec_page_state(struct page *page, #endif /* CONFIG_MEMCG */ -static inline void __lruvec_stat_add_folio(struct folio *folio, - enum node_stat_item idx) -{ - __lruvec_stat_mod_folio(folio, idx, folio_nr_pages(folio)); -} - -static inline 
void __lruvec_stat_sub_folio(struct folio *folio, - enum node_stat_item idx) -{ - __lruvec_stat_mod_folio(folio, idx, -folio_nr_pages(folio)); -} - static inline void lruvec_stat_add_folio(struct folio *folio, enum node_stat_item idx) {
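A few illustrative sketches of how the interfaces added above might be used follow; none of them are taken from the patch itself. First, the err_trigger and req_trigger fields change from bare eventfd_ctx pointers to RCU-protected struct vfio_pci_eventfd wrappers. A minimal read-side sketch, assuming the usual RCU dereference pattern (the my_vfio_* name is hypothetical):

#include <linux/eventfd.h>
#include <linux/rcupdate.h>
#include <linux/vfio_pci_core.h>

/* Hypothetical signalling path: dereference the RCU-protected wrapper. */
static void my_vfio_signal_req(struct vfio_pci_core_device *vdev)
{
	struct vfio_pci_eventfd *efd;

	rcu_read_lock();
	efd = rcu_dereference(vdev->req_trigger);
	if (efd)
		eventfd_signal(efd->ctx);
	rcu_read_unlock();
}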
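The new pci_ops hook and the dma-buf helpers are only declared in this header. As a rough sketch (all my_vfio_* names are hypothetical), a variant driver with ordinary BAR-backed regions might simply forward the callback to the core implementation:

#include <linux/vfio_pci_core.h>

/* Illustrative wiring of the get_dmabuf_phys() hook; not from the patch. */
static int my_vfio_get_dmabuf_phys(struct vfio_pci_core_device *vdev,
				   struct p2pdma_provider **provider,
				   unsigned int region_index,
				   struct dma_buf_phys_vec *phys_vec,
				   struct vfio_region_dma_range *dma_ranges,
				   size_t nr_ranges)
{
	/* Plain BAR regions: defer to the core helper declared above. */
	return vfio_pci_core_get_dmabuf_phys(vdev, provider, region_index,
					     phys_vec, dma_ranges, nr_ranges);
}

static const struct vfio_pci_device_ops my_vfio_pci_ops = {
	.get_dmabuf_phys = my_vfio_get_dmabuf_phys,
};

/* During device initialization: vdev->pci_ops = &my_vfio_pci_ops; */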
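is_aligned_for_order() and vfio_pci_vmf_insert_pfn() suggest a huge-fault pattern. The sketch below is an assumption about intended use rather than code from the patch: attempt the requested order only when the faulting address and PFN are aligned for it, otherwise fall back to a single-page insert (order 0 always passes the check):

#include <linux/mm.h>
#include <linux/vfio_pci_core.h>

static vm_fault_t my_vfio_bar_fault(struct vfio_pci_core_device *vdev,
				    struct vm_fault *vmf, unsigned long pfn,
				    unsigned int order)
{
	/* Assumed fallback policy; the header only provides the check. */
	if (!is_aligned_for_order(vmf->vma, vmf->address, pfn, order))
		order = 0;

	return vfio_pci_vmf_insert_pfn(vdev, vmf, pfn, order);
}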
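The virtio_features rename is mechanical (VIRTIO_FEATURES_DWORDS becomes VIRTIO_FEATURES_U64S, VIRTIO_FEATURES_MAX becomes VIRTIO_FEATURES_BITS, VIRTIO_DWORD becomes VIRTIO_U64); the helpers keep their semantics. A small usage sketch with a hypothetical function name:

#include <linux/virtio_config.h>	/* VIRTIO_F_VERSION_1 (bit 32) */
#include <linux/virtio_features.h>

static bool example_has_version_1(void)
{
	u64 features[VIRTIO_FEATURES_U64S];

	virtio_features_zero(features);
	/* Bit 32 lands in features[VIRTIO_U64(32)], i.e. features[0]. */
	virtio_features_set_bit(features, VIRTIO_F_VERSION_1);

	return virtio_features_test_bit(features, VIRTIO_F_VERSION_1);
}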
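memalloc_apply_gfp_scope() and memalloc_restore_scope() are only declared here; judging from the names and the save/restore-style return value, the intended pairing is presumably the usual scope bracket. A minimal sketch under that assumption (the function and its body are hypothetical):

#include <linux/vmalloc.h>

static void my_constrained_work(gfp_t gfp_mask)
{
	unsigned int scope;

	/* Assumed: apply NOIO/NOFS-style constraints implied by gfp_mask. */
	scope = memalloc_apply_gfp_scope(gfp_mask);
	/* ... nested allocations that must honour gfp_mask ... */
	memalloc_restore_scope(scope);
}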
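Finally, the vmstat change drops the irq-unsafe __lruvec_stat_*() variants and the open-coded irq-save wrappers; mod_lruvec_state() and lruvec_stat_mod_folio() become out-of-line under CONFIG_MEMCG. For callers the conversion is mechanical, roughly as below (hypothetical caller, NR_FILE_DIRTY chosen only as an example):

#include <linux/mm.h>
#include <linux/vmstat.h>

static void my_account_dirty(struct folio *folio)
{
	/* was: __lruvec_stat_mod_folio(folio, NR_FILE_DIRTY, nr); */
	lruvec_stat_mod_folio(folio, NR_FILE_DIRTY, folio_nr_pages(folio));
}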
