Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/acpi.h | 18
-rw-r--r--  include/linux/alloc_tag.h | 12
-rw-r--r--  include/linux/arch_topology.h | 8
-rw-r--r--  include/linux/arm_sdei.h | 4
-rw-r--r--  include/linux/auxiliary_bus.h | 17
-rw-r--r--  include/linux/binfmts.h | 1
-rw-r--r--  include/linux/bio.h | 26
-rw-r--r--  include/linux/blk-mq.h | 10
-rw-r--r--  include/linux/blk_types.h | 10
-rw-r--r--  include/linux/blkdev.h | 24
-rw-r--r--  include/linux/bpf-cgroup.h | 17
-rw-r--r--  include/linux/bpf.h | 20
-rw-r--r--  include/linux/bpf_verifier.h | 24
-rw-r--r--  include/linux/btf.h | 1
-rw-r--r--  include/linux/can/dev.h | 28
-rw-r--r--  include/linux/cgroup-defs.h | 100
-rw-r--r--  include/linux/cgroup.h | 50
-rw-r--r--  include/linux/cleanup.h | 19
-rw-r--r--  include/linux/codetag.h | 8
-rw-r--r--  include/linux/compiler-version.h | 30
-rw-r--r--  include/linux/compiler_types.h | 5
-rw-r--r--  include/linux/configfs.h | 8
-rw-r--r--  include/linux/coredump.h | 1
-rw-r--r--  include/linux/cpu.h | 4
-rw-r--r--  include/linux/cpufreq.h | 22
-rw-r--r--  include/linux/cpuhotplug.h | 1
-rw-r--r--  include/linux/cpumask.h | 75
-rw-r--r--  include/linux/crc16.h | 9
-rw-r--r--  include/linux/crc32.h | 28
-rw-r--r--  include/linux/crypto.h | 85
-rw-r--r--  include/linux/dcache.h | 4
-rw-r--r--  include/linux/dccp.h | 289
-rw-r--r--  include/linux/device.h | 38
-rw-r--r--  include/linux/device/devres.h | 41
-rw-r--r--  include/linux/device_cgroup.h | 7
-rw-r--r--  include/linux/dma-buf.h | 31
-rw-r--r--  include/linux/dma-fence-unwrap.h | 2
-rw-r--r--  include/linux/dma-fence.h | 25
-rw-r--r--  include/linux/dma-map-ops.h | 54
-rw-r--r--  include/linux/dma-mapping.h | 85
-rw-r--r--  include/linux/dmapool.h | 21
-rw-r--r--  include/linux/energy_model.h | 2
-rw-r--r--  include/linux/entry-common.h | 43
-rw-r--r--  include/linux/ethtool.h | 98
-rw-r--r--  include/linux/execmem.h | 11
-rw-r--r--  include/linux/fanotify.h | 5
-rw-r--r--  include/linux/file.h | 2
-rw-r--r--  include/linux/find.h | 25
-rw-r--r--  include/linux/fs.h | 48
-rw-r--r--  include/linux/fs_parser.h | 7
-rw-r--r--  include/linux/fsl/ntmp.h | 121
-rw-r--r--  include/linux/fsnotify_backend.h | 1
-rw-r--r--  include/linux/ftrace.h | 11
-rw-r--r--  include/linux/futex.h | 36
-rw-r--r--  include/linux/gpio/consumer.h | 11
-rw-r--r--  include/linux/gpio/driver.h | 5
-rw-r--r--  include/linux/highmem-internal.h | 13
-rw-r--r--  include/linux/highmem.h | 10
-rw-r--r--  include/linux/hugetlb.h | 5
-rw-r--r--  include/linux/hyperv.h | 7
-rw-r--r--  include/linux/ieee80211.h | 78
-rw-r--r--  include/linux/ima.h | 3
-rw-r--r--  include/linux/inet.h | 2
-rw-r--r--  include/linux/intel_vsec.h | 5
-rw-r--r--  include/linux/interrupt.h | 2
-rw-r--r--  include/linux/io.h | 21
-rw-r--r--  include/linux/io_uring/cmd.h | 9
-rw-r--r--  include/linux/io_uring_types.h | 15
-rw-r--r--  include/linux/iommu.h | 4
-rw-r--r--  include/linux/ipmi.h | 13
-rw-r--r--  include/linux/irq.h | 28
-rw-r--r--  include/linux/irqchip/irq-msi-lib.h | 27
-rw-r--r--  include/linux/irqdomain.h | 499
-rw-r--r--  include/linux/jbd2.h | 5
-rw-r--r--  include/linux/jiffies.h | 2
-rw-r--r--  include/linux/kexec.h | 9
-rw-r--r--  include/linux/kvm_dirty_ring.h | 11
-rw-r--r--  include/linux/kvm_host.h | 10
-rw-r--r--  include/linux/livepatch_sched.h | 14
-rw-r--r--  include/linux/mdio.h | 5
-rw-r--r--  include/linux/mfd/max77759.h | 165
-rw-r--r--  include/linux/micrel_phy.h | 1
-rw-r--r--  include/linux/misc_cgroup.h | 4
-rw-r--r--  include/linux/mm.h | 60
-rw-r--r--  include/linux/mm_types.h | 7
-rw-r--r--  include/linux/mman.h | 2
-rw-r--r--  include/linux/mmap_lock.h | 4
-rw-r--r--  include/linux/mmc/card.h | 1
-rw-r--r--  include/linux/mmc/slot-gpio.h | 4
-rw-r--r--  include/linux/mmzone.h | 1
-rw-r--r--  include/linux/mod_devicetable.h | 2
-rw-r--r--  include/linux/module.h | 5
-rw-r--r--  include/linux/mount.h | 87
-rw-r--r--  include/linux/mroute_base.h | 5
-rw-r--r--  include/linux/msi.h | 23
-rw-r--r--  include/linux/namei.h | 17
-rw-r--r--  include/linux/net.h | 19
-rw-r--r--  include/linux/net/intel/iidc.h | 109
-rw-r--r--  include/linux/net/intel/iidc_rdma.h | 68
-rw-r--r--  include/linux/net/intel/iidc_rdma_ice.h | 70
-rw-r--r--  include/linux/net_tstamp.h | 7
-rw-r--r--  include/linux/netdevice.h | 49
-rw-r--r--  include/linux/netdevice_xmit.h | 6
-rw-r--r--  include/linux/netfilter.h | 15
-rw-r--r--  include/linux/netlink.h | 3
-rw-r--r--  include/linux/nfs_fs_sb.h | 12
-rw-r--r--  include/linux/nvme.h | 77
-rw-r--r--  include/linux/of_reserved_mem.h | 26
-rw-r--r--  include/linux/overflow.h | 23
-rw-r--r--  include/linux/page-flags.h | 7
-rw-r--r--  include/linux/page_table_check.h | 30
-rw-r--r--  include/linux/panic.h | 2
-rw-r--r--  include/linux/part_stat.h | 2
-rw-r--r--  include/linux/pci-p2pdma.h | 85
-rw-r--r--  include/linux/pci.h | 4
-rw-r--r--  include/linux/pci_ids.h | 2
-rw-r--r--  include/linux/pds/pds_adminq.h | 3
-rw-r--r--  include/linux/percpu-rwsem.h | 20
-rw-r--r--  include/linux/percpu.h | 4
-rw-r--r--  include/linux/perf_event.h | 298
-rw-r--r--  include/linux/pgalloc_tag.h | 8
-rw-r--r--  include/linux/phy.h | 70
-rw-r--r--  include/linux/phy_fixed.h | 30
-rw-r--r--  include/linux/pid.h | 2
-rw-r--r--  include/linux/pidfs.h | 8
-rw-r--r--  include/linux/platform_data/mlxreg.h | 4
-rw-r--r--  include/linux/platform_data/x86/asus-wmi.h | 19
-rw-r--r--  include/linux/platform_data/x86/int3472.h | 166
-rw-r--r--  include/linux/pm_domain.h | 10
-rw-r--r--  include/linux/pm_opp.h | 32
-rw-r--r--  include/linux/pm_runtime.h | 4
-rw-r--r--  include/linux/pm_wakeup.h | 15
-rw-r--r--  include/linux/poison.h | 4
-rw-r--r--  include/linux/power_supply.h | 4
-rw-r--r--  include/linux/psp-sev.h | 3
-rw-r--r--  include/linux/ptp_clock_kernel.h | 18
-rw-r--r--  include/linux/pwm.h | 10
-rw-r--r--  include/linux/ratelimit.h | 37
-rw-r--r--  include/linux/ratelimit_types.h | 5
-rw-r--r--  include/linux/rcuref.h | 22
-rw-r--r--  include/linux/regmap.h | 3
-rw-r--r--  include/linux/regulator/max8952.h | 2
-rw-r--r--  include/linux/regulator/pca9450.h | 5
-rw-r--r--  include/linux/resctrl.h | 38
-rw-r--r--  include/linux/resctrl_types.h | 16
-rw-r--r--  include/linux/restart_block.h | 2
-rw-r--r--  include/linux/ring_buffer.h | 1
-rw-r--r--  include/linux/sched.h | 22
-rw-r--r--  include/linux/sched/topology.h | 6
-rw-r--r--  include/linux/screen_info.h | 9
-rw-r--r--  include/linux/security.h | 12
-rw-r--r--  include/linux/shmem_fs.h | 7
-rw-r--r--  include/linux/skbuff.h | 41
-rw-r--r--  include/linux/skbuff_ref.h | 4
-rw-r--r--  include/linux/soc/mediatek/mtk_wed.h | 2
-rw-r--r--  include/linux/socket.h | 2
-rw-r--r--  include/linux/sony-laptop.h | 39
-rw-r--r--  include/linux/soundwire/sdw_intel.h | 2
-rw-r--r--  include/linux/spi/sh_msiof.h | 125
-rw-r--r--  include/linux/spi/spi.h | 78
-rw-r--r--  include/linux/stat.h | 1
-rw-r--r--  include/linux/stmmac.h | 4
-rw-r--r--  include/linux/string_helpers.h | 1
-rw-r--r--  include/linux/sunrpc/svc.h | 44
-rw-r--r--  include/linux/sunrpc/svc_rdma.h | 6
-rw-r--r--  include/linux/sunrpc/svc_xprt.h | 1
-rw-r--r--  include/linux/sunrpc/svcsock.h | 4
-rw-r--r--  include/linux/suspend.h | 9
-rw-r--r--  include/linux/sysfs.h | 27
-rw-r--r--  include/linux/tcp.h | 5
-rw-r--r--  include/linux/tfrc.h | 51
-rw-r--r--  include/linux/tick.h | 7
-rw-r--r--  include/linux/timer.h | 42
-rw-r--r--  include/linux/topology.h | 9
-rw-r--r--  include/linux/tpm.h | 21
-rw-r--r--  include/linux/tpm_svsm.h | 149
-rw-r--r--  include/linux/tracepoint.h | 38
-rw-r--r--  include/linux/tsm-mr.h | 89
-rw-r--r--  include/linux/tsm.h | 22
-rw-r--r--  include/linux/ubsan.h | 6
-rw-r--r--  include/linux/udp.h | 19
-rw-r--r--  include/linux/uio.h | 8
-rw-r--r--  include/linux/vermagic.h | 1
-rw-r--r--  include/linux/virtio_config.h | 2
-rw-r--r--  include/linux/virtio_vsock.h | 1
-rw-r--r--  include/linux/vmalloc.h | 17
-rw-r--r--  include/linux/workqueue.h | 6
187 files changed, 3512 insertions, 1779 deletions
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 3f2e93ed9730..f4b3d442b7df 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -772,6 +772,10 @@ int acpi_get_local_u64_address(acpi_handle handle, u64 *addr);
int acpi_get_local_address(acpi_handle handle, u32 *addr);
const char *acpi_get_subsystem_id(acpi_handle handle);
+#ifdef CONFIG_ACPI_MRRM
+int acpi_mrrm_max_mem_region(void);
+#endif
+
#else /* !CONFIG_ACPI */
#define acpi_disabled 1
@@ -1092,6 +1096,11 @@ static inline acpi_handle acpi_get_processor_handle(int cpu)
return NULL;
}
+static inline int acpi_mrrm_max_mem_region(void)
+{
+ return 1;
+}
+
#endif /* !CONFIG_ACPI */
#ifdef CONFIG_ACPI_HMAT
@@ -1125,13 +1134,13 @@ void acpi_os_set_prepare_extended_sleep(int (*func)(u8 sleep_state,
acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state,
u32 val_a, u32 val_b);
-#if defined(CONFIG_SUSPEND) && defined(CONFIG_X86)
struct acpi_s2idle_dev_ops {
struct list_head list_node;
void (*prepare)(void);
void (*check)(void);
void (*restore)(void);
};
+#if defined(CONFIG_SUSPEND) && defined(CONFIG_X86)
int acpi_register_lps0_dev(struct acpi_s2idle_dev_ops *arg);
void acpi_unregister_lps0_dev(struct acpi_s2idle_dev_ops *arg);
int acpi_get_lps0_constraint(struct acpi_device *adev);
@@ -1140,6 +1149,13 @@ static inline int acpi_get_lps0_constraint(struct device *dev)
{
return ACPI_STATE_UNKNOWN;
}
+static inline int acpi_register_lps0_dev(struct acpi_s2idle_dev_ops *arg)
+{
+ return -ENODEV;
+}
+static inline void acpi_unregister_lps0_dev(struct acpi_s2idle_dev_ops *arg)
+{
+}
#endif /* CONFIG_SUSPEND && CONFIG_X86 */
void arch_reserve_mem_area(acpi_physical_address addr, size_t size);
#else
diff --git a/include/linux/alloc_tag.h b/include/linux/alloc_tag.h
index a946e0203e6d..8f7931eb7d16 100644
--- a/include/linux/alloc_tag.h
+++ b/include/linux/alloc_tag.h
@@ -104,6 +104,16 @@ DECLARE_PER_CPU(struct alloc_tag_counters, _shared_alloc_tag);
#else /* ARCH_NEEDS_WEAK_PER_CPU */
+#ifdef MODULE
+
+#define DEFINE_ALLOC_TAG(_alloc_tag) \
+ static struct alloc_tag _alloc_tag __used __aligned(8) \
+ __section(ALLOC_TAG_SECTION_NAME) = { \
+ .ct = CODE_TAG_INIT, \
+ .counters = NULL };
+
+#else /* MODULE */
+
#define DEFINE_ALLOC_TAG(_alloc_tag) \
static DEFINE_PER_CPU(struct alloc_tag_counters, _alloc_tag_cntr); \
static struct alloc_tag _alloc_tag __used __aligned(8) \
@@ -111,6 +121,8 @@ DECLARE_PER_CPU(struct alloc_tag_counters, _shared_alloc_tag);
.ct = CODE_TAG_INIT, \
.counters = &_alloc_tag_cntr };
+#endif /* MODULE */
+
#endif /* ARCH_NEEDS_WEAK_PER_CPU */
DECLARE_STATIC_KEY_MAYBE(CONFIG_MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT,
diff --git a/include/linux/arch_topology.h b/include/linux/arch_topology.h
index 2222e8b03ff4..d72d6e5aa200 100644
--- a/include/linux/arch_topology.h
+++ b/include/linux/arch_topology.h
@@ -14,14 +14,6 @@ int topology_update_cpu_topology(void);
struct device_node;
bool topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu);
-DECLARE_PER_CPU(unsigned long, cpu_scale);
-
-static inline unsigned long topology_get_cpu_scale(int cpu)
-{
- return per_cpu(cpu_scale, cpu);
-}
-
-void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity);
DECLARE_PER_CPU(unsigned long, capacity_freq_ref);
diff --git a/include/linux/arm_sdei.h b/include/linux/arm_sdei.h
index 255701e1251b..f652a5028b59 100644
--- a/include/linux/arm_sdei.h
+++ b/include/linux/arm_sdei.h
@@ -46,12 +46,12 @@ int sdei_unregister_ghes(struct ghes *ghes);
/* For use by arch code when CPU hotplug notifiers are not appropriate. */
int sdei_mask_local_cpu(void);
int sdei_unmask_local_cpu(void);
-void __init sdei_init(void);
+void __init acpi_sdei_init(void);
void sdei_handler_abort(void);
#else
static inline int sdei_mask_local_cpu(void) { return 0; }
static inline int sdei_unmask_local_cpu(void) { return 0; }
-static inline void sdei_init(void) { }
+static inline void acpi_sdei_init(void) { }
static inline void sdei_handler_abort(void) { }
#endif /* CONFIG_ARM_SDE_INTERFACE */
diff --git a/include/linux/auxiliary_bus.h b/include/linux/auxiliary_bus.h
index 65dd7f154374..4086afd0cc6b 100644
--- a/include/linux/auxiliary_bus.h
+++ b/include/linux/auxiliary_bus.h
@@ -254,6 +254,23 @@ int __auxiliary_driver_register(struct auxiliary_driver *auxdrv, struct module *
void auxiliary_driver_unregister(struct auxiliary_driver *auxdrv);
+struct auxiliary_device *auxiliary_device_create(struct device *dev,
+ const char *modname,
+ const char *devname,
+ void *platform_data,
+ int id);
+void auxiliary_device_destroy(void *auxdev);
+
+struct auxiliary_device *__devm_auxiliary_device_create(struct device *dev,
+ const char *modname,
+ const char *devname,
+ void *platform_data,
+ int id);
+
+#define devm_auxiliary_device_create(dev, devname, platform_data) \
+ __devm_auxiliary_device_create(dev, KBUILD_MODNAME, devname, \
+ platform_data, 0)
+
/**
* module_auxiliary_driver() - Helper macro for registering an auxiliary driver
* @__auxiliary_driver: auxiliary driver struct
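A minimal sketch of how a parent driver might use the new managed wrapper above; the function name, the "rdma" sub-device name, and the NULL-on-failure check are illustrative assumptions, not part of this diff.

#include <linux/auxiliary_bus.h>

static int example_parent_probe(struct device *dev, void *pdata)
{
	struct auxiliary_device *adev;

	/* Creates "<KBUILD_MODNAME>.rdma" with id 0 and tears it down
	 * automatically when the parent device is unbound. */
	adev = devm_auxiliary_device_create(dev, "rdma", pdata);
	if (!adev)	/* assumed: NULL on failure */
		return -ENODEV;

	return 0;
}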
diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
index 1625c8529e70..65abd5ab8836 100644
--- a/include/linux/binfmts.h
+++ b/include/linux/binfmts.h
@@ -90,7 +90,6 @@ struct linux_binfmt {
struct list_head lh;
struct module *module;
int (*load_binary)(struct linux_binprm *);
- int (*load_shlib)(struct file *);
#ifdef CONFIG_COREDUMP
int (*core_dump)(struct coredump_params *cprm);
unsigned long min_coredump; /* minimal dump size */
diff --git a/include/linux/bio.h b/include/linux/bio.h
index cafc7c215de8..9c37c66ef9ca 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -11,6 +11,7 @@
#include <linux/uio.h>
#define BIO_MAX_VECS 256U
+#define BIO_MAX_INLINE_VECS UIO_MAXIOV
struct queue_limits;
@@ -402,7 +403,6 @@ static inline int bio_iov_vecs_to_alloc(struct iov_iter *iter, int max_segs)
struct request_queue;
-extern int submit_bio_wait(struct bio *bio);
void bio_init(struct bio *bio, struct block_device *bdev, struct bio_vec *table,
unsigned short max_vecs, blk_opf_t opf);
extern void bio_uninit(struct bio *);
@@ -417,6 +417,30 @@ void __bio_add_page(struct bio *bio, struct page *page,
unsigned int len, unsigned int off);
void bio_add_folio_nofail(struct bio *bio, struct folio *folio, size_t len,
size_t off);
+void bio_add_virt_nofail(struct bio *bio, void *vaddr, unsigned len);
+
+/**
+ * bio_add_max_vecs - number of bio_vecs needed to add data to a bio
+ * @kaddr: kernel virtual address to add
+ * @len: length in bytes to add
+ *
+ * Calculate how many bio_vecs need to be allocated to add the kernel virtual
+ * address range in [@kaddr:@len] in the worst case.
+ */
+static inline unsigned int bio_add_max_vecs(void *kaddr, unsigned int len)
+{
+ if (is_vmalloc_addr(kaddr))
+ return DIV_ROUND_UP(offset_in_page(kaddr) + len, PAGE_SIZE);
+ return 1;
+}
+
+unsigned int bio_add_vmalloc_chunk(struct bio *bio, void *vaddr, unsigned len);
+bool bio_add_vmalloc(struct bio *bio, void *vaddr, unsigned int len);
+
+int submit_bio_wait(struct bio *bio);
+int bdev_rw_virt(struct block_device *bdev, sector_t sector, void *data,
+ size_t len, enum req_op op);
+
int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter);
void bio_iov_bvec_set(struct bio *bio, const struct iov_iter *iter);
void __bio_release_pages(struct bio *bio, bool mark_dirty);
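A minimal sketch of the consolidated kernel-buffer I/O path these declarations enable; the caller name and buffer are hypothetical, and only bdev_rw_virt() from this diff is assumed.

#include <linux/bio.h>
#include <linux/blkdev.h>

static int example_read_block(struct block_device *bdev, sector_t sector,
			      void *buf, size_t len)
{
	/* One synchronous call replaces the old bio_init() + add pages +
	 * submit_bio_wait() pattern; works for kmalloc and vmalloc
	 * buffers alike (see bio_add_max_vecs() above). */
	return bdev_rw_virt(bdev, sector, buf, len, REQ_OP_READ);
}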
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 8eb9b3310167..de8c85a03bb7 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -9,6 +9,7 @@
#include <linux/prefetch.h>
#include <linux/srcu.h>
#include <linux/rw_hint.h>
+#include <linux/rwsem.h>
struct blk_mq_tags;
struct blk_flush_queue;
@@ -506,6 +507,9 @@ enum hctx_type {
* request_queue.tag_set_list.
* @srcu: Use as lock when type of the request queue is blocking
* (BLK_MQ_F_BLOCKING).
+ * @update_nr_hwq_lock:
+ * Synchronize updating nr_hw_queues with add/del disk &
+ * switching elevator.
*/
struct blk_mq_tag_set {
const struct blk_mq_ops *ops;
@@ -527,6 +531,8 @@ struct blk_mq_tag_set {
struct mutex tag_list_lock;
struct list_head tag_list;
struct srcu_struct *srcu;
+
+ struct rw_semaphore update_nr_hwq_lock;
};
/**
@@ -1031,8 +1037,8 @@ int blk_rq_map_user_io(struct request *, struct rq_map_data *,
int blk_rq_map_user_iov(struct request_queue *, struct request *,
struct rq_map_data *, const struct iov_iter *, gfp_t);
int blk_rq_unmap_user(struct bio *);
-int blk_rq_map_kern(struct request_queue *, struct request *, void *,
- unsigned int, gfp_t);
+int blk_rq_map_kern(struct request *rq, void *kbuf, unsigned int len,
+ gfp_t gfp);
int blk_rq_append_bio(struct request *rq, struct bio *bio);
void blk_execute_rq_nowait(struct request *rq, bool at_head);
blk_status_t blk_execute_rq(struct request *rq, bool at_head);
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index dce7615c35e7..3d1577f07c1c 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -220,6 +220,7 @@ struct bio {
unsigned short bi_flags; /* BIO_* below */
unsigned short bi_ioprio;
enum rw_hint bi_write_hint;
+ u8 bi_write_stream;
blk_status_t bi_status;
atomic_t __bi_remaining;
@@ -286,7 +287,6 @@ struct bio {
enum {
BIO_PAGE_PINNED, /* Unpin pages in bio_release_pages() */
BIO_CLONED, /* doesn't own data */
- BIO_BOUNCED, /* bio is a bounce bio */
BIO_QUIET, /* Make BIO Quiet */
BIO_CHAIN, /* chained bio, ->bi_remaining in effect */
BIO_REFFED, /* bio has elevated ->bi_cnt */
@@ -296,6 +296,14 @@ enum {
* of this bio. */
BIO_CGROUP_ACCT, /* has been accounted to a cgroup */
BIO_QOS_THROTTLED, /* bio went through rq_qos throttle path */
+ /*
+ * This bio has completed bps throttling at the single tg granularity,
+ * which is different from BIO_BPS_THROTTLED. When the bio is enqueued
+ * into the sq->queued of the upper tg, or is about to be dispatched,
+ * this flag needs to be cleared. Since blk-throttle and rq_qos are not
+ * on the same hierarchical level, reuse the value.
+ */
+ BIO_TG_BPS_THROTTLED = BIO_QOS_THROTTLED,
BIO_QOS_MERGED, /* but went through rq_qos merge path */
BIO_REMAPPED,
BIO_ZONE_WRITE_PLUGGING, /* bio handled through zone write plugging */
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 9a1f0ee40b56..332b56f323d9 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -182,7 +182,6 @@ struct gendisk {
struct list_head slave_bdevs;
#endif
struct timer_rand_state *random;
- atomic_t sync_io; /* RAID */
struct disk_events *ev;
#ifdef CONFIG_BLK_DEV_ZONED
@@ -218,6 +217,8 @@ struct gendisk {
* devices that do not have multiple independent access ranges.
*/
struct blk_independent_access_ranges *ia_ranges;
+
+ struct mutex rqos_state_mutex; /* rqos state change mutex */
};
/**
@@ -331,9 +332,6 @@ typedef unsigned int __bitwise blk_features_t;
/* skip this queue in blk_mq_(un)quiesce_tagset */
#define BLK_FEAT_SKIP_TAGSET_QUIESCE ((__force blk_features_t)(1u << 13))
-/* bounce all highmem pages */
-#define BLK_FEAT_BOUNCE_HIGH ((__force blk_features_t)(1u << 14))
-
/* undocumented magic for bcache */
#define BLK_FEAT_RAID_PARTIAL_STRIPES_EXPENSIVE \
((__force blk_features_t)(1u << 15))
@@ -347,7 +345,7 @@ typedef unsigned int __bitwise blk_features_t;
*/
#define BLK_FEAT_INHERIT_MASK \
(BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA | BLK_FEAT_ROTATIONAL | \
- BLK_FEAT_STABLE_WRITES | BLK_FEAT_ZONED | BLK_FEAT_BOUNCE_HIGH | \
+ BLK_FEAT_STABLE_WRITES | BLK_FEAT_ZONED | \
BLK_FEAT_RAID_PARTIAL_STRIPES_EXPENSIVE)
/* internal flags in queue_limits.flags */
@@ -405,6 +403,9 @@ struct queue_limits {
unsigned short max_integrity_segments;
unsigned short max_discard_segments;
+ unsigned short max_write_streams;
+ unsigned int write_stream_granularity;
+
unsigned int max_open_zones;
unsigned int max_active_zones;
@@ -644,6 +645,8 @@ enum {
QUEUE_FLAG_RQ_ALLOC_TIME, /* record rq->alloc_time_ns */
QUEUE_FLAG_HCTX_ACTIVE, /* at least one blk-mq hctx is active */
QUEUE_FLAG_SQ_SCHED, /* single queue style io dispatch */
+ QUEUE_FLAG_DISABLE_WBT_DEF, /* for sched to disable/enable wbt */
+ QUEUE_FLAG_NO_ELV_SWITCH, /* can't switch elevator any more */
QUEUE_FLAG_MAX
};
@@ -679,6 +682,10 @@ void blk_queue_flag_clear(unsigned int flag, struct request_queue *q);
#define blk_queue_sq_sched(q) test_bit(QUEUE_FLAG_SQ_SCHED, &(q)->queue_flags)
#define blk_queue_skip_tagset_quiesce(q) \
((q)->limits.features & BLK_FEAT_SKIP_TAGSET_QUIESCE)
+#define blk_queue_disable_wbt(q) \
+ test_bit(QUEUE_FLAG_DISABLE_WBT_DEF, &(q)->queue_flags)
+#define blk_queue_no_elv_switch(q) \
+ test_bit(QUEUE_FLAG_NO_ELV_SWITCH, &(q)->queue_flags)
extern void blk_set_pm_only(struct request_queue *q);
extern void blk_clear_pm_only(struct request_queue *q);
@@ -1288,6 +1295,13 @@ static inline unsigned int bdev_max_segments(struct block_device *bdev)
return queue_max_segments(bdev_get_queue(bdev));
}
+static inline unsigned short bdev_max_write_streams(struct block_device *bdev)
+{
+ if (bdev_is_partition(bdev))
+ return 0;
+ return bdev_limits(bdev)->max_write_streams;
+}
+
static inline unsigned queue_logical_block_size(const struct request_queue *q)
{
return q->limits.logical_block_size;
diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h
index 9de7adb68294..70c8b94e797a 100644
--- a/include/linux/bpf-cgroup.h
+++ b/include/linux/bpf-cgroup.h
@@ -114,8 +114,7 @@ struct bpf_prog_list {
u32 flags;
};
-int cgroup_bpf_inherit(struct cgroup *cgrp);
-void cgroup_bpf_offline(struct cgroup *cgrp);
+void __init cgroup_bpf_lifetime_notifier_init(void);
int __cgroup_bpf_run_filter_skb(struct sock *sk,
struct sk_buff *skb,
@@ -427,12 +426,12 @@ int cgroup_bpf_prog_query(const union bpf_attr *attr,
const struct bpf_func_proto *
cgroup_common_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog);
-const struct bpf_func_proto *
-cgroup_current_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog);
#else
-static inline int cgroup_bpf_inherit(struct cgroup *cgrp) { return 0; }
-static inline void cgroup_bpf_offline(struct cgroup *cgrp) {}
+static inline void cgroup_bpf_lifetime_notifier_init(void)
+{
+ return;
+}
static inline int cgroup_bpf_prog_attach(const union bpf_attr *attr,
enum bpf_prog_type ptype,
@@ -465,12 +464,6 @@ cgroup_common_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return NULL;
}
-static inline const struct bpf_func_proto *
-cgroup_current_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
-{
- return NULL;
-}
-
static inline int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux,
struct bpf_map *map) { return 0; }
static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 3f0cc89c0622..5b25d278409b 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -346,6 +346,12 @@ static inline const char *btf_field_type_name(enum btf_field_type type)
}
}
+#if IS_ENABLED(CONFIG_DEBUG_KERNEL)
+#define BPF_WARN_ONCE(cond, format...) WARN_ONCE(cond, format)
+#else
+#define BPF_WARN_ONCE(cond, format...) BUILD_BUG_ON_INVALID(cond)
+#endif
+
static inline u32 btf_field_type_size(enum btf_field_type type)
{
switch (type) {
@@ -1349,6 +1355,20 @@ u32 __bpf_dynptr_size(const struct bpf_dynptr_kern *ptr);
const void *__bpf_dynptr_data(const struct bpf_dynptr_kern *ptr, u32 len);
void *__bpf_dynptr_data_rw(const struct bpf_dynptr_kern *ptr, u32 len);
bool __bpf_dynptr_is_rdonly(const struct bpf_dynptr_kern *ptr);
+int __bpf_dynptr_write(const struct bpf_dynptr_kern *dst, u32 offset,
+ void *src, u32 len, u64 flags);
+void *bpf_dynptr_slice_rdwr(const struct bpf_dynptr *p, u32 offset,
+ void *buffer__opt, u32 buffer__szk);
+
+static inline int bpf_dynptr_check_off_len(const struct bpf_dynptr_kern *ptr, u32 offset, u32 len)
+{
+ u32 size = __bpf_dynptr_size(ptr);
+
+ if (len > size || offset > size - len)
+ return -E2BIG;
+
+ return 0;
+}
#ifdef CONFIG_BPF_JIT
int bpf_trampoline_link_prog(struct bpf_tramp_link *link,
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 9734544b6957..256274acb1d8 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -356,7 +356,11 @@ enum {
INSN_F_SPI_MASK = 0x3f, /* 6 bits */
INSN_F_SPI_SHIFT = 3, /* shifted 3 bits to the left */
- INSN_F_STACK_ACCESS = BIT(9), /* we need 10 bits total */
+ INSN_F_STACK_ACCESS = BIT(9),
+
+ INSN_F_DST_REG_STACK = BIT(10), /* dst_reg is PTR_TO_STACK */
+ INSN_F_SRC_REG_STACK = BIT(11), /* src_reg is PTR_TO_STACK */
+ /* total 12 bits are used now. */
};
static_assert(INSN_F_FRAMENO_MASK + 1 >= MAX_CALL_FRAMES);
@@ -365,9 +369,9 @@ static_assert(INSN_F_SPI_MASK + 1 >= MAX_BPF_STACK / 8);
struct bpf_insn_hist_entry {
u32 idx;
/* insn idx can't be bigger than 1 million */
- u32 prev_idx : 22;
- /* special flags, e.g., whether insn is doing register stack spill/load */
- u32 flags : 10;
+ u32 prev_idx : 20;
+ /* special INSN_F_xxx flags */
+ u32 flags : 12;
/* additional registers that need precision tracking when this
* jump is backtracked, vector of six 10-bit records
*/
@@ -591,6 +595,7 @@ struct bpf_insn_aux_data {
* bpf_fastcall pattern.
*/
u8 fastcall_spills_num:3;
+ u8 arg_prog:4;
/* below fields are initialized once */
unsigned int orig_idx; /* original instruction index */
@@ -838,6 +843,17 @@ __printf(3, 4) void verbose_linfo(struct bpf_verifier_env *env,
u32 insn_off,
const char *prefix_fmt, ...);
+#define verifier_bug_if(cond, env, fmt, args...) \
+ ({ \
+ bool __cond = (cond); \
+ if (unlikely(__cond)) { \
+ BPF_WARN_ONCE(1, "verifier bug: " fmt "(" #cond ")\n", ##args); \
+ bpf_log(&env->log, "verifier bug: " fmt "(" #cond ")\n", ##args); \
+ } \
+ (__cond); \
+ })
+#define verifier_bug(env, fmt, args...) verifier_bug_if(1, env, fmt, ##args)
+
static inline struct bpf_func_state *cur_func(struct bpf_verifier_env *env)
{
struct bpf_verifier_state *cur = env->cur_state;
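A hypothetical caller showing the intended pattern for verifier_bug_if(): the macro evaluates to the condition itself, so it can gate an error return while also emitting a one-time WARN and a verifier log message. The function and message text are illustrative only.

static int example_check_reg(struct bpf_verifier_env *env, int regno)
{
	if (verifier_bug_if(regno < 0 || regno >= MAX_BPF_REG, env,
			    "regno %d out of range", regno))
		return -EFAULT;

	return 0;
}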
diff --git a/include/linux/btf.h b/include/linux/btf.h
index ebc0c0c9b944..b2983706292f 100644
--- a/include/linux/btf.h
+++ b/include/linux/btf.h
@@ -522,6 +522,7 @@ bool btf_param_match_suffix(const struct btf *btf,
const char *suffix);
int btf_ctx_arg_offset(const struct btf *btf, const struct btf_type *func_proto,
u32 arg_no);
+u32 btf_ctx_arg_idx(struct btf *btf, const struct btf_type *func_proto, int off);
struct bpf_verifier_log;
diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h
index 23492213ea35..492d23bec7be 100644
--- a/include/linux/can/dev.h
+++ b/include/linux/can/dev.h
@@ -38,6 +38,17 @@ enum can_termination_gpio {
CAN_TERMINATION_GPIO_MAX,
};
+struct data_bittiming_params {
+ const struct can_bittiming_const *data_bittiming_const;
+ struct can_bittiming data_bittiming;
+ const struct can_tdc_const *tdc_const;
+ struct can_tdc tdc;
+ const u32 *data_bitrate_const;
+ unsigned int data_bitrate_const_cnt;
+ int (*do_set_data_bittiming)(struct net_device *dev);
+ int (*do_get_auto_tdcv)(const struct net_device *dev, u32 *tdcv);
+};
+
/*
* CAN common private data
*/
@@ -45,16 +56,11 @@ struct can_priv {
struct net_device *dev;
struct can_device_stats can_stats;
- const struct can_bittiming_const *bittiming_const,
- *data_bittiming_const;
- struct can_bittiming bittiming, data_bittiming;
- const struct can_tdc_const *tdc_const;
- struct can_tdc tdc;
-
+ const struct can_bittiming_const *bittiming_const;
+ struct can_bittiming bittiming;
+ struct data_bittiming_params fd;
unsigned int bitrate_const_cnt;
const u32 *bitrate_const;
- const u32 *data_bitrate_const;
- unsigned int data_bitrate_const_cnt;
u32 bitrate_max;
struct can_clock clock;
@@ -77,14 +83,12 @@ struct can_priv {
struct delayed_work restart_work;
int (*do_set_bittiming)(struct net_device *dev);
- int (*do_set_data_bittiming)(struct net_device *dev);
int (*do_set_mode)(struct net_device *dev, enum can_mode mode);
int (*do_set_termination)(struct net_device *dev, u16 term);
int (*do_get_state)(const struct net_device *dev,
enum can_state *state);
int (*do_get_berr_counter)(const struct net_device *dev,
struct can_berr_counter *bec);
- int (*do_get_auto_tdcv)(const struct net_device *dev, u32 *tdcv);
};
static inline bool can_tdc_is_enabled(const struct can_priv *priv)
@@ -114,11 +118,11 @@ static inline bool can_tdc_is_enabled(const struct can_priv *priv)
*/
static inline s32 can_get_relative_tdco(const struct can_priv *priv)
{
- const struct can_bittiming *dbt = &priv->data_bittiming;
+ const struct can_bittiming *dbt = &priv->fd.data_bittiming;
s32 sample_point_in_tc = (CAN_SYNC_SEG + dbt->prop_seg +
dbt->phase_seg1) * dbt->brp;
- return (s32)priv->tdc.tdco - sample_point_in_tc;
+ return (s32)priv->fd.tdc.tdco - sample_point_in_tc;
}
/* helper to define static CAN controller features at device creation time */
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 5bc8f55c8cca..e61687d5e496 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -170,6 +170,23 @@ struct cgroup_subsys_state {
struct percpu_ref refcnt;
/*
+ * Depending on the context, this field is initialized
+ * via css_rstat_init() at different places:
+ *
+ * when css is associated with cgroup::self
+ * when css->cgroup is the root cgroup
+ * performed in cgroup_init()
+ * when css->cgroup is not the root cgroup
+ * performed in cgroup_create()
+ * when css is associated with a subsystem
+ * when css->cgroup is the root cgroup
+ * performed in cgroup_init_subsys() in the non-early path
+ * when css->cgroup is not the root cgroup
+ * performed in css_create()
+ */
+ struct css_rstat_cpu __percpu *rstat_cpu;
+
+ /*
* siblings list anchored at the parent's ->children
*
* linkage is protected by cgroup_mutex or RCU
@@ -177,9 +194,6 @@ struct cgroup_subsys_state {
struct list_head sibling;
struct list_head children;
- /* flush target list anchored at cgrp->rstat_css_list */
- struct list_head rstat_css_node;
-
/*
* PI: Subsys-unique ID. 0 is unused and root is always 1. The
* matching css can be looked up using css_from_id().
@@ -219,6 +233,16 @@ struct cgroup_subsys_state {
* Protected by cgroup_mutex.
*/
int nr_descendants;
+
+ /*
+ * A singly-linked list of css structures to be rstat flushed.
+ * This is a scratch field to be used exclusively by
+ * css_rstat_flush().
+ *
+ * Protected by rstat_base_lock when css is cgroup::self.
+ * Protected by css->ss->rstat_ss_lock otherwise.
+ */
+ struct cgroup_subsys_state *rstat_flush_next;
};
/*
@@ -329,10 +353,10 @@ struct cgroup_base_stat {
/*
* rstat - cgroup scalable recursive statistics. Accounting is done
- * per-cpu in cgroup_rstat_cpu which is then lazily propagated up the
+ * per-cpu in css_rstat_cpu which is then lazily propagated up the
* hierarchy on reads.
*
- * When a stat gets updated, the cgroup_rstat_cpu and its ancestors are
+ * When a stat gets updated, the css_rstat_cpu and its ancestors are
* linked into the updated tree. On the following read, propagation only
* considers and consumes the updated tree. This makes reading O(the
* number of descendants which have been active since last read) instead of
@@ -344,10 +368,29 @@ struct cgroup_base_stat {
* frequency decreases the cost of each read.
*
* This struct hosts both the fields which implement the above -
- * updated_children and updated_next - and the fields which track basic
- * resource statistics on top of it - bsync, bstat and last_bstat.
+ * updated_children and updated_next.
*/
-struct cgroup_rstat_cpu {
+struct css_rstat_cpu {
+ /*
+ * Child cgroups with stat updates on this cpu since the last read
+ * are linked on the parent's ->updated_children through
+ * ->updated_next. updated_children is terminated by its container css.
+ *
+ * In addition to being more compact, singly-linked list pointing to
+ * the css makes it unnecessary for each per-cpu struct to point back
+ * to the associated css.
+ *
+ * Protected by per-cpu css->ss->rstat_ss_cpu_lock.
+ */
+ struct cgroup_subsys_state *updated_children;
+ struct cgroup_subsys_state *updated_next; /* NULL if not on the list */
+};
+
+/*
+ * This struct hosts the fields which track basic resource statistics -
+ * bsync, bstat and last_bstat.
+ */
+struct cgroup_rstat_base_cpu {
/*
* ->bsync protects ->bstat. These are the only fields which get
* updated in the hot path.
@@ -374,20 +417,6 @@ struct cgroup_rstat_cpu {
* deltas to propagate to the per-cpu subtree_bstat.
*/
struct cgroup_base_stat last_subtree_bstat;
-
- /*
- * Child cgroups with stat updates on this cpu since the last read
- * are linked on the parent's ->updated_children through
- * ->updated_next.
- *
- * In addition to being more compact, singly-linked list pointing
- * to the cgroup makes it unnecessary for each per-cpu struct to
- * point back to the associated cgroup.
- *
- * Protected by per-cpu cgroup_rstat_cpu_lock.
- */
- struct cgroup *updated_children; /* terminated by self cgroup */
- struct cgroup *updated_next; /* NULL iff not on the list */
};
struct cgroup_freezer_state {
@@ -516,23 +545,23 @@ struct cgroup {
struct cgroup *dom_cgrp;
struct cgroup *old_dom_cgrp; /* used while enabling threaded */
- /* per-cpu recursive resource statistics */
- struct cgroup_rstat_cpu __percpu *rstat_cpu;
- struct list_head rstat_css_list;
-
/*
- * Add padding to separate the read mostly rstat_cpu and
- * rstat_css_list into a different cacheline from the following
- * rstat_flush_next and *bstat fields which can have frequent updates.
+ * Depending on the context, this field is initialized via
+ * css_rstat_init() at different places:
+ *
+ * when cgroup is the root cgroup
+ * performed in cgroup_setup_root()
+ * otherwise
+ * performed in cgroup_create()
*/
- CACHELINE_PADDING(_pad_);
+ struct cgroup_rstat_base_cpu __percpu *rstat_base_cpu;
/*
- * A singly-linked list of cgroup structures to be rstat flushed.
- * This is a scratch field to be used exclusively by
- * cgroup_rstat_flush_locked() and protected by cgroup_rstat_lock.
+ * Add padding to keep the read mostly rstat per-cpu pointer on a
+ * different cacheline than the following *bstat fields which can have
+ * frequent updates.
*/
- struct cgroup *rstat_flush_next;
+ CACHELINE_PADDING(_pad_);
/* cgroup basic resource statistics */
struct cgroup_base_stat last_bstat;
@@ -790,6 +819,9 @@ struct cgroup_subsys {
* specifies the mask of subsystems that this one depends on.
*/
unsigned int depends_on;
+
+ spinlock_t rstat_ss_lock;
+ raw_spinlock_t __percpu *rstat_ss_cpu_lock;
};
extern struct percpu_rw_semaphore cgroup_threadgroup_rwsem;
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index e7da3c3b098b..b18fb5fcb38e 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -19,6 +19,7 @@
#include <linux/kernfs.h>
#include <linux/jump_label.h>
#include <linux/types.h>
+#include <linux/notifier.h>
#include <linux/ns_common.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
@@ -40,7 +41,7 @@ struct kernel_clone_args;
#ifdef CONFIG_CGROUPS
-enum {
+enum css_task_iter_flags {
CSS_TASK_ITER_PROCS = (1U << 0), /* walk only threadgroup leaders */
CSS_TASK_ITER_THREADED = (1U << 1), /* walk all threaded css_sets in the domain */
CSS_TASK_ITER_SKIPPED = (1U << 16), /* internal flags */
@@ -66,10 +67,16 @@ struct css_task_iter {
struct list_head iters_node; /* css_set->task_iters */
};
+enum cgroup_lifetime_events {
+ CGROUP_LIFETIME_ONLINE,
+ CGROUP_LIFETIME_OFFLINE,
+};
+
extern struct file_system_type cgroup_fs_type;
extern struct cgroup_root cgrp_dfl_root;
extern struct css_set init_css_set;
extern spinlock_t css_set_lock;
+extern struct blocking_notifier_head cgroup_lifetime_notifier;
#define SUBSYS(_x) extern struct cgroup_subsys _x ## _cgrp_subsys;
#include <linux/cgroup_subsys.h>
@@ -347,6 +354,17 @@ static inline bool css_is_dying(struct cgroup_subsys_state *css)
return css->flags & CSS_DYING;
}
+static inline bool css_is_self(struct cgroup_subsys_state *css)
+{
+ if (css == &css->cgroup->self) {
+ /* cgroup::self should not have subsystem association */
+ WARN_ON(css->ss != NULL);
+ return true;
+ }
+
+ return false;
+}
+
static inline void cgroup_get(struct cgroup *cgrp)
{
css_get(&cgrp->self);
@@ -688,8 +706,8 @@ static inline void cgroup_path_from_kernfs_id(u64 id, char *buf, size_t buflen)
/*
* cgroup scalable recursive statistics.
*/
-void cgroup_rstat_updated(struct cgroup *cgrp, int cpu);
-void cgroup_rstat_flush(struct cgroup *cgrp);
+void css_rstat_updated(struct cgroup_subsys_state *css, int cpu);
+void css_rstat_flush(struct cgroup_subsys_state *css);
/*
* Basic resource stats.
@@ -785,6 +803,17 @@ struct cgroup_namespace *copy_cgroup_ns(unsigned long flags,
int cgroup_path_ns(struct cgroup *cgrp, char *buf, size_t buflen,
struct cgroup_namespace *ns);
+static inline void get_cgroup_ns(struct cgroup_namespace *ns)
+{
+ refcount_inc(&ns->ns.count);
+}
+
+static inline void put_cgroup_ns(struct cgroup_namespace *ns)
+{
+ if (refcount_dec_and_test(&ns->ns.count))
+ free_cgroup_ns(ns);
+}
+
#else /* !CONFIG_CGROUPS */
static inline void free_cgroup_ns(struct cgroup_namespace *ns) { }
@@ -795,19 +824,10 @@ copy_cgroup_ns(unsigned long flags, struct user_namespace *user_ns,
return old_ns;
}
-#endif /* !CONFIG_CGROUPS */
+static inline void get_cgroup_ns(struct cgroup_namespace *ns) { }
+static inline void put_cgroup_ns(struct cgroup_namespace *ns) { }
-static inline void get_cgroup_ns(struct cgroup_namespace *ns)
-{
- if (ns)
- refcount_inc(&ns->ns.count);
-}
-
-static inline void put_cgroup_ns(struct cgroup_namespace *ns)
-{
- if (ns && refcount_dec_and_test(&ns->ns.count))
- free_cgroup_ns(ns);
-}
+#endif /* !CONFIG_CGROUPS */
#ifdef CONFIG_CGROUPS
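A rough sketch of how a controller might drive the renamed css-granular rstat API; the per-cpu counter is a stand-in (real controllers keep per-css counters), and only css_rstat_updated()/css_rstat_flush() from this diff are assumed.

static DEFINE_PER_CPU(u64, example_bytes);

static void example_charge(struct cgroup_subsys_state *css, u64 bytes)
{
	int cpu = get_cpu();

	this_cpu_add(example_bytes, bytes);
	/* Link @css into this CPU's updated tree; a later
	 * css_rstat_flush(css) propagates the deltas up the hierarchy. */
	css_rstat_updated(css, cpu);
	put_cpu();
}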
diff --git a/include/linux/cleanup.h b/include/linux/cleanup.h
index 7e57047e1564..7093e1d08af0 100644
--- a/include/linux/cleanup.h
+++ b/include/linux/cleanup.h
@@ -216,6 +216,25 @@ const volatile void * __must_check_fn(const volatile void *val)
#define return_ptr(p) return no_free_ptr(p)
+/*
+ * Only for situations where an allocation is handed in to another function
+ * and consumed by that function on success.
+ *
+ * struct foo *f __free(kfree) = kzalloc(sizeof(*f), GFP_KERNEL);
+ *
+ * setup(f);
+ * if (some_condition)
+ * return -EINVAL;
+ * ....
+ * ret = bar(f);
+ * if (!ret)
+ * retain_and_null_ptr(f);
+ * return ret;
+ *
+ * After retain_and_null_ptr(f) the variable f is NULL and cannot be
+ * dereferenced anymore.
+ */
+#define retain_and_null_ptr(p) ((void)__get_and_null(p, NULL))
/*
* DEFINE_CLASS(name, type, exit, init, init_args...):
diff --git a/include/linux/codetag.h b/include/linux/codetag.h
index d14dbd26b370..0ee4c21c6dbc 100644
--- a/include/linux/codetag.h
+++ b/include/linux/codetag.h
@@ -36,10 +36,10 @@ union codetag_ref {
struct codetag_type_desc {
const char *section;
size_t tag_size;
- void (*module_load)(struct codetag_type *cttype,
- struct codetag_module *cmod);
- void (*module_unload)(struct codetag_type *cttype,
- struct codetag_module *cmod);
+ void (*module_load)(struct module *mod,
+ struct codetag *start, struct codetag *end);
+ void (*module_unload)(struct module *mod,
+ struct codetag *start, struct codetag *end);
#ifdef CONFIG_MODULES
void (*module_replaced)(struct module *mod, struct module *new_mod);
bool (*needs_section_mem)(struct module *mod, unsigned long size);
diff --git a/include/linux/compiler-version.h b/include/linux/compiler-version.h
index 573fa85b6c0c..ac1665a98a15 100644
--- a/include/linux/compiler-version.h
+++ b/include/linux/compiler-version.h
@@ -12,3 +12,33 @@
* and add dependency on include/config/CC_VERSION_TEXT, which is touched
* by Kconfig when the version string from the compiler changes.
*/
+
+/* Additional tree-wide dependencies start here. */
+
+/*
+ * If any of the GCC plugins change, we need to rebuild everything that
+ * was built with them, as they may have changed their behavior and those
+ * behaviors may need to be synchronized across all translation units.
+ */
+#ifdef GCC_PLUGINS
+#include <generated/gcc-plugins.h>
+#endif
+
+/*
+ * If the randstruct seed itself changes (whether for GCC plugins or
+ * Clang), the entire tree needs to be rebuilt since the randomization of
+ * structures may change between compilation units if not.
+ */
+#ifdef RANDSTRUCT
+#include <generated/randstruct_hash.h>
+#endif
+
+/*
+ * If any external changes affect Clang's integer wrapping sanitizer
+ * behavior, a full rebuild is needed as the coverage for wrapping types
+ * may have changed, which may impact the expected behaviors that should
+ * not differ between compilation units.
+ */
+#ifdef INTEGER_WRAP
+#include <generated/integer-wrap.h>
+#endif
diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h
index 501cffddc2f4..20881cc761fa 100644
--- a/include/linux/compiler_types.h
+++ b/include/linux/compiler_types.h
@@ -449,6 +449,11 @@ struct ftrace_likely_data {
/*
* When the size of an allocated object is needed, use the best available
* mechanism to find it. (For cases where sizeof() cannot be used.)
+ *
+ * Optional: only supported since gcc >= 12
+ *
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Object-Size-Checking.html
+ * clang: https://clang.llvm.org/docs/LanguageExtensions.html#evaluating-object-size
*/
#if __has_builtin(__builtin_dynamic_object_size)
#define __struct_size(p) __builtin_dynamic_object_size(p, 0)
diff --git a/include/linux/configfs.h b/include/linux/configfs.h
index c771e9d0d0b9..698520b1bfdb 100644
--- a/include/linux/configfs.h
+++ b/include/linux/configfs.h
@@ -120,15 +120,19 @@ struct configfs_attribute {
ssize_t (*store)(struct config_item *, const char *, size_t);
};
-#define CONFIGFS_ATTR(_pfx, _name) \
+#define CONFIGFS_ATTR_PERM(_pfx, _name, _perm) \
static struct configfs_attribute _pfx##attr_##_name = { \
.ca_name = __stringify(_name), \
- .ca_mode = S_IRUGO | S_IWUSR, \
+ .ca_mode = _perm, \
.ca_owner = THIS_MODULE, \
.show = _pfx##_name##_show, \
.store = _pfx##_name##_store, \
}
+#define CONFIGFS_ATTR(_pfx, _name) CONFIGFS_ATTR_PERM( \
+ _pfx, _name, S_IRUGO | S_IWUSR \
+)
+
#define CONFIGFS_ATTR_RO(_pfx, _name) \
static struct configfs_attribute _pfx##attr_##_name = { \
.ca_name = __stringify(_name), \
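A hypothetical attribute using the new permission-aware variant; the example_children_ prefix and the show/store handlers below are assumptions for illustration.

static ssize_t example_children_secret_show(struct config_item *item,
					    char *page)
{
	return sysfs_emit(page, "hidden\n");
}

static ssize_t example_children_secret_store(struct config_item *item,
					     const char *page, size_t count)
{
	return count;	/* accept and discard, for illustration */
}

/* 0600 instead of the CONFIGFS_ATTR() default of S_IRUGO | S_IWUSR */
CONFIGFS_ATTR_PERM(example_children_, secret, S_IRUSR | S_IWUSR);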
diff --git a/include/linux/coredump.h b/include/linux/coredump.h
index 77e6e195d1d6..76e41805b92d 100644
--- a/include/linux/coredump.h
+++ b/include/linux/coredump.h
@@ -28,6 +28,7 @@ struct coredump_params {
int vma_count;
size_t vma_data_size;
struct core_vma_metadata *vma_meta;
+ struct pid *pid;
};
extern unsigned int core_file_note_size_limit;
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index e3049543008b..e6089abc28e2 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -78,6 +78,10 @@ extern ssize_t cpu_show_gds(struct device *dev,
extern ssize_t cpu_show_reg_file_data_sampling(struct device *dev,
struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_ghostwrite(struct device *dev, struct device_attribute *attr, char *buf);
+extern ssize_t cpu_show_old_microcode(struct device *dev,
+ struct device_attribute *attr, char *buf);
+extern ssize_t cpu_show_indirect_target_selection(struct device *dev,
+ struct device_attribute *attr, char *buf);
extern __printf(4, 5)
struct device *cpu_device_create(struct device *parent, void *drvdata,
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 7a5b391dcc01..95f3807c8c55 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -170,6 +170,12 @@ struct cpufreq_policy {
struct notifier_block nb_max;
};
+DEFINE_GUARD(cpufreq_policy_write, struct cpufreq_policy *,
+ down_write(&_T->rwsem), up_write(&_T->rwsem))
+
+DEFINE_GUARD(cpufreq_policy_read, struct cpufreq_policy *,
+ down_read(&_T->rwsem), up_read(&_T->rwsem))
+
/*
* Used for passing new cpufreq policy data to the cpufreq driver's ->verify()
* callback for sanitization. That callback is only expected to modify the min
@@ -235,9 +241,6 @@ void disable_cpufreq(void);
u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy);
-struct cpufreq_policy *cpufreq_cpu_acquire(unsigned int cpu);
-void cpufreq_cpu_release(struct cpufreq_policy *policy);
-int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu);
void refresh_frequency_limits(struct cpufreq_policy *policy);
void cpufreq_update_policy(unsigned int cpu);
void cpufreq_update_limits(unsigned int cpu);
@@ -395,7 +398,7 @@ struct cpufreq_driver {
unsigned int (*get)(unsigned int cpu);
/* Called to update policy limits on firmware notifications. */
- void (*update_limits)(unsigned int cpu);
+ void (*update_limits)(struct cpufreq_policy *policy);
/* optional */
int (*bios_limit)(int cpu, unsigned int *limit);
@@ -647,6 +650,15 @@ module_exit(__governor##_exit)
struct cpufreq_governor *cpufreq_default_governor(void);
struct cpufreq_governor *cpufreq_fallback_governor(void);
+#ifdef CONFIG_CPU_FREQ_GOV_SCHEDUTIL
+bool sugov_is_governor(struct cpufreq_policy *policy);
+#else
+static inline bool sugov_is_governor(struct cpufreq_policy *policy)
+{
+ return false;
+}
+#endif
+
static inline void cpufreq_policy_apply_limits(struct cpufreq_policy *policy)
{
if (policy->max < policy->cur)
@@ -1225,6 +1237,8 @@ void cpufreq_generic_init(struct cpufreq_policy *policy,
struct cpufreq_frequency_table *table,
unsigned int transition_latency);
+bool cpufreq_ready_for_eas(const struct cpumask *cpu_mask);
+
static inline void cpufreq_register_em_with_opp(struct cpufreq_policy *policy)
{
dev_pm_opp_of_register_em(get_cpu_device(policy->cpu),
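A minimal sketch of the new policy guards in use; they rely on the guard() machinery from linux/cleanup.h, and the accessor below is hypothetical.

static unsigned int example_get_cur(struct cpufreq_policy *policy)
{
	/* Takes policy->rwsem for reading and releases it at scope
	 * exit, including on any early return. */
	guard(cpufreq_policy_read)(policy);

	return policy->cur;
}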
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index 1987400000b4..df366ee15456 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -60,7 +60,6 @@ enum cpuhp_state {
/* PREPARE section invoked on a control CPU */
CPUHP_OFFLINE = 0,
CPUHP_CREATE_THREADS,
- CPUHP_PERF_PREPARE,
CPUHP_PERF_X86_PREPARE,
CPUHP_PERF_X86_AMD_UNCORE_PREP,
CPUHP_PERF_POWER,
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index f9a868384083..6a569c7534db 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -179,6 +179,19 @@ unsigned int cpumask_first_and(const struct cpumask *srcp1, const struct cpumask
}
/**
+ * cpumask_first_andnot - return the first cpu from *srcp1 & ~*srcp2
+ * @srcp1: the first input
+ * @srcp2: the second input
+ *
+ * Return: >= nr_cpu_ids if no such cpu found.
+ */
+static __always_inline
+unsigned int cpumask_first_andnot(const struct cpumask *srcp1, const struct cpumask *srcp2)
+{
+ return find_first_andnot_bit(cpumask_bits(srcp1), cpumask_bits(srcp2), small_cpumask_bits);
+}
+
+/**
* cpumask_first_and_and - return the first cpu from *srcp1 & *srcp2 & *srcp3
* @srcp1: the first input
* @srcp2: the second input
@@ -285,6 +298,25 @@ unsigned int cpumask_next_and(int n, const struct cpumask *src1p,
}
/**
+ * cpumask_next_andnot - get the next cpu in *src1p & ~*src2p
+ * @n: the cpu prior to the place to search (i.e. return will be > @n)
+ * @src1p: the first cpumask pointer
+ * @src2p: the second cpumask pointer
+ *
+ * Return: >= nr_cpu_ids if no further cpus set in *src1p & ~*src2p.
+ */
+static __always_inline
+unsigned int cpumask_next_andnot(int n, const struct cpumask *src1p,
+ const struct cpumask *src2p)
+{
+ /* -1 is a legal arg here. */
+ if (n != -1)
+ cpumask_check(n);
+ return find_next_andnot_bit(cpumask_bits(src1p), cpumask_bits(src2p),
+ small_cpumask_bits, n + 1);
+}
+
+/**
* cpumask_next_and_wrap - get the next cpu in *src1p & *src2p, starting from
* @n+1. If nothing found, wrap around and start from
* the beginning
@@ -413,14 +445,18 @@ unsigned int cpumask_next_wrap(int n, const struct cpumask *src)
* @cpu: the cpu to ignore.
*
* Often used to find any cpu but smp_processor_id() in a mask.
+ * If @cpu == -1, the function is equivalent to cpumask_any().
* Return: >= nr_cpu_ids if no cpus set.
*/
static __always_inline
-unsigned int cpumask_any_but(const struct cpumask *mask, unsigned int cpu)
+unsigned int cpumask_any_but(const struct cpumask *mask, int cpu)
{
unsigned int i;
- cpumask_check(cpu);
+ /* -1 is a legal arg here. */
+ if (cpu != -1)
+ cpumask_check(cpu);
+
for_each_cpu(i, mask)
if (i != cpu)
break;
@@ -433,16 +469,20 @@ unsigned int cpumask_any_but(const struct cpumask *mask, unsigned int cpu)
* @mask2: the second input cpumask
* @cpu: the cpu to ignore
*
+ * If @cpu == -1, the function is equivalent to cpumask_any_and().
* Returns >= nr_cpu_ids if no cpus set.
*/
static __always_inline
unsigned int cpumask_any_and_but(const struct cpumask *mask1,
const struct cpumask *mask2,
- unsigned int cpu)
+ int cpu)
{
unsigned int i;
- cpumask_check(cpu);
+ /* -1 is a legal arg here. */
+ if (cpu != -1)
+ cpumask_check(cpu);
+
i = cpumask_first_and(mask1, mask2);
if (i != cpu)
return i;
@@ -451,6 +491,33 @@ unsigned int cpumask_any_and_but(const struct cpumask *mask1,
}
/**
+ * cpumask_any_andnot_but - pick an arbitrary cpu from *mask1 & ~*mask2, but not this one.
+ * @mask1: the first input cpumask
+ * @mask2: the second input cpumask
+ * @cpu: the cpu to ignore
+ *
+ * If @cpu == -1, the function returns the first matching cpu.
+ * Returns >= nr_cpu_ids if no cpus set.
+ */
+static __always_inline
+unsigned int cpumask_any_andnot_but(const struct cpumask *mask1,
+ const struct cpumask *mask2,
+ int cpu)
+{
+ unsigned int i;
+
+ /* -1 is a legal arg here. */
+ if (cpu != -1)
+ cpumask_check(cpu);
+
+ i = cpumask_first_andnot(mask1, mask2);
+ if (i != cpu)
+ return i;
+
+ return cpumask_next_andnot(cpu, mask1, mask2);
+}
+
+/**
* cpumask_nth - get the Nth cpu in a cpumask
* @srcp: the cpumask pointer
* @cpu: the Nth cpu to find, starting from 0
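A hypothetical selection helper built on the new andnot variants: pick a CPU from @allowed that is neither in @busy nor the local CPU, falling back to any non-local allowed CPU. Only the helpers declared above are assumed.

static unsigned int example_pick_target(const struct cpumask *allowed,
					const struct cpumask *busy)
{
	unsigned int cpu = cpumask_any_andnot_but(allowed, busy,
						  smp_processor_id());

	if (cpu >= nr_cpu_ids)
		cpu = cpumask_any_but(allowed, smp_processor_id());

	return cpu;
}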
diff --git a/include/linux/crc16.h b/include/linux/crc16.h
index 9fa74529b317..b861d969b161 100644
--- a/include/linux/crc16.h
+++ b/include/linux/crc16.h
@@ -15,14 +15,7 @@
#include <linux/types.h>
-extern u16 const crc16_table[256];
-
-extern u16 crc16(u16 crc, const u8 *buffer, size_t len);
-
-static inline u16 crc16_byte(u16 crc, const u8 data)
-{
- return (crc >> 8) ^ crc16_table[(crc ^ data) & 0xff];
-}
+u16 crc16(u16 crc, const u8 *p, size_t len);
#endif /* __CRC16_H */
diff --git a/include/linux/crc32.h b/include/linux/crc32.h
index 69c2e8bb3782..8c1883b81b42 100644
--- a/include/linux/crc32.h
+++ b/include/linux/crc32.h
@@ -1,7 +1,4 @@
-/*
- * crc32.h
- * See linux/lib/crc32.c for license and changes
- */
+/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef _LINUX_CRC32_H
#define _LINUX_CRC32_H
@@ -76,29 +73,6 @@ static inline u32 crc32_le_combine(u32 crc1, u32 crc2, size_t len2)
return crc32_le_shift(crc1, len2) ^ crc2;
}
-u32 crc32c_shift(u32 crc, size_t len);
-
-/**
- * crc32c_combine - Combine two crc32c check values into one. For two sequences
- * of bytes, seq1 and seq2 with lengths len1 and len2, crc32c()
- * check values were calculated for each, crc1 and crc2.
- *
- * @crc1: crc32c of the first block
- * @crc2: crc32c of the second block
- * @len2: length of the second block
- *
- * Return: The crc32c() check value of seq1 and seq2 concatenated, requiring
- * only crc1, crc2, and len2. Note: If seq_full denotes the concatenated
- * memory area of seq1 with seq2, and crc_full the crc32c() value of
- * seq_full, then crc_full == crc32c_combine(crc1, crc2, len2) when
- * crc_full was seeded with the same initializer as crc1, and crc2 seed
- * was 0. See also crc_combine_test().
- */
-static inline u32 crc32c_combine(u32 crc1, u32 crc2, size_t len2)
-{
- return crc32c_shift(crc1, len2) ^ crc2;
-}
-
#define crc32(seed, data, length) crc32_le(seed, (unsigned char const *)(data), length)
/*
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index 1e3809d28abd..b50f1954d1bb 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -14,8 +14,7 @@
#include <linux/completion.h>
#include <linux/errno.h>
-#include <linux/list.h>
-#include <linux/refcount.h>
+#include <linux/refcount_types.h>
#include <linux/slab.h>
#include <linux/types.h>
@@ -51,6 +50,15 @@
#define CRYPTO_ALG_NEED_FALLBACK 0x00000100
/*
+ * Set if the algorithm data structure should be duplicated into
+ * kmalloc memory before registration. This is useful for hardware
+ * that can be disconnected at will. Do not use this if the data
+ * structure is embedded into a bigger one. Duplicate the overall
+ * data structure in the driver in that case.
+ */
+#define CRYPTO_ALG_DUP_FIRST 0x00000200
+
+/*
* Set if the algorithm has passed automated run-time testing. Note that
* if there is no run-time testing for a given algorithm it is considered
* to have passed.
@@ -125,8 +133,10 @@
*/
#define CRYPTO_ALG_FIPS_INTERNAL 0x00020000
-/* Set if the algorithm supports request chains and virtual addresses. */
-#define CRYPTO_ALG_REQ_CHAIN 0x00040000
+/* Set if the algorithm supports virtual addresses. */
+#define CRYPTO_ALG_REQ_VIRT 0x00040000
+
+/* The high bits 0xff000000 are reserved for type-specific flags. */
/*
* Transform masks and values (for crt_flags).
@@ -179,7 +189,6 @@ struct crypto_async_request {
struct crypto_tfm *tfm;
u32 flags;
- int err;
};
/**
@@ -278,6 +287,7 @@ struct cipher_alg {
* to the alignmask of the algorithm being used, in order to
* avoid the API having to realign them. Note: the alignmask is
* not supported for hash algorithms and is always 0 for them.
+ * @cra_reqsize: Size of the request context for this algorithm.
* @cra_priority: Priority of this transformation implementation. In case
* multiple transformations with same @cra_name are available to
* the Crypto API, the kernel will use the one with highest
@@ -302,17 +312,8 @@ struct cipher_alg {
* by @cra_type and @cra_flags above, the associated structure must be
* filled with callbacks. This field might be empty. This is the case
* for ahash, shash.
- * @cra_init: Initialize the cryptographic transformation object. This function
- * is used to initialize the cryptographic transformation object.
- * This function is called only once at the instantiation time, right
- * after the transformation context was allocated. In case the
- * cryptographic hardware has some special requirements which need to
- * be handled by software, this function shall check for the precise
- * requirement of the transformation and put any software fallbacks
- * in place.
- * @cra_exit: Deinitialize the cryptographic transformation object. This is a
- * counterpart to @cra_init, used to remove various changes set in
- * @cra_init.
+ * @cra_init: Deprecated, do not use.
+ * @cra_exit: Deprecated, do not use.
* @cra_u.cipher: Union member which contains a single-block symmetric cipher
* definition. See @struct @cipher_alg.
* @cra_module: Owner of this transformation implementation. Set to THIS_MODULE
@@ -333,6 +334,7 @@ struct crypto_alg {
unsigned int cra_blocksize;
unsigned int cra_ctxsize;
unsigned int cra_alignmask;
+ unsigned int cra_reqsize;
int cra_priority;
refcount_t cra_refcnt;
@@ -409,9 +411,11 @@ struct crypto_tfm {
u32 crt_flags;
int node;
-
+
+ struct crypto_tfm *fb;
+
void (*exit)(struct crypto_tfm *tfm);
-
+
struct crypto_alg *__crt_alg;
void *__crt_ctx[] CRYPTO_MINALIGN_ATTR;
@@ -452,6 +456,11 @@ static inline unsigned int crypto_tfm_alg_alignmask(struct crypto_tfm *tfm)
return tfm->__crt_alg->cra_alignmask;
}
+static inline unsigned int crypto_tfm_alg_reqsize(struct crypto_tfm *tfm)
+{
+ return tfm->__crt_alg->cra_reqsize;
+}
+
static inline u32 crypto_tfm_get_flags(struct crypto_tfm *tfm)
{
return tfm->crt_flags;
@@ -473,22 +482,44 @@ static inline unsigned int crypto_tfm_ctx_alignment(void)
return __alignof__(tfm->__crt_ctx);
}
-static inline void crypto_reqchain_init(struct crypto_async_request *req)
+static inline bool crypto_tfm_is_async(struct crypto_tfm *tfm)
{
- req->err = -EINPROGRESS;
- INIT_LIST_HEAD(&req->list);
+ return tfm->__crt_alg->cra_flags & CRYPTO_ALG_ASYNC;
}
-static inline void crypto_request_chain(struct crypto_async_request *req,
- struct crypto_async_request *head)
+static inline bool crypto_req_on_stack(struct crypto_async_request *req)
{
- req->err = -EINPROGRESS;
- list_add_tail(&req->list, &head->list);
+ return req->flags & CRYPTO_TFM_REQ_ON_STACK;
}
-static inline bool crypto_tfm_is_async(struct crypto_tfm *tfm)
+static inline void crypto_request_set_callback(
+ struct crypto_async_request *req, u32 flags,
+ crypto_completion_t compl, void *data)
{
- return tfm->__crt_alg->cra_flags & CRYPTO_ALG_ASYNC;
+ u32 keep = CRYPTO_TFM_REQ_ON_STACK;
+
+ req->complete = compl;
+ req->data = data;
+ req->flags &= keep;
+ req->flags |= flags & ~keep;
+}
+
+static inline void crypto_request_set_tfm(struct crypto_async_request *req,
+ struct crypto_tfm *tfm)
+{
+ req->tfm = tfm;
+ req->flags &= ~CRYPTO_TFM_REQ_ON_STACK;
+}
+
+struct crypto_async_request *crypto_request_clone(
+ struct crypto_async_request *req, size_t total, gfp_t gfp);
+
+static inline void crypto_stack_request_init(struct crypto_async_request *req,
+ struct crypto_tfm *tfm)
+{
+ req->flags = 0;
+ crypto_request_set_tfm(req, tfm);
+ req->flags |= CRYPTO_TFM_REQ_ON_STACK;
}
#endif /* _LINUX_CRYPTO_H */
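A short, hypothetical driver sketch of the new on-stack request helpers; it assumes only the functions added above and shows why crypto_request_set_callback() preserves CRYPTO_TFM_REQ_ON_STACK.

static void example_prep_req(struct crypto_async_request *req,
			     struct crypto_tfm *tfm,
			     crypto_completion_t done, void *data)
{
	crypto_stack_request_init(req, tfm);	/* sets the ON_STACK flag */
	/* The ON_STACK bit survives: set_callback masks it out of the
	 * caller-supplied flags before applying them. */
	crypto_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
				    done, data);
}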
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index e9f07e37dd6f..e29823c701ac 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -57,7 +57,8 @@ struct qstr {
};
#define QSTR_INIT(n,l) { { { .len = l } }, .name = n }
-#define QSTR(n) (struct qstr)QSTR_INIT(n, strlen(n))
+#define QSTR_LEN(n,l) (struct qstr)QSTR_INIT(n,l)
+#define QSTR(n) QSTR_LEN(n, strlen(n))
extern const struct qstr empty_name;
extern const struct qstr slash_name;
@@ -281,7 +282,6 @@ extern void d_exchange(struct dentry *, struct dentry *);
extern struct dentry *d_ancestor(struct dentry *, struct dentry *);
extern struct dentry *d_lookup(const struct dentry *, const struct qstr *);
-extern struct dentry *d_hash_and_lookup(struct dentry *, struct qstr *);
static inline unsigned d_count(const struct dentry *dentry)
{
diff --git a/include/linux/dccp.h b/include/linux/dccp.h
index 325af611909f..0b61b8b996d4 100644
--- a/include/linux/dccp.h
+++ b/include/linux/dccp.h
@@ -2,79 +2,8 @@
#ifndef _LINUX_DCCP_H
#define _LINUX_DCCP_H
-
-#include <linux/in.h>
-#include <linux/interrupt.h>
-#include <linux/ktime.h>
-#include <linux/list.h>
-#include <linux/uio.h>
-#include <linux/workqueue.h>
-
-#include <net/inet_connection_sock.h>
-#include <net/inet_sock.h>
-#include <net/inet_timewait_sock.h>
-#include <net/tcp_states.h>
#include <uapi/linux/dccp.h>
-enum dccp_state {
- DCCP_OPEN = TCP_ESTABLISHED,
- DCCP_REQUESTING = TCP_SYN_SENT,
- DCCP_LISTEN = TCP_LISTEN,
- DCCP_RESPOND = TCP_SYN_RECV,
- /*
- * States involved in closing a DCCP connection:
- * 1) ACTIVE_CLOSEREQ is entered by a server sending a CloseReq.
- *
- * 2) CLOSING can have three different meanings (RFC 4340, 8.3):
- * a. Client has performed active-close, has sent a Close to the server
- * from state OPEN or PARTOPEN, and is waiting for the final Reset
- * (in this case, SOCK_DONE == 1).
- * b. Client is asked to perform passive-close, by receiving a CloseReq
- * in (PART)OPEN state. It sends a Close and waits for final Reset
- * (in this case, SOCK_DONE == 0).
- * c. Server performs an active-close as in (a), keeps TIMEWAIT state.
- *
- * 3) The following intermediate states are employed to give passively
- * closing nodes a chance to process their unread data:
- * - PASSIVE_CLOSE (from OPEN => CLOSED) and
- * - PASSIVE_CLOSEREQ (from (PART)OPEN to CLOSING; case (b) above).
- */
- DCCP_ACTIVE_CLOSEREQ = TCP_FIN_WAIT1,
- DCCP_PASSIVE_CLOSE = TCP_CLOSE_WAIT, /* any node receiving a Close */
- DCCP_CLOSING = TCP_CLOSING,
- DCCP_TIME_WAIT = TCP_TIME_WAIT,
- DCCP_CLOSED = TCP_CLOSE,
- DCCP_NEW_SYN_RECV = TCP_NEW_SYN_RECV,
- DCCP_PARTOPEN = TCP_MAX_STATES,
- DCCP_PASSIVE_CLOSEREQ, /* clients receiving CloseReq */
- DCCP_MAX_STATES
-};
-
-enum {
- DCCPF_OPEN = TCPF_ESTABLISHED,
- DCCPF_REQUESTING = TCPF_SYN_SENT,
- DCCPF_LISTEN = TCPF_LISTEN,
- DCCPF_RESPOND = TCPF_SYN_RECV,
- DCCPF_ACTIVE_CLOSEREQ = TCPF_FIN_WAIT1,
- DCCPF_CLOSING = TCPF_CLOSING,
- DCCPF_TIME_WAIT = TCPF_TIME_WAIT,
- DCCPF_CLOSED = TCPF_CLOSE,
- DCCPF_NEW_SYN_RECV = TCPF_NEW_SYN_RECV,
- DCCPF_PARTOPEN = (1 << DCCP_PARTOPEN),
-};
-
-static inline struct dccp_hdr *dccp_hdr(const struct sk_buff *skb)
-{
- return (struct dccp_hdr *)skb_transport_header(skb);
-}
-
-static inline struct dccp_hdr *dccp_zeroed_hdr(struct sk_buff *skb, int headlen)
-{
- skb_push(skb, headlen);
- skb_reset_transport_header(skb);
- return memset(skb_transport_header(skb), 0, headlen);
-}
-
static inline struct dccp_hdr_ext *dccp_hdrx(const struct dccp_hdr *dh)
{
return (struct dccp_hdr_ext *)((unsigned char *)dh + sizeof(*dh));
@@ -85,12 +14,6 @@ static inline unsigned int __dccp_basic_hdr_len(const struct dccp_hdr *dh)
return sizeof(*dh) + (dh->dccph_x ? sizeof(struct dccp_hdr_ext) : 0);
}
-static inline unsigned int dccp_basic_hdr_len(const struct sk_buff *skb)
-{
- const struct dccp_hdr *dh = dccp_hdr(skb);
- return __dccp_basic_hdr_len(dh);
-}
-
static inline __u64 dccp_hdr_seq(const struct dccp_hdr *dh)
{
__u64 seq_nr = ntohs(dh->dccph_seq);
@@ -103,222 +26,10 @@ static inline __u64 dccp_hdr_seq(const struct dccp_hdr *dh)
return seq_nr;
}
-static inline struct dccp_hdr_request *dccp_hdr_request(struct sk_buff *skb)
-{
- return (struct dccp_hdr_request *)(skb_transport_header(skb) +
- dccp_basic_hdr_len(skb));
-}
-
-static inline struct dccp_hdr_ack_bits *dccp_hdr_ack_bits(const struct sk_buff *skb)
-{
- return (struct dccp_hdr_ack_bits *)(skb_transport_header(skb) +
- dccp_basic_hdr_len(skb));
-}
-
-static inline u64 dccp_hdr_ack_seq(const struct sk_buff *skb)
-{
- const struct dccp_hdr_ack_bits *dhack = dccp_hdr_ack_bits(skb);
- return ((u64)ntohs(dhack->dccph_ack_nr_high) << 32) + ntohl(dhack->dccph_ack_nr_low);
-}
-
-static inline struct dccp_hdr_response *dccp_hdr_response(struct sk_buff *skb)
-{
- return (struct dccp_hdr_response *)(skb_transport_header(skb) +
- dccp_basic_hdr_len(skb));
-}
-
-static inline struct dccp_hdr_reset *dccp_hdr_reset(struct sk_buff *skb)
-{
- return (struct dccp_hdr_reset *)(skb_transport_header(skb) +
- dccp_basic_hdr_len(skb));
-}
-
static inline unsigned int __dccp_hdr_len(const struct dccp_hdr *dh)
{
return __dccp_basic_hdr_len(dh) +
dccp_packet_hdr_len(dh->dccph_type);
}
-static inline unsigned int dccp_hdr_len(const struct sk_buff *skb)
-{
- return __dccp_hdr_len(dccp_hdr(skb));
-}
-
-/**
- * struct dccp_request_sock - represent DCCP-specific connection request
- * @dreq_inet_rsk: structure inherited from
- * @dreq_iss: initial sequence number, sent on the first Response (RFC 4340, 7.1)
- * @dreq_gss: greatest sequence number sent (for retransmitted Responses)
- * @dreq_isr: initial sequence number received in the first Request
- * @dreq_gsr: greatest sequence number received (for retransmitted Request(s))
- * @dreq_service: service code present on the Request (there is just one)
- * @dreq_featneg: feature negotiation options for this connection
- * The following two fields are analogous to the ones in dccp_sock:
- * @dreq_timestamp_echo: last received timestamp to echo (13.1)
- * @dreq_timestamp_echo: the time of receiving the last @dreq_timestamp_echo
- */
-struct dccp_request_sock {
- struct inet_request_sock dreq_inet_rsk;
- __u64 dreq_iss;
- __u64 dreq_gss;
- __u64 dreq_isr;
- __u64 dreq_gsr;
- __be32 dreq_service;
- spinlock_t dreq_lock;
- struct list_head dreq_featneg;
- __u32 dreq_timestamp_echo;
- __u32 dreq_timestamp_time;
-};
-
-static inline struct dccp_request_sock *dccp_rsk(const struct request_sock *req)
-{
- return (struct dccp_request_sock *)req;
-}
-
-extern struct inet_timewait_death_row dccp_death_row;
-
-extern int dccp_parse_options(struct sock *sk, struct dccp_request_sock *dreq,
- struct sk_buff *skb);
-
-struct dccp_options_received {
- u64 dccpor_ndp:48;
- u32 dccpor_timestamp;
- u32 dccpor_timestamp_echo;
- u32 dccpor_elapsed_time;
-};
-
-struct ccid;
-
-enum dccp_role {
- DCCP_ROLE_UNDEFINED,
- DCCP_ROLE_LISTEN,
- DCCP_ROLE_CLIENT,
- DCCP_ROLE_SERVER,
-};
-
-struct dccp_service_list {
- __u32 dccpsl_nr;
- __be32 dccpsl_list[];
-};
-
-#define DCCP_SERVICE_INVALID_VALUE htonl((__u32)-1)
-#define DCCP_SERVICE_CODE_IS_ABSENT 0
-
-static inline bool dccp_list_has_service(const struct dccp_service_list *sl,
- const __be32 service)
-{
- if (likely(sl != NULL)) {
- u32 i = sl->dccpsl_nr;
- while (i--)
- if (sl->dccpsl_list[i] == service)
- return true;
- }
- return false;
-}
-
-struct dccp_ackvec;
-
-/**
- * struct dccp_sock - DCCP socket state
- *
- * @dccps_swl - sequence number window low
- * @dccps_swh - sequence number window high
- * @dccps_awl - acknowledgement number window low
- * @dccps_awh - acknowledgement number window high
- * @dccps_iss - initial sequence number sent
- * @dccps_isr - initial sequence number received
- * @dccps_osr - first OPEN sequence number received
- * @dccps_gss - greatest sequence number sent
- * @dccps_gsr - greatest valid sequence number received
- * @dccps_gar - greatest valid ack number received on a non-Sync; initialized to %dccps_iss
- * @dccps_service - first (passive sock) or unique (active sock) service code
- * @dccps_service_list - second .. last service code on passive socket
- * @dccps_timestamp_echo - latest timestamp received on a TIMESTAMP option
- * @dccps_timestamp_time - time of receiving latest @dccps_timestamp_echo
- * @dccps_l_ack_ratio - feature-local Ack Ratio
- * @dccps_r_ack_ratio - feature-remote Ack Ratio
- * @dccps_l_seq_win - local Sequence Window (influences ack number validity)
- * @dccps_r_seq_win - remote Sequence Window (influences seq number validity)
- * @dccps_pcslen - sender partial checksum coverage (via sockopt)
- * @dccps_pcrlen - receiver partial checksum coverage (via sockopt)
- * @dccps_send_ndp_count - local Send NDP Count feature (7.7.2)
- * @dccps_ndp_count - number of Non Data Packets since last data packet
- * @dccps_mss_cache - current value of MSS (path MTU minus header sizes)
- * @dccps_rate_last - timestamp for rate-limiting DCCP-Sync (RFC 4340, 7.5.4)
- * @dccps_featneg - tracks feature-negotiation state (mostly during handshake)
- * @dccps_hc_rx_ackvec - rx half connection ack vector
- * @dccps_hc_rx_ccid - CCID used for the receiver (or receiving half-connection)
- * @dccps_hc_tx_ccid - CCID used for the sender (or sending half-connection)
- * @dccps_options_received - parsed set of retrieved options
- * @dccps_qpolicy - TX dequeueing policy, one of %dccp_packet_dequeueing_policy
- * @dccps_tx_qlen - maximum length of the TX queue
- * @dccps_role - role of this sock, one of %dccp_role
- * @dccps_hc_rx_insert_options - receiver wants to add options when acking
- * @dccps_hc_tx_insert_options - sender wants to add options when sending
- * @dccps_server_timewait - server holds timewait state on close (RFC 4340, 8.3)
- * @dccps_sync_scheduled - flag which signals "send out-of-band message soon"
- * @dccps_xmitlet - tasklet scheduled by the TX CCID to dequeue data packets
- * @dccps_xmit_timer - used by the TX CCID to delay sending (rate-based pacing)
- * @dccps_syn_rtt - RTT sample from Request/Response exchange (in usecs)
- */
-struct dccp_sock {
- /* inet_connection_sock has to be the first member of dccp_sock */
- struct inet_connection_sock dccps_inet_connection;
-#define dccps_syn_rtt dccps_inet_connection.icsk_ack.lrcvtime
- __u64 dccps_swl;
- __u64 dccps_swh;
- __u64 dccps_awl;
- __u64 dccps_awh;
- __u64 dccps_iss;
- __u64 dccps_isr;
- __u64 dccps_osr;
- __u64 dccps_gss;
- __u64 dccps_gsr;
- __u64 dccps_gar;
- __be32 dccps_service;
- __u32 dccps_mss_cache;
- struct dccp_service_list *dccps_service_list;
- __u32 dccps_timestamp_echo;
- __u32 dccps_timestamp_time;
- __u16 dccps_l_ack_ratio;
- __u16 dccps_r_ack_ratio;
- __u64 dccps_l_seq_win:48;
- __u64 dccps_r_seq_win:48;
- __u8 dccps_pcslen:4;
- __u8 dccps_pcrlen:4;
- __u8 dccps_send_ndp_count:1;
- __u64 dccps_ndp_count:48;
- unsigned long dccps_rate_last;
- struct list_head dccps_featneg;
- struct dccp_ackvec *dccps_hc_rx_ackvec;
- struct ccid *dccps_hc_rx_ccid;
- struct ccid *dccps_hc_tx_ccid;
- struct dccp_options_received dccps_options_received;
- __u8 dccps_qpolicy;
- __u32 dccps_tx_qlen;
- enum dccp_role dccps_role:2;
- __u8 dccps_hc_rx_insert_options:1;
- __u8 dccps_hc_tx_insert_options:1;
- __u8 dccps_server_timewait:1;
- __u8 dccps_sync_scheduled:1;
- struct tasklet_struct dccps_xmitlet;
- struct timer_list dccps_xmit_timer;
-};
-
-#define dccp_sk(ptr) container_of_const(ptr, struct dccp_sock, \
- dccps_inet_connection.icsk_inet.sk)
-
-static inline const char *dccp_role(const struct sock *sk)
-{
- switch (dccp_sk(sk)->dccps_role) {
- case DCCP_ROLE_UNDEFINED: return "undefined";
- case DCCP_ROLE_LISTEN: return "listen";
- case DCCP_ROLE_SERVER: return "server";
- case DCCP_ROLE_CLIENT: return "client";
- }
- return NULL;
-}
-
-extern void dccp_syn_ack_timeout(const struct request_sock *req);
-
#endif /* _LINUX_DCCP_H */
diff --git a/include/linux/device.h b/include/linux/device.h
index 79e49fe494b7..4940db137fff 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -281,44 +281,6 @@ int __must_check device_create_bin_file(struct device *dev,
void device_remove_bin_file(struct device *dev,
const struct bin_attribute *attr);
-/* allows to add/remove a custom action to devres stack */
-int devm_remove_action_nowarn(struct device *dev, void (*action)(void *), void *data);
-
-/**
- * devm_remove_action() - removes previously added custom action
- * @dev: Device that owns the action
- * @action: Function implementing the action
- * @data: Pointer to data passed to @action implementation
- *
- * Removes instance of @action previously added by devm_add_action().
- * Both action and data should match one of the existing entries.
- */
-static inline
-void devm_remove_action(struct device *dev, void (*action)(void *), void *data)
-{
- WARN_ON(devm_remove_action_nowarn(dev, action, data));
-}
-
-void devm_release_action(struct device *dev, void (*action)(void *), void *data);
-
-int __devm_add_action(struct device *dev, void (*action)(void *), void *data, const char *name);
-#define devm_add_action(dev, action, data) \
- __devm_add_action(dev, action, data, #action)
-
-static inline int __devm_add_action_or_reset(struct device *dev, void (*action)(void *),
- void *data, const char *name)
-{
- int ret;
-
- ret = __devm_add_action(dev, action, data, name);
- if (ret)
- action(data);
-
- return ret;
-}
-#define devm_add_action_or_reset(dev, action, data) \
- __devm_add_action_or_reset(dev, action, data, #action)
-
/**
* devm_alloc_percpu - Resource-managed alloc_percpu
* @dev: Device to allocate per-cpu memory for
diff --git a/include/linux/device/devres.h b/include/linux/device/devres.h
index 9b49f9915850..ae696d10faff 100644
--- a/include/linux/device/devres.h
+++ b/include/linux/device/devres.h
@@ -8,6 +8,7 @@
#include <linux/overflow.h>
#include <linux/stdarg.h>
#include <linux/types.h>
+#include <asm/bug.h>
struct device;
struct device_node;
@@ -126,4 +127,44 @@ void __iomem *devm_of_iomap(struct device *dev, struct device_node *node, int in
#endif
+/* Allows adding/removing a custom action to the devres stack */
+int devm_remove_action_nowarn(struct device *dev, void (*action)(void *), void *data);
+
+/**
+ * devm_remove_action() - removes previously added custom action
+ * @dev: Device that owns the action
+ * @action: Function implementing the action
+ * @data: Pointer to data passed to @action implementation
+ *
+ * Removes instance of @action previously added by devm_add_action().
+ * Both action and data should match one of the existing entries.
+ */
+static inline
+void devm_remove_action(struct device *dev, void (*action)(void *), void *data)
+{
+ WARN_ON(devm_remove_action_nowarn(dev, action, data));
+}
+
+void devm_release_action(struct device *dev, void (*action)(void *), void *data);
+
+int __devm_add_action(struct device *dev, void (*action)(void *), void *data, const char *name);
+#define devm_add_action(dev, action, data) \
+ __devm_add_action(dev, action, data, #action)
+
+static inline int __devm_add_action_or_reset(struct device *dev, void (*action)(void *),
+ void *data, const char *name)
+{
+ int ret;
+
+ ret = __devm_add_action(dev, action, data, name);
+ if (ret)
+ action(data);
+
+ return ret;
+}
+#define devm_add_action_or_reset(dev, action, data) \
+ __devm_add_action_or_reset(dev, action, data, #action)
+
+bool devm_is_action_added(struct device *dev, void (*action)(void *), void *data);
+
#endif /* _DEVICE_DEVRES_H_ */
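A probe-time sketch of the action helpers now hosted in this header; demo_disable() and its cookie are illustrative:

static void demo_disable(void *data)
{
	/* Undo whatever probe set up; 'data' is the cookie passed below. */
}

static int demo_probe(struct device *dev)
{
	int ret;

	/* Runs demo_disable(dev) on driver detach; if registration
	 * fails, the action is invoked immediately. */
	ret = devm_add_action_or_reset(dev, demo_disable, dev);
	if (ret)
		return ret;

	/* New helper: check whether the action is still queued. */
	if (devm_is_action_added(dev, demo_disable, dev))
		dev_dbg(dev, "undo action armed\n");

	return 0;
}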
diff --git a/include/linux/device_cgroup.h b/include/linux/device_cgroup.h
index d02f32b7514e..0864773a57e8 100644
--- a/include/linux/device_cgroup.h
+++ b/include/linux/device_cgroup.h
@@ -18,15 +18,16 @@ static inline int devcgroup_inode_permission(struct inode *inode, int mask)
{
short type, access = 0;
+ if (likely(!S_ISBLK(inode->i_mode) && !S_ISCHR(inode->i_mode)))
+ return 0;
+
if (likely(!inode->i_rdev))
return 0;
if (S_ISBLK(inode->i_mode))
type = DEVCG_DEV_BLOCK;
- else if (S_ISCHR(inode->i_mode))
+ else /* S_ISCHR by the test above */
type = DEVCG_DEV_CHAR;
- else
- return 0;
if (mask & MAY_WRITE)
access |= DEVCG_ACC_WRITE;
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index 36216d28d8bd..d58e329ac0e7 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -35,15 +35,6 @@ struct dma_buf_attachment;
*/
struct dma_buf_ops {
/**
- * @cache_sgt_mapping:
- *
- * If true the framework will cache the first mapping made for each
- * attachment. This avoids creating mappings for attachments multiple
- * times.
- */
- bool cache_sgt_mapping;
-
- /**
* @attach:
*
* This is called from dma_buf_attach() to make sure that a given
@@ -370,10 +361,8 @@ struct dma_buf {
*/
struct module *owner;
-#if IS_ENABLED(CONFIG_DEBUG_FS)
/** @list_node: node for dma_buf accounting and debugging. */
struct list_head list_node;
-#endif
/** @priv: exporter specific private data for this buffer object. */
void *priv;
@@ -493,8 +482,6 @@ struct dma_buf_attach_ops {
* @dmabuf: buffer for this attachment.
* @dev: device attached to the buffer.
* @node: list of dma_buf_attachment, protected by dma_resv lock of the dmabuf.
- * @sgt: cached mapping.
- * @dir: direction of cached mapping.
* @peer2peer: true if the importer can handle peer resources without pages.
* @priv: exporter specific attachment data.
* @importer_ops: importer operations for this attachment, if provided
@@ -514,8 +501,6 @@ struct dma_buf_attachment {
struct dma_buf *dmabuf;
struct device *dev;
struct list_head node;
- struct sg_table *sgt;
- enum dma_data_direction dir;
bool peer2peer;
const struct dma_buf_attach_ops *importer_ops;
void *importer_priv;
@@ -583,20 +568,6 @@ static inline bool dma_buf_is_dynamic(struct dma_buf *dmabuf)
return !!dmabuf->ops->pin;
}
-/**
- * dma_buf_attachment_is_dynamic - check if a DMA-buf attachment uses dynamic
- * mappings
- * @attach: the DMA-buf attachment to check
- *
- * Returns true if a DMA-buf importer wants to call the map/unmap functions with
- * the dma_resv lock held.
- */
-static inline bool
-dma_buf_attachment_is_dynamic(struct dma_buf_attachment *attach)
-{
- return !!attach->importer_ops;
-}
-
struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
struct device *dev);
struct dma_buf_attachment *
@@ -636,4 +607,6 @@ int dma_buf_vmap(struct dma_buf *dmabuf, struct iosys_map *map);
void dma_buf_vunmap(struct dma_buf *dmabuf, struct iosys_map *map);
int dma_buf_vmap_unlocked(struct dma_buf *dmabuf, struct iosys_map *map);
void dma_buf_vunmap_unlocked(struct dma_buf *dmabuf, struct iosys_map *map);
+struct dma_buf *dma_buf_iter_begin(void);
+struct dma_buf *dma_buf_iter_next(struct dma_buf *dmbuf);
#endif /* __DMA_BUF_H__ */
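The two new iterator entry points suggest a walk over all exported buffers; a hedged sketch, assuming the iterator manages buffer references across steps:

struct dma_buf *buf;

for (buf = dma_buf_iter_begin(); buf; buf = dma_buf_iter_next(buf)) {
	/* Inspect the buffer, e.g. for accounting. */
	pr_info("dma-buf of %zu bytes\n", buf->size);
}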
diff --git a/include/linux/dma-fence-unwrap.h b/include/linux/dma-fence-unwrap.h
index 66b1e56fbb81..62df222fe0f1 100644
--- a/include/linux/dma-fence-unwrap.h
+++ b/include/linux/dma-fence-unwrap.h
@@ -52,6 +52,8 @@ struct dma_fence *__dma_fence_unwrap_merge(unsigned int num_fences,
struct dma_fence **fences,
struct dma_fence_unwrap *cursors);
+int dma_fence_dedup_array(struct dma_fence **array, int num_fences);
+
/**
* dma_fence_unwrap_merge - unwrap and merge fences
*
diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h
index e7ad819962e3..b12776883d14 100644
--- a/include/linux/dma-fence.h
+++ b/include/linux/dma-fence.h
@@ -169,8 +169,8 @@ struct dma_fence_ops {
* implementation know that there is another driver waiting on the
* signal (ie. hw->sw case).
*
- * This function can be called from atomic context, but not
- * from irq context, so normal spinlocks can be used.
+	 * This is called with IRQs disabled, so only spinlocks which disable
+	 * IRQs can be used in the code outside of this callback.
*
* A return value of false indicates the fence already passed,
* or some failure occurred that made it impossible to enable
@@ -239,27 +239,6 @@ struct dma_fence_ops {
void (*release)(struct dma_fence *fence);
/**
- * @fence_value_str:
- *
- * Callback to fill in free-form debug info specific to this fence, like
- * the sequence number.
- *
- * This callback is optional.
- */
- void (*fence_value_str)(struct dma_fence *fence, char *str, int size);
-
- /**
- * @timeline_value_str:
- *
- * Fills in the current value of the timeline as a string, like the
- * sequence number. Note that the specific fence passed to this function
- * should not matter, drivers should only use it to look up the
- * corresponding timeline structures.
- */
- void (*timeline_value_str)(struct dma_fence *fence,
- char *str, int size);
-
- /**
* @set_deadline:
*
* Callback to allow a fence waiter to inform the fence signaler of
diff --git a/include/linux/dma-map-ops.h b/include/linux/dma-map-ops.h
index e172522cd936..f48e5fb88bd5 100644
--- a/include/linux/dma-map-ops.h
+++ b/include/linux/dma-map-ops.h
@@ -434,58 +434,4 @@ static inline void debug_dma_dump_mappings(struct device *dev)
#endif /* CONFIG_DMA_API_DEBUG */
extern const struct dma_map_ops dma_dummy_ops;
-
-enum pci_p2pdma_map_type {
- /*
- * PCI_P2PDMA_MAP_UNKNOWN: Used internally for indicating the mapping
- * type hasn't been calculated yet. Functions that return this enum
- * never return this value.
- */
- PCI_P2PDMA_MAP_UNKNOWN = 0,
-
- /*
- * PCI_P2PDMA_MAP_NOT_SUPPORTED: Indicates the transaction will
- * traverse the host bridge and the host bridge is not in the
- * allowlist. DMA Mapping routines should return an error when
- * this is returned.
- */
- PCI_P2PDMA_MAP_NOT_SUPPORTED,
-
- /*
- * PCI_P2PDMA_BUS_ADDR: Indicates that two devices can talk to
- * each other directly through a PCI switch and the transaction will
- * not traverse the host bridge. Such a mapping should program
- * the DMA engine with PCI bus addresses.
- */
- PCI_P2PDMA_MAP_BUS_ADDR,
-
- /*
- * PCI_P2PDMA_MAP_THRU_HOST_BRIDGE: Indicates two devices can talk
- * to each other, but the transaction traverses a host bridge on the
- * allowlist. In this case, a normal mapping either with CPU physical
- * addresses (in the case of dma-direct) or IOVA addresses (in the
- * case of IOMMUs) should be used to program the DMA engine.
- */
- PCI_P2PDMA_MAP_THRU_HOST_BRIDGE,
-};
-
-struct pci_p2pdma_map_state {
- struct dev_pagemap *pgmap;
- int map;
- u64 bus_off;
-};
-
-#ifdef CONFIG_PCI_P2PDMA
-enum pci_p2pdma_map_type
-pci_p2pdma_map_segment(struct pci_p2pdma_map_state *state, struct device *dev,
- struct scatterlist *sg);
-#else /* CONFIG_PCI_P2PDMA */
-static inline enum pci_p2pdma_map_type
-pci_p2pdma_map_segment(struct pci_p2pdma_map_state *state, struct device *dev,
- struct scatterlist *sg)
-{
- return PCI_P2PDMA_MAP_NOT_SUPPORTED;
-}
-#endif /* CONFIG_PCI_P2PDMA */
-
#endif /* _LINUX_DMA_MAP_OPS_H */
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 85ab710ec0e7..55c03e5fe8cb 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -72,6 +72,22 @@
#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
+struct dma_iova_state {
+ dma_addr_t addr;
+ u64 __size;
+};
+
+/*
+ * Use the high bit to mark if we used swiotlb for one or more ranges.
+ */
+#define DMA_IOVA_USE_SWIOTLB (1ULL << 63)
+
+static inline size_t dma_iova_size(struct dma_iova_state *state)
+{
+	/* Casting is needed for 32-bit systems */
+ return (size_t)(state->__size & ~DMA_IOVA_USE_SWIOTLB);
+}
+
#ifdef CONFIG_DMA_API_DEBUG
void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
void debug_dma_map_single(struct device *dev, const void *addr,
@@ -277,6 +293,70 @@ static inline int dma_mmap_noncontiguous(struct device *dev,
}
#endif /* CONFIG_HAS_DMA */
+#ifdef CONFIG_IOMMU_DMA
+/**
+ * dma_use_iova - check if the IOVA API is used for this state
+ * @state: IOVA state
+ *
+ * Return %true if the DMA transfers use the dma_iova_*() calls or %false if
+ * they can't be used.
+ */
+static inline bool dma_use_iova(struct dma_iova_state *state)
+{
+ return state->__size != 0;
+}
+
+bool dma_iova_try_alloc(struct device *dev, struct dma_iova_state *state,
+ phys_addr_t phys, size_t size);
+void dma_iova_free(struct device *dev, struct dma_iova_state *state);
+void dma_iova_destroy(struct device *dev, struct dma_iova_state *state,
+ size_t mapped_len, enum dma_data_direction dir,
+ unsigned long attrs);
+int dma_iova_sync(struct device *dev, struct dma_iova_state *state,
+ size_t offset, size_t size);
+int dma_iova_link(struct device *dev, struct dma_iova_state *state,
+ phys_addr_t phys, size_t offset, size_t size,
+ enum dma_data_direction dir, unsigned long attrs);
+void dma_iova_unlink(struct device *dev, struct dma_iova_state *state,
+ size_t offset, size_t size, enum dma_data_direction dir,
+ unsigned long attrs);
+#else /* CONFIG_IOMMU_DMA */
+static inline bool dma_use_iova(struct dma_iova_state *state)
+{
+ return false;
+}
+static inline bool dma_iova_try_alloc(struct device *dev,
+ struct dma_iova_state *state, phys_addr_t phys, size_t size)
+{
+ return false;
+}
+static inline void dma_iova_free(struct device *dev,
+ struct dma_iova_state *state)
+{
+}
+static inline void dma_iova_destroy(struct device *dev,
+ struct dma_iova_state *state, size_t mapped_len,
+ enum dma_data_direction dir, unsigned long attrs)
+{
+}
+static inline int dma_iova_sync(struct device *dev,
+ struct dma_iova_state *state, size_t offset, size_t size)
+{
+ return -EOPNOTSUPP;
+}
+static inline int dma_iova_link(struct device *dev,
+ struct dma_iova_state *state, phys_addr_t phys, size_t offset,
+ size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+ return -EOPNOTSUPP;
+}
+static inline void dma_iova_unlink(struct device *dev,
+ struct dma_iova_state *state, size_t offset, size_t size,
+ enum dma_data_direction dir, unsigned long attrs)
+{
+}
+#endif /* CONFIG_IOMMU_DMA */
+
#if defined(CONFIG_HAS_DMA) && defined(CONFIG_DMA_NEED_SYNC)
void __dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
enum dma_data_direction dir);
@@ -326,6 +406,7 @@ static inline bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
{
return dma_dev_need_sync(dev) ? __dma_need_sync(dev, dma_addr) : false;
}
+bool dma_need_unmap(struct device *dev);
#else /* !CONFIG_HAS_DMA || !CONFIG_DMA_NEED_SYNC */
static inline bool dma_dev_need_sync(const struct device *dev)
{
@@ -351,6 +432,10 @@ static inline bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
{
return false;
}
+static inline bool dma_need_unmap(struct device *dev)
+{
+ return false;
+}
#endif /* !CONFIG_HAS_DMA || !CONFIG_DMA_NEED_SYNC */
struct page *dma_alloc_pages(struct device *dev, size_t size,
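A minimal mapping flow for the IOVA-based API above, assuming the caller falls back to the classic dma_map_*() path when dma_iova_try_alloc() declines:

static dma_addr_t demo_map(struct device *dev, struct dma_iova_state *state,
			   phys_addr_t phys, size_t size)
{
	if (!dma_iova_try_alloc(dev, state, phys, size))
		return DMA_MAPPING_ERROR;	/* use dma_map_*() instead */

	/* Link the physical range into the reserved IOVA space. */
	if (dma_iova_link(dev, state, phys, 0, size, DMA_TO_DEVICE, 0)) {
		dma_iova_free(dev, state);
		return DMA_MAPPING_ERROR;
	}
	if (dma_iova_sync(dev, state, 0, size)) {
		dma_iova_destroy(dev, state, size, DMA_TO_DEVICE, 0);
		return DMA_MAPPING_ERROR;
	}

	return state->addr;
}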
diff --git a/include/linux/dmapool.h b/include/linux/dmapool.h
index f632ecfb4238..06c4de602b2f 100644
--- a/include/linux/dmapool.h
+++ b/include/linux/dmapool.h
@@ -11,6 +11,7 @@
#ifndef LINUX_DMAPOOL_H
#define LINUX_DMAPOOL_H
+#include <linux/nodemask_types.h>
#include <linux/scatterlist.h>
#include <asm/io.h>
@@ -18,8 +19,8 @@ struct device;
#ifdef CONFIG_HAS_DMA
-struct dma_pool *dma_pool_create(const char *name, struct device *dev,
- size_t size, size_t align, size_t allocation);
+struct dma_pool *dma_pool_create_node(const char *name, struct device *dev,
+ size_t size, size_t align, size_t boundary, int node);
void dma_pool_destroy(struct dma_pool *pool);
@@ -35,9 +36,12 @@ struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
void dmam_pool_destroy(struct dma_pool *pool);
#else /* !CONFIG_HAS_DMA */
-static inline struct dma_pool *dma_pool_create(const char *name,
- struct device *dev, size_t size, size_t align, size_t allocation)
-{ return NULL; }
+static inline struct dma_pool *dma_pool_create_node(const char *name,
+ struct device *dev, size_t size, size_t align, size_t boundary,
+ int node)
+{
+ return NULL;
+}
static inline void dma_pool_destroy(struct dma_pool *pool) { }
static inline void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
dma_addr_t *handle) { return NULL; }
@@ -49,6 +53,13 @@ static inline struct dma_pool *dmam_pool_create(const char *name,
static inline void dmam_pool_destroy(struct dma_pool *pool) { }
#endif /* !CONFIG_HAS_DMA */
+static inline struct dma_pool *dma_pool_create(const char *name,
+ struct device *dev, size_t size, size_t align, size_t boundary)
+{
+ return dma_pool_create_node(name, dev, size, align, boundary,
+ NUMA_NO_NODE);
+}
+
static inline void *dma_pool_zalloc(struct dma_pool *pool, gfp_t mem_flags,
dma_addr_t *handle)
{
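dma_pool_create() is now a NUMA_NO_NODE wrapper around dma_pool_create_node(); a NUMA-aware caller can place the pool's bookkeeping explicitly. Sketch (sizes are arbitrary):

struct dma_pool *pool;
dma_addr_t handle;
void *vaddr;

pool = dma_pool_create_node("demo-pool", dev, 512, 64, 0,
			    dev_to_node(dev));
if (pool) {
	vaddr = dma_pool_zalloc(pool, GFP_KERNEL, &handle);
	if (vaddr)
		dma_pool_free(pool, vaddr, handle);
	dma_pool_destroy(pool);
}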
diff --git a/include/linux/energy_model.h b/include/linux/energy_model.h
index d8eabbf86a5b..7fa1eb3cc823 100644
--- a/include/linux/energy_model.h
+++ b/include/linux/energy_model.h
@@ -179,6 +179,7 @@ int em_dev_compute_costs(struct device *dev, struct em_perf_state *table,
int em_dev_update_chip_binning(struct device *dev);
int em_update_performance_limits(struct em_perf_domain *pd,
unsigned long freq_min_khz, unsigned long freq_max_khz);
+void em_adjust_cpu_capacity(unsigned int cpu);
void em_rebuild_sched_domains(void);
/**
@@ -403,6 +404,7 @@ int em_update_performance_limits(struct em_perf_domain *pd,
{
return -EINVAL;
}
+static inline void em_adjust_cpu_capacity(unsigned int cpu) {}
static inline void em_rebuild_sched_domains(void) {}
#endif
diff --git a/include/linux/entry-common.h b/include/linux/entry-common.h
index fc61d0205c97..f94f3fdf15fc 100644
--- a/include/linux/entry-common.h
+++ b/include/linux/entry-common.h
@@ -14,6 +14,7 @@
#include <linux/kmsan.h>
#include <asm/entry-common.h>
+#include <asm/syscall.h>
/*
* Define dummy _TIF work flags if not defined by the architecture or for
@@ -367,6 +368,15 @@ static __always_inline void exit_to_user_mode(void)
}
/**
+ * syscall_exit_work - Handle work before returning to user mode
+ * @regs: Pointer to current pt_regs
+ * @work: Current thread syscall work
+ *
+ * Do one-time syscall specific work.
+ */
+void syscall_exit_work(struct pt_regs *regs, unsigned long work);
+
+/**
* syscall_exit_to_user_mode_work - Handle work before returning to user mode
 * @regs: Pointer to current's pt_regs
*
@@ -379,7 +389,30 @@ static __always_inline void exit_to_user_mode(void)
* make the final state transitions. Interrupts must stay disabled between
* return from this function and the invocation of exit_to_user_mode().
*/
-void syscall_exit_to_user_mode_work(struct pt_regs *regs);
+static __always_inline void syscall_exit_to_user_mode_work(struct pt_regs *regs)
+{
+ unsigned long work = READ_ONCE(current_thread_info()->syscall_work);
+ unsigned long nr = syscall_get_nr(current, regs);
+
+ CT_WARN_ON(ct_state() != CT_STATE_KERNEL);
+
+ if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
+ if (WARN(irqs_disabled(), "syscall %lu left IRQs disabled", nr))
+ local_irq_enable();
+ }
+
+ rseq_syscall(regs);
+
+ /*
+ * Do one-time syscall specific work. If these work items are
+ * enabled, we want to run them exactly once per syscall exit with
+ * interrupts enabled.
+ */
+ if (unlikely(work & SYSCALL_WORK_EXIT))
+ syscall_exit_work(regs, work);
+ local_irq_disable_exit_to_user();
+ exit_to_user_mode_prepare(regs);
+}
/**
* syscall_exit_to_user_mode - Handle work before returning to user mode
@@ -410,7 +443,13 @@ void syscall_exit_to_user_mode_work(struct pt_regs *regs);
* exit_to_user_mode(). This function is preferred unless there is a
* compelling architectural reason to use the separate functions.
*/
-void syscall_exit_to_user_mode(struct pt_regs *regs);
+static __always_inline void syscall_exit_to_user_mode(struct pt_regs *regs)
+{
+ instrumentation_begin();
+ syscall_exit_to_user_mode_work(regs);
+ instrumentation_end();
+ exit_to_user_mode();
+}
/**
* irqentry_enter_from_user_mode - Establish state before invoking the irq handler
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index 8210ece94fa6..5e0dd333ad1f 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -17,9 +17,14 @@
#include <linux/compat.h>
#include <linux/if_ether.h>
#include <linux/netlink.h>
+#include <linux/timer_types.h>
#include <uapi/linux/ethtool.h>
+#include <uapi/linux/ethtool_netlink_generated.h>
#include <uapi/linux/net_tstamp.h>
+#define ETHTOOL_MM_MAX_VERIFY_TIME_MS 128
+#define ETHTOOL_MM_MAX_VERIFY_RETRIES 3
+
struct compat_ethtool_rx_flow_spec {
u32 flow_type;
union ethtool_flow_union h_u;
@@ -718,6 +723,75 @@ struct ethtool_mm_stats {
u64 MACMergeHoldCount;
};
+enum ethtool_mmsv_event {
+ ETHTOOL_MMSV_LP_SENT_VERIFY_MPACKET,
+ ETHTOOL_MMSV_LD_SENT_VERIFY_MPACKET,
+ ETHTOOL_MMSV_LP_SENT_RESPONSE_MPACKET,
+};
+
+/* MAC Merge verification mPacket type */
+enum ethtool_mpacket {
+ ETHTOOL_MPACKET_VERIFY,
+ ETHTOOL_MPACKET_RESPONSE,
+};
+
+struct ethtool_mmsv;
+
+/**
+ * struct ethtool_mmsv_ops - Operations for MAC Merge Software Verification
+ * @configure_tx: Driver callback for the event where the preemptible TX
+ * becomes active or inactive. Preemptible traffic
+ * classes must be committed to hardware only while
+ * preemptible TX is active.
+ * @configure_pmac: Driver callback for the event where the pMAC state
+ * changes as result of an administrative setting
+ * (ethtool) or a call to ethtool_mmsv_link_state_handle().
+ * @send_mpacket: Driver-provided method for sending a Verify or a Response
+ * mPacket.
+ */
+struct ethtool_mmsv_ops {
+ void (*configure_tx)(struct ethtool_mmsv *mmsv, bool tx_active);
+ void (*configure_pmac)(struct ethtool_mmsv *mmsv, bool pmac_enabled);
+ void (*send_mpacket)(struct ethtool_mmsv *mmsv, enum ethtool_mpacket mpacket);
+};
+
+/**
+ * struct ethtool_mmsv - MAC Merge Software Verification
+ * @ops: operations for MAC Merge Software Verification
+ * @dev: pointer to net_device structure
+ * @lock: serialize access to MAC Merge state between
+ * ethtool requests and link state updates.
+ * @status: current verification FSM state
+ * @verify_timer: timer for verification in local TX direction
+ * @verify_enabled: indicates if verification is enabled
+ * @verify_retries: number of retries for verification
+ * @pmac_enabled: indicates if the preemptible MAC is enabled
+ * @verify_time: time for verification in milliseconds
+ * @tx_enabled: indicates if transmission is enabled
+ */
+struct ethtool_mmsv {
+ const struct ethtool_mmsv_ops *ops;
+ struct net_device *dev;
+ spinlock_t lock;
+ enum ethtool_mm_verify_status status;
+ struct timer_list verify_timer;
+ bool verify_enabled;
+ int verify_retries;
+ bool pmac_enabled;
+ u32 verify_time;
+ bool tx_enabled;
+};
+
+void ethtool_mmsv_stop(struct ethtool_mmsv *mmsv);
+void ethtool_mmsv_link_state_handle(struct ethtool_mmsv *mmsv, bool up);
+void ethtool_mmsv_event_handle(struct ethtool_mmsv *mmsv,
+ enum ethtool_mmsv_event event);
+void ethtool_mmsv_get_mm(struct ethtool_mmsv *mmsv,
+ struct ethtool_mm_state *state);
+void ethtool_mmsv_set_mm(struct ethtool_mmsv *mmsv, struct ethtool_mm_cfg *cfg);
+void ethtool_mmsv_init(struct ethtool_mmsv *mmsv, struct net_device *dev,
+ const struct ethtool_mmsv_ops *ops);
+
/**
* struct ethtool_rxfh_param - RXFH (RSS) parameters
* @hfunc: Defines the current RSS hash function used by HW (or to be set to).
@@ -757,6 +831,8 @@ struct ethtool_rxfh_param {
* @so_timestamping: bit mask of the sum of the supported SO_TIMESTAMPING flags
* @phc_index: device index of the associated PHC, or -1 if there is none
* @phc_qualifier: qualifier of the associated PHC
+ * @phc_source: source device of the associated PHC
+ * @phc_phyindex: index of PHY device source of the associated PHC
* @tx_types: bit mask of the supported hwtstamp_tx_types enumeration values
* @rx_filters: bit mask of the supported hwtstamp_rx_filters enumeration values
*/
@@ -765,6 +841,8 @@ struct kernel_ethtool_ts_info {
u32 so_timestamping;
int phc_index;
enum hwtstamp_provider_qualifier phc_qualifier;
+ enum hwtstamp_source phc_source;
+ int phc_phyindex;
enum hwtstamp_tx_types tx_types;
enum hwtstamp_rx_filters rx_filters;
};
@@ -926,10 +1004,11 @@ struct kernel_ethtool_ts_info {
* @get_ts_info: Get the time stamping and PTP hardware clock capabilities.
* It may be called with RCU, or rtnl or reference on the device.
* Drivers supporting transmit time stamps in software should set this to
- * ethtool_op_get_ts_info(). Drivers must not zero statistics which they
- * don't report. The stats structure is initialized to ETHTOOL_STAT_NOT_SET
- * indicating driver does not report statistics.
- * @get_ts_stats: Query the device hardware timestamping statistics.
+ * ethtool_op_get_ts_info().
+ * @get_ts_stats: Query the device hardware timestamping statistics. Drivers
+ * must not zero statistics which they don't report. The stats structure
+ * is initialized to ETHTOOL_STAT_NOT_SET indicating driver does not
+ * report statistics.
* @get_module_info: Get the size and type of the eeprom contained within
* a plug-in module.
* @get_module_eeprom: Get the eeprom information from the plug-in module
@@ -1329,6 +1408,17 @@ extern __printf(2, 3) void ethtool_sprintf(u8 **data, const char *fmt, ...);
*/
extern void ethtool_puts(u8 **data, const char *str);
+/**
+ * ethtool_cpy - Write possibly-not-NUL-terminated string to ethtool string data
+ * @data: Pointer to a pointer to the start of string to write into
+ * @str: NUL-byte padded char array of size ETH_GSTRING_LEN to copy from
+ */
+#define ethtool_cpy(data, str) do { \
+ BUILD_BUG_ON(sizeof(str) != ETH_GSTRING_LEN); \
+ memcpy(*(data), str, ETH_GSTRING_LEN); \
+ *(data) += ETH_GSTRING_LEN; \
+} while (0)
+
/* Link mode to forced speed capabilities maps */
struct ethtool_forced_speed_map {
u32 speed;
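How a MAC driver might wire up the shared MAC Merge verification state machine added above; the ops bodies and private layout are illustrative:

struct demo_priv {
	struct ethtool_mmsv mmsv;
	/* ... MAC state ... */
};

static void demo_configure_tx(struct ethtool_mmsv *mmsv, bool tx_active)
{
	/* Commit or withdraw preemptible traffic classes in hardware. */
}

static void demo_configure_pmac(struct ethtool_mmsv *mmsv, bool pmac_enabled)
{
	/* Enable or disable the preemptible MAC. */
}

static void demo_send_mpacket(struct ethtool_mmsv *mmsv,
			      enum ethtool_mpacket mpacket)
{
	/* Trigger transmission of a Verify or Response mPacket. */
}

static const struct ethtool_mmsv_ops demo_mmsv_ops = {
	.configure_tx	= demo_configure_tx,
	.configure_pmac	= demo_configure_pmac,
	.send_mpacket	= demo_send_mpacket,
};

/* In probe:        ethtool_mmsv_init(&priv->mmsv, ndev, &demo_mmsv_ops);
 * On link change:  ethtool_mmsv_link_state_handle(&priv->mmsv, up);
 * From IRQ/events: ethtool_mmsv_event_handle(&priv->mmsv, event); */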
diff --git a/include/linux/execmem.h b/include/linux/execmem.h
index 65655a5d1be2..ca42d5e46ccc 100644
--- a/include/linux/execmem.h
+++ b/include/linux/execmem.h
@@ -4,6 +4,7 @@
#include <linux/types.h>
#include <linux/moduleloader.h>
+#include <linux/cleanup.h>
#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
!defined(CONFIG_KASAN_VMALLOC)
@@ -53,7 +54,7 @@ enum execmem_range_flags {
EXECMEM_ROX_CACHE = (1 << 1),
};
-#ifdef CONFIG_ARCH_HAS_EXECMEM_ROX
+#if defined(CONFIG_ARCH_HAS_EXECMEM_ROX) && defined(CONFIG_EXECMEM)
/**
* execmem_fill_trapping_insns - set memory to contain instructions that
* will trap
@@ -93,9 +94,15 @@ int execmem_make_temp_rw(void *ptr, size_t size);
* Return: 0 on success or negative error code on failure.
*/
int execmem_restore_rox(void *ptr, size_t size);
+
+/*
+ * Called from mark_readonly(), where the system transitions to ROX.
+ */
+void execmem_cache_make_ro(void);
#else
static inline int execmem_make_temp_rw(void *ptr, size_t size) { return 0; }
static inline int execmem_restore_rox(void *ptr, size_t size) { return 0; }
+static inline void execmem_cache_make_ro(void) { }
#endif
/**
@@ -170,6 +177,8 @@ void *execmem_alloc(enum execmem_type type, size_t size);
*/
void execmem_free(void *ptr);
+DEFINE_FREE(execmem, void *, if (_T) execmem_free(_T));
+
#ifdef CONFIG_MMU
/**
* execmem_vmap - create virtual mapping for EXECMEM_MODULE_DATA memory
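With the execmem cleanup class registered above, an allocation can be auto-freed on scope exit in the usual cleanup.h style; demo_install() is a hypothetical consumer:

static int demo_load(size_t size)
{
	/* Auto-freed on any exit path unless ownership is transferred. */
	void *buf __free(execmem) = execmem_alloc(EXECMEM_MODULE_TEXT, size);

	if (!buf)
		return -ENOMEM;

	/* ... write code into buf; an early return frees it ... */
	return demo_install(no_free_ptr(buf));	/* hypothetical */
}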
diff --git a/include/linux/fanotify.h b/include/linux/fanotify.h
index 3c817dc6292e..879cff5eccd4 100644
--- a/include/linux/fanotify.h
+++ b/include/linux/fanotify.h
@@ -38,8 +38,7 @@
FAN_REPORT_PIDFD | \
FAN_REPORT_FD_ERROR | \
FAN_UNLIMITED_QUEUE | \
- FAN_UNLIMITED_MARKS | \
- FAN_REPORT_MNT)
+ FAN_UNLIMITED_MARKS)
/*
* fanotify_init() flags that are allowed for user without CAP_SYS_ADMIN.
@@ -48,7 +47,7 @@
* so one of the flags for reporting file handles is required.
*/
#define FANOTIFY_USER_INIT_FLAGS (FAN_CLASS_NOTIF | \
- FANOTIFY_FID_BITS | \
+ FANOTIFY_FID_BITS | FAN_REPORT_MNT | \
FAN_CLOEXEC | FAN_NONBLOCK)
#define FANOTIFY_INIT_FLAGS (FANOTIFY_ADMIN_INIT_FLAGS | \
diff --git a/include/linux/file.h b/include/linux/file.h
index 302f11355b10..af1768d934a0 100644
--- a/include/linux/file.h
+++ b/include/linux/file.h
@@ -59,7 +59,7 @@ static inline struct fd CLONED_FD(struct file *f)
static inline void fdput(struct fd fd)
{
- if (fd.word & FDPUT_FPUT)
+ if (unlikely(fd.word & FDPUT_FPUT))
fput(fd_file(fd));
}
diff --git a/include/linux/find.h b/include/linux/find.h
index 68685714bc18..5a2c267ea7f9 100644
--- a/include/linux/find.h
+++ b/include/linux/find.h
@@ -29,6 +29,8 @@ unsigned long __find_nth_and_andnot_bit(const unsigned long *addr1, const unsign
unsigned long n);
extern unsigned long _find_first_and_bit(const unsigned long *addr1,
const unsigned long *addr2, unsigned long size);
+unsigned long _find_first_andnot_bit(const unsigned long *addr1, const unsigned long *addr2,
+ unsigned long size);
unsigned long _find_first_and_and_bit(const unsigned long *addr1, const unsigned long *addr2,
const unsigned long *addr3, unsigned long size);
extern unsigned long _find_first_zero_bit(const unsigned long *addr, unsigned long size);
@@ -348,6 +350,29 @@ unsigned long find_first_and_bit(const unsigned long *addr1,
#endif
/**
+ * find_first_andnot_bit - find the first bit set in 1st memory region and unset in 2nd
+ * @addr1: The first address to base the search on
+ * @addr2: The second address to base the search on
+ * @size: The bitmap size in bits
+ *
+ * Returns the bit number for the first set bit
+ * If no bits are set, returns >= @size.
+ */
+static __always_inline
+unsigned long find_first_andnot_bit(const unsigned long *addr1,
+ const unsigned long *addr2,
+ unsigned long size)
+{
+ if (small_const_nbits(size)) {
+ unsigned long val = *addr1 & (~*addr2) & GENMASK(size - 1, 0);
+
+ return val ? __ffs(val) : size;
+ }
+
+ return _find_first_andnot_bit(addr1, addr2, size);
+}
+
+/**
* find_first_and_and_bit - find the first set bit in 3 memory regions
* @addr1: The first address to base the search on
* @addr2: The second address to base the search on
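A quick worked use of find_first_andnot_bit(), e.g. picking the first allowed-but-not-busy index (bitmaps are illustrative):

unsigned long allowed[] = { 0xf };	/* bits 0-3 set */
unsigned long busy[]    = { 0x3 };	/* bits 0-1 set */

/* First bit set in 'allowed' and clear in 'busy': returns 2. */
unsigned long bit = find_first_andnot_bit(allowed, busy, 4);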
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 016b0fe1536e..0db87f8e676c 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -408,6 +408,7 @@ struct kiocb {
void *private;
int ki_flags;
u16 ki_ioprio; /* See linux/ioprio.h */
+ u8 ki_write_stream;
union {
/*
* Only used for async buffered reads, where it denotes the
@@ -433,7 +434,6 @@ static inline bool is_sync_kiocb(struct kiocb *kiocb)
}
struct address_space_operations {
- int (*writepage)(struct page *page, struct writeback_control *wbc);
int (*read_folio)(struct file *, struct folio *);
/* Write back some dirty pages from this mapping. */
@@ -867,6 +867,11 @@ static inline void inode_lock(struct inode *inode)
down_write(&inode->i_rwsem);
}
+static inline __must_check int inode_lock_killable(struct inode *inode)
+{
+ return down_write_killable(&inode->i_rwsem);
+}
+
static inline void inode_unlock(struct inode *inode)
{
up_write(&inode->i_rwsem);
@@ -877,6 +882,11 @@ static inline void inode_lock_shared(struct inode *inode)
down_read(&inode->i_rwsem);
}
+static inline __must_check int inode_lock_shared_killable(struct inode *inode)
+{
+ return down_read_killable(&inode->i_rwsem);
+}
+
static inline void inode_unlock_shared(struct inode *inode)
{
up_read(&inode->i_rwsem);
@@ -1307,6 +1317,7 @@ struct sb_writers {
unsigned short frozen; /* Is sb frozen? */
int freeze_kcount; /* How many kernel freeze requests? */
int freeze_ucount; /* How many userspace freeze requests? */
+ const void *freeze_owner; /* Owner of the freeze */
struct percpu_rw_semaphore rw_sem[SB_FREEZE_LEVELS];
};
@@ -1780,7 +1791,7 @@ static inline void __sb_end_write(struct super_block *sb, int level)
static inline void __sb_start_write(struct super_block *sb, int level)
{
- percpu_down_read(sb->s_writers.rw_sem + level - 1);
+ percpu_down_read_freezable(sb->s_writers.rw_sem + level - 1, true);
}
static inline bool __sb_start_write_trylock(struct super_block *sb, int level)
@@ -2071,8 +2082,18 @@ typedef bool (*filldir_t)(struct dir_context *, const char *, int, loff_t, u64,
struct dir_context {
filldir_t actor;
loff_t pos;
+ /*
+	 * Filesystems MUST NOT MODIFY count, but may use it as a hint:
+ * 0 unknown
+ * > 0 space in buffer (assume at least one entry)
+ * INT_MAX unlimited
+ */
+ int count;
};
+/* If OR-ed with d_type, pending signals are not checked */
+#define FILLDIR_FLAG_NOINTR 0x1000
+
/*
* These flags let !MMU mmap() govern direct device mapping vs immediate
* copying more easily for MAP_PRIVATE, especially for ROM filesystems.
@@ -2186,7 +2207,7 @@ struct file_operations {
/* Supports asynchronous lock callbacks */
#define FOP_ASYNC_LOCK ((__force fop_flags_t)(1 << 6))
/* File system supports uncached read/write buffered IO */
-#define FOP_DONTCACHE ((__force fop_flags_t)(1 << 7))
+#define FOP_DONTCACHE 0 /* ((__force fop_flags_t)(1 << 7)) */
/* Wrap a directory iterator that needs exclusive inode access */
int wrap_directory_iterator(struct file *, struct dir_context *,
@@ -2269,6 +2290,7 @@ extern loff_t vfs_dedupe_file_range_one(struct file *src_file, loff_t src_pos,
* @FREEZE_HOLDER_KERNEL: kernel wants to freeze or thaw filesystem
* @FREEZE_HOLDER_USERSPACE: userspace wants to freeze or thaw filesystem
* @FREEZE_MAY_NEST: whether nesting freeze and thaw requests is allowed
+ * @FREEZE_EXCL: a freeze that can only be undone by the owner
*
* Indicate who the owner of the freeze or thaw request is and whether
* the freeze needs to be exclusive or can nest.
@@ -2282,6 +2304,7 @@ enum freeze_holder {
FREEZE_HOLDER_KERNEL = (1U << 0),
FREEZE_HOLDER_USERSPACE = (1U << 1),
FREEZE_MAY_NEST = (1U << 2),
+ FREEZE_EXCL = (1U << 3),
};
struct super_operations {
@@ -2295,9 +2318,9 @@ struct super_operations {
void (*evict_inode) (struct inode *);
void (*put_super) (struct super_block *);
int (*sync_fs)(struct super_block *sb, int wait);
- int (*freeze_super) (struct super_block *, enum freeze_holder who);
+ int (*freeze_super) (struct super_block *, enum freeze_holder who, const void *owner);
int (*freeze_fs) (struct super_block *);
- int (*thaw_super) (struct super_block *, enum freeze_holder who);
+ int (*thaw_super) (struct super_block *, enum freeze_holder who, const void *owner);
int (*unfreeze_fs) (struct super_block *);
int (*statfs) (struct dentry *, struct kstatfs *);
int (*remount_fs) (struct super_block *, int *, char *);
@@ -2344,6 +2367,7 @@ struct super_operations {
#define S_CASEFOLD (1 << 15) /* Casefolded file */
#define S_VERITY (1 << 16) /* Verity file (using fs/verity/) */
#define S_KERNEL_FILE (1 << 17) /* File is in use by the kernel (eg. fs/cachefiles) */
+#define S_ANON_INODE (1 << 19) /* Inode is an anonymous inode */
/*
* Note that nosuid etc flags are inode-specific: setting some file-system
@@ -2400,6 +2424,7 @@ static inline bool sb_rdonly(const struct super_block *sb) { return sb->s_flags
#define IS_WHITEOUT(inode) (S_ISCHR(inode->i_mode) && \
(inode)->i_rdev == WHITEOUT_DEV)
+#define IS_ANON_FILE(inode) ((inode)->i_flags & S_ANON_INODE)
static inline bool HAS_UNMAPPED_ID(struct mnt_idmap *idmap,
struct inode *inode)
@@ -2705,8 +2730,10 @@ extern int unregister_filesystem(struct file_system_type *);
extern int vfs_statfs(const struct path *, struct kstatfs *);
extern int user_statfs(const char __user *, struct kstatfs *);
extern int fd_statfs(int, struct kstatfs *);
-int freeze_super(struct super_block *super, enum freeze_holder who);
-int thaw_super(struct super_block *super, enum freeze_holder who);
+int freeze_super(struct super_block *super, enum freeze_holder who,
+ const void *freeze_owner);
+int thaw_super(struct super_block *super, enum freeze_holder who,
+ const void *freeze_owner);
extern __printf(2, 3)
int super_setup_bdi_name(struct super_block *sb, char *fmt, ...);
extern int super_setup_bdi(struct super_block *sb);
@@ -3475,7 +3502,8 @@ void generic_fillattr(struct mnt_idmap *, u32, struct inode *, struct kstat *);
void generic_fill_statx_attr(struct inode *inode, struct kstat *stat);
void generic_fill_statx_atomic_writes(struct kstat *stat,
unsigned int unit_min,
- unsigned int unit_max);
+ unsigned int unit_max,
+ unsigned int unit_max_opt);
extern int vfs_getattr_nosec(const struct path *, struct kstat *, u32, unsigned int);
extern int vfs_getattr(const struct path *, struct kstat *, u32, unsigned int);
void __inode_add_bytes(struct inode *inode, loff_t bytes);
@@ -3515,9 +3543,11 @@ extern void put_filesystem(struct file_system_type *fs);
extern struct file_system_type *get_fs_type(const char *name);
extern void drop_super(struct super_block *sb);
extern void drop_super_exclusive(struct super_block *sb);
-extern void iterate_supers(void (*)(struct super_block *, void *), void *);
+extern void iterate_supers(void (*f)(struct super_block *, void *), void *arg);
extern void iterate_supers_type(struct file_system_type *,
void (*)(struct super_block *, void *), void *);
+void filesystems_freeze(void);
+void filesystems_thaw(void);
extern int dcache_dir_open(struct inode *, struct file *);
extern int dcache_dir_close(struct inode *, struct file *);
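A sketch of the new killable inode lock variants; a fatal signal while waiting fails the operation instead of blocking forever:

static int demo_read_locked(struct inode *inode)
{
	/* Non-zero return means a fatal signal interrupted the wait. */
	if (inode_lock_shared_killable(inode))
		return -EINTR;

	/* ... read inode state ... */

	inode_unlock_shared(inode);
	return 0;
}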
diff --git a/include/linux/fs_parser.h b/include/linux/fs_parser.h
index 53e566efd5fd..5a0e897cae80 100644
--- a/include/linux/fs_parser.h
+++ b/include/linux/fs_parser.h
@@ -87,14 +87,9 @@ extern int lookup_constant(const struct constant_table tbl[], const char *name,
extern const struct constant_table bool_names[];
#ifdef CONFIG_VALIDATE_FS_PARSER
-extern bool validate_constant_table(const struct constant_table *tbl, size_t tbl_size,
- int low, int high, int special);
extern bool fs_validate_description(const char *name,
const struct fs_parameter_spec *desc);
#else
-static inline bool validate_constant_table(const struct constant_table *tbl, size_t tbl_size,
- int low, int high, int special)
-{ return true; }
static inline bool fs_validate_description(const char *name,
const struct fs_parameter_spec *desc)
{ return true; }
@@ -125,8 +120,6 @@ static inline bool fs_validate_description(const char *name,
#define fsparam_u32(NAME, OPT) __fsparam(fs_param_is_u32, NAME, OPT, 0, NULL)
#define fsparam_u32oct(NAME, OPT) \
__fsparam(fs_param_is_u32, NAME, OPT, 0, (void *)8)
-#define fsparam_u32hex(NAME, OPT) \
- __fsparam(fs_param_is_u32_hex, NAME, OPT, 0, (void *)16)
#define fsparam_s32(NAME, OPT) __fsparam(fs_param_is_s32, NAME, OPT, 0, NULL)
#define fsparam_u64(NAME, OPT) __fsparam(fs_param_is_u64, NAME, OPT, 0, NULL)
#define fsparam_enum(NAME, OPT, array) __fsparam(fs_param_is_enum, NAME, OPT, 0, array)
diff --git a/include/linux/fsl/ntmp.h b/include/linux/fsl/ntmp.h
new file mode 100644
index 000000000000..916dc4fe7de3
--- /dev/null
+++ b/include/linux/fsl/ntmp.h
@@ -0,0 +1,121 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/* Copyright 2025 NXP */
+#ifndef __NETC_NTMP_H
+#define __NETC_NTMP_H
+
+#include <linux/bitops.h>
+#include <linux/if_ether.h>
+
+struct maft_keye_data {
+ u8 mac_addr[ETH_ALEN];
+ __le16 resv;
+};
+
+struct maft_cfge_data {
+ __le16 si_bitmap;
+ __le16 resv;
+};
+
+struct netc_cbdr_regs {
+ void __iomem *pir;
+ void __iomem *cir;
+ void __iomem *mr;
+
+ void __iomem *bar0;
+ void __iomem *bar1;
+ void __iomem *lenr;
+};
+
+struct netc_tbl_vers {
+ u8 maft_ver;
+ u8 rsst_ver;
+};
+
+struct netc_cbdr {
+ struct device *dev;
+ struct netc_cbdr_regs regs;
+
+ int bd_num;
+ int next_to_use;
+ int next_to_clean;
+
+ int dma_size;
+ void *addr_base;
+ void *addr_base_align;
+ dma_addr_t dma_base;
+ dma_addr_t dma_base_align;
+
+ /* Serialize the order of command BD ring */
+ spinlock_t ring_lock;
+};
+
+struct ntmp_user {
+	int cbdr_num;	/* number of control BD rings */
+ struct device *dev;
+ struct netc_cbdr *ring;
+ struct netc_tbl_vers tbl;
+};
+
+struct maft_entry_data {
+ struct maft_keye_data keye;
+ struct maft_cfge_data cfge;
+};
+
+#if IS_ENABLED(CONFIG_NXP_NETC_LIB)
+int ntmp_init_cbdr(struct netc_cbdr *cbdr, struct device *dev,
+ const struct netc_cbdr_regs *regs);
+void ntmp_free_cbdr(struct netc_cbdr *cbdr);
+
+/* NTMP APIs */
+int ntmp_maft_add_entry(struct ntmp_user *user, u32 entry_id,
+ struct maft_entry_data *maft);
+int ntmp_maft_query_entry(struct ntmp_user *user, u32 entry_id,
+ struct maft_entry_data *maft);
+int ntmp_maft_delete_entry(struct ntmp_user *user, u32 entry_id);
+int ntmp_rsst_update_entry(struct ntmp_user *user, const u32 *table,
+ int count);
+int ntmp_rsst_query_entry(struct ntmp_user *user,
+ u32 *table, int count);
+#else
+static inline int ntmp_init_cbdr(struct netc_cbdr *cbdr, struct device *dev,
+ const struct netc_cbdr_regs *regs)
+{
+ return 0;
+}
+
+static inline void ntmp_free_cbdr(struct netc_cbdr *cbdr)
+{
+}
+
+static inline int ntmp_maft_add_entry(struct ntmp_user *user, u32 entry_id,
+ struct maft_entry_data *maft)
+{
+ return 0;
+}
+
+static inline int ntmp_maft_query_entry(struct ntmp_user *user, u32 entry_id,
+ struct maft_entry_data *maft)
+{
+ return 0;
+}
+
+static inline int ntmp_maft_delete_entry(struct ntmp_user *user, u32 entry_id)
+{
+ return 0;
+}
+
+static inline int ntmp_rsst_update_entry(struct ntmp_user *user,
+ const u32 *table, int count)
+{
+ return 0;
+}
+
+static inline int ntmp_rsst_query_entry(struct ntmp_user *user,
+ u32 *table, int count)
+{
+ return 0;
+}
+
+#endif
+
+#endif
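A hedged sketch of how a NETC driver might add a MAC filter entry with the new API; the entry ID and SI bitmap are illustrative:

static int demo_add_mac(struct ntmp_user *user, const u8 *mac, u16 si_bitmap)
{
	struct maft_entry_data maft = {};

	ether_addr_copy(maft.keye.mac_addr, mac);
	maft.cfge.si_bitmap = cpu_to_le16(si_bitmap);

	/* Entry ID 0 chosen arbitrarily for the example. */
	return ntmp_maft_add_entry(user, 0, &maft);
}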
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
index fc27b53c58c2..d4034ddaf392 100644
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -250,6 +250,7 @@ struct fsnotify_group {
* full */
struct mem_cgroup *memcg; /* memcg to charge allocations */
+ struct user_namespace *user_ns; /* user ns where group was created */
/* groups can define private fields here or use the void *private */
union {
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index fbabc3d848b3..a4d3816d252a 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -328,6 +328,7 @@ ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);
* DIRECT - Used by the direct ftrace_ops helper for direct functions
* (internal ftrace only, should not be used by others)
* SUBOP - Is controlled by another op in field managed.
+ * GRAPH - Is a component of the fgraph_ops structure
*/
enum {
FTRACE_OPS_FL_ENABLED = BIT(0),
@@ -349,6 +350,7 @@ enum {
FTRACE_OPS_FL_PERMANENT = BIT(16),
FTRACE_OPS_FL_DIRECT = BIT(17),
FTRACE_OPS_FL_SUBOP = BIT(18),
+ FTRACE_OPS_FL_GRAPH = BIT(19),
};
#ifndef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
@@ -569,8 +571,6 @@ static inline void arch_ftrace_set_direct_caller(struct ftrace_regs *fregs,
#ifdef CONFIG_STACK_TRACER
-extern int stack_tracer_enabled;
-
int stack_trace_sysctl(const struct ctl_table *table, int write, void *buffer,
size_t *lenp, loff_t *ppos);
@@ -1298,16 +1298,9 @@ static inline void unpause_graph_tracing(void) { }
#ifdef CONFIG_TRACING
enum ftrace_dump_mode;
-#define MAX_TRACER_SIZE 100
-extern char ftrace_dump_on_oops[];
extern int ftrace_dump_on_oops_enabled(void);
-extern int tracepoint_printk;
extern void disable_trace_on_warning(void);
-extern int __disable_trace_on_warning;
-
-int tracepoint_printk_sysctl(const struct ctl_table *table, int write,
- void *buffer, size_t *lenp, loff_t *ppos);
#else /* CONFIG_TRACING */
static inline void disable_trace_on_warning(void) { }
diff --git a/include/linux/futex.h b/include/linux/futex.h
index b70df27d7e85..005b040c4791 100644
--- a/include/linux/futex.h
+++ b/include/linux/futex.h
@@ -4,11 +4,11 @@
#include <linux/sched.h>
#include <linux/ktime.h>
+#include <linux/mm_types.h>
#include <uapi/linux/futex.h>
struct inode;
-struct mm_struct;
struct task_struct;
/*
@@ -34,6 +34,7 @@ union futex_key {
u64 i_seq;
unsigned long pgoff;
unsigned int offset;
+ /* unsigned int node; */
} shared;
struct {
union {
@@ -42,11 +43,13 @@ union futex_key {
};
unsigned long address;
unsigned int offset;
+ /* unsigned int node; */
} private;
struct {
u64 ptr;
unsigned long word;
unsigned int offset;
+ unsigned int node; /* NOT hashed! */
} both;
};
@@ -77,7 +80,25 @@ void futex_exec_release(struct task_struct *tsk);
long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
u32 __user *uaddr2, u32 val2, u32 val3);
-#else
+int futex_hash_prctl(unsigned long arg2, unsigned long arg3, unsigned long arg4);
+
+#ifdef CONFIG_FUTEX_PRIVATE_HASH
+int futex_hash_allocate_default(void);
+void futex_hash_free(struct mm_struct *mm);
+
+static inline void futex_mm_init(struct mm_struct *mm)
+{
+ RCU_INIT_POINTER(mm->futex_phash, NULL);
+ mutex_init(&mm->futex_hash_lock);
+}
+
+#else /* !CONFIG_FUTEX_PRIVATE_HASH */
+static inline int futex_hash_allocate_default(void) { return 0; }
+static inline void futex_hash_free(struct mm_struct *mm) { }
+static inline void futex_mm_init(struct mm_struct *mm) { }
+#endif /* CONFIG_FUTEX_PRIVATE_HASH */
+
+#else /* !CONFIG_FUTEX */
static inline void futex_init_task(struct task_struct *tsk) { }
static inline void futex_exit_recursive(struct task_struct *tsk) { }
static inline void futex_exit_release(struct task_struct *tsk) { }
@@ -88,6 +109,17 @@ static inline long do_futex(u32 __user *uaddr, int op, u32 val,
{
return -EINVAL;
}
+static inline int futex_hash_prctl(unsigned long arg2, unsigned long arg3, unsigned long arg4)
+{
+ return -EINVAL;
+}
+static inline int futex_hash_allocate_default(void)
+{
+ return 0;
+}
+static inline void futex_hash_free(struct mm_struct *mm) { }
+static inline void futex_mm_init(struct mm_struct *mm) { }
+
#endif
#endif
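The mm-side hooks above are intended for address-space setup and teardown; a sketch of the expected lifecycle, with the call sites assumed rather than taken from this patch:

static void demo_mm_lifecycle(struct mm_struct *mm)
{
	int err;

	/* At mm creation: NULL private hash, initialized hash lock. */
	futex_mm_init(mm);

	/* On demand (e.g. first private-futex use, or via the prctl()
	 * backed by futex_hash_prctl()), a per-process hash may be
	 * allocated; on failure callers use the global hash. */
	err = futex_hash_allocate_default();
	if (err)
		pr_debug("private futex hash unavailable: %d\n", err);

	/* At mm teardown: release any private hash. */
	futex_hash_free(mm);
}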
diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h
index 8adc8e9cb4a7..f0b1982da0cc 100644
--- a/include/linux/gpio/consumer.h
+++ b/include/linux/gpio/consumer.h
@@ -181,6 +181,8 @@ struct gpio_desc *devm_fwnode_gpiod_get_index(struct device *dev,
enum gpiod_flags flags,
const char *label);
+bool gpiod_is_equal(struct gpio_desc *desc, struct gpio_desc *other);
+
#else /* CONFIG_GPIOLIB */
#include <linux/bug.h>
@@ -548,6 +550,13 @@ struct gpio_desc *devm_fwnode_gpiod_get_index(struct device *dev,
return ERR_PTR(-ENOSYS);
}
+static inline bool
+gpiod_is_equal(struct gpio_desc *desc, struct gpio_desc *other)
+{
+ WARN_ON(desc || other);
+ return false;
+}
+
#endif /* CONFIG_GPIOLIB */
#if IS_ENABLED(CONFIG_GPIOLIB) && IS_ENABLED(CONFIG_HTE)
@@ -588,7 +597,7 @@ struct gpio_desc *devm_fwnode_gpiod_get(struct device *dev,
struct acpi_gpio_params {
unsigned int crs_entry_index;
- unsigned int line_index;
+ unsigned short line_index;
bool active_low;
};
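
gpiod_is_equal() lets consumers check whether two descriptors refer to the
same physical line. A hedged driver-side sketch, with the "reset" and
"enable" connection IDs purely illustrative:

	struct gpio_desc *reset, *enable;

	reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
	enable = devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_LOW);
	if (!IS_ERR(reset) && !IS_ERR_OR_NULL(enable) &&
	    gpiod_is_equal(reset, enable))
		dev_warn(dev, "reset and enable map to the same GPIO line\n");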
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
index 4c0294a9104d..b53233051bee 100644
--- a/include/linux/gpio/driver.h
+++ b/include/linux/gpio/driver.h
@@ -287,8 +287,9 @@ struct gpio_irq_chip {
/**
* @first:
*
- * Required for static IRQ allocation. If set, irq_domain_add_simple()
- * will allocate and map all IRQs during initialization.
+ * Required for static IRQ allocation. If set,
+ * irq_domain_create_simple() will allocate and map all IRQs
+ * during initialization.
*/
unsigned int first;
diff --git a/include/linux/highmem-internal.h b/include/linux/highmem-internal.h
index dd100e849f5e..9a7683d79a4b 100644
--- a/include/linux/highmem-internal.h
+++ b/include/linux/highmem-internal.h
@@ -73,6 +73,14 @@ static inline void *kmap_local_page(struct page *page)
return __kmap_local_page_prot(page, kmap_prot);
}
+static inline void *kmap_local_page_try_from_panic(struct page *page)
+{
+ if (!PageHighMem(page))
+ return page_address(page);
+	/* If the page is in HighMem, it's not safe to kmap it. */
+ return NULL;
+}
+
static inline void *kmap_local_folio(struct folio *folio, size_t offset)
{
struct page *page = folio_page(folio, offset / PAGE_SIZE);
@@ -180,6 +188,11 @@ static inline void *kmap_local_page(struct page *page)
return page_address(page);
}
+static inline void *kmap_local_page_try_from_panic(struct page *page)
+{
+ return page_address(page);
+}
+
static inline void *kmap_local_folio(struct folio *folio, size_t offset)
{
return page_address(&folio->page) + offset;
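
kmap_local_page_try_from_panic() gives panic-path code a best-effort mapping:
lowmem pages map via page_address(), while highmem pages simply return NULL
rather than risk the kmap machinery during a panic. A sketch of the intended
call pattern (a hypothetical panic-time dumper):

	void *vaddr = kmap_local_page_try_from_panic(page);

	if (vaddr) {
		/* Lowmem only, so no kunmap_local() pairing is needed. */
		memcpy(dump_buf, vaddr, PAGE_SIZE);
	}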
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 5c6bea81a90e..c698f8415675 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -461,7 +461,7 @@ static inline void memcpy_from_folio(char *to, struct folio *folio,
const char *from = kmap_local_folio(folio, offset);
size_t chunk = len;
- if (folio_test_highmem(folio) &&
+ if (folio_test_partial_kmap(folio) &&
chunk > PAGE_SIZE - offset_in_page(offset))
chunk = PAGE_SIZE - offset_in_page(offset);
memcpy(to, from, chunk);
@@ -489,7 +489,7 @@ static inline void memcpy_to_folio(struct folio *folio, size_t offset,
char *to = kmap_local_folio(folio, offset);
size_t chunk = len;
- if (folio_test_highmem(folio) &&
+ if (folio_test_partial_kmap(folio) &&
chunk > PAGE_SIZE - offset_in_page(offset))
chunk = PAGE_SIZE - offset_in_page(offset);
memcpy(to, from, chunk);
@@ -522,7 +522,7 @@ static inline __must_check void *folio_zero_tail(struct folio *folio,
{
size_t len = folio_size(folio) - offset;
- if (folio_test_highmem(folio)) {
+ if (folio_test_partial_kmap(folio)) {
size_t max = PAGE_SIZE - offset_in_page(offset);
while (len > max) {
@@ -560,7 +560,7 @@ static inline void folio_fill_tail(struct folio *folio, size_t offset,
VM_BUG_ON(offset + len > folio_size(folio));
- if (folio_test_highmem(folio)) {
+ if (folio_test_partial_kmap(folio)) {
size_t max = PAGE_SIZE - offset_in_page(offset);
while (len > max) {
@@ -597,7 +597,7 @@ static inline size_t memcpy_from_file_folio(char *to, struct folio *folio,
size_t offset = offset_in_folio(folio, pos);
char *from = kmap_local_folio(folio, offset);
- if (folio_test_highmem(folio)) {
+ if (folio_test_partial_kmap(folio)) {
offset = offset_in_page(offset);
len = min_t(size_t, len, PAGE_SIZE - offset);
} else
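
Switching these helpers from folio_test_highmem() to folio_test_partial_kmap()
keeps the same per-page chunking but applies it to any folio that cannot be
mapped as one contiguous range. The loop they implement looks roughly like
this standalone sketch (illustrative, error handling elided):

	size_t copied = 0;

	while (copied < len) {
		const char *src = kmap_local_folio(folio, offset + copied);
		size_t chunk = min_t(size_t, len - copied,
				     PAGE_SIZE - offset_in_page(offset + copied));

		memcpy(dst + copied, src, chunk);
		kunmap_local(src);
		copied += chunk;
	}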
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 8f3ac832ee7f..4861a7e304bb 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -275,6 +275,7 @@ long hugetlb_change_protection(struct vm_area_struct *vma,
bool is_hugetlb_entry_migration(pte_t pte);
bool is_hugetlb_entry_hwpoisoned(pte_t pte);
void hugetlb_unshare_all_pmds(struct vm_area_struct *vma);
+void fixup_hugetlb_reservations(struct vm_area_struct *vma);
#else /* !CONFIG_HUGETLB_PAGE */
@@ -468,6 +469,10 @@ static inline vm_fault_t hugetlb_fault(struct mm_struct *mm,
static inline void hugetlb_unshare_all_pmds(struct vm_area_struct *vma) { }
+static inline void fixup_hugetlb_reservations(struct vm_area_struct *vma)
+{
+}
+
#endif /* !CONFIG_HUGETLB_PAGE */
#ifndef pgd_write
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index d6ffe01962c2..b52ac40d5830 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -1167,13 +1167,6 @@ extern int vmbus_sendpacket(struct vmbus_channel *channel,
enum vmbus_packet_type type,
u32 flags);
-extern int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
- struct hv_page_buffer pagebuffers[],
- u32 pagecount,
- void *buffer,
- u32 bufferlen,
- u64 requestid);
-
extern int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
struct vmbus_packet_mpb_array *mpb,
u32 desc_size,
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index 457b4fba88bd..420c7f9aa6ee 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -2325,6 +2325,7 @@ struct ieee80211_eht_cap_elem {
#define IEEE80211_EHT_OPER_EHT_DEF_PE_DURATION 0x04
#define IEEE80211_EHT_OPER_GROUP_ADDRESSED_BU_IND_LIMIT 0x08
#define IEEE80211_EHT_OPER_GROUP_ADDRESSED_BU_IND_EXP_MASK 0x30
+#define IEEE80211_EHT_OPER_MCS15_DISABLE 0x40
/**
* struct ieee80211_eht_operation - eht operation element
@@ -4087,6 +4088,9 @@ enum ieee80211_tdls_actioncode {
/* Defines support for enhanced multi-bssid advertisement */
#define WLAN_EXT_CAPA11_EMA_SUPPORT BIT(3)
+/* Enable Beacon Protection */
+#define WLAN_EXT_CAPA11_BCN_PROTECT BIT(4)
+
/* TDLS specific payload type in the LLC/SNAP header */
#define WLAN_TDLS_SNAP_RFTYPE 0x2
@@ -5615,6 +5619,80 @@ static inline bool ieee80211_tid_to_link_map_size_ok(const u8 *data, size_t len)
return len >= fixed + elem_len;
}
+/**
+ * ieee80211_emlsr_pad_delay_in_us - Fetch the EMLSR Padding delay
+ * in microseconds
+ * @eml_cap: EML capabilities field value from common info field of
+ * the Multi-link element
+ * Return: the EMLSR Padding delay (in microseconds) encoded in the
+ * EML Capabilities field
+ */
+
+static inline u32 ieee80211_emlsr_pad_delay_in_us(u16 eml_cap)
+{
+ /* IEEE Std 802.11be-2024 Table 9-417i—Encoding of the EMLSR
+ * Padding Delay subfield.
+ */
+ u32 pad_delay = u16_get_bits(eml_cap,
+ IEEE80211_EML_CAP_EMLSR_PADDING_DELAY);
+
+ if (!pad_delay ||
+ pad_delay > IEEE80211_EML_CAP_EMLSR_PADDING_DELAY_256US)
+ return 0;
+
+ return 32 * (1 << (pad_delay - 1));
+}
+
+/**
+ * ieee80211_emlsr_trans_delay_in_us - Fetch the EMLSR Transition
+ * delay in microseconds
+ * @eml_cap: EML capabilities field value from common info field of
+ * the Multi-link element
+ * Return: the EMLSR Transition delay (in microseconds) encoded in the
+ * EML Capabilities field
+ */
+
+static inline u32 ieee80211_emlsr_trans_delay_in_us(u16 eml_cap)
+{
+ /* IEEE Std 802.11be-2024 Table 9-417j—Encoding of the EMLSR
+ * Transition Delay subfield.
+ */
+ u32 trans_delay =
+ u16_get_bits(eml_cap,
+ IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY);
+
+ /* invalid values also just use 0 */
+ if (!trans_delay ||
+ trans_delay > IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY_256US)
+ return 0;
+
+ return 16 * (1 << (trans_delay - 1));
+}
+
+/**
+ * ieee80211_eml_trans_timeout_in_us - Fetch the EMLSR Transition
+ * timeout value in microseconds
+ * @eml_cap: EML capabilities field value from common info field of
+ * the Multi-link element
+ * Return: the EMLSR Transition timeout (in microseconds) encoded in
+ * the EML Capabilities field
+ */
+
+static inline u32 ieee80211_eml_trans_timeout_in_us(u16 eml_cap)
+{
+ /* IEEE Std 802.11be-2024 Table 9-417m—Encoding of the
+ * Transition Timeout subfield.
+ */
+ u8 timeout = u16_get_bits(eml_cap,
+ IEEE80211_EML_CAP_TRANSITION_TIMEOUT);
+
+ /* invalid values also just use 0 */
+ if (!timeout || timeout > IEEE80211_EML_CAP_TRANSITION_TIMEOUT_128TU)
+ return 0;
+
+ return 128 * (1 << (timeout - 1));
+}
+
#define for_each_mle_subelement(_elem, _data, _len) \
if (ieee80211_mle_size_ok(_data, _len)) \
for_each_element(_elem, \
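
All three helpers decode a power-of-two schedule: a subfield value of n maps
to base * 2^(n - 1), with 0 (and out-of-range codes) meaning none. A worked
example, assuming eml_cap carries padding-delay code 3 and transition-delay
code 4:

	/* pad:   32 * 2^(3 - 1) = 128 us
	 * trans: 16 * 2^(4 - 1) = 128 us
	 */
	u32 pad_us = ieee80211_emlsr_pad_delay_in_us(eml_cap);
	u32 trans_us = ieee80211_emlsr_trans_delay_in_us(eml_cap);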
diff --git a/include/linux/ima.h b/include/linux/ima.h
index 0bae61a15b60..8e29cb4e6a01 100644
--- a/include/linux/ima.h
+++ b/include/linux/ima.h
@@ -32,6 +32,9 @@ static inline void ima_appraise_parse_cmdline(void) {}
#ifdef CONFIG_IMA_KEXEC
extern void ima_add_kexec_buffer(struct kimage *image);
+extern void ima_kexec_post_load(struct kimage *image);
+#else
+static inline void ima_kexec_post_load(struct kimage *image) {}
#endif
#else
diff --git a/include/linux/inet.h b/include/linux/inet.h
index bd8276e96e60..9158772f3559 100644
--- a/include/linux/inet.h
+++ b/include/linux/inet.h
@@ -55,6 +55,6 @@ extern int in6_pton(const char *src, int srclen, u8 *dst, int delim, const char
extern int inet_pton_with_scope(struct net *net, unsigned short af,
const char *src, const char *port, struct sockaddr_storage *addr);
-extern bool inet_addr_is_any(struct sockaddr *addr);
+bool inet_addr_is_any(struct sockaddr_storage *addr);
#endif /* _LINUX_INET_H */
diff --git a/include/linux/intel_vsec.h b/include/linux/intel_vsec.h
index b94beab64610..bc95821f1bfb 100644
--- a/include/linux/intel_vsec.h
+++ b/include/linux/intel_vsec.h
@@ -139,12 +139,13 @@ static inline struct intel_vsec_device *auxdev_to_ivdev(struct auxiliary_device
}
#if IS_ENABLED(CONFIG_INTEL_VSEC)
-void intel_vsec_register(struct pci_dev *pdev,
+int intel_vsec_register(struct pci_dev *pdev,
struct intel_vsec_platform_info *info);
#else
-static inline void intel_vsec_register(struct pci_dev *pdev,
+static inline int intel_vsec_register(struct pci_dev *pdev,
struct intel_vsec_platform_info *info)
{
+ return -ENODEV;
}
#endif
#endif
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index c782a74d2a30..51b6484c0493 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -140,7 +140,7 @@ extern irqreturn_t no_action(int cpl, void *dev_id);
/*
* If a (PCI) device interrupt is not connected we set dev->irq to
* IRQ_NOTCONNECTED. This causes request_irq() to fail with -ENOTCONN, so we
- * can distingiush that case from other error returns.
+ * can distinguish that case from other error returns.
*
* 0x80000000 is guaranteed to be outside the available range of interrupts
* and easy to distinguish from other possible incorrect values.
diff --git a/include/linux/io.h b/include/linux/io.h
index 6a6bc4d46d0a..0642c7ee41db 100644
--- a/include/linux/io.h
+++ b/include/linux/io.h
@@ -183,4 +183,25 @@ static inline void arch_io_free_memtype_wc(resource_size_t base,
int devm_arch_io_reserve_memtype_wc(struct device *dev, resource_size_t start,
resource_size_t size);
+#ifdef CONFIG_STRICT_DEVMEM
+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
+{
+ u64 from = ((u64)pfn) << PAGE_SHIFT;
+ u64 to = from + size;
+ u64 cursor = from;
+
+ while (cursor < to) {
+ if (!devmem_is_allowed(pfn))
+ return 0;
+ cursor += PAGE_SIZE;
+ pfn++;
+ }
+ return 1;
+}
+#else
+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
+{
+ return 1;
+}
+#endif
#endif /* _LINUX_IO_H */
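
range_is_allowed() centralizes the STRICT_DEVMEM policy walk that /dev/mem
style mappings perform before handing physical pages to userspace. A sketch
of the expected call site in an mmap handler:

	if (!range_is_allowed(vma->vm_pgoff, vma->vm_end - vma->vm_start))
		return -EPERM;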
diff --git a/include/linux/io_uring/cmd.h b/include/linux/io_uring/cmd.h
index 0634a3de1782..53408124c1e5 100644
--- a/include/linux/io_uring/cmd.h
+++ b/include/linux/io_uring/cmd.h
@@ -140,6 +140,15 @@ static inline struct io_uring_cmd_data *io_uring_cmd_get_async_data(struct io_ur
return cmd_to_io_kiocb(cmd)->async_data;
}
+/*
+ * Return the uring_cmd's context reference as its context handle, for drivers
+ * to track per-context resources such as registered kernel IO buffers.
+ */
+static inline void *io_uring_cmd_ctx_handle(struct io_uring_cmd *cmd)
+{
+ return cmd_to_io_kiocb(cmd)->ctx;
+}
+
int io_buffer_register_bvec(struct io_uring_cmd *cmd, struct request *rq,
void (*release)(void *), unsigned int index,
unsigned int issue_flags);
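
A hedged sketch of the driver-side bookkeeping this handle enables: using the
opaque context pointer as a lookup key for per-ring resources (the dev->bufs
xarray is hypothetical):

	void *ctx = io_uring_cmd_ctx_handle(cmd);
	int ret;

	ret = xa_insert(&dev->bufs, (unsigned long)ctx, buf, GFP_KERNEL);
	if (ret)
		return ret;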
diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
index b44d201520d8..2922635986f5 100644
--- a/include/linux/io_uring_types.h
+++ b/include/linux/io_uring_types.h
@@ -40,8 +40,6 @@ enum io_uring_cmd_flags {
IO_URING_F_TASK_DEAD = (1 << 13),
};
-struct io_zcrx_ifq;
-
struct io_wq_work_node {
struct io_wq_work_node *next;
};
@@ -343,7 +341,6 @@ struct io_ring_ctx {
unsigned cached_cq_tail;
unsigned cq_entries;
struct io_ev_fd __rcu *io_ev_fd;
- unsigned cq_extra;
void *cq_wait_arg;
size_t cq_wait_size;
@@ -394,7 +391,8 @@ struct io_ring_ctx {
struct wait_queue_head poll_wq;
struct io_restriction restrictions;
- struct io_zcrx_ifq *ifq;
+ /* Stores zcrx object pointers of type struct io_zcrx_ifq */
+ struct xarray zcrx_ctxs;
u32 pers_next;
struct xarray personalities;
@@ -418,6 +416,7 @@ struct io_ring_ctx {
struct callback_head poll_wq_task_work;
struct list_head defer_list;
+ unsigned nr_drained;
struct io_alloc_cache msg_cache;
spinlock_t msg_lock;
@@ -436,6 +435,7 @@ struct io_ring_ctx {
/* protected by ->completion_lock */
unsigned evfd_last_cq_tail;
+ unsigned nr_req_allocated;
/*
* Protection for resize vs mmap races - both the mmap and resize
@@ -448,8 +448,6 @@ struct io_ring_ctx {
struct io_mapped_region ring_region;
/* used for optimised request parameter and wait argument passing */
struct io_mapped_region param_region;
- /* just one zcrx per ring for now, will move to io_zcrx_ifq eventually */
- struct io_mapped_region zcrx_region;
};
/*
@@ -653,8 +651,7 @@ struct io_kiocb {
u8 iopoll_completed;
/*
* Can be either a fixed buffer index, or used with provided buffers.
- * For the latter, before issue it points to the buffer group ID,
- * and after selection it points to the buffer ID itself.
+ * For the latter, it points to the selected buffer ID.
*/
u16 buf_index;
@@ -713,7 +710,7 @@ struct io_kiocb {
const struct cred *creds;
struct io_wq_work work;
- struct {
+ struct io_big_cqe {
u64 extra1;
u64 extra2;
} big_cqe;
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 3a8d35d41fda..15cdadace993 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -872,6 +872,10 @@ extern struct iommu_domain *iommu_get_domain_for_dev(struct device *dev);
extern struct iommu_domain *iommu_get_dma_domain(struct device *dev);
extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot, gfp_t gfp);
+int iommu_map_nosync(struct iommu_domain *domain, unsigned long iova,
+ phys_addr_t paddr, size_t size, int prot, gfp_t gfp);
+int iommu_sync_map(struct iommu_domain *domain, unsigned long iova,
+ size_t size);
extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
size_t size);
extern size_t iommu_unmap_fast(struct iommu_domain *domain,
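
The nosync/sync split lets callers batch several mappings and pay the IOTLB
synchronization cost once. A sketch, assuming an array of page-sized physical
addresses (error handling elided):

	int i;

	for (i = 0; i < n; i++)
		iommu_map_nosync(domain, iova + i * SZ_4K, phys[i], SZ_4K,
				 IOMMU_READ | IOMMU_WRITE, GFP_KERNEL);
	iommu_sync_map(domain, iova, n * SZ_4K);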
diff --git a/include/linux/ipmi.h b/include/linux/ipmi.h
index 2f74dd90c271..7da6602eab71 100644
--- a/include/linux/ipmi.h
+++ b/include/linux/ipmi.h
@@ -93,7 +93,8 @@ struct ipmi_user_hndl {
/*
* Called when the interface detects a watchdog pre-timeout. If
- * this is NULL, it will be ignored for the user.
+ * this is NULL, it will be ignored for the user. Note that you
+ * can't make any IPMI calls from here; it's called with locks held.
*/
void (*ipmi_watchdog_pretimeout)(void *handler_data);
@@ -343,4 +344,14 @@ extern int ipmi_get_smi_info(int if_num, struct ipmi_smi_info *data);
/* Helper function for computing the IPMB checksum of some data. */
unsigned char ipmb_checksum(unsigned char *data, int size);
+/*
+ * For things that must send messages at panic time, like the IPMI watchdog
+ * driver that extends the reset time on a panic, use this to send messages
+ * from panic context. Note that this puts the driver into a mode that
+ * only works at panic time, so only use it then.
+ */
+void ipmi_panic_request_and_wait(struct ipmi_user *user,
+ struct ipmi_addr *addr,
+ struct kernel_ipmi_msg *msg);
+
#endif /* __LINUX_IPMI_H */
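
A sketch of a panic-time caller, assuming addr was resolved beforehand and
using a watchdog-reset request (the netfn/cmd values are illustrative):

	struct kernel_ipmi_msg msg = {
		.netfn = 0x06,		/* application netfn */
		.cmd = 0x22,		/* reset watchdog timer */
		.data = NULL,
		.data_len = 0,
	};

	ipmi_panic_request_and_wait(user, addr, &msg);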
diff --git a/include/linux/irq.h b/include/linux/irq.h
index dd5df1e2d032..1d6b606a81ef 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -597,7 +597,6 @@ enum {
struct irqaction;
extern int setup_percpu_irq(unsigned int irq, struct irqaction *new);
-extern void remove_percpu_irq(unsigned int irq, struct irqaction *act);
#ifdef CONFIG_DEPRECATED_IRQ_CPU_ONOFFLINE
extern void irq_cpu_online(void);
@@ -700,7 +699,7 @@ extern void note_interrupt(struct irq_desc *desc, irqreturn_t action_ret);
extern int noirqdebug_setup(char *str);
/* Checks whether the interrupt can be requested by request_irq(): */
-extern int can_request_irq(unsigned int irq, unsigned long irqflags);
+extern bool can_request_irq(unsigned int irq, unsigned long irqflags);
/* Dummy irq-chip implementations: */
extern struct irq_chip no_irq_chip;
@@ -1222,31 +1221,6 @@ static inline struct irq_chip_type *irq_data_get_chip_type(struct irq_data *d)
#define IRQ_MSK(n) (u32)((n) < 32 ? ((1 << (n)) - 1) : UINT_MAX)
-#ifdef CONFIG_SMP
-static inline void irq_gc_lock(struct irq_chip_generic *gc)
-{
- raw_spin_lock(&gc->lock);
-}
-
-static inline void irq_gc_unlock(struct irq_chip_generic *gc)
-{
- raw_spin_unlock(&gc->lock);
-}
-#else
-static inline void irq_gc_lock(struct irq_chip_generic *gc) { }
-static inline void irq_gc_unlock(struct irq_chip_generic *gc) { }
-#endif
-
-/*
- * The irqsave variants are for usage in non interrupt code. Do not use
- * them in irq_chip callbacks. Use irq_gc_lock() instead.
- */
-#define irq_gc_lock_irqsave(gc, flags) \
- raw_spin_lock_irqsave(&(gc)->lock, flags)
-
-#define irq_gc_unlock_irqrestore(gc, flags) \
- raw_spin_unlock_irqrestore(&(gc)->lock, flags)
-
static inline void irq_reg_writel(struct irq_chip_generic *gc,
u32 val, int reg_offset)
{
diff --git a/include/linux/irqchip/irq-msi-lib.h b/include/linux/irqchip/irq-msi-lib.h
new file mode 100644
index 000000000000..dd8d1d138544
--- /dev/null
+++ b/include/linux/irqchip/irq-msi-lib.h
@@ -0,0 +1,27 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (C) 2022 Linutronix GmbH
+// Copyright (C) 2022 Intel
+
+#ifndef _IRQCHIP_IRQ_MSI_LIB_H
+#define _IRQCHIP_IRQ_MSI_LIB_H
+
+#include <linux/bits.h>
+#include <linux/irqdomain.h>
+#include <linux/msi.h>
+
+#ifdef CONFIG_PCI_MSI
+#define MATCH_PCI_MSI BIT(DOMAIN_BUS_PCI_MSI)
+#else
+#define MATCH_PCI_MSI (0)
+#endif
+
+#define MATCH_PLATFORM_MSI BIT(DOMAIN_BUS_PLATFORM_MSI)
+
+int msi_lib_irq_domain_select(struct irq_domain *d, struct irq_fwspec *fwspec,
+ enum irq_domain_bus_token bus_token);
+
+bool msi_lib_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
+ struct irq_domain *real_parent,
+ struct msi_domain_info *info);
+
+#endif /* _IRQCHIP_IRQ_MSI_LIB_H */
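
Typical irqchip wiring for these helpers, as a sketch: the domain's select
callback and the MSI parent's init_dev_msi_info both come straight from the
library (my_irq_domain_alloc and the flag choices are driver-specific
placeholders):

	static const struct msi_parent_ops my_msi_parent_ops = {
		.supported_flags	= MSI_GENERIC_FLAGS_MASK,
		.bus_select_token	= DOMAIN_BUS_NEXUS,
		.bus_select_mask	= MATCH_PCI_MSI | MATCH_PLATFORM_MSI,
		.init_dev_msi_info	= msi_lib_init_dev_msi_info,
	};

	static const struct irq_domain_ops my_domain_ops = {
		.select	= msi_lib_irq_domain_select,
		.alloc	= my_irq_domain_alloc,
		.free	= irq_domain_free_irqs_common,
	};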
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index bb7111105296..7387d183029b 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -1,30 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * irq_domain - IRQ translation domains
+ * irq_domain - IRQ Translation Domains
*
- * Translation infrastructure between hw and linux irq numbers. This is
- * helpful for interrupt controllers to implement mapping between hardware
- * irq numbers and the Linux irq number space.
- *
- * irq_domains also have hooks for translating device tree or other
- * firmware interrupt representations into a hardware irq number that
- * can be mapped back to a Linux irq number without any extra platform
- * support code.
- *
- * Interrupt controller "domain" data structure. This could be defined as a
- * irq domain controller. That is, it handles the mapping between hardware
- * and virtual interrupt numbers for a given interrupt domain. The domain
- * structure is generally created by the PIC code for a given PIC instance
- * (though a domain can cover more than one PIC if they have a flat number
- * model). It's the domain callbacks that are responsible for setting the
- * irq_chip on a given irq_desc after it's been mapped.
- *
- * The host code and data structures use a fwnode_handle pointer to
- * identify the domain. In some cases, and in order to preserve source
- * code compatibility, this fwnode pointer is "upgraded" to a DT
- * device_node. For those firmware infrastructures that do not provide
- * a unique identifier for an interrupt controller, the irq_domain
- * code offers a fwnode allocator.
+ * See Documentation/core-api/irq/irq-domain.rst for the details.
*/
#ifndef _LINUX_IRQDOMAIN_H
@@ -61,9 +39,9 @@ struct msi_parent_ops;
* pass a device-specific description of an interrupt.
*/
struct irq_fwspec {
- struct fwnode_handle *fwnode;
- int param_count;
- u32 param[IRQ_DOMAIN_IRQ_SPEC_PARAMS];
+ struct fwnode_handle *fwnode;
+ int param_count;
+ u32 param[IRQ_DOMAIN_IRQ_SPEC_PARAMS];
};
/* Conversion function from of_phandle_args fields to fwspec */
@@ -72,26 +50,26 @@ void of_phandle_args_to_fwspec(struct device_node *np, const u32 *args,
/**
* struct irq_domain_ops - Methods for irq_domain objects
- * @match: Match an interrupt controller device node to a domain, returns
- * 1 on a match
- * @select: Match an interrupt controller fw specification. It is more generic
- * than @match as it receives a complete struct irq_fwspec. Therefore,
- * @select is preferred if provided. Returns 1 on a match.
- * @map: Create or update a mapping between a virtual irq number and a hw
- * irq number. This is called only once for a given mapping.
- * @unmap: Dispose of such a mapping
- * @xlate: Given a device tree node and interrupt specifier, decode
- * the hardware irq number and linux irq type value.
- * @alloc: Allocate @nr_irqs interrupts starting from @virq.
- * @free: Free @nr_irqs interrupts starting from @virq.
- * @activate: Activate one interrupt in HW (@irqd). If @reserve is set, only
- * reserve the vector. If unset, assign the vector (called from
- * request_irq()).
- * @deactivate: Disarm one interrupt (@irqd).
- * @translate: Given @fwspec, decode the hardware irq number (@out_hwirq) and
- * linux irq type value (@out_type). This is a generalised @xlate
- * (over struct irq_fwspec) and is preferred if provided.
- * @debug_show: For domains to show specific data for an interrupt in debugfs.
+ * @match: Match an interrupt controller device node to a domain, returns
+ * 1 on a match
+ * @select: Match an interrupt controller fw specification. It is more generic
+ * than @match as it receives a complete struct irq_fwspec. Therefore,
+ * @select is preferred if provided. Returns 1 on a match.
+ * @map: Create or update a mapping between a virtual irq number and a hw
+ * irq number. This is called only once for a given mapping.
+ * @unmap: Dispose of such a mapping
+ * @xlate: Given a device tree node and interrupt specifier, decode
+ * the hardware irq number and linux irq type value.
+ * @alloc: Allocate @nr_irqs interrupts starting from @virq.
+ * @free: Free @nr_irqs interrupts starting from @virq.
+ * @activate: Activate one interrupt in HW (@irqd). If @reserve is set, only
+ * reserve the vector. If unset, assign the vector (called from
+ * request_irq()).
+ * @deactivate: Disarm one interrupt (@irqd).
+ * @translate: Given @fwspec, decode the hardware irq number (@out_hwirq) and
+ * linux irq type value (@out_type). This is a generalised @xlate
+ * (over struct irq_fwspec) and is preferred if provided.
+ * @debug_show: For domains to show specific data for an interrupt in debugfs.
*
* Functions below are provided by the driver and called whenever a new mapping
* is created or an old mapping is disposed. The driver can then proceed to
@@ -99,29 +77,29 @@ void of_phandle_args_to_fwspec(struct device_node *np, const u32 *args,
* to setup the irq_desc when returning from map().
*/
struct irq_domain_ops {
- int (*match)(struct irq_domain *d, struct device_node *node,
- enum irq_domain_bus_token bus_token);
- int (*select)(struct irq_domain *d, struct irq_fwspec *fwspec,
- enum irq_domain_bus_token bus_token);
- int (*map)(struct irq_domain *d, unsigned int virq, irq_hw_number_t hw);
- void (*unmap)(struct irq_domain *d, unsigned int virq);
- int (*xlate)(struct irq_domain *d, struct device_node *node,
- const u32 *intspec, unsigned int intsize,
- unsigned long *out_hwirq, unsigned int *out_type);
+ int (*match)(struct irq_domain *d, struct device_node *node,
+ enum irq_domain_bus_token bus_token);
+ int (*select)(struct irq_domain *d, struct irq_fwspec *fwspec,
+ enum irq_domain_bus_token bus_token);
+ int (*map)(struct irq_domain *d, unsigned int virq, irq_hw_number_t hw);
+ void (*unmap)(struct irq_domain *d, unsigned int virq);
+ int (*xlate)(struct irq_domain *d, struct device_node *node,
+ const u32 *intspec, unsigned int intsize,
+ unsigned long *out_hwirq, unsigned int *out_type);
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
/* extended V2 interfaces to support hierarchy irq_domains */
- int (*alloc)(struct irq_domain *d, unsigned int virq,
- unsigned int nr_irqs, void *arg);
- void (*free)(struct irq_domain *d, unsigned int virq,
- unsigned int nr_irqs);
- int (*activate)(struct irq_domain *d, struct irq_data *irqd, bool reserve);
- void (*deactivate)(struct irq_domain *d, struct irq_data *irq_data);
- int (*translate)(struct irq_domain *d, struct irq_fwspec *fwspec,
- unsigned long *out_hwirq, unsigned int *out_type);
+ int (*alloc)(struct irq_domain *d, unsigned int virq,
+ unsigned int nr_irqs, void *arg);
+ void (*free)(struct irq_domain *d, unsigned int virq,
+ unsigned int nr_irqs);
+ int (*activate)(struct irq_domain *d, struct irq_data *irqd, bool reserve);
+ void (*deactivate)(struct irq_domain *d, struct irq_data *irq_data);
+ int (*translate)(struct irq_domain *d, struct irq_fwspec *fwspec,
+ unsigned long *out_hwirq, unsigned int *out_type);
#endif
#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
- void (*debug_show)(struct seq_file *m, struct irq_domain *d,
- struct irq_data *irqd, int ind);
+ void (*debug_show)(struct seq_file *m, struct irq_domain *d,
+ struct irq_data *irqd, int ind);
#endif
};
@@ -231,6 +209,9 @@ enum {
/* Irq domain must destroy generic chips when removed */
IRQ_DOMAIN_FLAG_DESTROY_GC = (1 << 10),
+	/* MSI address/data pair remains immutable across irq_set_affinity() */
+ IRQ_DOMAIN_FLAG_MSI_IMMUTABLE = (1 << 11),
+
/*
* Flags starting from IRQ_DOMAIN_FLAG_NONCORE are reserved
* for implementation specific purposes and ignored by the
@@ -244,8 +225,7 @@ static inline struct device_node *irq_domain_get_of_node(struct irq_domain *d)
return to_of_node(d->fwnode);
}
-static inline void irq_domain_set_pm_device(struct irq_domain *d,
- struct device *dev)
+static inline void irq_domain_set_pm_device(struct irq_domain *d, struct device *dev)
{
if (d)
d->pm_dev = dev;
@@ -261,14 +241,12 @@ enum {
IRQCHIP_FWNODE_NAMED_ID,
};
-static inline
-struct fwnode_handle *irq_domain_alloc_named_fwnode(const char *name)
+static inline struct fwnode_handle *irq_domain_alloc_named_fwnode(const char *name)
{
return __irq_domain_alloc_fwnode(IRQCHIP_FWNODE_NAMED, 0, name, NULL);
}
-static inline
-struct fwnode_handle *irq_domain_alloc_named_id_fwnode(const char *name, int id)
+static inline struct fwnode_handle *irq_domain_alloc_named_id_fwnode(const char *name, int id)
{
return __irq_domain_alloc_fwnode(IRQCHIP_FWNODE_NAMED_ID, id, name,
NULL);
@@ -281,6 +259,8 @@ static inline struct fwnode_handle *irq_domain_alloc_fwnode(phys_addr_t *pa)
void irq_domain_free_fwnode(struct fwnode_handle *fwnode);
+DEFINE_FREE(irq_domain_free_fwnode, struct fwnode_handle *, if (_T) irq_domain_free_fwnode(_T))
+
struct irq_domain_chip_generic_info;
/**
@@ -333,36 +313,19 @@ struct irq_domain *irq_domain_instantiate(const struct irq_domain_info *info);
struct irq_domain *devm_irq_domain_instantiate(struct device *dev,
const struct irq_domain_info *info);
-struct irq_domain *irq_domain_create_simple(struct fwnode_handle *fwnode,
- unsigned int size,
+struct irq_domain *irq_domain_create_simple(struct fwnode_handle *fwnode, unsigned int size,
unsigned int first_irq,
- const struct irq_domain_ops *ops,
- void *host_data);
-struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
- unsigned int size,
- unsigned int first_irq,
- irq_hw_number_t first_hwirq,
- const struct irq_domain_ops *ops,
- void *host_data);
-struct irq_domain *irq_domain_create_legacy(struct fwnode_handle *fwnode,
- unsigned int size,
- unsigned int first_irq,
- irq_hw_number_t first_hwirq,
- const struct irq_domain_ops *ops,
- void *host_data);
+ const struct irq_domain_ops *ops, void *host_data);
+struct irq_domain *irq_domain_create_legacy(struct fwnode_handle *fwnode, unsigned int size,
+ unsigned int first_irq, irq_hw_number_t first_hwirq,
+ const struct irq_domain_ops *ops, void *host_data);
struct irq_domain *irq_find_matching_fwspec(struct irq_fwspec *fwspec,
enum irq_domain_bus_token bus_token);
void irq_set_default_domain(struct irq_domain *domain);
struct irq_domain *irq_get_default_domain(void);
-int irq_domain_alloc_descs(int virq, unsigned int nr_irqs,
- irq_hw_number_t hwirq, int node,
+int irq_domain_alloc_descs(int virq, unsigned int nr_irqs, irq_hw_number_t hwirq, int node,
const struct irq_affinity_desc *affinity);
-static inline struct fwnode_handle *of_node_to_fwnode(struct device_node *node)
-{
- return node ? &node->fwnode : NULL;
-}
-
extern const struct fwnode_operations irqchip_fwnode_ops;
static inline bool is_fwnode_irqchip(const struct fwnode_handle *fwnode)
@@ -370,12 +333,10 @@ static inline bool is_fwnode_irqchip(const struct fwnode_handle *fwnode)
return fwnode && fwnode->ops == &irqchip_fwnode_ops;
}
-void irq_domain_update_bus_token(struct irq_domain *domain,
- enum irq_domain_bus_token bus_token);
+void irq_domain_update_bus_token(struct irq_domain *domain, enum irq_domain_bus_token bus_token);
-static inline
-struct irq_domain *irq_find_matching_fwnode(struct fwnode_handle *fwnode,
- enum irq_domain_bus_token bus_token)
+static inline struct irq_domain *irq_find_matching_fwnode(struct fwnode_handle *fwnode,
+ enum irq_domain_bus_token bus_token)
{
struct irq_fwspec fwspec = {
.fwnode = fwnode,
@@ -387,7 +348,7 @@ struct irq_domain *irq_find_matching_fwnode(struct fwnode_handle *fwnode,
static inline struct irq_domain *irq_find_matching_host(struct device_node *node,
enum irq_domain_bus_token bus_token)
{
- return irq_find_matching_fwnode(of_node_to_fwnode(node), bus_token);
+ return irq_find_matching_fwnode(of_fwnode_handle(node), bus_token);
}
static inline struct irq_domain *irq_find_host(struct device_node *node)
@@ -401,128 +362,92 @@ static inline struct irq_domain *irq_find_host(struct device_node *node)
return d;
}
-static inline struct irq_domain *irq_domain_add_simple(struct device_node *of_node,
- unsigned int size,
- unsigned int first_irq,
- const struct irq_domain_ops *ops,
- void *host_data)
-{
- return irq_domain_create_simple(of_node_to_fwnode(of_node), size, first_irq, ops, host_data);
-}
-
-/**
- * irq_domain_add_linear() - Allocate and register a linear revmap irq_domain.
- * @of_node: pointer to interrupt controller's device tree node.
- * @size: Number of interrupts in the domain.
- * @ops: map/unmap domain callbacks
- * @host_data: Controller private data pointer
- */
-static inline struct irq_domain *irq_domain_add_linear(struct device_node *of_node,
- unsigned int size,
- const struct irq_domain_ops *ops,
- void *host_data)
-{
- struct irq_domain_info info = {
- .fwnode = of_node_to_fwnode(of_node),
- .size = size,
- .hwirq_max = size,
- .ops = ops,
- .host_data = host_data,
- };
- struct irq_domain *d;
-
- d = irq_domain_instantiate(&info);
- return IS_ERR(d) ? NULL : d;
-}
-
#ifdef CONFIG_IRQ_DOMAIN_NOMAP
-static inline struct irq_domain *irq_domain_add_nomap(struct device_node *of_node,
- unsigned int max_irq,
- const struct irq_domain_ops *ops,
- void *host_data)
+static inline struct irq_domain *irq_domain_create_nomap(struct fwnode_handle *fwnode,
+ unsigned int max_irq,
+ const struct irq_domain_ops *ops,
+ void *host_data)
{
- struct irq_domain_info info = {
- .fwnode = of_node_to_fwnode(of_node),
+ const struct irq_domain_info info = {
+ .fwnode = fwnode,
.hwirq_max = max_irq,
.direct_max = max_irq,
.ops = ops,
.host_data = host_data,
};
- struct irq_domain *d;
+ struct irq_domain *d = irq_domain_instantiate(&info);
- d = irq_domain_instantiate(&info);
return IS_ERR(d) ? NULL : d;
}
unsigned int irq_create_direct_mapping(struct irq_domain *domain);
#endif
-static inline struct irq_domain *irq_domain_add_tree(struct device_node *of_node,
- const struct irq_domain_ops *ops,
- void *host_data)
-{
- struct irq_domain_info info = {
- .fwnode = of_node_to_fwnode(of_node),
- .hwirq_max = ~0U,
- .ops = ops,
- .host_data = host_data,
- };
- struct irq_domain *d;
-
- d = irq_domain_instantiate(&info);
- return IS_ERR(d) ? NULL : d;
-}
-
+/**
+ * irq_domain_create_linear - Allocate and register a linear revmap irq_domain.
+ * @fwnode: pointer to interrupt controller's FW node.
+ * @size: Number of interrupts in the domain.
+ * @ops: map/unmap domain callbacks
+ * @host_data: Controller private data pointer
+ *
+ * Returns: Newly created irq_domain
+ */
static inline struct irq_domain *irq_domain_create_linear(struct fwnode_handle *fwnode,
- unsigned int size,
- const struct irq_domain_ops *ops,
- void *host_data)
+ unsigned int size,
+ const struct irq_domain_ops *ops,
+ void *host_data)
{
- struct irq_domain_info info = {
+ const struct irq_domain_info info = {
.fwnode = fwnode,
.size = size,
.hwirq_max = size,
.ops = ops,
.host_data = host_data,
};
- struct irq_domain *d;
+ struct irq_domain *d = irq_domain_instantiate(&info);
- d = irq_domain_instantiate(&info);
return IS_ERR(d) ? NULL : d;
}
static inline struct irq_domain *irq_domain_create_tree(struct fwnode_handle *fwnode,
- const struct irq_domain_ops *ops,
- void *host_data)
+ const struct irq_domain_ops *ops,
+ void *host_data)
{
- struct irq_domain_info info = {
+ const struct irq_domain_info info = {
.fwnode = fwnode,
.hwirq_max = ~0,
.ops = ops,
.host_data = host_data,
};
- struct irq_domain *d;
+ struct irq_domain *d = irq_domain_instantiate(&info);
- d = irq_domain_instantiate(&info);
return IS_ERR(d) ? NULL : d;
}
void irq_domain_remove(struct irq_domain *domain);
-int irq_domain_associate(struct irq_domain *domain, unsigned int irq,
- irq_hw_number_t hwirq);
-void irq_domain_associate_many(struct irq_domain *domain,
- unsigned int irq_base,
+int irq_domain_associate(struct irq_domain *domain, unsigned int irq, irq_hw_number_t hwirq);
+void irq_domain_associate_many(struct irq_domain *domain, unsigned int irq_base,
irq_hw_number_t hwirq_base, int count);
-unsigned int irq_create_mapping_affinity(struct irq_domain *domain,
- irq_hw_number_t hwirq,
+unsigned int irq_create_mapping_affinity(struct irq_domain *domain, irq_hw_number_t hwirq,
const struct irq_affinity_desc *affinity);
unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec);
void irq_dispose_mapping(unsigned int virq);
-static inline unsigned int irq_create_mapping(struct irq_domain *domain,
- irq_hw_number_t hwirq)
+/**
+ * irq_create_mapping - Map a hardware interrupt into linux irq space
+ * @domain: domain owning this hardware interrupt or NULL for default domain
+ * @hwirq: hardware irq number in that domain space
+ *
+ * Only one mapping per hardware interrupt is permitted.
+ *
+ * If the sense/trigger is to be specified, set_irq_type() should be called
+ * on the number returned from that call.
+ *
+ * Returns: Linux irq number or 0 on error
+ */
+static inline unsigned int irq_create_mapping(struct irq_domain *domain, irq_hw_number_t hwirq)
{
return irq_create_mapping_affinity(domain, hwirq, NULL);
}
@@ -531,6 +456,13 @@ struct irq_desc *__irq_resolve_mapping(struct irq_domain *domain,
irq_hw_number_t hwirq,
unsigned int *irq);
+/**
+ * irq_resolve_mapping - Find a linux irq from a hw irq number.
+ * @domain: domain owning this hardware interrupt
+ * @hwirq: hardware irq number in that domain space
+ *
+ * Returns: Interrupt descriptor
+ */
static inline struct irq_desc *irq_resolve_mapping(struct irq_domain *domain,
irq_hw_number_t hwirq)
{
@@ -539,8 +471,10 @@ static inline struct irq_desc *irq_resolve_mapping(struct irq_domain *domain,
/**
* irq_find_mapping() - Find a linux irq from a hw irq number.
- * @domain: domain owning this hardware interrupt
- * @hwirq: hardware irq number in that domain space
+ * @domain: domain owning this hardware interrupt
+ * @hwirq: hardware irq number in that domain space
+ *
+ * Returns: Linux irq number or 0 if not found
*/
static inline unsigned int irq_find_mapping(struct irq_domain *domain,
irq_hw_number_t hwirq)
@@ -553,107 +487,115 @@ static inline unsigned int irq_find_mapping(struct irq_domain *domain,
return 0;
}
-static inline unsigned int irq_linear_revmap(struct irq_domain *domain,
- irq_hw_number_t hwirq)
-{
- return irq_find_mapping(domain, hwirq);
-}
-
extern const struct irq_domain_ops irq_domain_simple_ops;
/* stock xlate functions */
int irq_domain_xlate_onecell(struct irq_domain *d, struct device_node *ctrlr,
- const u32 *intspec, unsigned int intsize,
- irq_hw_number_t *out_hwirq, unsigned int *out_type);
+ const u32 *intspec, unsigned int intsize,
+ irq_hw_number_t *out_hwirq, unsigned int *out_type);
int irq_domain_xlate_twocell(struct irq_domain *d, struct device_node *ctrlr,
- const u32 *intspec, unsigned int intsize,
- irq_hw_number_t *out_hwirq, unsigned int *out_type);
+ const u32 *intspec, unsigned int intsize,
+ irq_hw_number_t *out_hwirq, unsigned int *out_type);
int irq_domain_xlate_onetwocell(struct irq_domain *d, struct device_node *ctrlr,
- const u32 *intspec, unsigned int intsize,
- irq_hw_number_t *out_hwirq, unsigned int *out_type);
-
-int irq_domain_translate_twocell(struct irq_domain *d,
- struct irq_fwspec *fwspec,
- unsigned long *out_hwirq,
- unsigned int *out_type);
-
-int irq_domain_translate_onecell(struct irq_domain *d,
- struct irq_fwspec *fwspec,
- unsigned long *out_hwirq,
- unsigned int *out_type);
+ const u32 *intspec, unsigned int intsize,
+ irq_hw_number_t *out_hwirq, unsigned int *out_type);
+int irq_domain_xlate_twothreecell(struct irq_domain *d, struct device_node *ctrlr,
+ const u32 *intspec, unsigned int intsize,
+ irq_hw_number_t *out_hwirq, unsigned int *out_type);
+
+int irq_domain_translate_onecell(struct irq_domain *d, struct irq_fwspec *fwspec,
+ unsigned long *out_hwirq, unsigned int *out_type);
+int irq_domain_translate_twocell(struct irq_domain *d, struct irq_fwspec *fwspec,
+ unsigned long *out_hwirq, unsigned int *out_type);
+int irq_domain_translate_twothreecell(struct irq_domain *d, struct irq_fwspec *fwspec,
+ unsigned long *out_hwirq, unsigned int *out_type);
/* IPI functions */
int irq_reserve_ipi(struct irq_domain *domain, const struct cpumask *dest);
int irq_destroy_ipi(unsigned int irq, const struct cpumask *dest);
/* V2 interfaces to support hierarchy IRQ domains. */
-struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain,
- unsigned int virq);
-void irq_domain_set_info(struct irq_domain *domain, unsigned int virq,
- irq_hw_number_t hwirq,
- const struct irq_chip *chip,
- void *chip_data, irq_flow_handler_t handler,
+struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain, unsigned int virq);
+void irq_domain_set_info(struct irq_domain *domain, unsigned int virq, irq_hw_number_t hwirq,
+ const struct irq_chip *chip, void *chip_data, irq_flow_handler_t handler,
void *handler_data, const char *handler_name);
void irq_domain_reset_irq_data(struct irq_data *irq_data);
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
-struct irq_domain *irq_domain_create_hierarchy(struct irq_domain *parent,
- unsigned int flags,
- unsigned int size,
- struct fwnode_handle *fwnode,
- const struct irq_domain_ops *ops,
- void *host_data);
-
-static inline struct irq_domain *irq_domain_add_hierarchy(struct irq_domain *parent,
- unsigned int flags,
- unsigned int size,
- struct device_node *node,
- const struct irq_domain_ops *ops,
- void *host_data)
-{
- return irq_domain_create_hierarchy(parent, flags, size,
- of_node_to_fwnode(node),
- ops, host_data);
-}
-
-int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base,
- unsigned int nr_irqs, int node, void *arg,
- bool realloc,
+/**
+ * irq_domain_create_hierarchy - Add an irqdomain into the hierarchy
+ * @parent: Parent irq domain to associate with the new domain
+ * @flags: Irq domain flags associated to the domain
+ * @size: Size of the domain. See below
+ * @fwnode: Optional fwnode of the interrupt controller
+ * @ops: Pointer to the interrupt domain callbacks
+ * @host_data: Controller private data pointer
+ *
+ * If @size is 0, a tree domain is created; otherwise a linear domain.
+ *
+ * If successful, the parent is associated with the new domain and the
+ * domain flags are set.
+ *
+ * Returns: A pointer to IRQ domain, or %NULL on failure.
+ */
+static inline struct irq_domain *irq_domain_create_hierarchy(struct irq_domain *parent,
+ unsigned int flags, unsigned int size,
+ struct fwnode_handle *fwnode,
+ const struct irq_domain_ops *ops,
+ void *host_data)
+{
+ const struct irq_domain_info info = {
+ .fwnode = fwnode,
+ .size = size,
+ .hwirq_max = size ? : ~0U,
+ .ops = ops,
+ .host_data = host_data,
+ .domain_flags = flags,
+ .parent = parent,
+ };
+ struct irq_domain *d = irq_domain_instantiate(&info);
+
+ return IS_ERR(d) ? NULL : d;
+}
+
+int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base, unsigned int nr_irqs,
+ int node, void *arg, bool realloc,
const struct irq_affinity_desc *affinity);
void irq_domain_free_irqs(unsigned int virq, unsigned int nr_irqs);
int irq_domain_activate_irq(struct irq_data *irq_data, bool early);
void irq_domain_deactivate_irq(struct irq_data *irq_data);
-static inline int irq_domain_alloc_irqs(struct irq_domain *domain,
- unsigned int nr_irqs, int node, void *arg)
+/**
+ * irq_domain_alloc_irqs - Allocate IRQs from domain
+ * @domain: domain to allocate from
+ * @nr_irqs: number of IRQs to allocate
+ * @node: NUMA node id for memory allocation
+ * @arg: domain specific argument
+ *
+ * See __irq_domain_alloc_irqs()' documentation.
+ */
+static inline int irq_domain_alloc_irqs(struct irq_domain *domain, unsigned int nr_irqs,
+ int node, void *arg)
{
- return __irq_domain_alloc_irqs(domain, -1, nr_irqs, node, arg, false,
- NULL);
+ return __irq_domain_alloc_irqs(domain, -1, nr_irqs, node, arg, false, NULL);
}
-int irq_domain_set_hwirq_and_chip(struct irq_domain *domain,
- unsigned int virq,
- irq_hw_number_t hwirq,
- const struct irq_chip *chip,
+int irq_domain_set_hwirq_and_chip(struct irq_domain *domain, unsigned int virq,
+ irq_hw_number_t hwirq, const struct irq_chip *chip,
void *chip_data);
-void irq_domain_free_irqs_common(struct irq_domain *domain,
- unsigned int virq,
+void irq_domain_free_irqs_common(struct irq_domain *domain, unsigned int virq,
unsigned int nr_irqs);
-void irq_domain_free_irqs_top(struct irq_domain *domain,
- unsigned int virq, unsigned int nr_irqs);
+void irq_domain_free_irqs_top(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs);
int irq_domain_push_irq(struct irq_domain *domain, int virq, void *arg);
int irq_domain_pop_irq(struct irq_domain *domain, int virq);
-int irq_domain_alloc_irqs_parent(struct irq_domain *domain,
- unsigned int irq_base,
+int irq_domain_alloc_irqs_parent(struct irq_domain *domain, unsigned int irq_base,
unsigned int nr_irqs, void *arg);
-void irq_domain_free_irqs_parent(struct irq_domain *domain,
- unsigned int irq_base,
+void irq_domain_free_irqs_parent(struct irq_domain *domain, unsigned int irq_base,
unsigned int nr_irqs);
-int irq_domain_disconnect_hierarchy(struct irq_domain *domain,
- unsigned int virq);
+int irq_domain_disconnect_hierarchy(struct irq_domain *domain, unsigned int virq);
static inline bool irq_domain_is_hierarchy(struct irq_domain *domain)
{
@@ -662,8 +604,7 @@ static inline bool irq_domain_is_hierarchy(struct irq_domain *domain)
static inline bool irq_domain_is_ipi(struct irq_domain *domain)
{
- return domain->flags &
- (IRQ_DOMAIN_FLAG_IPI_PER_CPU | IRQ_DOMAIN_FLAG_IPI_SINGLE);
+ return domain->flags & (IRQ_DOMAIN_FLAG_IPI_PER_CPU | IRQ_DOMAIN_FLAG_IPI_SINGLE);
}
static inline bool irq_domain_is_ipi_per_cpu(struct irq_domain *domain)
@@ -691,15 +632,18 @@ static inline bool irq_domain_is_msi_device(struct irq_domain *domain)
return domain->flags & IRQ_DOMAIN_FLAG_MSI_DEVICE;
}
+static inline bool irq_domain_is_msi_immutable(struct irq_domain *domain)
+{
+ return domain->flags & IRQ_DOMAIN_FLAG_MSI_IMMUTABLE;
+}
#else /* CONFIG_IRQ_DOMAIN_HIERARCHY */
-static inline int irq_domain_alloc_irqs(struct irq_domain *domain,
- unsigned int nr_irqs, int node, void *arg)
+static inline int irq_domain_alloc_irqs(struct irq_domain *domain, unsigned int nr_irqs,
+ int node, void *arg)
{
return -1;
}
-static inline void irq_domain_free_irqs(unsigned int virq,
- unsigned int nr_irqs) { }
+static inline void irq_domain_free_irqs(unsigned int virq, unsigned int nr_irqs) { }
static inline bool irq_domain_is_hierarchy(struct irq_domain *domain)
{
@@ -739,8 +683,7 @@ static inline bool irq_domain_is_msi_device(struct irq_domain *domain)
#endif /* CONFIG_IRQ_DOMAIN_HIERARCHY */
#ifdef CONFIG_GENERIC_MSI_IRQ
-int msi_device_domain_alloc_wired(struct irq_domain *domain, unsigned int hwirq,
- unsigned int type);
+int msi_device_domain_alloc_wired(struct irq_domain *domain, unsigned int hwirq, unsigned int type);
void msi_device_domain_free_wired(struct irq_domain *domain, unsigned int virq);
#else
static inline int msi_device_domain_alloc_wired(struct irq_domain *domain, unsigned int hwirq,
@@ -755,10 +698,50 @@ static inline void msi_device_domain_free_wired(struct irq_domain *domain, unsig
}
#endif
+/* Deprecated functions. Will be removed in the merge window */
+static inline struct fwnode_handle *of_node_to_fwnode(struct device_node *node)
+{
+ return node ? &node->fwnode : NULL;
+}
+
+static inline struct irq_domain *irq_domain_add_tree(struct device_node *of_node,
+ const struct irq_domain_ops *ops,
+ void *host_data)
+{
+ struct irq_domain_info info = {
+ .fwnode = of_fwnode_handle(of_node),
+ .hwirq_max = ~0U,
+ .ops = ops,
+ .host_data = host_data,
+ };
+ struct irq_domain *d;
+
+ d = irq_domain_instantiate(&info);
+ return IS_ERR(d) ? NULL : d;
+}
+
+static inline struct irq_domain *irq_domain_add_linear(struct device_node *of_node,
+ unsigned int size,
+ const struct irq_domain_ops *ops,
+ void *host_data)
+{
+ struct irq_domain_info info = {
+ .fwnode = of_fwnode_handle(of_node),
+ .size = size,
+ .hwirq_max = size,
+ .ops = ops,
+ .host_data = host_data,
+ };
+ struct irq_domain *d;
+
+ d = irq_domain_instantiate(&info);
+ return IS_ERR(d) ? NULL : d;
+}
+
#else /* CONFIG_IRQ_DOMAIN */
static inline void irq_dispose_mapping(unsigned int virq) { }
-static inline struct irq_domain *irq_find_matching_fwnode(
- struct fwnode_handle *fwnode, enum irq_domain_bus_token bus_token)
+static inline struct irq_domain *irq_find_matching_fwnode(struct fwnode_handle *fwnode,
+ enum irq_domain_bus_token bus_token)
{
return NULL;
}
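
For DT-based drivers, converting away from the deprecated wrappers is
mechanical: wrap the device_node with of_fwnode_handle() and call the
fwnode-based creator. A before/after sketch:

	/* before */
	d = irq_domain_add_linear(np, size, &my_ops, priv);

	/* after */
	d = irq_domain_create_linear(of_fwnode_handle(np), size, &my_ops, priv);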
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index 023e8abdb99a..43b9297fe8a7 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -1723,7 +1723,7 @@ static inline int tid_geq(tid_t x, tid_t y)
return (difference >= 0);
}
-extern int jbd2_journal_blocks_per_page(struct inode *inode);
+extern int jbd2_journal_blocks_per_folio(struct inode *inode);
extern size_t journal_tag_bytes(journal_t *journal);
static inline int jbd2_journal_has_csum_v2or3(journal_t *journal)
@@ -1766,8 +1766,7 @@ static inline unsigned long jbd2_log_space_left(journal_t *journal)
#define BJ_Reserved 4 /* Buffer is reserved for access by journal */
#define BJ_Types 5
-static inline u32 jbd2_chksum(journal_t *journal, u32 crc,
- const void *address, unsigned int length)
+static inline u32 jbd2_chksum(u32 crc, const void *address, unsigned int length)
{
return crc32c(crc, address, length);
}
diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
index 0ea8c9887429..91b20788273d 100644
--- a/include/linux/jiffies.h
+++ b/include/linux/jiffies.h
@@ -59,7 +59,7 @@
/* LATCH is used in the interval timer and ftape setup. */
#define LATCH ((CLOCK_TICK_RATE + HZ/2) / HZ) /* For divider */
-extern int register_refined_jiffies(long clock_tick_rate);
+extern void register_refined_jiffies(long clock_tick_rate);
/* TICK_USEC is the time between ticks in usec assuming SHIFTED_HZ */
#define TICK_USEC ((USEC_PER_SEC + HZ/2) / HZ)
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index c8971861521a..53ef1b6c8712 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -369,6 +369,9 @@ struct kimage {
phys_addr_t ima_buffer_addr;
size_t ima_buffer_size;
+
+ unsigned long ima_segment_index;
+ bool is_ima_segment_index_set;
#endif
/* Core ELF header buffer */
@@ -474,13 +477,19 @@ extern bool kexec_file_dbg_print;
#define kexec_dprintk(fmt, arg...) \
do { if (kexec_file_dbg_print) pr_info(fmt, ##arg); } while (0)
+extern void *kimage_map_segment(struct kimage *image, unsigned long addr, unsigned long size);
+extern void kimage_unmap_segment(void *buffer);
#else /* !CONFIG_KEXEC_CORE */
struct pt_regs;
struct task_struct;
+struct kimage;
static inline void __crash_kexec(struct pt_regs *regs) { }
static inline void crash_kexec(struct pt_regs *regs) { }
static inline int kexec_should_crash(struct task_struct *p) { return 0; }
static inline int kexec_crash_loaded(void) { return 0; }
+static inline void *kimage_map_segment(struct kimage *image, unsigned long addr, unsigned long size)
+{ return NULL; }
+static inline void kimage_unmap_segment(void *buffer) { }
#define kexec_in_progress false
#endif /* CONFIG_KEXEC_CORE */
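
kimage_map_segment() exists so post-load code (IMA here) can edit a loaded
segment in place. A hedged sketch of that pattern:

	void *buf = kimage_map_segment(image, image->ima_buffer_addr,
				       image->ima_buffer_size);

	if (buf) {
		/* ... update the carried IMA measurement list ... */
		kimage_unmap_segment(buf);
	}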
diff --git a/include/linux/kvm_dirty_ring.h b/include/linux/kvm_dirty_ring.h
index 4862c98d80d3..da4d9b5f58f1 100644
--- a/include/linux/kvm_dirty_ring.h
+++ b/include/linux/kvm_dirty_ring.h
@@ -32,7 +32,7 @@ struct kvm_dirty_ring {
 * If CONFIG_HAVE_KVM_DIRTY_RING is not defined, kvm_dirty_ring.o should
 * not be included either, so define these nop functions for the arch.
*/
-static inline u32 kvm_dirty_ring_get_rsvd_entries(void)
+static inline u32 kvm_dirty_ring_get_rsvd_entries(struct kvm *kvm)
{
return 0;
}
@@ -42,7 +42,7 @@ static inline bool kvm_use_dirty_bitmap(struct kvm *kvm)
return true;
}
-static inline int kvm_dirty_ring_alloc(struct kvm_dirty_ring *ring,
+static inline int kvm_dirty_ring_alloc(struct kvm *kvm, struct kvm_dirty_ring *ring,
int index, u32 size)
{
return 0;
@@ -71,11 +71,12 @@ static inline void kvm_dirty_ring_free(struct kvm_dirty_ring *ring)
#else /* CONFIG_HAVE_KVM_DIRTY_RING */
-int kvm_cpu_dirty_log_size(void);
+int kvm_cpu_dirty_log_size(struct kvm *kvm);
bool kvm_use_dirty_bitmap(struct kvm *kvm);
bool kvm_arch_allow_write_without_running_vcpu(struct kvm *kvm);
-u32 kvm_dirty_ring_get_rsvd_entries(void);
-int kvm_dirty_ring_alloc(struct kvm_dirty_ring *ring, int index, u32 size);
+u32 kvm_dirty_ring_get_rsvd_entries(struct kvm *kvm);
+int kvm_dirty_ring_alloc(struct kvm *kvm, struct kvm_dirty_ring *ring,
+ int index, u32 size);
/*
* called with kvm->slots_lock held, returns the number of
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 291d49b9bf05..1dedc421b3e3 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -1610,6 +1610,7 @@ void kvm_arch_disable_virtualization(void);
int kvm_arch_enable_virtualization_cpu(void);
void kvm_arch_disable_virtualization_cpu(void);
#endif
+bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);
@@ -2284,6 +2285,7 @@ static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
}
#ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING
+extern bool enable_virt_at_load;
extern bool kvm_rebooting;
#endif
@@ -2571,4 +2573,12 @@ long kvm_arch_vcpu_pre_fault_memory(struct kvm_vcpu *vcpu,
struct kvm_pre_fault_memory *range);
#endif
+#ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING
+int kvm_enable_virtualization(void);
+void kvm_disable_virtualization(void);
+#else
+static inline int kvm_enable_virtualization(void) { return 0; }
+static inline void kvm_disable_virtualization(void) { }
+#endif
+
#endif
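
Exporting the enable/disable pair lets arch code hold virtualization on
outside of VM lifetimes (for example during module-scoped hardware setup).
A sketch of the expected usage:

	int r = kvm_enable_virtualization();

	if (r)
		return r;
	/* ... hardware setup that requires VMX/SVM enabled ... */
	kvm_disable_virtualization();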
diff --git a/include/linux/livepatch_sched.h b/include/linux/livepatch_sched.h
index 013794fb5da0..065c185f2763 100644
--- a/include/linux/livepatch_sched.h
+++ b/include/linux/livepatch_sched.h
@@ -3,27 +3,23 @@
#define _LINUX_LIVEPATCH_SCHED_H_
#include <linux/jump_label.h>
-#include <linux/static_call_types.h>
+#include <linux/sched.h>
#ifdef CONFIG_LIVEPATCH
void __klp_sched_try_switch(void);
-#if !defined(CONFIG_PREEMPT_DYNAMIC) || !defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
-
DECLARE_STATIC_KEY_FALSE(klp_sched_try_switch_key);
-static __always_inline void klp_sched_try_switch(void)
+static __always_inline void klp_sched_try_switch(struct task_struct *curr)
{
- if (static_branch_unlikely(&klp_sched_try_switch_key))
+ if (static_branch_unlikely(&klp_sched_try_switch_key) &&
+ READ_ONCE(curr->__state) & TASK_FREEZABLE)
__klp_sched_try_switch();
}
-#endif /* !CONFIG_PREEMPT_DYNAMIC || !CONFIG_HAVE_PREEMPT_DYNAMIC_CALL */
-
#else /* !CONFIG_LIVEPATCH */
-static inline void klp_sched_try_switch(void) {}
-static inline void __klp_sched_try_switch(void) {}
+static inline void klp_sched_try_switch(struct task_struct *curr) {}
#endif /* CONFIG_LIVEPATCH */
#endif /* _LINUX_LIVEPATCH_SCHED_H_ */
diff --git a/include/linux/mdio.h b/include/linux/mdio.h
index 3c3deac57894..e43ff9f980a4 100644
--- a/include/linux/mdio.h
+++ b/include/linux/mdio.h
@@ -45,10 +45,7 @@ struct mdio_device {
unsigned int reset_deassert_delay;
};
-static inline struct mdio_device *to_mdio_device(const struct device *dev)
-{
- return container_of(dev, struct mdio_device, dev);
-}
+#define to_mdio_device(__dev) container_of_const(__dev, struct mdio_device, dev)
/* struct mdio_driver_common: Common to all MDIO drivers */
struct mdio_driver_common {
diff --git a/include/linux/mfd/max77759.h b/include/linux/mfd/max77759.h
new file mode 100644
index 000000000000..c6face34e385
--- /dev/null
+++ b/include/linux/mfd/max77759.h
@@ -0,0 +1,165 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2020 Google Inc.
+ * Copyright 2025 Linaro Ltd.
+ *
+ * Maxim MAX77759 core driver
+ */
+
+#ifndef __LINUX_MFD_MAX77759_H
+#define __LINUX_MFD_MAX77759_H
+
+#include <linux/completion.h>
+#include <linux/mutex.h>
+#include <linux/regmap.h>
+
+#define MAX77759_PMIC_REG_PMIC_ID 0x00
+#define MAX77759_PMIC_REG_PMIC_REVISION 0x01
+#define MAX77759_PMIC_REG_OTP_REVISION 0x02
+#define MAX77759_PMIC_REG_INTSRC 0x22
+#define MAX77759_PMIC_REG_INTSRCMASK 0x23
+#define MAX77759_PMIC_REG_INTSRC_MAXQ BIT(3)
+#define MAX77759_PMIC_REG_INTSRC_TOPSYS BIT(1)
+#define MAX77759_PMIC_REG_INTSRC_CHGR BIT(0)
+#define MAX77759_PMIC_REG_TOPSYS_INT 0x24
+#define MAX77759_PMIC_REG_TOPSYS_INT_MASK 0x26
+#define MAX77759_PMIC_REG_TOPSYS_INT_TSHDN BIT(6)
+#define MAX77759_PMIC_REG_TOPSYS_INT_SYSOVLO BIT(5)
+#define MAX77759_PMIC_REG_TOPSYS_INT_SYSUVLO BIT(4)
+#define MAX77759_PMIC_REG_TOPSYS_INT_FSHIP BIT(0)
+#define MAX77759_PMIC_REG_I2C_CNFG 0x40
+#define MAX77759_PMIC_REG_SWRESET 0x50
+#define MAX77759_PMIC_REG_CONTROL_FG 0x51
+
+#define MAX77759_MAXQ_REG_UIC_INT1 0x64
+#define MAX77759_MAXQ_REG_UIC_INT1_APCMDRESI BIT(7)
+#define MAX77759_MAXQ_REG_UIC_INT1_SYSMSGI BIT(6)
+#define MAX77759_MAXQ_REG_UIC_INT1_GPIO6I BIT(1)
+#define MAX77759_MAXQ_REG_UIC_INT1_GPIO5I BIT(0)
+#define MAX77759_MAXQ_REG_UIC_INT1_GPIOxI(offs, en) (((en) & 1) << (offs))
+#define MAX77759_MAXQ_REG_UIC_INT1_GPIOxI_MASK(offs) \
+ MAX77759_MAXQ_REG_UIC_INT1_GPIOxI(offs, ~0)
+#define MAX77759_MAXQ_REG_UIC_INT2 0x65
+#define MAX77759_MAXQ_REG_UIC_INT3 0x66
+#define MAX77759_MAXQ_REG_UIC_INT4 0x67
+#define MAX77759_MAXQ_REG_UIC_UIC_STATUS1 0x68
+#define MAX77759_MAXQ_REG_UIC_UIC_STATUS2 0x69
+#define MAX77759_MAXQ_REG_UIC_UIC_STATUS3 0x6a
+#define MAX77759_MAXQ_REG_UIC_UIC_STATUS4 0x6b
+#define MAX77759_MAXQ_REG_UIC_UIC_STATUS5 0x6c
+#define MAX77759_MAXQ_REG_UIC_UIC_STATUS6 0x6d
+#define MAX77759_MAXQ_REG_UIC_UIC_STATUS7 0x6f
+#define MAX77759_MAXQ_REG_UIC_UIC_STATUS8 0x6f
+#define MAX77759_MAXQ_REG_UIC_INT1_M 0x70
+#define MAX77759_MAXQ_REG_UIC_INT2_M 0x71
+#define MAX77759_MAXQ_REG_UIC_INT3_M 0x72
+#define MAX77759_MAXQ_REG_UIC_INT4_M 0x73
+#define MAX77759_MAXQ_REG_AP_DATAOUT0 0x81
+#define MAX77759_MAXQ_REG_AP_DATAOUT32 0xa1
+#define MAX77759_MAXQ_REG_AP_DATAIN0 0xb1
+#define MAX77759_MAXQ_REG_UIC_SWRST 0xe0
+
+#define MAX77759_CHGR_REG_CHG_INT 0xb0
+#define MAX77759_CHGR_REG_CHG_INT2 0xb1
+#define MAX77759_CHGR_REG_CHG_INT_MASK 0xb2
+#define MAX77759_CHGR_REG_CHG_INT2_MASK 0xb3
+#define MAX77759_CHGR_REG_CHG_INT_OK 0xb4
+#define MAX77759_CHGR_REG_CHG_DETAILS_00 0xb5
+#define MAX77759_CHGR_REG_CHG_DETAILS_01 0xb6
+#define MAX77759_CHGR_REG_CHG_DETAILS_02 0xb7
+#define MAX77759_CHGR_REG_CHG_DETAILS_03 0xb8
+#define MAX77759_CHGR_REG_CHG_CNFG_00 0xb9
+#define MAX77759_CHGR_REG_CHG_CNFG_01 0xba
+#define MAX77759_CHGR_REG_CHG_CNFG_02 0xbb
+#define MAX77759_CHGR_REG_CHG_CNFG_03 0xbc
+#define MAX77759_CHGR_REG_CHG_CNFG_04 0xbd
+#define MAX77759_CHGR_REG_CHG_CNFG_05 0xbe
+#define MAX77759_CHGR_REG_CHG_CNFG_06 0xbf
+#define MAX77759_CHGR_REG_CHG_CNFG_07 0xc0
+#define MAX77759_CHGR_REG_CHG_CNFG_08 0xc1
+#define MAX77759_CHGR_REG_CHG_CNFG_09 0xc2
+#define MAX77759_CHGR_REG_CHG_CNFG_10 0xc3
+#define MAX77759_CHGR_REG_CHG_CNFG_11 0xc4
+#define MAX77759_CHGR_REG_CHG_CNFG_12 0xc5
+#define MAX77759_CHGR_REG_CHG_CNFG_13 0xc6
+#define MAX77759_CHGR_REG_CHG_CNFG_14 0xc7
+#define MAX77759_CHGR_REG_CHG_CNFG_15 0xc8
+#define MAX77759_CHGR_REG_CHG_CNFG_16 0xc9
+#define MAX77759_CHGR_REG_CHG_CNFG_17 0xca
+#define MAX77759_CHGR_REG_CHG_CNFG_18 0xcb
+#define MAX77759_CHGR_REG_CHG_CNFG_19 0xcc
+
+/* MaxQ opcodes for max77759_maxq_command() */
+#define MAX77759_MAXQ_OPCODE_MAXLENGTH (MAX77759_MAXQ_REG_AP_DATAOUT32 - \
+ MAX77759_MAXQ_REG_AP_DATAOUT0 + \
+ 1)
+
+#define MAX77759_MAXQ_OPCODE_GPIO_TRIGGER_READ 0x21
+#define MAX77759_MAXQ_OPCODE_GPIO_TRIGGER_WRITE 0x22
+#define MAX77759_MAXQ_OPCODE_GPIO_CONTROL_READ 0x23
+#define MAX77759_MAXQ_OPCODE_GPIO_CONTROL_WRITE 0x24
+#define MAX77759_MAXQ_OPCODE_USER_SPACE_READ 0x81
+#define MAX77759_MAXQ_OPCODE_USER_SPACE_WRITE 0x82
+
+/**
+ * struct max77759 - core max77759 internal data structure
+ *
+ * @regmap_top: Regmap for accessing TOP registers
+ * @maxq_lock: Lock for serializing access to MaxQ
+ * @regmap_maxq: Regmap for accessing MaxQ registers
+ * @cmd_done: Used to signal completion of a MaxQ command
+ * @regmap_charger: Regmap for accessing charger registers
+ *
+ * The MAX77759 comprises several sub-blocks, namely TOP, MaxQ, Charger,
+ * Fuel Gauge, and TCPCI.
+ */
+struct max77759 {
+ struct regmap *regmap_top;
+
+ /* This protects MaxQ commands - only one can be active */
+ struct mutex maxq_lock;
+ struct regmap *regmap_maxq;
+ struct completion cmd_done;
+
+ struct regmap *regmap_charger;
+};
+
+/**
+ * struct max77759_maxq_command - structure containing the MaxQ command to
+ * send
+ *
+ * @length: The number of bytes to send.
+ * @cmd: The data to send.
+ */
+struct max77759_maxq_command {
+ u8 length;
+ u8 cmd[] __counted_by(length);
+};
+
+/**
+ * struct max77759_maxq_response - structure containing the MaxQ response
+ *
+ * @length: The number of bytes to receive.
+ * @rsp: The data received. Must have at least @length bytes space.
+ */
+struct max77759_maxq_response {
+ u8 length;
+ u8 rsp[] __counted_by(length);
+};
+
+/**
+ * max77759_maxq_command() - issue a MaxQ command and wait for the response
+ * and associated data
+ *
+ * @max77759: The core max77759 device handle.
+ * @cmd: The command to be sent.
+ * @rsp: Any response data associated with the command will be copied here;
+ * can be %NULL if the command has no response (other than ACK).
+ *
+ * Return: 0 on success, a negative error number otherwise.
+ */
+int max77759_maxq_command(struct max77759 *max77759,
+ const struct max77759_maxq_command *cmd,
+ struct max77759_maxq_response *rsp);
+
+#endif /* __LINUX_MFD_MAX77759_H */
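A minimal caller sketch for the command/response API above, using DEFINE_FLEX() from <linux/overflow.h> for the on-stack flexible structs. The function, the two-byte response size, and the assumption that response byte 0 echoes the opcode are illustrative, not guaranteed by this header:

static int example_gpio_control_read(struct max77759 *max77759, u8 *val)
{
	DEFINE_FLEX(struct max77759_maxq_command, cmd, cmd, length, 1);
	DEFINE_FLEX(struct max77759_maxq_response, rsp, rsp, length, 2);
	int ret;

	cmd->cmd[0] = MAX77759_MAXQ_OPCODE_GPIO_CONTROL_READ;

	ret = max77759_maxq_command(max77759, cmd, rsp);
	if (ret)
		return ret;

	*val = rsp->rsp[1];	/* rsp->rsp[0] assumed to echo the opcode */
	return 0;
}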
diff --git a/include/linux/micrel_phy.h b/include/linux/micrel_phy.h
index 591bf5b5e8dc..9af01bdd86d2 100644
--- a/include/linux/micrel_phy.h
+++ b/include/linux/micrel_phy.h
@@ -44,7 +44,6 @@
#define MICREL_PHY_50MHZ_CLK BIT(0)
#define MICREL_PHY_FXEN BIT(1)
#define MICREL_KSZ8_P1_ERRATA BIT(2)
-#define MICREL_NO_EEE BIT(3)
#define MICREL_KSZ9021_EXTREG_CTRL 0xB
#define MICREL_KSZ9021_EXTREG_DATA_WRITE 0xC
diff --git a/include/linux/misc_cgroup.h b/include/linux/misc_cgroup.h
index 4bf261d41a6d..71cf5bfc6349 100644
--- a/include/linux/misc_cgroup.h
+++ b/include/linux/misc_cgroup.h
@@ -18,6 +18,10 @@ enum misc_res_type {
/** @MISC_CG_RES_SEV_ES: AMD SEV-ES ASIDs resource */
MISC_CG_RES_SEV_ES,
#endif
+#ifdef CONFIG_INTEL_TDX_HOST
+ /** @MISC_CG_RES_TDX: Intel TDX HKIDs resource */
+ MISC_CG_RES_TDX,
+#endif
/** @MISC_CG_RES_TYPES: count of enum misc_res_type constants */
MISC_CG_RES_TYPES
};
diff --git a/include/linux/mm.h b/include/linux/mm.h
index bf55206935c4..e51dba8398f7 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -385,7 +385,7 @@ extern unsigned int kobjsize(const void *objp);
#endif
#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR
-# define VM_UFFD_MINOR_BIT 38
+# define VM_UFFD_MINOR_BIT 41
# define VM_UFFD_MINOR BIT(VM_UFFD_MINOR_BIT) /* UFFD minor faults */
#else /* !CONFIG_HAVE_ARCH_USERFAULTFD_MINOR */
# define VM_UFFD_MINOR VM_NONE
@@ -4265,4 +4265,62 @@ int arch_lock_shadow_stack_status(struct task_struct *t, unsigned long status);
#define VM_SEALED_SYSMAP VM_NONE
#endif
+/*
+ * DMA mapping IDs for page_pool
+ *
+ * When DMA-mapping a page, page_pool allocates an ID (from an xarray) and
+ * stashes it in the upper bits of page->pp_magic. We always want to be able to
+ * unambiguously identify page pool pages (using page_pool_page_is_pp()). Non-PP
+ * pages can have arbitrary kernel pointers stored in the same field as pp_magic
+ * (since it overlaps with page->lru.next), so we must ensure that we cannot
+ * mistake a valid kernel pointer with any of the values we write into this
+ * field.
+ *
+ * On architectures that set POISON_POINTER_DELTA, this is already ensured,
+ * since this value becomes part of PP_SIGNATURE; meaning we can just use the
+ * space between the PP_SIGNATURE value (without POISON_POINTER_DELTA), and the
+ * lowest bits of POISON_POINTER_DELTA. On arches where POISON_POINTER_DELTA is
+ * 0, we make sure that we leave the two topmost bits empty, as that guarantees
+ * we won't mistake a valid kernel pointer for a value we set, regardless of the
+ * VMSPLIT setting.
+ *
+ * Altogether, this means that the number of bits available is constrained by
+ * the size of an unsigned long (at the upper end, subtracting two bits per the
+ * above), and the definition of PP_SIGNATURE (with or without
+ * POISON_POINTER_DELTA).
+ */
+#define PP_DMA_INDEX_SHIFT (1 + __fls(PP_SIGNATURE - POISON_POINTER_DELTA))
+#if POISON_POINTER_DELTA > 0
+/* PP_SIGNATURE includes POISON_POINTER_DELTA, so limit the size of the DMA
+ * index to not overlap with that if set
+ */
+#define PP_DMA_INDEX_BITS MIN(32, __ffs(POISON_POINTER_DELTA) - PP_DMA_INDEX_SHIFT)
+#else
+/* Always leave out the topmost two; see above. */
+#define PP_DMA_INDEX_BITS MIN(32, BITS_PER_LONG - PP_DMA_INDEX_SHIFT - 2)
+#endif
+
+#define PP_DMA_INDEX_MASK GENMASK(PP_DMA_INDEX_BITS + PP_DMA_INDEX_SHIFT - 1, \
+ PP_DMA_INDEX_SHIFT)
+
+/* Mask used for checking in page_pool_page_is_pp() below. page->pp_magic is
+ * OR'ed with PP_SIGNATURE after the allocation in order to preserve bit 0 for
+ * the head page of compound page and bit 1 for pfmemalloc page, as well as the
+ * bits used for the DMA index. page_is_pfmemalloc() is checked in
+ * __page_pool_put_page() to avoid recycling the pfmemalloc page.
+ */
+#define PP_MAGIC_MASK ~(PP_DMA_INDEX_MASK | 0x3UL)
+
+#ifdef CONFIG_PAGE_POOL
+static inline bool page_pool_page_is_pp(struct page *page)
+{
+ return (page->pp_magic & PP_MAGIC_MASK) == PP_SIGNATURE;
+}
+#else
+static inline bool page_pool_page_is_pp(struct page *page)
+{
+ return false;
+}
+#endif
+
#endif /* _LINUX_MM_H */
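A sketch of how the constants above combine when stashing and recovering an ID; the helper names are hypothetical, and the real page_pool code uses FIELD_PREP()/FIELD_GET() over PP_DMA_INDEX_MASK to the same effect:

static inline unsigned long example_pp_magic_pack(unsigned long dma_index)
{
	/* dma_index must fit in PP_DMA_INDEX_BITS */
	return (dma_index << PP_DMA_INDEX_SHIFT) | PP_SIGNATURE;
}

static inline unsigned long example_pp_magic_index(unsigned long pp_magic)
{
	return (pp_magic & PP_DMA_INDEX_MASK) >> PP_DMA_INDEX_SHIFT;
}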
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 56d07edd01f9..32ba5126e221 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -31,6 +31,7 @@
#define INIT_PASID 0
struct address_space;
+struct futex_private_hash;
struct mem_cgroup;
/*
@@ -1031,7 +1032,11 @@ struct mm_struct {
*/
seqcount_t mm_lock_seq;
#endif
-
+#ifdef CONFIG_FUTEX_PRIVATE_HASH
+ struct mutex futex_hash_lock;
+ struct futex_private_hash __rcu *futex_phash;
+ struct futex_private_hash *futex_phash_new;
+#endif
unsigned long hiwater_rss; /* High-watermark of RSS usage */
unsigned long hiwater_vm; /* High-water virtual memory usage */
diff --git a/include/linux/mman.h b/include/linux/mman.h
index bce214fece16..f4c6346a8fcd 100644
--- a/include/linux/mman.h
+++ b/include/linux/mman.h
@@ -155,7 +155,9 @@ calc_vm_flag_bits(struct file *file, unsigned long flags)
return _calc_vm_trans(flags, MAP_GROWSDOWN, VM_GROWSDOWN ) |
_calc_vm_trans(flags, MAP_LOCKED, VM_LOCKED ) |
_calc_vm_trans(flags, MAP_SYNC, VM_SYNC ) |
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
_calc_vm_trans(flags, MAP_STACK, VM_NOHUGEPAGE) |
+#endif
arch_calc_vm_flag_bits(file, flags);
}
diff --git a/include/linux/mmap_lock.h b/include/linux/mmap_lock.h
index 4706c6769902..e0eddfd306ef 100644
--- a/include/linux/mmap_lock.h
+++ b/include/linux/mmap_lock.h
@@ -7,6 +7,7 @@
#include <linux/rwsem.h>
#include <linux/tracepoint-defs.h>
#include <linux/types.h>
+#include <linux/cleanup.h>
#define MMAP_LOCK_INITIALIZER(name) \
.mmap_lock = __RWSEM_INITIALIZER((name).mmap_lock),
@@ -211,6 +212,9 @@ static inline void mmap_read_unlock(struct mm_struct *mm)
up_read(&mm->mmap_lock);
}
+DEFINE_GUARD(mmap_read_lock, struct mm_struct *,
+ mmap_read_lock(_T), mmap_read_unlock(_T))
+
static inline void mmap_read_unlock_non_owner(struct mm_struct *mm)
{
__mmap_lock_trace_released(mm, false);
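With the guard defined, read-side sections can use the scope-based cleanup machinery from <linux/cleanup.h> instead of pairing lock/unlock calls by hand; an illustrative caller:

static inline bool example_addr_mapped(struct mm_struct *mm, unsigned long addr)
{
	guard(mmap_read_lock)(mm);

	return find_vma(mm, addr) != NULL;	/* lock dropped on return */
}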
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index 526fce581657..ddcdf23d731c 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -329,6 +329,7 @@ struct mmc_card {
#define MMC_QUIRK_BROKEN_SD_CACHE (1<<15) /* Disable broken SD cache support */
#define MMC_QUIRK_BROKEN_CACHE_FLUSH (1<<16) /* Don't flush cache until the write has occurred */
#define MMC_QUIRK_BROKEN_SD_POWEROFF_NOTIFY (1<<17) /* Disable broken SD poweroff notify support */
+#define MMC_QUIRK_NO_UHS_DDR50_TUNING (1<<18) /* Disable DDR50 tuning */
bool written_flag; /* Indicates eMMC has been written since power on */
bool reenable_cmdq; /* Re-enable Command Queue */
diff --git a/include/linux/mmc/slot-gpio.h b/include/linux/mmc/slot-gpio.h
index 1ed7b0d1e4f9..23ac5696fa38 100644
--- a/include/linux/mmc/slot-gpio.h
+++ b/include/linux/mmc/slot-gpio.h
@@ -24,7 +24,7 @@ int mmc_gpiod_request_ro(struct mmc_host *host, const char *con_id,
int mmc_gpiod_set_cd_config(struct mmc_host *host, unsigned long config);
int mmc_gpio_set_cd_wake(struct mmc_host *host, bool on);
void mmc_gpiod_request_cd_irq(struct mmc_host *host);
-bool mmc_can_gpio_cd(struct mmc_host *host);
-bool mmc_can_gpio_ro(struct mmc_host *host);
+bool mmc_host_can_gpio_cd(struct mmc_host *host);
+bool mmc_host_can_gpio_ro(struct mmc_host *host);
#endif
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 6ccec1bf2896..b1c459f7a485 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -148,7 +148,6 @@ enum zone_stat_item {
NR_ZONE_WRITE_PENDING, /* Count of dirty, writeback and unstable pages */
NR_MLOCK, /* mlock()ed pages found and moved off LRU */
/* Second 128 byte cacheline */
- NR_BOUNCE,
#if IS_ENABLED(CONFIG_ZSMALLOC)
NR_ZSPAGES, /* allocated in zsmalloc */
#endif
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index bd7e60c0b72f..ebcee9328168 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -601,7 +601,7 @@ struct dmi_system_id {
#define DMI_MATCH(a, b) { .slot = a, .substr = b }
#define DMI_EXACT_MATCH(a, b) { .slot = a, .substr = b, .exact_match = 1 }
-#define PLATFORM_NAME_SIZE 20
+#define PLATFORM_NAME_SIZE 24
#define PLATFORM_MODULE_PREFIX "platform:"
struct platform_device_id {
diff --git a/include/linux/module.h b/include/linux/module.h
index b3329110d668..8050f77c3b64 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -586,6 +586,11 @@ struct module {
atomic_t refcnt;
#endif
+#ifdef CONFIG_MITIGATION_ITS
+ int its_num_pages;
+ void **its_page_array;
+#endif
+
#ifdef CONFIG_CONSTRUCTORS
/* Constructor functions. */
ctor_fn_t *ctors;
diff --git a/include/linux/mount.h b/include/linux/mount.h
index dcc17ce8a959..6904ad33ee7a 100644
--- a/include/linux/mount.h
+++ b/include/linux/mount.h
@@ -22,48 +22,51 @@ struct fs_context;
struct file;
struct path;
-#define MNT_NOSUID 0x01
-#define MNT_NODEV 0x02
-#define MNT_NOEXEC 0x04
-#define MNT_NOATIME 0x08
-#define MNT_NODIRATIME 0x10
-#define MNT_RELATIME 0x20
-#define MNT_READONLY 0x40 /* does the user want this to be r/o? */
-#define MNT_NOSYMFOLLOW 0x80
-
-#define MNT_SHRINKABLE 0x100
-#define MNT_WRITE_HOLD 0x200
-
-#define MNT_SHARED 0x1000 /* if the vfsmount is a shared mount */
-#define MNT_UNBINDABLE 0x2000 /* if the vfsmount is a unbindable mount */
-/*
- * MNT_SHARED_MASK is the set of flags that should be cleared when a
- * mount becomes shared. Currently, this is only the flag that says a
- * mount cannot be bind mounted, since this is how we create a mount
- * that shares events with another mount. If you add a new MNT_*
- * flag, consider how it interacts with shared mounts.
- */
-#define MNT_SHARED_MASK (MNT_UNBINDABLE)
-#define MNT_USER_SETTABLE_MASK (MNT_NOSUID | MNT_NODEV | MNT_NOEXEC \
- | MNT_NOATIME | MNT_NODIRATIME | MNT_RELATIME \
- | MNT_READONLY | MNT_NOSYMFOLLOW)
-#define MNT_ATIME_MASK (MNT_NOATIME | MNT_NODIRATIME | MNT_RELATIME )
-
-#define MNT_INTERNAL_FLAGS (MNT_SHARED | MNT_WRITE_HOLD | MNT_INTERNAL | \
- MNT_DOOMED | MNT_SYNC_UMOUNT | MNT_MARKED)
-
-#define MNT_INTERNAL 0x4000
-
-#define MNT_LOCK_ATIME 0x040000
-#define MNT_LOCK_NOEXEC 0x080000
-#define MNT_LOCK_NOSUID 0x100000
-#define MNT_LOCK_NODEV 0x200000
-#define MNT_LOCK_READONLY 0x400000
-#define MNT_LOCKED 0x800000
-#define MNT_DOOMED 0x1000000
-#define MNT_SYNC_UMOUNT 0x2000000
-#define MNT_MARKED 0x4000000
-#define MNT_UMOUNT 0x8000000
+enum mount_flags {
+ MNT_NOSUID = 0x01,
+ MNT_NODEV = 0x02,
+ MNT_NOEXEC = 0x04,
+ MNT_NOATIME = 0x08,
+ MNT_NODIRATIME = 0x10,
+ MNT_RELATIME = 0x20,
+ MNT_READONLY = 0x40, /* does the user want this to be r/o? */
+ MNT_NOSYMFOLLOW = 0x80,
+
+ MNT_SHRINKABLE = 0x100,
+ MNT_WRITE_HOLD = 0x200,
+
+ MNT_SHARED = 0x1000, /* if the vfsmount is a shared mount */
+ MNT_UNBINDABLE = 0x2000, /* if the vfsmount is an unbindable mount */
+
+ MNT_INTERNAL = 0x4000,
+
+ MNT_LOCK_ATIME = 0x040000,
+ MNT_LOCK_NOEXEC = 0x080000,
+ MNT_LOCK_NOSUID = 0x100000,
+ MNT_LOCK_NODEV = 0x200000,
+ MNT_LOCK_READONLY = 0x400000,
+ MNT_LOCKED = 0x800000,
+ MNT_DOOMED = 0x1000000,
+ MNT_SYNC_UMOUNT = 0x2000000,
+ MNT_MARKED = 0x4000000,
+ MNT_UMOUNT = 0x8000000,
+
+ /*
+ * MNT_SHARED_MASK is the set of flags that should be cleared when a
+ * mount becomes shared. Currently, this is only the flag that says a
+ * mount cannot be bind mounted, since this is how we create a mount
+ * that shares events with another mount. If you add a new MNT_*
+ * flag, consider how it interacts with shared mounts.
+ */
+ MNT_SHARED_MASK = MNT_UNBINDABLE,
+ MNT_USER_SETTABLE_MASK = MNT_NOSUID | MNT_NODEV | MNT_NOEXEC
+ | MNT_NOATIME | MNT_NODIRATIME | MNT_RELATIME
+ | MNT_READONLY | MNT_NOSYMFOLLOW,
+ MNT_ATIME_MASK = MNT_NOATIME | MNT_NODIRATIME | MNT_RELATIME,
+
+ MNT_INTERNAL_FLAGS = MNT_SHARED | MNT_WRITE_HOLD | MNT_INTERNAL |
+ MNT_DOOMED | MNT_SYNC_UMOUNT | MNT_MARKED,
+};
struct vfsmount {
struct dentry *mnt_root; /* root of the mounted tree */
diff --git a/include/linux/mroute_base.h b/include/linux/mroute_base.h
index 58a2401e4b55..0075f6e5c3da 100644
--- a/include/linux/mroute_base.h
+++ b/include/linux/mroute_base.h
@@ -262,6 +262,11 @@ struct mr_table {
int mroute_reg_vif_num;
};
+static inline bool mr_can_free_table(struct net *net)
+{
+ return !check_net(net) || !net_initialized(net);
+}
+
#ifdef CONFIG_IP_MROUTE_COMMON
void vif_device_init(struct vif_device *v,
struct net_device *dev,
diff --git a/include/linux/msi.h b/include/linux/msi.h
index 86e42742fd0f..6863540f4b71 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -229,8 +229,11 @@ struct msi_dev_domain {
int msi_setup_device_data(struct device *dev);
-void msi_lock_descs(struct device *dev);
-void msi_unlock_descs(struct device *dev);
+void __msi_lock_descs(struct device *dev);
+void __msi_unlock_descs(struct device *dev);
+
+DEFINE_LOCK_GUARD_1(msi_descs_lock, struct device, __msi_lock_descs(_T->lock),
+ __msi_unlock_descs(_T->lock));
struct msi_desc *msi_domain_first_desc(struct device *dev, unsigned int domid,
enum msi_desc_filter filter);
@@ -420,6 +423,7 @@ struct msi_domain_info;
* @msi_init: Domain specific init function for MSI interrupts
* @msi_free: Domain specific function to free MSI interrupts
* @msi_prepare: Prepare the allocation of the interrupts in the domain
+ * @msi_teardown: Reverse the effects of @msi_prepare
* @prepare_desc: Optional function to prepare the allocated MSI descriptor
* in the domain
* @set_desc: Set the msi descriptor for an interrupt
@@ -435,8 +439,9 @@ struct msi_domain_info;
* @get_hwirq, @msi_init and @msi_free are callbacks used by the underlying
* irqdomain.
*
- * @msi_check, @msi_prepare, @prepare_desc and @set_desc are callbacks used by the
- * msi_domain_alloc/free_irqs*() variants.
+ * @msi_check, @msi_prepare, @msi_teardown, @prepare_desc and
+ * @set_desc are callbacks used by the msi_domain_alloc/free_irqs*()
+ * variants.
*
* @domain_alloc_irqs, @domain_free_irqs can be used to override the
* default allocation/free functions (__msi_domain_alloc/free_irqs). This
@@ -458,6 +463,8 @@ struct msi_domain_ops {
int (*msi_prepare)(struct irq_domain *domain,
struct device *dev, int nvec,
msi_alloc_info_t *arg);
+ void (*msi_teardown)(struct irq_domain *domain,
+ msi_alloc_info_t *arg);
void (*prepare_desc)(struct irq_domain *domain, msi_alloc_info_t *arg,
struct msi_desc *desc);
void (*set_desc)(msi_alloc_info_t *arg,
@@ -486,6 +493,7 @@ struct msi_domain_ops {
* @handler: Optional: associated interrupt flow handler
* @handler_data: Optional: associated interrupt flow handler data
* @handler_name: Optional: associated interrupt flow handler name
+ * @alloc_data: Optional: associated interrupt allocation data
* @data: Optional: domain specific data
*/
struct msi_domain_info {
@@ -498,6 +506,7 @@ struct msi_domain_info {
irq_flow_handler_t handler;
void *handler_data;
const char *handler_name;
+ msi_alloc_info_t *alloc_data;
void *data;
};
@@ -507,12 +516,14 @@ struct msi_domain_info {
* @chip: Interrupt chip for this domain
* @ops: MSI domain ops
* @info: MSI domain info data
+ * @alloc_info: MSI domain allocation data (architecture specific)
*/
struct msi_domain_template {
char name[48];
struct irq_chip chip;
struct msi_domain_ops ops;
struct msi_domain_info info;
+ msi_alloc_info_t alloc_info;
};
/*
@@ -625,6 +636,10 @@ struct irq_domain *msi_create_irq_domain(struct fwnode_handle *fwnode,
struct msi_domain_info *info,
struct irq_domain *parent);
+struct irq_domain_info;
+struct irq_domain *msi_create_parent_irq_domain(struct irq_domain_info *info,
+ const struct msi_parent_ops *msi_parent_ops);
+
bool msi_create_device_irq_domain(struct device *dev, unsigned int domid,
const struct msi_domain_template *template,
unsigned int hwsize, void *domain_data,
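The double-underscore lock/unlock pair is meant to be reached through the new msi_descs_lock guard; an illustrative descriptor walk, assuming the existing MSI_DESC_ALL filter and msi_for_each_desc() iterator:

static void example_walk_descs(struct device *dev)
{
	struct msi_desc *desc;

	scoped_guard(msi_descs_lock, dev) {
		msi_for_each_desc(desc, dev, MSI_DESC_ALL) {
			/* descriptors are stable while the guard is held */
		}
	}
}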
diff --git a/include/linux/namei.h b/include/linux/namei.h
index bbaf55fb3101..5d085428e471 100644
--- a/include/linux/namei.h
+++ b/include/linux/namei.h
@@ -70,17 +70,16 @@ int vfs_path_parent_lookup(struct filename *filename, unsigned int flags,
int vfs_path_lookup(struct dentry *, struct vfsmount *, const char *,
unsigned int, struct path *);
-extern struct dentry *try_lookup_one_len(const char *, struct dentry *, int);
-extern struct dentry *lookup_one_len(const char *, struct dentry *, int);
-extern struct dentry *lookup_one_len_unlocked(const char *, struct dentry *, int);
-extern struct dentry *lookup_positive_unlocked(const char *, struct dentry *, int);
-struct dentry *lookup_one(struct mnt_idmap *, const char *, struct dentry *, int);
+extern struct dentry *try_lookup_noperm(struct qstr *, struct dentry *);
+extern struct dentry *lookup_noperm(struct qstr *, struct dentry *);
+extern struct dentry *lookup_noperm_unlocked(struct qstr *, struct dentry *);
+extern struct dentry *lookup_noperm_positive_unlocked(struct qstr *, struct dentry *);
+struct dentry *lookup_one(struct mnt_idmap *, struct qstr *, struct dentry *);
struct dentry *lookup_one_unlocked(struct mnt_idmap *idmap,
- const char *name, struct dentry *base,
- int len);
+ struct qstr *name, struct dentry *base);
struct dentry *lookup_one_positive_unlocked(struct mnt_idmap *idmap,
- const char *name,
- struct dentry *base, int len);
+ struct qstr *name,
+ struct dentry *base);
extern int follow_down_one(struct path *);
extern int follow_down(struct path *path, unsigned int flags);
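Converted callers pass a struct qstr in place of the old (name, len) pair; a minimal sketch, assuming the QSTR() constructor from <linux/dcache.h>:

static inline struct dentry *example_child(struct dentry *base, const char *name)
{
	struct qstr this = QSTR(name);	/* qstr from a NUL-terminated string */

	/* base->d_inode must be held locked, as with the old lookup_one_len() */
	return lookup_noperm(&this, base);
}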
diff --git a/include/linux/net.h b/include/linux/net.h
index 0ff950eecc6b..ec09620f40f7 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -36,14 +36,13 @@ struct net;
* in sock->flags, but moved into sk->sk_wq->flags to be RCU protected.
* Eventually all flags will be in sk->sk_wq->flags.
*/
-#define SOCKWQ_ASYNC_NOSPACE 0
-#define SOCKWQ_ASYNC_WAITDATA 1
-#define SOCK_NOSPACE 2
-#define SOCK_PASSCRED 3
-#define SOCK_PASSSEC 4
-#define SOCK_SUPPORT_ZC 5
-#define SOCK_CUSTOM_SOCKOPT 6
-#define SOCK_PASSPIDFD 7
+enum socket_flags {
+ SOCKWQ_ASYNC_NOSPACE,
+ SOCKWQ_ASYNC_WAITDATA,
+ SOCK_NOSPACE,
+ SOCK_SUPPORT_ZC,
+ SOCK_CUSTOM_SOCKOPT,
+};
#ifndef ARCH_HAS_SOCKET_TYPES
/**
@@ -70,6 +69,7 @@ enum sock_type {
SOCK_DCCP = 6,
SOCK_PACKET = 10,
};
+#endif /* ARCH_HAS_SOCKET_TYPES */
#define SOCK_MAX (SOCK_PACKET + 1)
/* Mask which covers at least up to SOCK_MAX-1. The
@@ -81,8 +81,7 @@ enum sock_type {
#ifndef SOCK_NONBLOCK
#define SOCK_NONBLOCK O_NONBLOCK
#endif
-
-#endif /* ARCH_HAS_SOCKET_TYPES */
+#define SOCK_COREDUMP O_NOCTTY
/**
* enum sock_shutdown_cmd - Shutdown types
diff --git a/include/linux/net/intel/iidc.h b/include/linux/net/intel/iidc.h
deleted file mode 100644
index 13274c3def66..000000000000
--- a/include/linux/net/intel/iidc.h
+++ /dev/null
@@ -1,109 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2021, Intel Corporation. */
-
-#ifndef _IIDC_H_
-#define _IIDC_H_
-
-#include <linux/auxiliary_bus.h>
-#include <linux/dcbnl.h>
-#include <linux/device.h>
-#include <linux/if_ether.h>
-#include <linux/kernel.h>
-#include <linux/netdevice.h>
-
-enum iidc_event_type {
- IIDC_EVENT_BEFORE_MTU_CHANGE,
- IIDC_EVENT_AFTER_MTU_CHANGE,
- IIDC_EVENT_BEFORE_TC_CHANGE,
- IIDC_EVENT_AFTER_TC_CHANGE,
- IIDC_EVENT_CRIT_ERR,
- IIDC_EVENT_NBITS /* must be last */
-};
-
-enum iidc_reset_type {
- IIDC_PFR,
- IIDC_CORER,
- IIDC_GLOBR,
-};
-
-enum iidc_rdma_protocol {
- IIDC_RDMA_PROTOCOL_IWARP = BIT(0),
- IIDC_RDMA_PROTOCOL_ROCEV2 = BIT(1),
-};
-
-#define IIDC_MAX_USER_PRIORITY 8
-#define IIDC_MAX_DSCP_MAPPING 64
-#define IIDC_DSCP_PFC_MODE 0x1
-
-/* Struct to hold per RDMA Qset info */
-struct iidc_rdma_qset_params {
- /* Qset TEID returned to the RDMA driver in
- * ice_add_rdma_qset and used by RDMA driver
- * for calls to ice_del_rdma_qset
- */
- u32 teid; /* Qset TEID */
- u16 qs_handle; /* RDMA driver provides this */
- u16 vport_id; /* VSI index */
- u8 tc; /* TC branch the Qset should belong to */
-};
-
-struct iidc_qos_info {
- u64 tc_ctx;
- u8 rel_bw;
- u8 prio_type;
- u8 egress_virt_up;
- u8 ingress_virt_up;
-};
-
-/* Struct to pass QoS info */
-struct iidc_qos_params {
- struct iidc_qos_info tc_info[IEEE_8021QAZ_MAX_TCS];
- u8 up2tc[IIDC_MAX_USER_PRIORITY];
- u8 vport_relative_bw;
- u8 vport_priority_type;
- u8 num_tc;
- u8 pfc_mode;
- u8 dscp_map[IIDC_MAX_DSCP_MAPPING];
-};
-
-struct iidc_event {
- DECLARE_BITMAP(type, IIDC_EVENT_NBITS);
- u32 reg;
-};
-
-struct ice_pf;
-
-int ice_add_rdma_qset(struct ice_pf *pf, struct iidc_rdma_qset_params *qset);
-int ice_del_rdma_qset(struct ice_pf *pf, struct iidc_rdma_qset_params *qset);
-int ice_rdma_request_reset(struct ice_pf *pf, enum iidc_reset_type reset_type);
-int ice_rdma_update_vsi_filter(struct ice_pf *pf, u16 vsi_id, bool enable);
-void ice_get_qos_params(struct ice_pf *pf, struct iidc_qos_params *qos);
-int ice_alloc_rdma_qvector(struct ice_pf *pf, struct msix_entry *entry);
-void ice_free_rdma_qvector(struct ice_pf *pf, struct msix_entry *entry);
-
-/* Structure representing auxiliary driver tailored information about the core
- * PCI dev, each auxiliary driver using the IIDC interface will have an
- * instance of this struct dedicated to it.
- */
-
-struct iidc_auxiliary_dev {
- struct auxiliary_device adev;
- struct ice_pf *pf;
-};
-
-/* structure representing the auxiliary driver. This struct is to be
- * allocated and populated by the auxiliary driver's owner. The core PCI
- * driver will access these ops by performing a container_of on the
- * auxiliary_device->dev.driver.
- */
-struct iidc_auxiliary_drv {
- struct auxiliary_driver adrv;
- /* This event_handler is meant to be a blocking call. For instance,
- * when a BEFORE_MTU_CHANGE event comes in, the event_handler will not
- * return until the auxiliary driver is ready for the MTU change to
- * happen.
- */
- void (*event_handler)(struct ice_pf *pf, struct iidc_event *event);
-};
-
-#endif /* _IIDC_H_*/
diff --git a/include/linux/net/intel/iidc_rdma.h b/include/linux/net/intel/iidc_rdma.h
new file mode 100644
index 000000000000..8baad1082042
--- /dev/null
+++ b/include/linux/net/intel/iidc_rdma.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2021-2025, Intel Corporation. */
+
+#ifndef _IIDC_RDMA_H_
+#define _IIDC_RDMA_H_
+
+#include <linux/auxiliary_bus.h>
+#include <linux/device.h>
+#include <linux/if_ether.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <net/dscp.h>
+
+enum iidc_rdma_event_type {
+ IIDC_RDMA_EVENT_BEFORE_MTU_CHANGE,
+ IIDC_RDMA_EVENT_AFTER_MTU_CHANGE,
+ IIDC_RDMA_EVENT_BEFORE_TC_CHANGE,
+ IIDC_RDMA_EVENT_AFTER_TC_CHANGE,
+ IIDC_RDMA_EVENT_WARN_RESET,
+ IIDC_RDMA_EVENT_CRIT_ERR,
+ IIDC_RDMA_EVENT_NBITS /* must be last */
+};
+
+struct iidc_rdma_event {
+ DECLARE_BITMAP(type, IIDC_RDMA_EVENT_NBITS);
+ u32 reg;
+};
+
+enum iidc_rdma_reset_type {
+ IIDC_FUNC_RESET,
+ IIDC_DEV_RESET,
+};
+
+enum iidc_rdma_protocol {
+ IIDC_RDMA_PROTOCOL_IWARP = BIT(0),
+ IIDC_RDMA_PROTOCOL_ROCEV2 = BIT(1),
+};
+
+/* Structure to be populated by core LAN PCI driver */
+struct iidc_rdma_core_dev_info {
+ struct pci_dev *pdev; /* PCI device corresponding to the main function */
+ struct auxiliary_device *adev;
+ /* Current active RDMA protocol */
+ enum iidc_rdma_protocol rdma_protocol;
+ void *iidc_priv; /* elements unique to each driver */
+};
+
+/* Structure representing auxiliary driver tailored information about the core
+ * PCI dev; each auxiliary driver using the IIDC interface will have an
+ * instance of this struct dedicated to it.
+ */
+struct iidc_rdma_core_auxiliary_dev {
+ struct auxiliary_device adev;
+ struct iidc_rdma_core_dev_info *cdev_info;
+};
+
+/* structure representing the auxiliary driver. This struct is to be
+ * allocated and populated by the auxiliary driver's owner. The core PCI
+ * driver will access these ops by performing a container_of on the
+ * auxiliary_device->dev.driver.
+ */
+struct iidc_rdma_core_auxiliary_drv {
+ struct auxiliary_driver adrv;
+ void (*event_handler)(struct iidc_rdma_core_dev_info *cdev,
+ struct iidc_rdma_event *event);
+};
+
+#endif /* _IIDC_RDMA_H_*/
diff --git a/include/linux/net/intel/iidc_rdma_ice.h b/include/linux/net/intel/iidc_rdma_ice.h
new file mode 100644
index 000000000000..b40eed0e13fe
--- /dev/null
+++ b/include/linux/net/intel/iidc_rdma_ice.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2021-2025, Intel Corporation. */
+
+#ifndef _IIDC_RDMA_ICE_H_
+#define _IIDC_RDMA_ICE_H_
+
+#include <linux/dcbnl.h>
+
+#define IIDC_MAX_USER_PRIORITY 8
+#define IIDC_DSCP_PFC_MODE 0x1
+
+/**
+ * struct iidc_rdma_qset_params - Struct to hold per RDMA Qset info
+ * @teid: TEID of the Qset node
+ * @qs_handle: SW index of the Qset, RDMA provides this
+ * @vport_id: VSI index
+ * @tc: Traffic Class branch the QSet should belong to
+ */
+struct iidc_rdma_qset_params {
+ /* Qset TEID returned to the RDMA driver in
+ * ice_add_rdma_qset and used by RDMA driver
+ * for calls to ice_del_rdma_qset
+ */
+ u32 teid;
+ u16 qs_handle;
+ u16 vport_id;
+ u8 tc;
+};
+
+struct iidc_rdma_qos_info {
+ u64 tc_ctx;
+ u8 rel_bw;
+ u8 prio_type;
+ u8 egress_virt_up;
+ u8 ingress_virt_up;
+};
+
+/* Struct to pass QoS info */
+struct iidc_rdma_qos_params {
+ struct iidc_rdma_qos_info tc_info[IEEE_8021QAZ_MAX_TCS];
+ u8 up2tc[IIDC_MAX_USER_PRIORITY];
+ u8 vport_relative_bw;
+ u8 vport_priority_type;
+ u8 num_tc;
+ u8 pfc_mode;
+ u8 dscp_map[DSCP_MAX];
+};
+
+struct iidc_rdma_priv_dev_info {
+ u8 pf_id;
+ u16 vport_id;
+ struct net_device *netdev;
+ struct iidc_rdma_qos_params qos_info;
+ u8 __iomem *hw_addr;
+};
+
+int ice_add_rdma_qset(struct iidc_rdma_core_dev_info *cdev,
+ struct iidc_rdma_qset_params *qset);
+int ice_del_rdma_qset(struct iidc_rdma_core_dev_info *cdev,
+ struct iidc_rdma_qset_params *qset);
+int ice_rdma_request_reset(struct iidc_rdma_core_dev_info *cdev,
+ enum iidc_rdma_reset_type reset_type);
+int ice_rdma_update_vsi_filter(struct iidc_rdma_core_dev_info *cdev, u16 vsi_id,
+ bool enable);
+int ice_alloc_rdma_qvector(struct iidc_rdma_core_dev_info *cdev,
+ struct msix_entry *entry);
+void ice_free_rdma_qvector(struct iidc_rdma_core_dev_info *cdev,
+ struct msix_entry *entry);
+
+#endif /* _IIDC_RDMA_ICE_H_*/
diff --git a/include/linux/net_tstamp.h b/include/linux/net_tstamp.h
index ff0758e88ea1..f4936d9c2b3c 100644
--- a/include/linux/net_tstamp.h
+++ b/include/linux/net_tstamp.h
@@ -4,6 +4,7 @@
#define _LINUX_NET_TIMESTAMPING_H_
#include <uapi/linux/net_tstamp.h>
+#include <uapi/linux/ethtool_netlink_generated.h>
#define SOF_TIMESTAMPING_SOFTWARE_MASK (SOF_TIMESTAMPING_RX_SOFTWARE | \
SOF_TIMESTAMPING_TX_SOFTWARE | \
@@ -13,12 +14,6 @@
SOF_TIMESTAMPING_TX_HARDWARE | \
SOF_TIMESTAMPING_RAW_HARDWARE)
-enum hwtstamp_source {
- HWTSTAMP_SOURCE_UNSPEC,
- HWTSTAMP_SOURCE_NETDEV,
- HWTSTAMP_SOURCE_PHYLIB,
-};
-
/**
* struct hwtstamp_provider_desc - hwtstamp provider description
*
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 7ea022750e4e..adb14db25798 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -688,6 +688,7 @@ struct netdev_queue {
/* Subordinate device that the queue has been assigned to */
struct net_device *sb_dev;
#ifdef CONFIG_XDP_SOCKETS
+ /* "ops protected", see comment about net_device::lock */
struct xsk_buff_pool *pool;
#endif
@@ -1012,9 +1013,13 @@ struct netdev_bpf {
#ifdef CONFIG_XFRM_OFFLOAD
struct xfrmdev_ops {
- int (*xdo_dev_state_add) (struct xfrm_state *x, struct netlink_ext_ack *extack);
- void (*xdo_dev_state_delete) (struct xfrm_state *x);
- void (*xdo_dev_state_free) (struct xfrm_state *x);
+ int (*xdo_dev_state_add)(struct net_device *dev,
+ struct xfrm_state *x,
+ struct netlink_ext_ack *extack);
+ void (*xdo_dev_state_delete)(struct net_device *dev,
+ struct xfrm_state *x);
+ void (*xdo_dev_state_free)(struct net_device *dev,
+ struct xfrm_state *x);
bool (*xdo_dev_offload_ok) (struct sk_buff *skb,
struct xfrm_state *x);
void (*xdo_dev_state_advance_esn) (struct xfrm_state *x);
@@ -1771,6 +1776,7 @@ enum netdev_reg_state {
* @lltx: device supports lockless Tx. Deprecated for real HW
* drivers. Mainly used by logical interfaces, such as
* bonding and tunnels
+ * @netmem_tx: device supports netmem_tx.
*
* @name: This is the first field of the "visible" part of this structure
* (i.e. as seen by users in the "Space.c" file). It is the name
@@ -1945,13 +1951,11 @@ enum netdev_reg_state {
*
* @reg_state: Register/unregister state machine
* @dismantle: Device is going to be freed
- * @rtnl_link_state: This enum represents the phases of creating
- * a new link
- *
* @needs_free_netdev: Should unregister perform free_netdev?
* @priv_destructor: Called from unregister
* @npinfo: XXX: need comments on this one
* @nd_net: Network namespace this network device is inside
+ * protected by @lock
*
* @ml_priv: Mid-layer private
* @ml_priv_type: Mid-layer private type
@@ -2088,6 +2092,7 @@ struct net_device {
struct_group(priv_flags_fast,
unsigned long priv_flags:32;
unsigned long lltx:1;
+ unsigned long netmem_tx:1;
);
const struct net_device_ops *netdev_ops;
const struct header_ops *header_ops;
@@ -2359,10 +2364,10 @@ struct net_device {
bool dismantle;
- enum {
- RTNL_LINK_INITIALIZED,
- RTNL_LINK_INITIALIZING,
- } rtnl_link_state:16;
+ /** @moving_ns: device is changing netns, protected by @lock */
+ bool moving_ns;
+ /** @rtnl_link_initializing: Device being created, suppress events */
+ bool rtnl_link_initializing;
bool needs_free_netdev;
void (*priv_destructor)(struct net_device *dev);
@@ -2521,7 +2526,7 @@ struct net_device {
* @net_shaper_hierarchy, @reg_state, @threaded
*
* Double protects:
- * @up
+ * @up, @moving_ns, @nd_net, @xdp_features
*
* Double ops protects:
* @real_num_rx_queues, @real_num_tx_queues
@@ -3267,7 +3272,7 @@ int call_netdevice_notifiers_info(unsigned long val,
#define for_each_netdev_continue_rcu(net, d) \
list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_in_bond_rcu(bond, slave) \
- for_each_netdev_rcu(&init_net, slave) \
+ for_each_netdev_rcu(dev_net_rcu(bond), slave) \
if (netdev_master_upper_dev_get_rcu(slave) == (bond))
#define net_device_entry(lh) list_entry(lh, struct net_device, dev_list)
@@ -3502,7 +3507,12 @@ struct softnet_data {
};
DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
-DECLARE_PER_CPU(struct page_pool *, system_page_pool);
+
+struct page_pool_bh {
+ struct page_pool *pool;
+ local_lock_t bh_lock;
+};
+DECLARE_PER_CPU(struct page_pool_bh, system_page_pool);
#ifndef CONFIG_PREEMPT_RT
static inline int dev_recursion_level(void)
@@ -4202,11 +4212,11 @@ int netif_set_mtu(struct net_device *dev, int new_mtu);
int dev_set_mtu(struct net_device *, int);
int dev_pre_changeaddr_notify(struct net_device *dev, const char *addr,
struct netlink_ext_ack *extack);
-int netif_set_mac_address(struct net_device *dev, struct sockaddr *sa,
+int netif_set_mac_address(struct net_device *dev, struct sockaddr_storage *ss,
struct netlink_ext_ack *extack);
-int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa,
+int dev_set_mac_address(struct net_device *dev, struct sockaddr_storage *ss,
struct netlink_ext_ack *extack);
-int dev_set_mac_address_user(struct net_device *dev, struct sockaddr *sa,
+int dev_set_mac_address_user(struct net_device *dev, struct sockaddr_storage *ss,
struct netlink_ext_ack *extack);
int dev_get_mac_address(struct sockaddr *sa, struct net *net, char *dev_name);
int dev_get_port_parent_id(struct net_device *dev,
@@ -4689,9 +4699,10 @@ static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
/*
* txq->trans_start can be read locklessly from dev_watchdog()
*/
-static inline void txq_trans_update(struct netdev_queue *txq)
+static inline void txq_trans_update(const struct net_device *dev,
+ struct netdev_queue *txq)
{
- if (txq->xmit_lock_owner != -1)
+ if (!dev->lltx)
WRITE_ONCE(txq->trans_start, jiffies);
}
@@ -5212,7 +5223,7 @@ static inline netdev_tx_t netdev_start_xmit(struct sk_buff *skb, struct net_devi
rc = __netdev_start_xmit(ops, skb, dev, more);
if (rc == NETDEV_TX_OK)
- txq_trans_update(txq);
+ txq_trans_update(dev, txq);
return rc;
}
diff --git a/include/linux/netdevice_xmit.h b/include/linux/netdevice_xmit.h
index 38325e070296..813a19122ebb 100644
--- a/include/linux/netdevice_xmit.h
+++ b/include/linux/netdevice_xmit.h
@@ -8,6 +8,12 @@ struct netdev_xmit {
#ifdef CONFIG_NET_EGRESS
u8 skip_txqueue;
#endif
+#if IS_ENABLED(CONFIG_NET_ACT_MIRRED)
+ u8 sched_mirred_nest;
+#endif
+#if IS_ENABLED(CONFIG_NF_DUP_NETDEV)
+ u8 nf_dup_skb_recursion;
+#endif
};
#endif
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index 2b8aac2c70ad..5f896fcc074d 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -95,6 +95,9 @@ enum nf_hook_ops_type {
};
struct nf_hook_ops {
+ struct list_head list;
+ struct rcu_head rcu;
+
/* User fills in from here down. */
nf_hookfn *hook;
struct net_device *dev;
@@ -470,6 +473,7 @@ struct nf_ct_hook {
void (*attach)(struct sk_buff *nskb, const struct sk_buff *skb);
void (*set_closing)(struct nf_conntrack *nfct);
int (*confirm)(struct sk_buff *skb);
+ u32 (*get_id)(const struct nf_conntrack *nfct);
};
extern const struct nf_ct_hook __rcu *nf_ct_hook;
@@ -498,17 +502,6 @@ extern const struct nf_defrag_hook __rcu *nf_defrag_v4_hook;
extern const struct nf_defrag_hook __rcu *nf_defrag_v6_hook;
/*
- * nf_skb_duplicated - TEE target has sent a packet
- *
- * When a xtables target sends a packet, the OUTPUT and POSTROUTING
- * hooks are traversed again, i.e. nft and xtables are invoked recursively.
- *
- * This is used by xtables TEE target to prevent the duplicated skb from
- * being duplicated again.
- */
-DECLARE_PER_CPU(bool, nf_skb_duplicated);
-
-/*
* Contains bitmask of ctnetlink event subscribers, if any.
* Can't be pernet due to NETLINK_LISTEN_ALL_NSID setsockopt flag.
*/
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index c3ae84a77e16..882e9c1b6c1d 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -63,7 +63,7 @@ netlink_kernel_create(struct net *net, int unit, struct netlink_kernel_cfg *cfg)
}
/* this can be increased when necessary - don't expose to userland */
-#define NETLINK_MAX_COOKIE_LEN 20
+#define NETLINK_MAX_COOKIE_LEN 8
#define NETLINK_MAX_FMTMSG_LEN 80
/**
@@ -212,6 +212,7 @@ static inline void nl_set_extack_cookie_u64(struct netlink_ext_ack *extack,
{
if (!extack)
return;
+ BUILD_BUG_ON(sizeof(extack->cookie) < sizeof(cookie));
memcpy(extack->cookie, &cookie, sizeof(cookie));
extack->cookie_len = sizeof(cookie);
}
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index 71319637a84e..ee03f3cef30c 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -213,6 +213,15 @@ struct nfs_server {
char *fscache_uniq; /* Uniquifier (or NULL) */
#endif
+ /* The following #defines numerically match the NFSv4 equivalents */
+#define NFS_FH_NOEXPIRE_WITH_OPEN (0x1)
+#define NFS_FH_VOLATILE_ANY (0x2)
+#define NFS_FH_VOL_MIGRATION (0x4)
+#define NFS_FH_VOL_RENAME (0x8)
+#define NFS_FH_RENAME_UNSAFE (NFS_FH_VOLATILE_ANY | NFS_FH_VOL_RENAME)
+ u32 fh_expire_type; /* V4 bitmask representing file
+ handle volatility type for
+ this filesystem */
u32 pnfs_blksize; /* layout_blksize attr */
#if IS_ENABLED(CONFIG_NFS_V4)
u32 attr_bitmask[3];/* V4 bitmask representing the set
@@ -236,9 +245,6 @@ struct nfs_server {
u32 acl_bitmask; /* V4 bitmask representing the ACEs
that are supported on this
filesystem */
- u32 fh_expire_type; /* V4 bitmask representing file
- handle volatility type for
- this filesystem */
struct pnfs_layoutdriver_type *pnfs_curr_ld; /* Active layout driver */
struct rpc_wait_queue roc_rpcwaitq;
void *pnfs_ld_data; /* per mount point data */
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index 2479ed10f53e..51308f65b72f 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -303,6 +303,7 @@ enum nvme_ctrl_attr {
NVME_CTRL_ATTR_TBKAS = (1 << 6),
NVME_CTRL_ATTR_ELBAS = (1 << 15),
NVME_CTRL_ATTR_RHII = (1 << 18),
+ NVME_CTRL_ATTR_FDPS = (1 << 19),
};
struct nvme_id_ctrl {
@@ -689,6 +690,44 @@ struct nvme_rotational_media_log {
__u8 rsvd24[488];
};
+struct nvme_fdp_config {
+ __u8 flags;
+#define FDPCFG_FDPE (1U << 0)
+ __u8 fdpcidx;
+ __le16 reserved;
+};
+
+struct nvme_fdp_ruh_desc {
+ __u8 ruht;
+ __u8 reserved[3];
+};
+
+struct nvme_fdp_config_desc {
+ __le16 dsze;
+ __u8 fdpa;
+ __u8 vss;
+ __le32 nrg;
+ __le16 nruh;
+ __le16 maxpids;
+ __le32 nns;
+ __le64 runs;
+ __le32 erutl;
+ __u8 rsvd28[36];
+ struct nvme_fdp_ruh_desc ruhs[];
+};
+
+struct nvme_fdp_config_log {
+ __le16 numfdpc;
+ __u8 ver;
+ __u8 rsvd3;
+ __le32 sze;
+ __u8 rsvd8[8];
+ /*
+ * This is followed by a variable number of nvme_fdp_config_desc
+ * structures, but sparse doesn't like nested variable sized arrays.
+ */
+};
+
struct nvme_smart_log {
__u8 critical_warning;
__u8 temperature[2];
@@ -915,6 +954,7 @@ enum nvme_opcode {
nvme_cmd_resv_register = 0x0d,
nvme_cmd_resv_report = 0x0e,
nvme_cmd_resv_acquire = 0x11,
+ nvme_cmd_io_mgmt_recv = 0x12,
nvme_cmd_resv_release = 0x15,
nvme_cmd_zone_mgmt_send = 0x79,
nvme_cmd_zone_mgmt_recv = 0x7a,
@@ -936,6 +976,7 @@ enum nvme_opcode {
nvme_opcode_name(nvme_cmd_resv_register), \
nvme_opcode_name(nvme_cmd_resv_report), \
nvme_opcode_name(nvme_cmd_resv_acquire), \
+ nvme_opcode_name(nvme_cmd_io_mgmt_recv), \
nvme_opcode_name(nvme_cmd_resv_release), \
nvme_opcode_name(nvme_cmd_zone_mgmt_send), \
nvme_opcode_name(nvme_cmd_zone_mgmt_recv), \
@@ -1087,6 +1128,7 @@ enum {
NVME_RW_PRINFO_PRCHK_GUARD = 1 << 12,
NVME_RW_PRINFO_PRACT = 1 << 13,
NVME_RW_DTYPE_STREAMS = 1 << 4,
+ NVME_RW_DTYPE_DPLCMT = 2 << 4,
NVME_WZ_DEAC = 1 << 9,
};
@@ -1174,6 +1216,38 @@ struct nvme_zone_mgmt_recv_cmd {
__le32 cdw14[2];
};
+struct nvme_io_mgmt_recv_cmd {
+ __u8 opcode;
+ __u8 flags;
+ __u16 command_id;
+ __le32 nsid;
+ __le64 rsvd2[2];
+ union nvme_data_ptr dptr;
+ __u8 mo;
+ __u8 rsvd11;
+ __u16 mos;
+ __le32 numd;
+ __le32 cdw12[4];
+};
+
+enum {
+ NVME_IO_MGMT_RECV_MO_RUHS = 1,
+};
+
+struct nvme_fdp_ruh_status_desc {
+ __le16 pid;
+ __le16 ruhid;
+ __le32 earutr;
+ __le64 ruamw;
+ __u8 reserved[16];
+};
+
+struct nvme_fdp_ruh_status {
+ __u8 rsvd0[14];
+ __le16 nruhsd;
+ struct nvme_fdp_ruh_status_desc ruhsd[];
+};
+
enum {
NVME_ZRA_ZONE_REPORT = 0,
NVME_ZRASF_ZONE_REPORT_ALL = 0,
@@ -1309,6 +1383,7 @@ enum {
NVME_FEAT_PLM_WINDOW = 0x14,
NVME_FEAT_HOST_BEHAVIOR = 0x16,
NVME_FEAT_SANITIZE = 0x17,
+ NVME_FEAT_FDP = 0x1d,
NVME_FEAT_SW_PROGRESS = 0x80,
NVME_FEAT_HOST_ID = 0x81,
NVME_FEAT_RESV_MASK = 0x82,
@@ -1329,6 +1404,7 @@ enum {
NVME_LOG_ANA = 0x0c,
NVME_LOG_FEATURES = 0x12,
NVME_LOG_RMI = 0x16,
+ NVME_LOG_FDP_CONFIGS = 0x20,
NVME_LOG_DISC = 0x70,
NVME_LOG_RESERVATION = 0x80,
NVME_FWACT_REPL = (0 << 3),
@@ -1923,6 +1999,7 @@ struct nvme_command {
struct nvmf_auth_receive_command auth_receive;
struct nvme_dbbuf dbbuf;
struct nvme_directive_cmd directive;
+ struct nvme_io_mgmt_recv_cmd imr;
};
};
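An illustrative decode of the reclaim unit handle status payload returned by an I/O management receive (MO = RUHS) command; the helper is hypothetical, and the endianness handling follows the rest of this header:

static inline u16 example_first_placement_id(struct nvme_fdp_ruh_status *ruhs)
{
	u16 nr = le16_to_cpu(ruhs->nruhsd);

	/* each descriptor pairs a placement ID with a reclaim unit handle */
	return nr ? le16_to_cpu(ruhs->ruhsd[0].pid) : 0;
}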
diff --git a/include/linux/of_reserved_mem.h b/include/linux/of_reserved_mem.h
index e338282da652..f573423359f4 100644
--- a/include/linux/of_reserved_mem.h
+++ b/include/linux/of_reserved_mem.h
@@ -7,6 +7,7 @@
struct of_phandle_args;
struct reserved_mem_ops;
+struct resource;
struct reserved_mem {
const char *name;
@@ -39,6 +40,12 @@ int of_reserved_mem_device_init_by_name(struct device *dev,
void of_reserved_mem_device_release(struct device *dev);
struct reserved_mem *of_reserved_mem_lookup(struct device_node *np);
+int of_reserved_mem_region_to_resource(const struct device_node *np,
+ unsigned int idx, struct resource *res);
+int of_reserved_mem_region_to_resource_byname(const struct device_node *np,
+ const char *name, struct resource *res);
+int of_reserved_mem_region_count(const struct device_node *np);
+
#else
#define RESERVEDMEM_OF_DECLARE(name, compat, init) \
@@ -63,6 +70,25 @@ static inline struct reserved_mem *of_reserved_mem_lookup(struct device_node *np
{
return NULL;
}
+
+static inline int of_reserved_mem_region_to_resource(const struct device_node *np,
+ unsigned int idx,
+ struct resource *res)
+{
+ return -ENOSYS;
+}
+
+static inline int of_reserved_mem_region_to_resource_byname(const struct device_node *np,
+ const char *name,
+ struct resource *res)
+{
+ return -ENOSYS;
+}
+
+static inline int of_reserved_mem_region_count(const struct device_node *np)
+{
+ return 0;
+}
#endif
/**
diff --git a/include/linux/overflow.h b/include/linux/overflow.h
index 0c7e3dcfe867..7b7be27ca113 100644
--- a/include/linux/overflow.h
+++ b/include/linux/overflow.h
@@ -396,7 +396,7 @@ static inline size_t __must_check size_sub(size_t minuend, size_t subtrahend)
* @name: Name for a variable to define.
* @member: Name of the array member.
* @count: Number of elements in the array; must be compile-time const.
- * @initializer: initializer expression (could be empty for no init).
+ * @initializer: Initializer expression (e.g., pass `= { }` at minimum).
*/
#define _DEFINE_FLEX(type, name, member, count, initializer...) \
_Static_assert(__builtin_constant_p(count), \
@@ -404,7 +404,7 @@ static inline size_t __must_check size_sub(size_t minuend, size_t subtrahend)
union { \
u8 bytes[struct_size_t(type, member, count)]; \
type obj; \
- } name##_u initializer; \
+ } name##_u = { .obj initializer }; \
type *name = (type *)&name##_u
/**
@@ -419,6 +419,9 @@ static inline size_t __must_check size_sub(size_t minuend, size_t subtrahend)
* Define a zeroed, on-stack, instance of @type structure with a trailing
* flexible array member.
* Use __struct_size(@name) to get compile-time size of it afterwards.
+ * Use __member_size(@name->member) to get compile-time size of @name members.
+ * Use STACK_FLEX_ARRAY_SIZE(@name, @member) to get compile-time number of
+ * elements in array @member.
*/
#define DEFINE_RAW_FLEX(type, name, member, count) \
_DEFINE_FLEX(type, name, member, count, = {})
@@ -436,8 +439,22 @@ static inline size_t __must_check size_sub(size_t minuend, size_t subtrahend)
* Define a zeroed, on-stack, instance of @TYPE structure with a trailing
* flexible array member.
* Use __struct_size(@NAME) to get compile-time size of it afterwards.
+ * Use __member_size(@NAME->member) to get compile-time size of @NAME members.
+ * Use STACK_FLEX_ARRAY_SIZE(@name, @member) to get compile-time number of
+ * elements in array @member.
*/
#define DEFINE_FLEX(TYPE, NAME, MEMBER, COUNTER, COUNT) \
- _DEFINE_FLEX(TYPE, NAME, MEMBER, COUNT, = { .obj.COUNTER = COUNT, })
+ _DEFINE_FLEX(TYPE, NAME, MEMBER, COUNT, = { .COUNTER = COUNT, })
+
+/**
+ * STACK_FLEX_ARRAY_SIZE() - helper macro for DEFINE_FLEX() family.
+ * Returns the number of elements in @array.
+ *
+ * @name: Name for a variable defined in DEFINE_RAW_FLEX()/DEFINE_FLEX().
+ * @array: Name of the array member.
+ */
+#define STACK_FLEX_ARRAY_SIZE(name, array) \
+ (__member_size((name)->array) / sizeof(*(name)->array) + \
+ __must_be_array((name)->array))
#endif /* __LINUX_OVERFLOW_H */
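How the pieces fit together, sketched with a hypothetical flexible-array struct: DEFINE_FLEX() initializes the counter member, and the new helper bounds iteration at compile time:

struct example_flex {
	u8 count;
	u8 vals[] __counted_by(count);
};

static inline u8 example_sum(void)
{
	DEFINE_FLEX(struct example_flex, p, vals, count, 4);	/* p->count == 4 */
	u8 sum = 0;

	for (unsigned int i = 0; i < STACK_FLEX_ARRAY_SIZE(p, vals); i++)
		sum += p->vals[i];	/* storage is zeroed apart from count */

	return sum;
}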
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index e6a21b62dcce..3b814ce08331 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -615,6 +615,13 @@ FOLIO_FLAG(dropbehind, FOLIO_HEAD_PAGE)
PAGEFLAG_FALSE(HighMem, highmem)
#endif
+/* Does kmap_local_folio() only allow access to one page of the folio? */
+#ifdef CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP
+#define folio_test_partial_kmap(f) true
+#else
+#define folio_test_partial_kmap(f) folio_test_highmem(f)
+#endif
+
#ifdef CONFIG_SWAP
static __always_inline bool folio_test_swapcache(const struct folio *folio)
{
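The predicate answers whether kmap_local_folio() can map beyond a single page; copies therefore chunk per page when it is true. An illustrative loop in the style of memcpy_from_folio() (bounds checking elided):

static inline void example_copy_from_folio(char *to, struct folio *folio,
					   size_t offset, size_t len)
{
	do {
		const char *from = kmap_local_folio(folio, offset);
		size_t chunk = len;

		if (folio_test_partial_kmap(folio) &&
		    chunk > PAGE_SIZE - offset_in_page(offset))
			chunk = PAGE_SIZE - offset_in_page(offset);
		memcpy(to, from, chunk);
		kunmap_local(from);

		to += chunk;
		offset += chunk;
		len -= chunk;
	} while (len > 0);
}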
diff --git a/include/linux/page_table_check.h b/include/linux/page_table_check.h
index 6722941c7cb8..289620d4aad3 100644
--- a/include/linux/page_table_check.h
+++ b/include/linux/page_table_check.h
@@ -19,8 +19,10 @@ void __page_table_check_pmd_clear(struct mm_struct *mm, pmd_t pmd);
void __page_table_check_pud_clear(struct mm_struct *mm, pud_t pud);
void __page_table_check_ptes_set(struct mm_struct *mm, pte_t *ptep, pte_t pte,
unsigned int nr);
-void __page_table_check_pmd_set(struct mm_struct *mm, pmd_t *pmdp, pmd_t pmd);
-void __page_table_check_pud_set(struct mm_struct *mm, pud_t *pudp, pud_t pud);
+void __page_table_check_pmds_set(struct mm_struct *mm, pmd_t *pmdp, pmd_t pmd,
+ unsigned int nr);
+void __page_table_check_puds_set(struct mm_struct *mm, pud_t *pudp, pud_t pud,
+ unsigned int nr);
void __page_table_check_pte_clear_range(struct mm_struct *mm,
unsigned long addr,
pmd_t pmd);
@@ -74,22 +76,22 @@ static inline void page_table_check_ptes_set(struct mm_struct *mm,
__page_table_check_ptes_set(mm, ptep, pte, nr);
}
-static inline void page_table_check_pmd_set(struct mm_struct *mm, pmd_t *pmdp,
- pmd_t pmd)
+static inline void page_table_check_pmds_set(struct mm_struct *mm,
+ pmd_t *pmdp, pmd_t pmd, unsigned int nr)
{
if (static_branch_likely(&page_table_check_disabled))
return;
- __page_table_check_pmd_set(mm, pmdp, pmd);
+ __page_table_check_pmds_set(mm, pmdp, pmd, nr);
}
-static inline void page_table_check_pud_set(struct mm_struct *mm, pud_t *pudp,
- pud_t pud)
+static inline void page_table_check_puds_set(struct mm_struct *mm,
+ pud_t *pudp, pud_t pud, unsigned int nr)
{
if (static_branch_likely(&page_table_check_disabled))
return;
- __page_table_check_pud_set(mm, pudp, pud);
+ __page_table_check_puds_set(mm, pudp, pud, nr);
}
static inline void page_table_check_pte_clear_range(struct mm_struct *mm,
@@ -129,13 +131,13 @@ static inline void page_table_check_ptes_set(struct mm_struct *mm,
{
}
-static inline void page_table_check_pmd_set(struct mm_struct *mm, pmd_t *pmdp,
- pmd_t pmd)
+static inline void page_table_check_pmds_set(struct mm_struct *mm,
+ pmd_t *pmdp, pmd_t pmd, unsigned int nr)
{
}
-static inline void page_table_check_pud_set(struct mm_struct *mm, pud_t *pudp,
- pud_t pud)
+static inline void page_table_check_puds_set(struct mm_struct *mm,
+ pud_t *pudp, pud_t pud, unsigned int nr)
{
}
@@ -146,4 +148,8 @@ static inline void page_table_check_pte_clear_range(struct mm_struct *mm,
}
#endif /* CONFIG_PAGE_TABLE_CHECK */
+
+#define page_table_check_pmd_set(mm, pmdp, pmd) page_table_check_pmds_set(mm, pmdp, pmd, 1)
+#define page_table_check_pud_set(mm, pudp, pud) page_table_check_puds_set(mm, pudp, pud, 1)
+
#endif /* __LINUX_PAGE_TABLE_CHECK_H */
diff --git a/include/linux/panic.h b/include/linux/panic.h
index 2494d51707ef..4adc65766935 100644
--- a/include/linux/panic.h
+++ b/include/linux/panic.h
@@ -20,8 +20,6 @@ extern bool panic_triggering_all_cpu_backtrace;
extern int panic_timeout;
extern unsigned long panic_print;
extern int panic_on_oops;
-extern int panic_on_unrecovered_nmi;
-extern int panic_on_io_nmi;
extern int panic_on_warn;
extern unsigned long panic_on_taint;
diff --git a/include/linux/part_stat.h b/include/linux/part_stat.h
index c5e9cac0575e..eeeff2a04529 100644
--- a/include/linux/part_stat.h
+++ b/include/linux/part_stat.h
@@ -79,4 +79,6 @@ static inline void part_stat_set_all(struct block_device *part, int value)
#define part_stat_local_read_cpu(part, field, cpu) \
local_read(&(part_stat_get_cpu(part, field, cpu)))
+unsigned int bdev_count_inflight(struct block_device *part);
+
#endif /* _LINUX_PART_STAT_H */
diff --git a/include/linux/pci-p2pdma.h b/include/linux/pci-p2pdma.h
index 2c07aa6b7665..075c20b161d9 100644
--- a/include/linux/pci-p2pdma.h
+++ b/include/linux/pci-p2pdma.h
@@ -104,4 +104,89 @@ static inline struct pci_dev *pci_p2pmem_find(struct device *client)
return pci_p2pmem_find_many(&client, 1);
}
+enum pci_p2pdma_map_type {
+ /*
+ * PCI_P2PDMA_MAP_UNKNOWN: Used internally as an initial state before
+ * the mapping type has been calculated. Exported routines for the API
+ * will never return this value.
+ */
+ PCI_P2PDMA_MAP_UNKNOWN = 0,
+
+ /*
+ * Not a PCI P2PDMA transfer.
+ */
+ PCI_P2PDMA_MAP_NONE,
+
+ /*
+ * PCI_P2PDMA_MAP_NOT_SUPPORTED: Indicates the transaction will
+ * traverse the host bridge and the host bridge is not in the
+ * allowlist. DMA Mapping routines should return an error when
+ * this is returned.
+ */
+ PCI_P2PDMA_MAP_NOT_SUPPORTED,
+
+ /*
+ * PCI_P2PDMA_MAP_BUS_ADDR: Indicates that two devices can talk to
+ * each other directly through a PCI switch and the transaction will
+ * not traverse the host bridge. Such a mapping should program
+ * the DMA engine with PCI bus addresses.
+ */
+ PCI_P2PDMA_MAP_BUS_ADDR,
+
+ /*
+ * PCI_P2PDMA_MAP_THRU_HOST_BRIDGE: Indicates two devices can talk
+ * to each other, but the transaction traverses a host bridge on the
+ * allowlist. In this case, a normal mapping either with CPU physical
+ * addresses (in the case of dma-direct) or IOVA addresses (in the
+ * case of IOMMUs) should be used to program the DMA engine.
+ */
+ PCI_P2PDMA_MAP_THRU_HOST_BRIDGE,
+};
+
+struct pci_p2pdma_map_state {
+ struct dev_pagemap *pgmap;
+ enum pci_p2pdma_map_type map;
+ u64 bus_off;
+};
+
+/* helper for pci_p2pdma_state(), do not use directly */
+void __pci_p2pdma_update_state(struct pci_p2pdma_map_state *state,
+ struct device *dev, struct page *page);
+
+/**
+ * pci_p2pdma_state - check the P2P transfer state of a page
+ * @state: P2P state structure
+ * @dev: device to transfer to/from
+ * @page: page to map
+ *
+ * Check if @page is a PCI P2PDMA page, and if yes of what kind. Returns the
+ * map type, and updates @state with all information needed for a P2P transfer.
+ */
+static inline enum pci_p2pdma_map_type
+pci_p2pdma_state(struct pci_p2pdma_map_state *state, struct device *dev,
+ struct page *page)
+{
+ if (IS_ENABLED(CONFIG_PCI_P2PDMA) && is_pci_p2pdma_page(page)) {
+ if (state->pgmap != page_pgmap(page))
+ __pci_p2pdma_update_state(state, dev, page);
+ return state->map;
+ }
+ return PCI_P2PDMA_MAP_NONE;
+}
+
+/**
+ * pci_p2pdma_bus_addr_map - Translate a physical address to a bus address
+ * for a PCI_P2PDMA_MAP_BUS_ADDR transfer.
+ * @state: P2P state structure
+ * @paddr: physical address to map
+ *
+ * Map a physically contiguous PCI_P2PDMA_MAP_BUS_ADDR transfer.
+ */
+static inline dma_addr_t
+pci_p2pdma_bus_addr_map(struct pci_p2pdma_map_state *state, phys_addr_t paddr)
+{
+ WARN_ON_ONCE(state->map != PCI_P2PDMA_MAP_BUS_ADDR);
+ return paddr + state->bus_off;
+}
+
#endif /* _LINUX_PCI_P2P_H */
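An illustrative per-page mapping step for a driver handling a mix of P2P and host memory. The state must start zero-initialized so the first page populates it; the helper and its 0/1/negative return convention are hypothetical:

static inline int example_map_one(struct device *dev,
				  struct pci_p2pdma_map_state *state,
				  struct page *page, dma_addr_t *addr)
{
	switch (pci_p2pdma_state(state, dev, page)) {
	case PCI_P2PDMA_MAP_BUS_ADDR:
		*addr = pci_p2pdma_bus_addr_map(state, page_to_phys(page));
		return 0;
	case PCI_P2PDMA_MAP_NONE:
	case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
		return 1;	/* map via the regular DMA API instead */
	default:
		return -EREMOTEIO;	/* PCI_P2PDMA_MAP_NOT_SUPPORTED */
	}
}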
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 51e2bd6405cd..b231cbc67a35 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -1671,7 +1671,7 @@ void pci_disable_msi(struct pci_dev *dev);
int pci_msix_vec_count(struct pci_dev *dev);
void pci_disable_msix(struct pci_dev *dev);
void pci_restore_msi_state(struct pci_dev *dev);
-int pci_msi_enabled(void);
+bool pci_msi_enabled(void);
int pci_enable_msi(struct pci_dev *dev);
int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries,
int minvec, int maxvec);
@@ -1704,7 +1704,7 @@ static inline void pci_disable_msi(struct pci_dev *dev) { }
static inline int pci_msix_vec_count(struct pci_dev *dev) { return -ENOSYS; }
static inline void pci_disable_msix(struct pci_dev *dev) { }
static inline void pci_restore_msi_state(struct pci_dev *dev) { }
-static inline int pci_msi_enabled(void) { return 0; }
+static inline bool pci_msi_enabled(void) { return false; }
static inline int pci_enable_msi(struct pci_dev *dev)
{ return -ENOSYS; }
static inline int pci_enable_msix_range(struct pci_dev *dev,
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 2e28182c3af0..e2d71b6fdd84 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -3049,6 +3049,7 @@
#define PCI_DEVICE_ID_INTEL_HDA_DG1 0x490d
#define PCI_DEVICE_ID_INTEL_HDA_EHL_0 0x4b55
#define PCI_DEVICE_ID_INTEL_HDA_EHL_3 0x4b58
+#define PCI_DEVICE_ID_INTEL_HDA_WCL 0x4d28
#define PCI_DEVICE_ID_INTEL_HDA_JSL_N 0x4dc8
#define PCI_DEVICE_ID_INTEL_HDA_DG2_0 0x4f90
#define PCI_DEVICE_ID_INTEL_HDA_DG2_1 0x4f91
@@ -3070,6 +3071,7 @@
#define PCI_DEVICE_ID_INTEL_5100_21 0x65f5
#define PCI_DEVICE_ID_INTEL_5100_22 0x65f6
#define PCI_DEVICE_ID_INTEL_IOAT_SCNB 0x65ff
+#define PCI_DEVICE_ID_INTEL_HDA_FCL 0x67a8
#define PCI_DEVICE_ID_INTEL_82371SB_0 0x7000
#define PCI_DEVICE_ID_INTEL_82371SB_1 0x7010
#define PCI_DEVICE_ID_INTEL_82371SB_2 0x7020
diff --git a/include/linux/pds/pds_adminq.h b/include/linux/pds/pds_adminq.h
index ddd111f04ca0..40ff0ec2b879 100644
--- a/include/linux/pds/pds_adminq.h
+++ b/include/linux/pds/pds_adminq.h
@@ -4,7 +4,7 @@
#ifndef _PDS_CORE_ADMINQ_H_
#define _PDS_CORE_ADMINQ_H_
-#define PDSC_ADMINQ_MAX_POLL_INTERVAL 256
+#define PDSC_ADMINQ_MAX_POLL_INTERVAL 256000 /* usecs */
enum pds_core_adminq_flags {
PDS_AQ_FLAG_FASTPOLL = BIT(1), /* completion poll at 1ms */
@@ -463,7 +463,6 @@ struct pds_core_lif_getattr_cmd {
* @rsvd: Word boundary padding
* @comp_index: Index in the descriptor ring for which this is the completion
* @state: LIF state (enum pds_core_lif_state)
- * @name: LIF name string, 0 terminated
* @features: Features (enum pds_core_hw_features)
* @rsvd2: Word boundary padding
* @color: Color bit
diff --git a/include/linux/percpu-rwsem.h b/include/linux/percpu-rwsem.h
index af7d75ede619..288f5235649a 100644
--- a/include/linux/percpu-rwsem.h
+++ b/include/linux/percpu-rwsem.h
@@ -43,9 +43,10 @@ is_static struct percpu_rw_semaphore name = { \
#define DEFINE_STATIC_PERCPU_RWSEM(name) \
__DEFINE_PERCPU_RWSEM(name, static)
-extern bool __percpu_down_read(struct percpu_rw_semaphore *, bool);
+extern bool __percpu_down_read(struct percpu_rw_semaphore *, bool, bool);
-static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
+static inline void percpu_down_read_internal(struct percpu_rw_semaphore *sem,
+ bool freezable)
{
might_sleep();
@@ -63,7 +64,7 @@ static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
if (likely(rcu_sync_is_idle(&sem->rss)))
this_cpu_inc(*sem->read_count);
else
- __percpu_down_read(sem, false); /* Unconditional memory barrier */
+ __percpu_down_read(sem, false, freezable); /* Unconditional memory barrier */
/*
* The preempt_enable() prevents the compiler from
* bleeding the critical section out.
@@ -71,6 +72,17 @@ static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
preempt_enable();
}
+static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
+{
+ percpu_down_read_internal(sem, false);
+}
+
+static inline void percpu_down_read_freezable(struct percpu_rw_semaphore *sem,
+ bool freeze)
+{
+ percpu_down_read_internal(sem, freeze);
+}
+
static inline bool percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
{
bool ret = true;
@@ -82,7 +94,7 @@ static inline bool percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
if (likely(rcu_sync_is_idle(&sem->rss)))
this_cpu_inc(*sem->read_count);
else
- ret = __percpu_down_read(sem, true); /* Unconditional memory barrier */
+ ret = __percpu_down_read(sem, true, false); /* Unconditional memory barrier */
preempt_enable();
/*
* The barrier() from preempt_enable() prevents the compiler from
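For context, a minimal sketch of how a read-side path might opt into the new freezable variant so that a reader blocked on the semaphore does not stall the freezer during suspend; the semaphore and function names are illustrative, not part of this patch:

#include <linux/percpu-rwsem.h>

DEFINE_STATIC_PERCPU_RWSEM(example_rwsem);

static void example_reader(bool may_freeze)
{
	/* freezable readers pass freeze=true, others behave as before */
	percpu_down_read_freezable(&example_rwsem, may_freeze);
	/* ... read-side critical section ... */
	percpu_up_read(&example_rwsem);
}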
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 52b5ea663b9f..85bf8dd9f087 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -15,11 +15,7 @@
/* enough to cover all DEFINE_PER_CPUs in modules */
#ifdef CONFIG_MODULES
-#ifdef CONFIG_MEM_ALLOC_PROFILING
-#define PERCPU_MODULE_RESERVE (8 << 13)
-#else
#define PERCPU_MODULE_RESERVE (8 << 10)
-#endif
#else
#define PERCPU_MODULE_RESERVE 0
#endif
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 0069ba6866a4..52dc7cfab0e0 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -26,18 +26,9 @@
# include <asm/local64.h>
#endif
-#define PERF_GUEST_ACTIVE 0x01
-#define PERF_GUEST_USER 0x02
-
-struct perf_guest_info_callbacks {
- unsigned int (*state)(void);
- unsigned long (*get_ip)(void);
- unsigned int (*handle_intel_pt_intr)(void);
-};
-
#ifdef CONFIG_HAVE_HW_BREAKPOINT
-#include <linux/rhashtable-types.h>
-#include <asm/hw_breakpoint.h>
+# include <linux/rhashtable-types.h>
+# include <asm/hw_breakpoint.h>
#endif
#include <linux/list.h>
@@ -62,19 +53,20 @@ struct perf_guest_info_callbacks {
#include <linux/security.h>
#include <linux/static_call.h>
#include <linux/lockdep.h>
+
#include <asm/local.h>
struct perf_callchain_entry {
- __u64 nr;
- __u64 ip[]; /* /proc/sys/kernel/perf_event_max_stack */
+ u64 nr;
+ u64 ip[]; /* /proc/sys/kernel/perf_event_max_stack */
};
struct perf_callchain_entry_ctx {
- struct perf_callchain_entry *entry;
- u32 max_stack;
- u32 nr;
- short contexts;
- bool contexts_maxed;
+ struct perf_callchain_entry *entry;
+ u32 max_stack;
+ u32 nr;
+ short contexts;
+ bool contexts_maxed;
};
typedef unsigned long (*perf_copy_f)(void *dst, const void *src,
@@ -121,8 +113,8 @@ static __always_inline bool perf_raw_frag_last(const struct perf_raw_frag *frag)
* already stored in age order, the hw_idx should be 0.
*/
struct perf_branch_stack {
- __u64 nr;
- __u64 hw_idx;
+ u64 nr;
+ u64 hw_idx;
struct perf_branch_entry entries[];
};
@@ -132,10 +124,10 @@ struct task_struct;
* extra PMU register associated with an event
*/
struct hw_perf_event_extra {
- u64 config; /* register value */
- unsigned int reg; /* register address or index */
- int alloc; /* extra register already allocated */
- int idx; /* index in shared_regs->regs[] */
+ u64 config; /* register value */
+ unsigned int reg; /* register address or index */
+ int alloc; /* extra register already allocated */
+ int idx; /* index in shared_regs->regs[] */
};
/**
@@ -144,8 +136,8 @@ struct hw_perf_event_extra {
* PERF_EVENT_FLAG_ARCH bits are reserved for architecture-specific
* usage.
*/
-#define PERF_EVENT_FLAG_ARCH 0x000fffff
-#define PERF_EVENT_FLAG_USER_READ_CNT 0x80000000
+#define PERF_EVENT_FLAG_ARCH 0x0fffffff
+#define PERF_EVENT_FLAG_USER_READ_CNT 0x80000000
static_assert((PERF_EVENT_FLAG_USER_READ_CNT & PERF_EVENT_FLAG_ARCH) == 0);
@@ -157,7 +149,9 @@ struct hw_perf_event {
union {
struct { /* hardware */
u64 config;
+ u64 config1;
u64 last_tag;
+ u64 dyn_constraint;
unsigned long config_base;
unsigned long event_base;
int event_base_rdpmc;
@@ -225,9 +219,14 @@ struct hw_perf_event {
/*
* hw_perf_event::state flags; used to track the PERF_EF_* state.
*/
-#define PERF_HES_STOPPED 0x01 /* the counter is stopped */
-#define PERF_HES_UPTODATE 0x02 /* event->count up-to-date */
-#define PERF_HES_ARCH 0x04
+
+/* the counter is stopped */
+#define PERF_HES_STOPPED 0x01
+
+/* event->count up-to-date */
+#define PERF_HES_UPTODATE 0x02
+
+#define PERF_HES_ARCH 0x04
int state;
@@ -276,7 +275,7 @@ struct hw_perf_event {
*/
u64 freq_time_stamp;
u64 freq_count_stamp;
-#endif
+#endif /* CONFIG_PERF_EVENTS */
};
struct perf_event;
@@ -285,28 +284,33 @@ struct perf_event_pmu_context;
/*
* Common implementation detail of pmu::{start,commit,cancel}_txn
*/
-#define PERF_PMU_TXN_ADD 0x1 /* txn to add/schedule event on PMU */
-#define PERF_PMU_TXN_READ 0x2 /* txn to read event group from PMU */
+
+/* txn to add/schedule event on PMU */
+#define PERF_PMU_TXN_ADD 0x1
+
+/* txn to read event group from PMU */
+#define PERF_PMU_TXN_READ 0x2
/**
* pmu::capabilities flags
*/
-#define PERF_PMU_CAP_NO_INTERRUPT 0x0001
-#define PERF_PMU_CAP_NO_NMI 0x0002
-#define PERF_PMU_CAP_AUX_NO_SG 0x0004
-#define PERF_PMU_CAP_EXTENDED_REGS 0x0008
-#define PERF_PMU_CAP_EXCLUSIVE 0x0010
-#define PERF_PMU_CAP_ITRACE 0x0020
-#define PERF_PMU_CAP_NO_EXCLUDE 0x0040
-#define PERF_PMU_CAP_AUX_OUTPUT 0x0080
-#define PERF_PMU_CAP_EXTENDED_HW_TYPE 0x0100
-#define PERF_PMU_CAP_AUX_PAUSE 0x0200
+#define PERF_PMU_CAP_NO_INTERRUPT 0x0001
+#define PERF_PMU_CAP_NO_NMI 0x0002
+#define PERF_PMU_CAP_AUX_NO_SG 0x0004
+#define PERF_PMU_CAP_EXTENDED_REGS 0x0008
+#define PERF_PMU_CAP_EXCLUSIVE 0x0010
+#define PERF_PMU_CAP_ITRACE 0x0020
+#define PERF_PMU_CAP_NO_EXCLUDE 0x0040
+#define PERF_PMU_CAP_AUX_OUTPUT 0x0080
+#define PERF_PMU_CAP_EXTENDED_HW_TYPE 0x0100
+#define PERF_PMU_CAP_AUX_PAUSE 0x0200
+#define PERF_PMU_CAP_AUX_PREFER_LARGE 0x0400
/**
* pmu::scope
*/
enum perf_pmu_scope {
- PERF_PMU_SCOPE_NONE = 0,
+ PERF_PMU_SCOPE_NONE = 0,
PERF_PMU_SCOPE_CORE,
PERF_PMU_SCOPE_DIE,
PERF_PMU_SCOPE_CLUSTER,
@@ -325,6 +329,9 @@ struct perf_output_handle;
struct pmu {
struct list_head entry;
+ spinlock_t events_lock;
+ struct list_head events;
+
struct module *module;
struct device *dev;
struct device *parent;
@@ -387,11 +394,21 @@ struct pmu {
* Flags for ->add()/->del()/ ->start()/->stop(). There are
* matching hw_perf_event::state flags.
*/
-#define PERF_EF_START 0x01 /* start the counter when adding */
-#define PERF_EF_RELOAD 0x02 /* reload the counter when starting */
-#define PERF_EF_UPDATE 0x04 /* update the counter when stopping */
-#define PERF_EF_PAUSE 0x08 /* AUX area event, pause tracing */
-#define PERF_EF_RESUME 0x10 /* AUX area event, resume tracing */
+
+/* start the counter when adding */
+#define PERF_EF_START 0x01
+
+/* reload the counter when starting */
+#define PERF_EF_RELOAD 0x02
+
+/* update the counter when stopping */
+#define PERF_EF_UPDATE 0x04
+
+/* AUX area event, pause tracing */
+#define PERF_EF_PAUSE 0x08
+
+/* AUX area event, resume tracing */
+#define PERF_EF_RESUME 0x10
/*
* Adds/Removes a counter to/from the PMU, can be done inside a
@@ -590,10 +607,10 @@ enum perf_addr_filter_action_t {
* This is a hardware-agnostic filter configuration as specified by the user.
*/
struct perf_addr_filter {
- struct list_head entry;
- struct path path;
- unsigned long offset;
- unsigned long size;
+ struct list_head entry;
+ struct path path;
+ unsigned long offset;
+ unsigned long size;
enum perf_addr_filter_action_t action;
};
@@ -608,23 +625,24 @@ struct perf_addr_filter {
* bundled together; see perf_event_addr_filters().
*/
struct perf_addr_filters_head {
- struct list_head list;
- raw_spinlock_t lock;
- unsigned int nr_file_filters;
+ struct list_head list;
+ raw_spinlock_t lock;
+ unsigned int nr_file_filters;
};
struct perf_addr_filter_range {
- unsigned long start;
- unsigned long size;
+ unsigned long start;
+ unsigned long size;
};
/**
* enum perf_event_state - the states of an event:
*/
enum perf_event_state {
- PERF_EVENT_STATE_DEAD = -4,
- PERF_EVENT_STATE_EXIT = -3,
- PERF_EVENT_STATE_ERROR = -2,
+ PERF_EVENT_STATE_DEAD = -5,
+ PERF_EVENT_STATE_REVOKED = -4, /* pmu gone, must not touch */
+ PERF_EVENT_STATE_EXIT = -3, /* task died, still inherit */
+ PERF_EVENT_STATE_ERROR = -2, /* scheduling error, can enable */
PERF_EVENT_STATE_OFF = -1,
PERF_EVENT_STATE_INACTIVE = 0,
PERF_EVENT_STATE_ACTIVE = 1,
@@ -662,24 +680,24 @@ struct swevent_hlist {
struct rcu_head rcu_head;
};
-#define PERF_ATTACH_CONTEXT 0x0001
-#define PERF_ATTACH_GROUP 0x0002
-#define PERF_ATTACH_TASK 0x0004
-#define PERF_ATTACH_TASK_DATA 0x0008
-#define PERF_ATTACH_GLOBAL_DATA 0x0010
-#define PERF_ATTACH_SCHED_CB 0x0020
-#define PERF_ATTACH_CHILD 0x0040
-#define PERF_ATTACH_EXCLUSIVE 0x0080
-#define PERF_ATTACH_CALLCHAIN 0x0100
-#define PERF_ATTACH_ITRACE 0x0200
+#define PERF_ATTACH_CONTEXT 0x0001
+#define PERF_ATTACH_GROUP 0x0002
+#define PERF_ATTACH_TASK 0x0004
+#define PERF_ATTACH_TASK_DATA 0x0008
+#define PERF_ATTACH_GLOBAL_DATA 0x0010
+#define PERF_ATTACH_SCHED_CB 0x0020
+#define PERF_ATTACH_CHILD 0x0040
+#define PERF_ATTACH_EXCLUSIVE 0x0080
+#define PERF_ATTACH_CALLCHAIN 0x0100
+#define PERF_ATTACH_ITRACE 0x0200
struct bpf_prog;
struct perf_cgroup;
struct perf_buffer;
struct pmu_event_list {
- raw_spinlock_t lock;
- struct list_head list;
+ raw_spinlock_t lock;
+ struct list_head list;
};
/*
@@ -689,12 +707,12 @@ struct pmu_event_list {
* disabled is sufficient since it will hold-off the IPIs.
*/
#ifdef CONFIG_PROVE_LOCKING
-#define lockdep_assert_event_ctx(event) \
+# define lockdep_assert_event_ctx(event) \
WARN_ON_ONCE(__lockdep_enabled && \
(this_cpu_read(hardirqs_enabled) && \
lockdep_is_held(&(event)->ctx->mutex) != LOCK_STATE_HELD))
#else
-#define lockdep_assert_event_ctx(event)
+# define lockdep_assert_event_ctx(event)
#endif
#define for_each_sibling_event(sibling, event) \
@@ -852,9 +870,9 @@ struct perf_event {
#ifdef CONFIG_EVENT_TRACING
struct trace_event_call *tp_event;
struct event_filter *filter;
-#ifdef CONFIG_FUNCTION_TRACER
+# ifdef CONFIG_FUNCTION_TRACER
struct ftrace_ops ftrace_ops;
-#endif
+# endif
#endif
#ifdef CONFIG_CGROUP_PERF
@@ -865,6 +883,7 @@ struct perf_event {
void *security;
#endif
struct list_head sb_list;
+ struct list_head pmu_list;
/*
* Certain events get forwarded to another pmu internally by over-
@@ -872,7 +891,7 @@ struct perf_event {
* of it. event->orig_type contains original 'type' requested by
* user.
*/
- __u32 orig_type;
+ u32 orig_type;
#endif /* CONFIG_PERF_EVENTS */
};
@@ -937,8 +956,8 @@ static inline bool perf_pmu_ctx_is_active(struct perf_event_pmu_context *epc)
}
struct perf_event_groups {
- struct rb_root tree;
- u64 index;
+ struct rb_root tree;
+ u64 index;
};
@@ -1155,7 +1174,7 @@ extern void perf_aux_output_flag(struct perf_output_handle *handle, u64 flags);
extern void perf_event_itrace_started(struct perf_event *event);
extern int perf_pmu_register(struct pmu *pmu, const char *name, int type);
-extern void perf_pmu_unregister(struct pmu *pmu);
+extern int perf_pmu_unregister(struct pmu *pmu);
extern void __perf_event_task_sched_in(struct task_struct *prev,
struct task_struct *task);
@@ -1181,16 +1200,18 @@ extern void perf_pmu_resched(struct pmu *pmu);
extern int perf_event_refresh(struct perf_event *event, int refresh);
extern void perf_event_update_userpage(struct perf_event *event);
extern int perf_event_release_kernel(struct perf_event *event);
+
extern struct perf_event *
perf_event_create_kernel_counter(struct perf_event_attr *attr,
- int cpu,
- struct task_struct *task,
- perf_overflow_handler_t callback,
- void *context);
+ int cpu,
+ struct task_struct *task,
+ perf_overflow_handler_t callback,
+ void *context);
+
extern void perf_pmu_migrate_context(struct pmu *pmu,
- int src_cpu, int dst_cpu);
-int perf_event_read_local(struct perf_event *event, u64 *value,
- u64 *enabled, u64 *running);
+ int src_cpu, int dst_cpu);
+extern int perf_event_read_local(struct perf_event *event, u64 *value,
+ u64 *enabled, u64 *running);
extern u64 perf_event_read_value(struct perf_event *event,
u64 *enabled, u64 *running);
@@ -1407,14 +1428,14 @@ static inline u32 perf_sample_data_size(struct perf_sample_data *data,
*/
static inline void perf_clear_branch_entry_bitfields(struct perf_branch_entry *br)
{
- br->mispred = 0;
- br->predicted = 0;
- br->in_tx = 0;
- br->abort = 0;
- br->cycles = 0;
- br->type = 0;
- br->spec = PERF_BR_SPEC_NA;
- br->reserved = 0;
+ br->mispred = 0;
+ br->predicted = 0;
+ br->in_tx = 0;
+ br->abort = 0;
+ br->cycles = 0;
+ br->type = 0;
+ br->spec = PERF_BR_SPEC_NA;
+ br->reserved = 0;
}
extern void perf_output_sample(struct perf_output_handle *handle,
@@ -1603,7 +1624,17 @@ extern void perf_event_bpf_event(struct bpf_prog *prog,
enum perf_bpf_event_type type,
u16 flags);
+#define PERF_GUEST_ACTIVE 0x01
+#define PERF_GUEST_USER 0x02
+
+struct perf_guest_info_callbacks {
+ unsigned int (*state)(void);
+ unsigned long (*get_ip)(void);
+ unsigned int (*handle_intel_pt_intr)(void);
+};
+
#ifdef CONFIG_GUEST_PERF_EVENTS
+
extern struct perf_guest_info_callbacks __rcu *perf_guest_cbs;
DECLARE_STATIC_CALL(__perf_guest_state, *perf_guest_cbs->state);
@@ -1614,21 +1645,27 @@ static inline unsigned int perf_guest_state(void)
{
return static_call(__perf_guest_state)();
}
+
static inline unsigned long perf_guest_get_ip(void)
{
return static_call(__perf_guest_get_ip)();
}
+
static inline unsigned int perf_guest_handle_intel_pt_intr(void)
{
return static_call(__perf_guest_handle_intel_pt_intr)();
}
+
extern void perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs);
extern void perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs);
-#else
+
+#else /* !CONFIG_GUEST_PERF_EVENTS: */
+
static inline unsigned int perf_guest_state(void) { return 0; }
static inline unsigned long perf_guest_get_ip(void) { return 0; }
static inline unsigned int perf_guest_handle_intel_pt_intr(void) { return 0; }
-#endif /* CONFIG_GUEST_PERF_EVENTS */
+
+#endif /* !CONFIG_GUEST_PERF_EVENTS */
extern void perf_event_exec(void);
extern void perf_event_comm(struct task_struct *tsk, bool exec);
@@ -1658,6 +1695,7 @@ static inline int perf_callchain_store_context(struct perf_callchain_entry_ctx *
{
if (ctx->contexts < sysctl_perf_event_max_contexts_per_stack) {
struct perf_callchain_entry *entry = ctx->entry;
+
entry->ip[entry->nr++] = ip;
++ctx->contexts;
return 0;
@@ -1671,6 +1709,7 @@ static inline int perf_callchain_store(struct perf_callchain_entry_ctx *ctx, u64
{
if (ctx->nr < ctx->max_stack && !ctx->contexts_maxed) {
struct perf_callchain_entry *entry = ctx->entry;
+
entry->ip[entry->nr++] = ip;
++ctx->nr;
return 0;
@@ -1697,7 +1736,7 @@ static inline int perf_is_paranoid(void)
return sysctl_perf_event_paranoid > -1;
}
-int perf_allow_kernel(void);
+extern int perf_allow_kernel(void);
static inline int perf_allow_cpu(void)
{
@@ -1760,7 +1799,7 @@ static inline bool needs_branch_stack(struct perf_event *event)
static inline bool has_aux(struct perf_event *event)
{
- return event->pmu->setup_aux;
+ return event->pmu && event->pmu->setup_aux;
}
static inline bool has_aux_action(struct perf_event *event)
@@ -1819,7 +1858,7 @@ extern int perf_output_begin_backward(struct perf_output_handle *handle,
extern void perf_output_end(struct perf_output_handle *handle);
extern unsigned int perf_output_copy(struct perf_output_handle *handle,
- const void *buf, unsigned int len);
+ const void *buf, unsigned int len);
extern unsigned int perf_output_skip(struct perf_output_handle *handle,
unsigned int len);
extern long perf_output_copy_aux(struct perf_output_handle *aux_handle,
@@ -1836,7 +1875,9 @@ extern void perf_event_task_tick(void);
extern int perf_event_account_interrupt(struct perf_event *event);
extern int perf_event_period(struct perf_event *event, u64 value);
extern u64 perf_event_pause(struct perf_event *event, bool reset);
+
#else /* !CONFIG_PERF_EVENTS: */
+
static inline void *
perf_aux_output_begin(struct perf_output_handle *handle,
struct perf_event *event) { return NULL; }
@@ -1914,19 +1955,14 @@ static inline void perf_event_disable(struct perf_event *event) { }
static inline int __perf_event_disable(void *info) { return -1; }
static inline void perf_event_task_tick(void) { }
static inline int perf_event_release_kernel(struct perf_event *event) { return 0; }
-static inline int perf_event_period(struct perf_event *event, u64 value)
-{
- return -EINVAL;
-}
-static inline u64 perf_event_pause(struct perf_event *event, bool reset)
-{
- return 0;
-}
-static inline int perf_exclude_event(struct perf_event *event, struct pt_regs *regs)
-{
- return 0;
-}
-#endif
+static inline int
+perf_event_period(struct perf_event *event, u64 value) { return -EINVAL; }
+static inline u64
+perf_event_pause(struct perf_event *event, bool reset) { return 0; }
+static inline int
+perf_exclude_event(struct perf_event *event, struct pt_regs *regs) { return 0; }
+
+#endif /* !CONFIG_PERF_EVENTS */
#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL)
extern void perf_restore_debug_store(void);
@@ -1934,31 +1970,31 @@ extern void perf_restore_debug_store(void);
static inline void perf_restore_debug_store(void) { }
#endif
-#define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x))
+#define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x))
struct perf_pmu_events_attr {
- struct device_attribute attr;
- u64 id;
- const char *event_str;
+ struct device_attribute attr;
+ u64 id;
+ const char *event_str;
};
struct perf_pmu_events_ht_attr {
- struct device_attribute attr;
- u64 id;
- const char *event_str_ht;
- const char *event_str_noht;
+ struct device_attribute attr;
+ u64 id;
+ const char *event_str_ht;
+ const char *event_str_noht;
};
struct perf_pmu_events_hybrid_attr {
- struct device_attribute attr;
- u64 id;
- const char *event_str;
- u64 pmu_type;
+ struct device_attribute attr;
+ u64 id;
+ const char *event_str;
+ u64 pmu_type;
};
struct perf_pmu_format_hybrid_attr {
- struct device_attribute attr;
- u64 pmu_type;
+ struct device_attribute attr;
+ u64 pmu_type;
};
ssize_t perf_event_sysfs_show(struct device *dev, struct device_attribute *attr,
@@ -2000,11 +2036,11 @@ static struct device_attribute format_attr_##_name = __ATTR_RO(_name)
/* Performance counter hotplug functions */
#ifdef CONFIG_PERF_EVENTS
-int perf_event_init_cpu(unsigned int cpu);
-int perf_event_exit_cpu(unsigned int cpu);
+extern int perf_event_init_cpu(unsigned int cpu);
+extern int perf_event_exit_cpu(unsigned int cpu);
#else
-#define perf_event_init_cpu NULL
-#define perf_event_exit_cpu NULL
+# define perf_event_init_cpu NULL
+# define perf_event_exit_cpu NULL
#endif
extern void arch_perf_update_userpage(struct perf_event *event,
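With struct perf_guest_info_callbacks now declared next to the CONFIG_GUEST_PERF_EVENTS block, a hypervisor module registers its callbacks roughly as below. A sketch assuming CONFIG_GUEST_PERF_EVENTS=y; all example_* names are hypothetical, the callback trio matches the struct in this header:

#include <linux/module.h>
#include <linux/perf_event.h>

static unsigned int example_guest_state(void)
{
	/* hypervisor-specific: report PERF_GUEST_ACTIVE | PERF_GUEST_USER */
	return 0;
}

static unsigned long example_guest_get_ip(void)
{
	/* hypervisor-specific: return the guest instruction pointer */
	return 0;
}

static unsigned int example_guest_handle_intel_pt_intr(void)
{
	return 0;
}

static struct perf_guest_info_callbacks example_guest_cbs = {
	.state			= example_guest_state,
	.get_ip			= example_guest_get_ip,
	.handle_intel_pt_intr	= example_guest_handle_intel_pt_intr,
};

static int __init example_init(void)
{
	perf_register_guest_info_callbacks(&example_guest_cbs);
	return 0;
}

static void __exit example_exit(void)
{
	perf_unregister_guest_info_callbacks(&example_guest_cbs);
}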
diff --git a/include/linux/pgalloc_tag.h b/include/linux/pgalloc_tag.h
index c74077977830..8a7f4f802c57 100644
--- a/include/linux/pgalloc_tag.h
+++ b/include/linux/pgalloc_tag.h
@@ -188,6 +188,13 @@ static inline struct alloc_tag *__pgalloc_tag_get(struct page *page)
return tag;
}
+static inline struct alloc_tag *pgalloc_tag_get(struct page *page)
+{
+ if (mem_alloc_profiling_enabled())
+ return __pgalloc_tag_get(page);
+ return NULL;
+}
+
void pgalloc_tag_split(struct folio *folio, int old_order, int new_order);
void pgalloc_tag_swap(struct folio *new, struct folio *old);
@@ -199,6 +206,7 @@ static inline void clear_page_tag_ref(struct page *page) {}
static inline void alloc_tag_sec_init(void) {}
static inline void pgalloc_tag_split(struct folio *folio, int old_order, int new_order) {}
static inline void pgalloc_tag_swap(struct folio *new, struct folio *old) {}
+static inline struct alloc_tag *pgalloc_tag_get(struct page *page) { return NULL; }
#endif /* CONFIG_MEM_ALLOC_PROFILING */
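The new pgalloc_tag_get() wrapper folds the mem_alloc_profiling_enabled() check into the accessor, so callers no longer guard it themselves. A minimal sketch, assuming the tag's codetag fields (ct.filename, ct.lineno) are used for reporting; example_report_tag is hypothetical:

#include <linux/pgalloc_tag.h>
#include <linux/printk.h>

static void example_report_tag(struct page *page)
{
	/* NULL when profiling is disabled or the page carries no tag */
	struct alloc_tag *tag = pgalloc_tag_get(page);

	if (tag)
		pr_info("page allocated at %s:%u\n",
			tag->ct.filename, tag->ct.lineno);
}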
diff --git a/include/linux/phy.h b/include/linux/phy.h
index a2bfae80c449..e194dad1623d 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -744,10 +744,7 @@ struct phy_device {
#define PHY_F_NO_IRQ 0x80000000
#define PHY_F_RXC_ALWAYS_ON 0x40000000
-static inline struct phy_device *to_phy_device(const struct device *dev)
-{
- return container_of(to_mdio_device(dev), struct phy_device, mdio);
-}
+#define to_phy_device(__dev) container_of_const(to_mdio_device(__dev), struct phy_device, mdio)
/**
* struct phy_tdr_config - Configuration of a TDR raw test
@@ -990,7 +987,8 @@ struct phy_driver {
* driver for the given phydev. If NULL, matching is based on
* phy_id and phy_id_mask.
*/
- int (*match_phy_device)(struct phy_device *phydev);
+ int (*match_phy_device)(struct phy_device *phydev,
+ const struct phy_driver *phydrv);
/**
* @set_wol: Some devices (e.g. qnap TS-119P II) require PHY
@@ -1753,56 +1751,13 @@ int phy_modify_paged(struct phy_device *phydev, int page, u32 regnum,
struct phy_device *phy_device_create(struct mii_bus *bus, int addr, u32 phy_id,
bool is_c45,
struct phy_c45_device_ids *c45_ids);
-#if IS_ENABLED(CONFIG_PHYLIB)
int fwnode_get_phy_id(struct fwnode_handle *fwnode, u32 *phy_id);
struct mdio_device *fwnode_mdio_find_device(struct fwnode_handle *fwnode);
struct phy_device *fwnode_phy_find_device(struct fwnode_handle *phy_fwnode);
-struct phy_device *device_phy_find_device(struct device *dev);
struct fwnode_handle *fwnode_get_phy_node(const struct fwnode_handle *fwnode);
struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45);
int phy_device_register(struct phy_device *phy);
void phy_device_free(struct phy_device *phydev);
-#else
-static inline int fwnode_get_phy_id(struct fwnode_handle *fwnode, u32 *phy_id)
-{
- return 0;
-}
-static inline
-struct mdio_device *fwnode_mdio_find_device(struct fwnode_handle *fwnode)
-{
- return 0;
-}
-
-static inline
-struct phy_device *fwnode_phy_find_device(struct fwnode_handle *phy_fwnode)
-{
- return NULL;
-}
-
-static inline struct phy_device *device_phy_find_device(struct device *dev)
-{
- return NULL;
-}
-
-static inline
-struct fwnode_handle *fwnode_get_phy_node(struct fwnode_handle *fwnode)
-{
- return NULL;
-}
-
-static inline
-struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45)
-{
- return NULL;
-}
-
-static inline int phy_device_register(struct phy_device *phy)
-{
- return 0;
-}
-
-static inline void phy_device_free(struct phy_device *phydev) { }
-#endif /* CONFIG_PHYLIB */
void phy_device_remove(struct phy_device *phydev);
int phy_get_c45_ids(struct phy_device *phydev);
int phy_init_hw(struct phy_device *phydev);
@@ -1910,6 +1865,9 @@ char *phy_attached_info_irq(struct phy_device *phydev)
__malloc;
void phy_attached_info(struct phy_device *phydev);
+int genphy_match_phy_device(struct phy_device *phydev,
+ const struct phy_driver *phydrv);
+
/* Clause 22 PHY */
int genphy_read_abilities(struct phy_device *phydev);
int genphy_setup_forced(struct phy_device *phydev);
@@ -2046,6 +2004,9 @@ int phy_get_tx_amplitude_gain(struct phy_device *phydev, struct device *dev,
enum ethtool_link_mode_bit_indices linkmode,
u32 *val);
+int phy_get_mac_termination(struct phy_device *phydev, struct device *dev,
+ u32 *val);
+
void phy_resolve_pause(unsigned long *local_adv, unsigned long *partner_adv,
bool *tx_pause, bool *rx_pause);
@@ -2073,9 +2034,6 @@ int phy_ethtool_set_link_ksettings(struct net_device *ndev,
const struct ethtool_link_ksettings *cmd);
int phy_ethtool_nway_reset(struct net_device *ndev);
-int __init mdio_bus_init(void);
-void mdio_bus_exit(void);
-
int phy_ethtool_get_strings(struct phy_device *phydev, u8 *data);
int phy_ethtool_get_sset_count(struct phy_device *phydev);
int phy_ethtool_get_stats(struct phy_device *phydev,
@@ -2102,6 +2060,7 @@ int __phy_hwtstamp_set(struct phy_device *phydev,
struct netlink_ext_ack *extack);
extern const struct bus_type mdio_bus_type;
+extern const struct class mdio_bus_class;
struct mdio_board_info {
const char *bus_id;
@@ -2110,17 +2069,8 @@ struct mdio_board_info {
const void *platform_data;
};
-#if IS_ENABLED(CONFIG_MDIO_DEVICE)
int mdiobus_register_board_info(const struct mdio_board_info *info,
unsigned int n);
-#else
-static inline int mdiobus_register_board_info(const struct mdio_board_info *i,
- unsigned int n)
-{
- return 0;
-}
-#endif
-
/**
* phy_module_driver() - Helper macro for registering PHY drivers
diff --git a/include/linux/phy_fixed.h b/include/linux/phy_fixed.h
index 1acafd86ab13..5399b9e41e35 100644
--- a/include/linux/phy_fixed.h
+++ b/include/linux/phy_fixed.h
@@ -13,43 +13,27 @@ struct fixed_phy_status {
};
struct device_node;
-struct gpio_desc;
struct net_device;
#if IS_ENABLED(CONFIG_FIXED_PHY)
extern int fixed_phy_change_carrier(struct net_device *dev, bool new_carrier);
-extern int fixed_phy_add(unsigned int irq, int phy_id,
- struct fixed_phy_status *status);
-extern struct phy_device *fixed_phy_register(unsigned int irq,
- struct fixed_phy_status *status,
- struct device_node *np);
-
-extern struct phy_device *
-fixed_phy_register_with_gpiod(unsigned int irq,
- struct fixed_phy_status *status,
- struct gpio_desc *gpiod);
+int fixed_phy_add(int phy_id, const struct fixed_phy_status *status);
+struct phy_device *fixed_phy_register(const struct fixed_phy_status *status,
+ struct device_node *np);
extern void fixed_phy_unregister(struct phy_device *phydev);
extern int fixed_phy_set_link_update(struct phy_device *phydev,
int (*link_update)(struct net_device *,
struct fixed_phy_status *));
#else
-static inline int fixed_phy_add(unsigned int irq, int phy_id,
- struct fixed_phy_status *status)
+static inline int fixed_phy_add(int phy_id,
+ const struct fixed_phy_status *status)
{
return -ENODEV;
}
-static inline struct phy_device *fixed_phy_register(unsigned int irq,
- struct fixed_phy_status *status,
- struct device_node *np)
-{
- return ERR_PTR(-ENODEV);
-}
-
static inline struct phy_device *
-fixed_phy_register_with_gpiod(unsigned int irq,
- struct fixed_phy_status *status,
- struct gpio_desc *gpiod)
+fixed_phy_register(const struct fixed_phy_status *status,
+ struct device_node *np)
{
return ERR_PTR(-ENODEV);
}
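With the irq parameter dropped and the status made const, registering a fixed PHY reduces to the sketch below (the example_* names are illustrative):

#include <linux/phy_fixed.h>

static const struct fixed_phy_status example_status = {
	.link	= 1,
	.speed	= 1000,
	.duplex	= 1,
};

static struct phy_device *example_register(struct device_node *np)
{
	/* new signature: no irq argument, const status */
	return fixed_phy_register(&example_status, np);
}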
diff --git a/include/linux/pid.h b/include/linux/pid.h
index 311ecebd7d56..453ae6d8a68d 100644
--- a/include/linux/pid.h
+++ b/include/linux/pid.h
@@ -77,7 +77,7 @@ struct file;
struct pid *pidfd_pid(const struct file *file);
struct pid *pidfd_get_pid(unsigned int fd, unsigned int *flags);
struct task_struct *pidfd_get_task(int pidfd, unsigned int *flags);
-int pidfd_prepare(struct pid *pid, unsigned int flags, struct file **ret);
+int pidfd_prepare(struct pid *pid, unsigned int flags, struct file **ret_file);
void do_notify_pidfd(struct task_struct *task);
static inline struct pid *get_pid(struct pid *pid)
diff --git a/include/linux/pidfs.h b/include/linux/pidfs.h
index 05e6f8f4a026..77e7db194914 100644
--- a/include/linux/pidfs.h
+++ b/include/linux/pidfs.h
@@ -2,11 +2,19 @@
#ifndef _LINUX_PID_FS_H
#define _LINUX_PID_FS_H
+struct coredump_params;
+
struct file *pidfs_alloc_file(struct pid *pid, unsigned int flags);
void __init pidfs_init(void);
void pidfs_add_pid(struct pid *pid);
void pidfs_remove_pid(struct pid *pid);
void pidfs_exit(struct task_struct *tsk);
+#ifdef CONFIG_COREDUMP
+void pidfs_coredump(const struct coredump_params *cprm);
+#endif
extern const struct dentry_operations pidfs_dentry_operations;
+int pidfs_register_pid(struct pid *pid);
+void pidfs_get_pid(struct pid *pid);
+void pidfs_put_pid(struct pid *pid);
#endif /* _LINUX_PID_FS_H */
diff --git a/include/linux/platform_data/mlxreg.h b/include/linux/platform_data/mlxreg.h
index 0b9f81a6f753..f6cca7a035c7 100644
--- a/include/linux/platform_data/mlxreg.h
+++ b/include/linux/platform_data/mlxreg.h
@@ -209,7 +209,7 @@ struct mlxreg_core_platform_data {
* @items: same type components with the hotplug capability;
* @irq: platform interrupt number;
* @regmap: register map of parent device;
- * @counter: number of the components with the hotplug capability;
+ * @count: number of the components with the hotplug capability;
* @cell: location of top aggregation interrupt register;
* @mask: top aggregation interrupt common mask;
* @cell_low: location of low aggregation interrupt register;
@@ -224,7 +224,7 @@ struct mlxreg_core_hotplug_platform_data {
struct mlxreg_core_item *items;
int irq;
void *regmap;
- int counter;
+ int count;
u32 cell;
u32 mask;
u32 cell_low;
diff --git a/include/linux/platform_data/x86/asus-wmi.h b/include/linux/platform_data/x86/asus-wmi.h
index 783e2a336861..8a515179113d 100644
--- a/include/linux/platform_data/x86/asus-wmi.h
+++ b/include/linux/platform_data/x86/asus-wmi.h
@@ -157,9 +157,28 @@
#define ASUS_WMI_DSTS_MAX_BRIGTH_MASK 0x0000FF00
#define ASUS_WMI_DSTS_LIGHTBAR_MASK 0x0000000F
+enum asus_ally_mcu_hack {
+ ASUS_WMI_ALLY_MCU_HACK_INIT,
+ ASUS_WMI_ALLY_MCU_HACK_ENABLED,
+ ASUS_WMI_ALLY_MCU_HACK_DISABLED,
+};
+
#if IS_REACHABLE(CONFIG_ASUS_WMI)
+void set_ally_mcu_hack(enum asus_ally_mcu_hack status);
+void set_ally_mcu_powersave(bool enabled);
+int asus_wmi_set_devstate(u32 dev_id, u32 ctrl_param, u32 *retval);
int asus_wmi_evaluate_method(u32 method_id, u32 arg0, u32 arg1, u32 *retval);
#else
+static inline void set_ally_mcu_hack(enum asus_ally_mcu_hack status)
+{
+}
+static inline void set_ally_mcu_powersave(bool enabled)
+{
+}
+static inline int asus_wmi_set_devstate(u32 dev_id, u32 ctrl_param, u32 *retval)
+{
+ return -ENODEV;
+}
static inline int asus_wmi_evaluate_method(u32 method_id, u32 arg0, u32 arg1,
u32 *retval)
{
diff --git a/include/linux/platform_data/x86/int3472.h b/include/linux/platform_data/x86/int3472.h
new file mode 100644
index 000000000000..78276a11c48d
--- /dev/null
+++ b/include/linux/platform_data/x86/int3472.h
@@ -0,0 +1,166 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Intel INT3472 ACPI camera sensor power-management support
+ *
+ * Author: Dan Scally <djrscally@gmail.com>
+ */
+
+#ifndef __PLATFORM_DATA_X86_INT3472_H
+#define __PLATFORM_DATA_X86_INT3472_H
+
+#include <linux/clk-provider.h>
+#include <linux/gpio/machine.h>
+#include <linux/leds.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/types.h>
+
+/* FIXME drop this once the I2C_DEV_NAME_FORMAT macro has been added to include/linux/i2c.h */
+#ifndef I2C_DEV_NAME_FORMAT
+#define I2C_DEV_NAME_FORMAT "i2c-%s"
+#endif
+
+/* PMIC GPIO Types */
+#define INT3472_GPIO_TYPE_RESET 0x00
+#define INT3472_GPIO_TYPE_POWERDOWN 0x01
+#define INT3472_GPIO_TYPE_POWER_ENABLE 0x0b
+#define INT3472_GPIO_TYPE_CLK_ENABLE 0x0c
+#define INT3472_GPIO_TYPE_PRIVACY_LED 0x0d
+#define INT3472_GPIO_TYPE_HANDSHAKE 0x12
+
+#define INT3472_PDEV_MAX_NAME_LEN 23
+#define INT3472_MAX_SENSOR_GPIOS 3
+#define INT3472_MAX_REGULATORS 3
+
+/* E.g. "avdd\0" */
+#define GPIO_SUPPLY_NAME_LENGTH 5
+/* 12 chars for acpi_dev_name() + "-", e.g. "ABCD1234:00-" */
+#define GPIO_REGULATOR_NAME_LENGTH (12 + GPIO_SUPPLY_NAME_LENGTH)
+/* lower- and upper-case mapping */
+#define GPIO_REGULATOR_SUPPLY_MAP_COUNT 2
+/*
+ * Ensure the GPIO is driven low/high for at least 2 ms before changing.
+ *
+ * 2 ms has been chosen because it is the minimum time ovXXXX sensors need to
+ * have their reset line driven logical high to properly register a reset.
+ */
+#define GPIO_REGULATOR_ENABLE_TIME (2 * USEC_PER_MSEC)
+#define GPIO_REGULATOR_OFF_ON_DELAY (2 * USEC_PER_MSEC)
+
+#define INT3472_LED_MAX_NAME_LEN 32
+
+#define CIO2_SENSOR_SSDB_MCLKSPEED_OFFSET 86
+
+#define INT3472_REGULATOR(_name, _ops, _enable_time, _off_on_delay) \
+ (const struct regulator_desc) { \
+ .name = _name, \
+ .type = REGULATOR_VOLTAGE, \
+ .ops = _ops, \
+ .owner = THIS_MODULE, \
+ .enable_time = _enable_time, \
+ .off_on_delay = _off_on_delay, \
+ }
+
+#define to_int3472_clk(hw) \
+ container_of(hw, struct int3472_clock, clk_hw)
+
+#define to_int3472_device(clk) \
+ container_of(clk, struct int3472_discrete_device, clock)
+
+struct acpi_device;
+struct dmi_system_id;
+struct i2c_client;
+struct platform_device;
+
+struct int3472_cldb {
+ u8 version;
+ /*
+ * control logic type
+ * 0: UNKNOWN
+ * 1: DISCRETE(CRD-D)
+ * 2: PMIC TPS68470
+ * 3: PMIC uP6641
+ */
+ u8 control_logic_type;
+ u8 control_logic_id;
+ u8 sensor_card_sku;
+ u8 reserved[10];
+ u8 clock_source;
+ u8 reserved2[17];
+};
+
+struct int3472_discrete_quirks {
+ /* For models where AVDD GPIO is shared between sensors */
+ const char *avdd_second_sensor;
+};
+
+struct int3472_gpio_regulator {
+ /* SUPPLY_MAP_COUNT * 2 to make room for second sensor mappings */
+ struct regulator_consumer_supply supply_map[GPIO_REGULATOR_SUPPLY_MAP_COUNT * 2];
+ char supply_name_upper[GPIO_SUPPLY_NAME_LENGTH];
+ char regulator_name[GPIO_REGULATOR_NAME_LENGTH];
+ struct gpio_desc *ena_gpio;
+ struct regulator_dev *rdev;
+ struct regulator_desc rdesc;
+};
+
+struct int3472_discrete_device {
+ struct acpi_device *adev;
+ struct device *dev;
+ struct acpi_device *sensor;
+ const char *sensor_name;
+
+ struct int3472_gpio_regulator regulators[INT3472_MAX_REGULATORS];
+
+ struct int3472_clock {
+ struct clk *clk;
+ struct clk_hw clk_hw;
+ struct clk_lookup *cl;
+ struct gpio_desc *ena_gpio;
+ u32 frequency;
+ u8 imgclk_index;
+ } clock;
+
+ struct int3472_pled {
+ struct led_classdev classdev;
+ struct led_lookup_data lookup;
+ char name[INT3472_LED_MAX_NAME_LEN];
+ struct gpio_desc *gpio;
+ } pled;
+
+ struct int3472_discrete_quirks quirks;
+
+ unsigned int ngpios; /* how many GPIOs have we seen */
+ unsigned int n_sensor_gpios; /* how many have we mapped to sensor */
+ unsigned int n_regulator_gpios; /* how many have we mapped to a regulator */
+ struct gpiod_lookup_table gpios;
+};
+
+extern const struct dmi_system_id skl_int3472_discrete_quirks[];
+
+union acpi_object *skl_int3472_get_acpi_buffer(struct acpi_device *adev,
+ char *id);
+int skl_int3472_fill_cldb(struct acpi_device *adev, struct int3472_cldb *cldb);
+int skl_int3472_get_sensor_adev_and_name(struct device *dev,
+ struct acpi_device **sensor_adev_ret,
+ const char **name_ret);
+
+int int3472_discrete_parse_crs(struct int3472_discrete_device *int3472);
+void int3472_discrete_cleanup(struct int3472_discrete_device *int3472);
+
+int skl_int3472_register_gpio_clock(struct int3472_discrete_device *int3472,
+ struct gpio_desc *gpio);
+int skl_int3472_register_dsm_clock(struct int3472_discrete_device *int3472);
+void skl_int3472_unregister_clock(struct int3472_discrete_device *int3472);
+
+int skl_int3472_register_regulator(struct int3472_discrete_device *int3472,
+ struct gpio_desc *gpio,
+ unsigned int enable_time,
+ const char *supply_name,
+ const char *second_sensor);
+void skl_int3472_unregister_regulator(struct int3472_discrete_device *int3472);
+
+int skl_int3472_register_pled(struct int3472_discrete_device *int3472, struct gpio_desc *gpio);
+void skl_int3472_unregister_pled(struct int3472_discrete_device *int3472);
+
+#endif
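As a usage sketch for the INT3472_REGULATOR() compound-literal helper in the new header above (the ops variable is a hypothetical placeholder):

#include <linux/module.h>
#include <linux/platform_data/x86/int3472.h>

static const struct regulator_ops example_gpio_regulator_ops; /* hypothetical */

static void example_fill_rdesc(struct int3472_gpio_regulator *reg)
{
	/* the macro expands to a const struct regulator_desc literal */
	reg->rdesc = INT3472_REGULATOR(reg->regulator_name,
				       &example_gpio_regulator_ops,
				       GPIO_REGULATOR_ENABLE_TIME,
				       GPIO_REGULATOR_OFF_ON_DELAY);
}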
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index d56a78af4af1..0b18160901a2 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -142,6 +142,8 @@ struct genpd_governor_data {
bool max_off_time_changed;
ktime_t next_wakeup;
ktime_t next_hrtimer;
+ ktime_t last_enter;
+ bool reflect_residency;
bool cached_power_down_ok;
bool cached_power_down_state_idx;
};
@@ -153,6 +155,8 @@ struct genpd_power_state {
s64 residency_ns;
u64 usage;
u64 rejected;
+ u64 above;
+ u64 below;
struct fwnode_handle *fwnode;
u64 idle_time;
void *data;
@@ -285,6 +289,8 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
int pm_genpd_init(struct generic_pm_domain *genpd,
struct dev_power_governor *gov, bool is_off);
int pm_genpd_remove(struct generic_pm_domain *genpd);
+void pm_genpd_inc_rejected(struct generic_pm_domain *genpd,
+ unsigned int state_idx);
struct device *dev_to_genpd_dev(struct device *dev);
int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state);
int dev_pm_genpd_add_notifier(struct device *dev, struct notifier_block *nb);
@@ -336,6 +342,10 @@ static inline int pm_genpd_remove(struct generic_pm_domain *genpd)
return -EOPNOTSUPP;
}
+static inline void pm_genpd_inc_rejected(struct generic_pm_domain *genpd,
+ unsigned int state_idx)
+{ }
+
static inline struct device *dev_to_genpd_dev(struct device *dev)
{
return ERR_PTR(-EOPNOTSUPP);
diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h
index c247317aae38..cf477beae4bb 100644
--- a/include/linux/pm_opp.h
+++ b/include/linux/pm_opp.h
@@ -11,6 +11,7 @@
#ifndef __LINUX_OPP_H__
#define __LINUX_OPP_H__
+#include <linux/cleanup.h>
#include <linux/energy_model.h>
#include <linux/err.h>
#include <linux/notifier.h>
@@ -100,7 +101,7 @@ struct dev_pm_opp_data {
#if defined(CONFIG_PM_OPP)
struct opp_table *dev_pm_opp_get_opp_table(struct device *dev);
-void dev_pm_opp_get_opp_table_ref(struct opp_table *opp_table);
+struct opp_table *dev_pm_opp_get_opp_table_ref(struct opp_table *opp_table);
void dev_pm_opp_put_opp_table(struct opp_table *opp_table);
unsigned long dev_pm_opp_get_bw(struct dev_pm_opp *opp, bool peak, int index);
@@ -161,7 +162,7 @@ struct dev_pm_opp *dev_pm_opp_find_bw_ceil(struct device *dev,
struct dev_pm_opp *dev_pm_opp_find_bw_floor(struct device *dev,
unsigned int *bw, int index);
-void dev_pm_opp_get(struct dev_pm_opp *opp);
+struct dev_pm_opp *dev_pm_opp_get(struct dev_pm_opp *opp);
void dev_pm_opp_put(struct dev_pm_opp *opp);
int dev_pm_opp_add_dynamic(struct device *dev, struct dev_pm_opp_data *opp);
@@ -196,6 +197,7 @@ int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask)
void dev_pm_opp_remove_table(struct device *dev);
void dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask);
int dev_pm_opp_sync_regulators(struct device *dev);
+
#else
static inline struct opp_table *dev_pm_opp_get_opp_table(struct device *dev)
{
@@ -207,7 +209,10 @@ static inline struct opp_table *dev_pm_opp_get_opp_table_indexed(struct device *
return ERR_PTR(-EOPNOTSUPP);
}
-static inline void dev_pm_opp_get_opp_table_ref(struct opp_table *opp_table) {}
+static inline struct opp_table *dev_pm_opp_get_opp_table_ref(struct opp_table *opp_table)
+{
+ return opp_table;
+}
static inline void dev_pm_opp_put_opp_table(struct opp_table *opp_table) {}
@@ -345,7 +350,10 @@ static inline struct dev_pm_opp *dev_pm_opp_find_bw_floor(struct device *dev,
return ERR_PTR(-EOPNOTSUPP);
}
-static inline void dev_pm_opp_get(struct dev_pm_opp *opp) {}
+static inline struct dev_pm_opp *dev_pm_opp_get(struct dev_pm_opp *opp)
+{
+ return opp;
+}
static inline void dev_pm_opp_put(struct dev_pm_opp *opp) {}
@@ -573,6 +581,12 @@ static inline int dev_pm_opp_of_find_icc_paths(struct device *dev, struct opp_ta
}
#endif
+/* Scope based cleanup macro for OPP reference counting */
+DEFINE_FREE(put_opp, struct dev_pm_opp *, if (!IS_ERR_OR_NULL(_T)) dev_pm_opp_put(_T))
+
+/* Scope based cleanup macro for OPP table reference counting */
+DEFINE_FREE(put_opp_table, struct opp_table *, if (!IS_ERR_OR_NULL(_T)) dev_pm_opp_put_opp_table(_T))
+
/* OPP Configuration helpers */
static inline int dev_pm_opp_add(struct device *dev, unsigned long freq,
@@ -704,4 +718,14 @@ static inline unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
return dev_pm_opp_get_freq_indexed(opp, 0);
}
+static inline int dev_pm_opp_set_level(struct device *dev, unsigned int level)
+{
+ struct dev_pm_opp *opp __free(put_opp) = dev_pm_opp_find_level_exact(dev, level);
+
+ if (IS_ERR(opp))
+ return PTR_ERR(opp);
+
+ return dev_pm_opp_set_opp(dev, opp);
+}
+
#endif /* __LINUX_OPP_H__ */
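The DEFINE_FREE() hooks above enable scope-based reference dropping in callers, mirroring what dev_pm_opp_set_level() now does in this header. A minimal sketch for a frequency-based lookup (example_set_floor is hypothetical):

#include <linux/pm_opp.h>

static int example_set_floor(struct device *dev, unsigned long *freq)
{
	/* the reference is dropped automatically when opp leaves scope */
	struct dev_pm_opp *opp __free(put_opp) = dev_pm_opp_find_freq_floor(dev, freq);

	if (IS_ERR(opp))
		return PTR_ERR(opp);

	return dev_pm_opp_set_opp(dev, opp);
}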
diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
index 7fb5a459847e..756b842dcd30 100644
--- a/include/linux/pm_runtime.h
+++ b/include/linux/pm_runtime.h
@@ -96,7 +96,9 @@ extern void pm_runtime_new_link(struct device *dev);
extern void pm_runtime_drop_link(struct device_link *link);
extern void pm_runtime_release_supplier(struct device_link *link);
+int devm_pm_runtime_set_active_enabled(struct device *dev);
extern int devm_pm_runtime_enable(struct device *dev);
+int devm_pm_runtime_get_noresume(struct device *dev);
/**
* pm_suspend_ignore_children - Set runtime PM behavior regarding children.
@@ -294,7 +296,9 @@ static inline bool pm_runtime_blocked(struct device *dev) { return true; }
static inline void pm_runtime_allow(struct device *dev) {}
static inline void pm_runtime_forbid(struct device *dev) {}
+static inline int devm_pm_runtime_set_active_enabled(struct device *dev) { return 0; }
static inline int devm_pm_runtime_enable(struct device *dev) { return 0; }
+static inline int devm_pm_runtime_get_noresume(struct device *dev) { return 0; }
static inline void pm_suspend_ignore_children(struct device *dev, bool enable) {}
static inline void pm_runtime_get_noresume(struct device *dev) {}
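A probe-path sketch for the two new device-managed helpers; the devres actions undo both automatically on driver unbind (example_probe is hypothetical):

#include <linux/pm_runtime.h>

static int example_probe(struct device *dev)
{
	int ret;

	/* marks the device active and enables runtime PM, devres-managed */
	ret = devm_pm_runtime_set_active_enabled(dev);
	if (ret)
		return ret;

	/* holds a usage reference for the device's bound lifetime */
	return devm_pm_runtime_get_noresume(dev);
}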
diff --git a/include/linux/pm_wakeup.h b/include/linux/pm_wakeup.h
index 51e0e8dd5f9e..c838b4a30f87 100644
--- a/include/linux/pm_wakeup.h
+++ b/include/linux/pm_wakeup.h
@@ -95,10 +95,6 @@ static inline void device_set_wakeup_path(struct device *dev)
}
/* drivers/base/power/wakeup.c */
-extern struct wakeup_source *wakeup_source_create(const char *name);
-extern void wakeup_source_destroy(struct wakeup_source *ws);
-extern void wakeup_source_add(struct wakeup_source *ws);
-extern void wakeup_source_remove(struct wakeup_source *ws);
extern struct wakeup_source *wakeup_source_register(struct device *dev,
const char *name);
extern void wakeup_source_unregister(struct wakeup_source *ws);
@@ -129,17 +125,6 @@ static inline bool device_can_wakeup(struct device *dev)
return dev->power.can_wakeup;
}
-static inline struct wakeup_source *wakeup_source_create(const char *name)
-{
- return NULL;
-}
-
-static inline void wakeup_source_destroy(struct wakeup_source *ws) {}
-
-static inline void wakeup_source_add(struct wakeup_source *ws) {}
-
-static inline void wakeup_source_remove(struct wakeup_source *ws) {}
-
static inline struct wakeup_source *wakeup_source_register(struct device *dev,
const char *name)
{
diff --git a/include/linux/poison.h b/include/linux/poison.h
index 331a9a996fa8..8ca2235f78d5 100644
--- a/include/linux/poison.h
+++ b/include/linux/poison.h
@@ -70,6 +70,10 @@
#define KEY_DESTROY 0xbd
/********** net/core/page_pool.c **********/
+/*
+ * page_pool uses additional free bits within this value to store data, see the
+ * definition of PP_DMA_INDEX_MASK in mm.h
+ */
#define PP_SIGNATURE (0x40 + POISON_POINTER_DELTA)
/********** net/core/skbuff.c **********/
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index 888824592953..7803edaa8ff8 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -71,6 +71,8 @@ enum {
POWER_SUPPLY_HEALTH_COOL,
POWER_SUPPLY_HEALTH_HOT,
POWER_SUPPLY_HEALTH_NO_BATTERY,
+ POWER_SUPPLY_HEALTH_BLOWN_FUSE,
+ POWER_SUPPLY_HEALTH_CELL_IMBALANCE,
};
enum {
@@ -212,6 +214,7 @@ enum power_supply_usb_type {
enum power_supply_charge_behaviour {
POWER_SUPPLY_CHARGE_BEHAVIOUR_AUTO = 0,
POWER_SUPPLY_CHARGE_BEHAVIOUR_INHIBIT_CHARGE,
+ POWER_SUPPLY_CHARGE_BEHAVIOUR_INHIBIT_CHARGE_AWAKE,
POWER_SUPPLY_CHARGE_BEHAVIOUR_FORCE_DISCHARGE,
};
@@ -288,6 +291,7 @@ struct power_supply_desc {
struct power_supply_ext {
const char *const name;
u8 charge_behaviours;
+ u32 charge_types;
const enum power_supply_property *properties;
size_t num_properties;
diff --git a/include/linux/psp-sev.h b/include/linux/psp-sev.h
index f3cad182d4ef..0b3a36bdaa90 100644
--- a/include/linux/psp-sev.h
+++ b/include/linux/psp-sev.h
@@ -954,6 +954,7 @@ int sev_do_cmd(int cmd, void *data, int *psp_ret);
void *psp_copy_user_blob(u64 uaddr, u32 len);
void *snp_alloc_firmware_page(gfp_t mask);
void snp_free_firmware_page(void *addr);
+void sev_platform_shutdown(void);
#else /* !CONFIG_CRYPTO_DEV_SP_PSP */
@@ -988,6 +989,8 @@ static inline void *snp_alloc_firmware_page(gfp_t mask)
static inline void snp_free_firmware_page(void *addr) { }
+static inline void sev_platform_shutdown(void) { }
+
#endif /* CONFIG_CRYPTO_DEV_SP_PSP */
#endif /* __PSP_SEV_H__ */
diff --git a/include/linux/ptp_clock_kernel.h b/include/linux/ptp_clock_kernel.h
index 0d68d09bedd1..eced7e9bf69a 100644
--- a/include/linux/ptp_clock_kernel.h
+++ b/include/linux/ptp_clock_kernel.h
@@ -68,6 +68,22 @@ struct ptp_system_timestamp {
* @n_per_out: The number of programmable periodic signals.
* @n_pins: The number of programmable pins.
* @pps: Indicates whether the clock supports a PPS callback.
+ *
+ * @supported_perout_flags: The set of flags the driver supports for the
+ * PTP_PEROUT_REQUEST ioctl. The PTP core will
+ * reject a request with any flag not specified
+ * here.
+ *
+ * @supported_extts_flags: The set of flags the driver supports for the
+ * PTP_EXTTS_REQUEST ioctl. The PTP core will use
+ * this list to reject unsupported requests.
+ * PTP_ENABLE_FEATURE is assumed and does not need to
+ * be included. If PTP_STRICT_FLAGS is *not* set,
+ * then both PTP_RISING_EDGE and PTP_FALLING_EDGE
+ * will be assumed. Note that PTP_STRICT_FLAGS must
* be set if the driver wants to honor
+ * PTP_EXTTS_REQUEST2 and any future flags.
+ *
* @pin_config: Array of length 'n_pins'. If the number of
* programmable pins is nonzero, then drivers must
* allocate and initialize this array.
@@ -174,6 +190,8 @@ struct ptp_clock_info {
int n_per_out;
int n_pins;
int pps;
+ unsigned int supported_perout_flags;
+ unsigned int supported_extts_flags;
struct ptp_pin_desc *pin_config;
int (*adjfine)(struct ptp_clock_info *ptp, long scaled_ppm);
int (*adjphase)(struct ptp_clock_info *ptp, s32 phase);
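A driver advertises its accepted ioctl flags through the two new fields; anything outside the sets is rejected by the core before it reaches the driver. A sketch (the flag choice and example_ name are illustrative):

#include <linux/module.h>
#include <linux/ptp_clock_kernel.h>

static const struct ptp_clock_info example_ptp_info = {
	.owner			= THIS_MODULE,
	.name			= "example",
	.n_ext_ts		= 1,
	.n_per_out		= 1,
	/* honor edge selection strictly, including PTP_EXTTS_REQUEST2 */
	.supported_extts_flags	= PTP_RISING_EDGE | PTP_FALLING_EDGE |
				  PTP_STRICT_FLAGS,
	.supported_perout_flags	= PTP_PEROUT_DUTY_CYCLE,
	/* .enable, .adjfine, ... as usual */
};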
diff --git a/include/linux/pwm.h b/include/linux/pwm.h
index 9ece4e5d3815..63a17d2b4ec8 100644
--- a/include/linux/pwm.h
+++ b/include/linux/pwm.h
@@ -218,6 +218,8 @@ static inline void pwm_init_state(const struct pwm_device *pwm,
*
* pwm_get_state(pwm, &state);
* duty = pwm_get_relative_duty_cycle(&state, 100);
+ *
+ * Returns: rounded relative duty cycle multiplied by @scale
*/
static inline unsigned int
pwm_get_relative_duty_cycle(const struct pwm_state *state, unsigned int scale)
@@ -244,8 +246,8 @@ pwm_get_relative_duty_cycle(const struct pwm_state *state, unsigned int scale)
* pwm_set_relative_duty_cycle(&state, 50, 100);
* pwm_apply_might_sleep(pwm, &state);
*
- * This functions returns -EINVAL if @duty_cycle and/or @scale are
- * inconsistent (@scale == 0 or @duty_cycle > @scale).
+ * Returns: 0 on success or ``-EINVAL`` if @duty_cycle and/or @scale are
+ * inconsistent (@scale == 0 or @duty_cycle > @scale)
*/
static inline int
pwm_set_relative_duty_cycle(struct pwm_state *state, unsigned int duty_cycle,
@@ -351,7 +353,7 @@ struct pwm_chip {
* pwmchip_supports_waveform() - checks if the given chip supports waveform callbacks
* @chip: The pwm_chip to test
*
- * Returns true iff the pwm chip support the waveform functions like
+ * Returns: true iff the pwm chip supports the waveform functions like
* pwm_set_waveform_might_sleep() and pwm_round_waveform_might_sleep()
*/
static inline bool pwmchip_supports_waveform(struct pwm_chip *chip)
@@ -369,7 +371,7 @@ static inline struct device *pwmchip_parent(const struct pwm_chip *chip)
return chip->dev.parent;
}
-static inline void *pwmchip_get_drvdata(struct pwm_chip *chip)
+static inline void *pwmchip_get_drvdata(const struct pwm_chip *chip)
{
return dev_get_drvdata(&chip->dev);
}
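A caller-side sketch of the waveform-capability check documented above (example_apply is hypothetical):

#include <linux/errno.h>
#include <linux/pwm.h>

static int example_apply(struct pwm_device *pwm,
			 const struct pwm_waveform *wf)
{
	/* only chips implementing the waveform callbacks qualify */
	if (!pwmchip_supports_waveform(pwm->chip))
		return -EOPNOTSUPP;

	return pwm_set_waveform_might_sleep(pwm, wf, false);
}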
diff --git a/include/linux/ratelimit.h b/include/linux/ratelimit.h
index b17e0cd0a30c..7aaad158ee37 100644
--- a/include/linux/ratelimit.h
+++ b/include/linux/ratelimit.h
@@ -22,16 +22,43 @@ static inline void ratelimit_default_init(struct ratelimit_state *rs)
DEFAULT_RATELIMIT_BURST);
}
+static inline void ratelimit_state_inc_miss(struct ratelimit_state *rs)
+{
+ atomic_inc(&rs->missed);
+}
+
+static inline int ratelimit_state_get_miss(struct ratelimit_state *rs)
+{
+ return atomic_read(&rs->missed);
+}
+
+static inline int ratelimit_state_reset_miss(struct ratelimit_state *rs)
+{
+ return atomic_xchg_relaxed(&rs->missed, 0);
+}
+
+static inline void ratelimit_state_reset_interval(struct ratelimit_state *rs, int interval_init)
+{
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&rs->lock, flags);
+ rs->interval = interval_init;
+ rs->flags &= ~RATELIMIT_INITIALIZED;
+ atomic_set(&rs->rs_n_left, rs->burst);
+ ratelimit_state_reset_miss(rs);
+ raw_spin_unlock_irqrestore(&rs->lock, flags);
+}
+
static inline void ratelimit_state_exit(struct ratelimit_state *rs)
{
+ int m;
+
if (!(rs->flags & RATELIMIT_MSG_ON_RELEASE))
return;
- if (rs->missed) {
- pr_warn("%s: %d output lines suppressed due to ratelimiting\n",
- current->comm, rs->missed);
- rs->missed = 0;
- }
+ m = ratelimit_state_reset_miss(rs);
+ if (m)
+ pr_warn("%s: %d output lines suppressed due to ratelimiting\n", current->comm, m);
}
static inline void
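The miss counter is now atomic with dedicated accessors, so callers can report and re-arm without touching the fields directly. A sketch (the example_* names are illustrative):

#include <linux/printk.h>
#include <linux/ratelimit.h>

static DEFINE_RATELIMIT_STATE(example_rs, DEFAULT_RATELIMIT_INTERVAL,
			      DEFAULT_RATELIMIT_BURST);

static void example_retune(int new_interval)
{
	/* atomically fetch-and-clear the suppressed-message count */
	int missed = ratelimit_state_reset_miss(&example_rs);

	if (missed)
		pr_warn("example: %d messages suppressed\n", missed);

	/* restart the window with a new interval */
	ratelimit_state_reset_interval(&example_rs, new_interval);
}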
diff --git a/include/linux/ratelimit_types.h b/include/linux/ratelimit_types.h
index 765232ce0b5e..b19c4354540a 100644
--- a/include/linux/ratelimit_types.h
+++ b/include/linux/ratelimit_types.h
@@ -11,14 +11,15 @@
/* issue num suppressed message on exit */
#define RATELIMIT_MSG_ON_RELEASE BIT(0)
+#define RATELIMIT_INITIALIZED BIT(1)
struct ratelimit_state {
raw_spinlock_t lock; /* protect the state */
int interval;
int burst;
- int printed;
- int missed;
+ atomic_t rs_n_left;
+ atomic_t missed;
unsigned int flags;
unsigned long begin;
};
diff --git a/include/linux/rcuref.h b/include/linux/rcuref.h
index 6322d8c1c6b4..2fb2af6d9824 100644
--- a/include/linux/rcuref.h
+++ b/include/linux/rcuref.h
@@ -30,7 +30,11 @@ static inline void rcuref_init(rcuref_t *ref, unsigned int cnt)
* rcuref_read - Read the number of held reference counts of a rcuref
* @ref: Pointer to the reference count
*
- * Return: The number of held references (0 ... N)
+ * Return: The number of held references (0 ... N). The value 0 does not
+ * indicate that it is safe to schedule the object, protected by this reference
+ * counter, for deconstruction.
+ * If you want to know if the reference counter has been marked DEAD (as
+ * signaled by rcuref_put()), please use rcuref_is_dead().
*/
static inline unsigned int rcuref_read(rcuref_t *ref)
{
@@ -40,6 +44,22 @@ static inline unsigned int rcuref_read(rcuref_t *ref)
return c >= RCUREF_RELEASED ? 0 : c + 1;
}
+/**
+ * rcuref_is_dead - Check if the rcuref has been already marked dead
+ * @ref: Pointer to the reference count
+ *
+ * Return: True if the object has been marked DEAD. This signals that a previous
+ * invocation of rcuref_put() returned true on this reference counter meaning
+ * the protected object can safely be scheduled for deconstruction.
+ * Otherwise, returns false.
+ */
+static inline bool rcuref_is_dead(rcuref_t *ref)
+{
+ unsigned int c = atomic_read(&ref->refcnt);
+
+ return (c >= RCUREF_RELEASED) && (c < RCUREF_NOREF);
+}
+
extern __must_check bool rcuref_get_slowpath(rcuref_t *ref);
/**
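Per the new kerneldoc, a zero read count is not a license to free; only the DEAD marking left behind by the final rcuref_put() is. A minimal sketch (struct example_obj is hypothetical):

#include <linux/rcuref.h>

struct example_obj {
	rcuref_t ref;
};

static bool example_teardown_allowed(struct example_obj *obj)
{
	/* true only after rcuref_put() dropped the last reference */
	return rcuref_is_dead(&obj->ref);
}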
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index d17c5ea3d55d..02b83f5499b8 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -1641,6 +1641,8 @@ struct regmap_irq_chip_data;
* @ack_invert: Inverted ack register: cleared bits for ack.
* @clear_ack: Use this to set 1 and 0 or vice-versa to clear interrupts.
* @status_invert: Inverted status register: cleared bits are active interrupts.
+ * @status_is_level: Status register is actually the signal level: XOR status
+ * register with previous value to get active interrupts.
* @wake_invert: Inverted wake register: cleared bits are wake enabled.
* @type_in_mask: Use the mask registers for controlling irq type. Use this if
* the hardware provides separate bits for rising/falling edge
@@ -1704,6 +1706,7 @@ struct regmap_irq_chip {
unsigned int ack_invert:1;
unsigned int clear_ack:1;
unsigned int status_invert:1;
+ unsigned int status_is_level:1;
unsigned int wake_invert:1;
unsigned int type_in_mask:1;
unsigned int clear_on_unmask:1;
diff --git a/include/linux/regulator/max8952.h b/include/linux/regulator/max8952.h
index 8712c091abf0..61dcd8e00a2f 100644
--- a/include/linux/regulator/max8952.h
+++ b/include/linux/regulator/max8952.h
@@ -2,7 +2,7 @@
/*
* max8952.h - Voltage regulation for the Maxim 8952
*
- * Copyright (C) 2010 Samsung Electrnoics
+ * Copyright (C) 2010 Samsung Electronics
* MyungJoo Ham <myungjoo.ham@samsung.com>
*/
diff --git a/include/linux/regulator/pca9450.h b/include/linux/regulator/pca9450.h
index b427b5873de1..85b4fecc10d8 100644
--- a/include/linux/regulator/pca9450.h
+++ b/include/linux/regulator/pca9450.h
@@ -35,6 +35,8 @@ enum {
PCA9450_DVS_LEVEL_MAX,
};
+#define PCA9450_RESTART_HANDLER_PRIORITY 130
+
#define PCA9450_BUCK1_VOLTAGE_NUM 0x80
#define PCA9450_BUCK2_VOLTAGE_NUM 0x80
#define PCA9450_BUCK3_VOLTAGE_NUM 0x80
@@ -235,4 +237,7 @@ enum {
#define I2C_LT_ON_RUN 0x02
#define I2C_LT_FORCE_ENABLE 0x03
+/* PCA9450_REG_SW_RST command */
+#define SW_RST_COMMAND 0x14
+
#endif /* __LINUX_REG_PCA9450_H__ */
diff --git a/include/linux/resctrl.h b/include/linux/resctrl.h
index 880351ca3dfc..9ba771f2ddea 100644
--- a/include/linux/resctrl.h
+++ b/include/linux/resctrl.h
@@ -8,6 +8,10 @@
#include <linux/pid.h>
#include <linux/resctrl_types.h>
+#ifdef CONFIG_ARCH_HAS_CPU_RESCTRL
+#include <asm/resctrl.h>
+#endif
+
/* CLOSID, RMID value used by the default control group */
#define RESCTRL_RESERVED_CLOSID 0
#define RESCTRL_RESERVED_RMID 0
@@ -44,6 +48,16 @@ int proc_resctrl_show(struct seq_file *m,
for_each_rdt_resource((r)) \
if ((r)->mon_capable)
+enum resctrl_res_level {
+ RDT_RESOURCE_L3,
+ RDT_RESOURCE_L2,
+ RDT_RESOURCE_MBA,
+ RDT_RESOURCE_SMBA,
+
+ /* Must be the last */
+ RDT_NUM_RESOURCES,
+};
+
/**
* enum resctrl_conf_type - The type of configuration.
* @CDP_NONE: No prioritisation, both code and data are controlled or monitored.
@@ -358,7 +372,7 @@ u32 resctrl_arch_get_num_closid(struct rdt_resource *r);
u32 resctrl_arch_system_num_rmid_idx(void);
int resctrl_arch_update_domains(struct rdt_resource *r, u32 closid);
-__init bool resctrl_arch_is_evt_configurable(enum resctrl_event_id evt);
+bool resctrl_arch_is_evt_configurable(enum resctrl_event_id evt);
/**
* resctrl_arch_mon_event_config_write() - Write the config for an event.
@@ -399,6 +413,9 @@ static inline u32 resctrl_get_config_index(u32 closid,
}
}
+bool resctrl_arch_get_cdp_enabled(enum resctrl_res_level l);
+int resctrl_arch_set_cdp_enabled(enum resctrl_res_level l, bool enable);
+
/*
* Update the ctrl_val and apply this config right now.
* Must be called on one of the domain's CPUs.
@@ -514,7 +531,20 @@ void resctrl_arch_reset_all_ctrls(struct rdt_resource *r);
extern unsigned int resctrl_rmid_realloc_threshold;
extern unsigned int resctrl_rmid_realloc_limit;
-int __init resctrl_init(void);
-void __exit resctrl_exit(void);
-
+int resctrl_init(void);
+void resctrl_exit(void);
+
+#ifdef CONFIG_RESCTRL_FS_PSEUDO_LOCK
+u64 resctrl_arch_get_prefetch_disable_bits(void);
+int resctrl_arch_pseudo_lock_fn(void *_plr);
+int resctrl_arch_measure_cycles_lat_fn(void *_plr);
+int resctrl_arch_measure_l2_residency(void *_plr);
+int resctrl_arch_measure_l3_residency(void *_plr);
+#else
+static inline u64 resctrl_arch_get_prefetch_disable_bits(void) { return 0; }
+static inline int resctrl_arch_pseudo_lock_fn(void *_plr) { return 0; }
+static inline int resctrl_arch_measure_cycles_lat_fn(void *_plr) { return 0; }
+static inline int resctrl_arch_measure_l2_residency(void *_plr) { return 0; }
+static inline int resctrl_arch_measure_l3_residency(void *_plr) { return 0; }
+#endif /* CONFIG_RESCTRL_FS_PSEUDO_LOCK */
#endif /* _RESCTRL_H */
diff --git a/include/linux/resctrl_types.h b/include/linux/resctrl_types.h
index f26450b3326b..a25fb9c4070d 100644
--- a/include/linux/resctrl_types.h
+++ b/include/linux/resctrl_types.h
@@ -7,6 +7,9 @@
#ifndef __LINUX_RESCTRL_TYPES_H
#define __LINUX_RESCTRL_TYPES_H
+#define MAX_MBA_BW 100u
+#define MBM_OVERFLOW_INTERVAL 1000
+
/* Reads to Local DRAM Memory */
#define READS_TO_LOCAL_MEM BIT(0)
@@ -31,16 +34,6 @@
/* Max event bits supported */
#define MAX_EVT_CONFIG_BITS GENMASK(6, 0)
-enum resctrl_res_level {
- RDT_RESOURCE_L3,
- RDT_RESOURCE_L2,
- RDT_RESOURCE_MBA,
- RDT_RESOURCE_SMBA,
-
- /* Must be the last */
- RDT_NUM_RESOURCES,
-};
-
/*
* Event IDs, the values match those used to program IA32_QM_EVTSEL before
* reading IA32_QM_CTR on RDT systems.
@@ -49,6 +42,9 @@ enum resctrl_event_id {
QOS_L3_OCCUP_EVENT_ID = 0x01,
QOS_L3_MBM_TOTAL_EVENT_ID = 0x02,
QOS_L3_MBM_LOCAL_EVENT_ID = 0x03,
+
+ /* Must be the last */
+ QOS_NUM_EVENTS,
};
#endif /* __LINUX_RESCTRL_TYPES_H */
diff --git a/include/linux/restart_block.h b/include/linux/restart_block.h
index 13f17676c5f4..7e50bbc94e47 100644
--- a/include/linux/restart_block.h
+++ b/include/linux/restart_block.h
@@ -26,7 +26,7 @@ struct restart_block {
unsigned long arch_data;
long (*fn)(struct restart_block *);
union {
- /* For futex_wait and futex_wait_requeue_pi */
+ /* For futex_wait() */
struct {
u32 __user *uaddr;
u32 val;
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index 56e27263acf8..cd7f0ae26615 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -192,6 +192,7 @@ void ring_buffer_record_off(struct trace_buffer *buffer);
void ring_buffer_record_on(struct trace_buffer *buffer);
bool ring_buffer_record_is_on(struct trace_buffer *buffer);
bool ring_buffer_record_is_set_on(struct trace_buffer *buffer);
+bool ring_buffer_record_is_on_cpu(struct trace_buffer *buffer, int cpu);
void ring_buffer_record_disable_cpu(struct trace_buffer *buffer, int cpu);
void ring_buffer_record_enable_cpu(struct trace_buffer *buffer, int cpu);
diff --git a/include/linux/sched.h b/include/linux/sched.h
index f96ac1982893..1f054f1f11b5 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -44,7 +44,6 @@
#include <linux/seqlock_types.h>
#include <linux/kcsan.h>
#include <linux/rv.h>
-#include <linux/livepatch_sched.h>
#include <linux/uidgid_types.h>
#include <linux/tracepoint-defs.h>
#include <asm/kmap_size.h>
@@ -1044,6 +1043,7 @@ struct task_struct {
/* delay due to memory thrashing */
unsigned in_thrashing:1;
#endif
+ unsigned in_nf_duplicate:1;
#ifdef CONFIG_PREEMPT_RT
struct netdev_xmit net_xmit;
#endif
@@ -1646,22 +1646,15 @@ struct task_struct {
struct user_event_mm *user_event_mm;
#endif
- /*
- * New fields for task_struct should be added above here, so that
- * they are included in the randomized portion of task_struct.
- */
- randomized_struct_fields_end
-
/* CPU-specific state of this task: */
struct thread_struct thread;
/*
- * WARNING: on x86, 'thread_struct' contains a variable-sized
- * structure. It *MUST* be at the end of 'task_struct'.
- *
- * Do not put anything below here!
+ * New fields for task_struct should be added above here, so that
+ * they are included in the randomized portion of task_struct.
*/
-};
+ randomized_struct_fields_end
+} __attribute__ ((aligned (64)));
#define TASK_REPORT_IDLE (TASK_REPORT + 1)
#define TASK_REPORT_MAX (TASK_REPORT_IDLE << 1)
@@ -2089,9 +2082,6 @@ extern int __cond_resched(void);
#if defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
-void sched_dynamic_klp_enable(void);
-void sched_dynamic_klp_disable(void);
-
DECLARE_STATIC_CALL(cond_resched, __cond_resched);
static __always_inline int _cond_resched(void)
@@ -2112,7 +2102,6 @@ static __always_inline int _cond_resched(void)
static inline int _cond_resched(void)
{
- klp_sched_try_switch();
return __cond_resched();
}
@@ -2122,7 +2111,6 @@ static inline int _cond_resched(void)
static inline int _cond_resched(void)
{
- klp_sched_try_switch();
return 0;
}
diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h
index 7b4301b7235f..198bb5cc1774 100644
--- a/include/linux/sched/topology.h
+++ b/include/linux/sched/topology.h
@@ -195,6 +195,8 @@ struct sched_domain_topology_level {
};
extern void __init set_sched_topology(struct sched_domain_topology_level *tl);
+extern void sched_update_asym_prefer_cpu(int cpu, int old_prio, int new_prio);
+
# define SD_INIT_NAME(type) .name = #type
@@ -223,6 +225,10 @@ static inline bool cpus_share_resources(int this_cpu, int that_cpu)
return true;
}
+static inline void sched_update_asym_prefer_cpu(int cpu, int old_prio, int new_prio)
+{
+}
+
#endif /* !CONFIG_SMP */
#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
diff --git a/include/linux/screen_info.h b/include/linux/screen_info.h
index 6a4a3cec4638..923d68e07679 100644
--- a/include/linux/screen_info.h
+++ b/include/linux/screen_info.h
@@ -126,8 +126,17 @@ static inline unsigned int screen_info_video_type(const struct screen_info *si)
return VIDEO_TYPE_CGA;
}
+static inline u32 __screen_info_vesapm_info_base(const struct screen_info *si)
+{
+ if (si->vesapm_seg < 0xc000)
+ return 0;
+ return (si->vesapm_seg << 4) + si->vesapm_off;
+}
+
ssize_t screen_info_resources(const struct screen_info *si, struct resource *r, size_t num);
+u32 __screen_info_lfb_bits_per_pixel(const struct screen_info *si);
+
#if defined(CONFIG_PCI)
void screen_info_apply_fixups(void);
struct pci_dev *screen_info_pci_dev(const struct screen_info *si);
diff --git a/include/linux/security.h b/include/linux/security.h
index cc9b54d95d22..dba349629229 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -563,7 +563,6 @@ int security_setselfattr(unsigned int attr, struct lsm_ctx __user *ctx,
int security_getprocattr(struct task_struct *p, int lsmid, const char *name,
char **value);
int security_setprocattr(int lsmid, const char *name, void *value, size_t size);
-int security_netlink_send(struct sock *sk, struct sk_buff *skb);
int security_ismaclabel(const char *name);
int security_secid_to_secctx(u32 secid, struct lsm_context *cp);
int security_lsmprop_to_secctx(struct lsm_prop *prop, struct lsm_context *cp);
@@ -1527,11 +1526,6 @@ static inline int security_setprocattr(int lsmid, char *name, void *value,
return -EINVAL;
}
-static inline int security_netlink_send(struct sock *sk, struct sk_buff *skb)
-{
- return 0;
-}
-
static inline int security_ismaclabel(const char *name)
{
return 0;
@@ -1629,6 +1623,7 @@ static inline int security_watch_key(struct key *key)
#ifdef CONFIG_SECURITY_NETWORK
+int security_netlink_send(struct sock *sk, struct sk_buff *skb);
int security_unix_stream_connect(struct sock *sock, struct sock *other, struct sock *newsk);
int security_unix_may_send(struct socket *sock, struct socket *other);
int security_socket_create(int family, int type, int protocol, int kern);
@@ -1684,6 +1679,11 @@ int security_sctp_assoc_established(struct sctp_association *asoc,
int security_mptcp_add_subflow(struct sock *sk, struct sock *ssk);
#else /* CONFIG_SECURITY_NETWORK */
+static inline int security_netlink_send(struct sock *sk, struct sk_buff *skb)
+{
+ return 0;
+}
+
static inline int security_unix_stream_connect(struct sock *sock,
struct sock *other,
struct sock *newsk)
diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index 0b273a7b9f01..5f03a39a26f7 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -104,10 +104,11 @@ static inline bool shmem_mapping(struct address_space *mapping)
return false;
}
#endif /* CONFIG_SHMEM */
-extern void shmem_unlock_mapping(struct address_space *mapping);
-extern struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
+void shmem_unlock_mapping(struct address_space *mapping);
+struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
pgoff_t index, gfp_t gfp_mask);
-extern void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end);
+int shmem_writeout(struct folio *folio, struct writeback_control *wbc);
+void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end);
int shmem_unuse(unsigned int type);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index b974a277975a..5520524c93bf 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -274,7 +274,6 @@
SKB_DATA_ALIGN(sizeof(struct sk_buff)) + \
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
-struct ahash_request;
struct net_device;
struct scatterlist;
struct pipe_inode_info;
@@ -481,9 +480,6 @@ enum {
/* generate software time stamp on packet tx completion */
SKBTX_COMPLETION_TSTAMP = 1 << 3,
- /* generate wifi status information (where possible) */
- SKBTX_WIFI_STATUS = 1 << 4,
-
/* determine hardware time stamp based on time or cycles */
SKBTX_HW_TSTAMP_NETDEV = 1 << 5,
@@ -1710,13 +1706,16 @@ static inline void skb_set_end_offset(struct sk_buff *skb, unsigned int offset)
extern const struct ubuf_info_ops msg_zerocopy_ubuf_ops;
struct ubuf_info *msg_zerocopy_realloc(struct sock *sk, size_t size,
- struct ubuf_info *uarg);
+ struct ubuf_info *uarg, bool devmem);
void msg_zerocopy_put_abort(struct ubuf_info *uarg, bool have_uref);
+struct net_devmem_dmabuf_binding;
+
int __zerocopy_sg_from_iter(struct msghdr *msg, struct sock *sk,
struct sk_buff *skb, struct iov_iter *from,
- size_t length);
+ size_t length,
+ struct net_devmem_dmabuf_binding *binding);
int zerocopy_fill_skb_from_iter(struct sk_buff *skb,
struct iov_iter *from, size_t length);
@@ -1724,12 +1723,14 @@ int zerocopy_fill_skb_from_iter(struct sk_buff *skb,
static inline int skb_zerocopy_iter_dgram(struct sk_buff *skb,
struct msghdr *msg, int len)
{
- return __zerocopy_sg_from_iter(msg, skb->sk, skb, &msg->msg_iter, len);
+ return __zerocopy_sg_from_iter(msg, skb->sk, skb, &msg->msg_iter, len,
+ NULL);
}
int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb,
struct msghdr *msg, int len,
- struct ubuf_info *uarg);
+ struct ubuf_info *uarg,
+ struct net_devmem_dmabuf_binding *binding);
/* Internal */
#define skb_shinfo(SKB) ((struct skb_shared_info *)(skb_end_pointer(SKB)))
@@ -3700,6 +3701,10 @@ static inline dma_addr_t __skb_frag_dma_map(struct device *dev,
size_t offset, size_t size,
enum dma_data_direction dir)
{
+ if (skb_frag_is_net_iov(frag)) {
+ return netmem_to_net_iov(frag->netmem)->dma_addr + offset +
+ frag->offset;
+ }
return dma_map_page(dev, skb_frag_page(frag),
skb_frag_off(frag) + offset, size, dir);
}
@@ -4105,8 +4110,7 @@ static inline void skb_frag_list_init(struct sk_buff *skb)
int __skb_wait_for_more_packets(struct sock *sk, struct sk_buff_head *queue,
int *err, long *timeo_p,
const struct sk_buff *skb);
-struct sk_buff *__skb_try_recv_from_queue(struct sock *sk,
- struct sk_buff_head *queue,
+struct sk_buff *__skb_try_recv_from_queue(struct sk_buff_head *queue,
unsigned int flags,
int *off, int *err,
struct sk_buff **last);
@@ -4129,9 +4133,8 @@ static inline int skb_copy_datagram_msg(const struct sk_buff *from, int offset,
}
int skb_copy_and_csum_datagram_msg(struct sk_buff *skb, int hlen,
struct msghdr *msg);
-int skb_copy_and_hash_datagram_iter(const struct sk_buff *skb, int offset,
- struct iov_iter *to, int len,
- struct ahash_request *hash);
+int skb_copy_and_crc32c_datagram_iter(const struct sk_buff *skb, int offset,
+ struct iov_iter *to, int len, u32 *crcp);
int skb_copy_datagram_from_iter(struct sk_buff *skb, int offset,
struct iov_iter *from, int len);
int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *frm);
@@ -4146,6 +4149,8 @@ int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset,
unsigned int flags);
int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset,
int len);
+int skb_send_sock_locked_with_flags(struct sock *sk, struct sk_buff *skb,
+ int offset, int len, int flags);
int skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, int len);
void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
unsigned int skb_zerocopy_headlen(const struct sk_buff *from);
@@ -4185,17 +4190,9 @@ static inline int memcpy_to_msg(struct msghdr *msg, void *data, int len)
return copy_to_iter(data, len, &msg->msg_iter) == len ? 0 : -EFAULT;
}
-struct skb_checksum_ops {
- __wsum (*update)(const void *mem, int len, __wsum wsum);
- __wsum (*combine)(__wsum csum, __wsum csum2, int offset, int len);
-};
-
-extern const struct skb_checksum_ops *crc32c_csum_stub __read_mostly;
-
-__wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
- __wsum csum, const struct skb_checksum_ops *ops);
__wsum skb_checksum(const struct sk_buff *skb, int offset, int len,
__wsum csum);
+u32 skb_crc32c(const struct sk_buff *skb, int offset, int len, u32 crc);
static inline void * __must_check
__skb_header_pointer(const struct sk_buff *skb, int offset, int len,
diff --git a/include/linux/skbuff_ref.h b/include/linux/skbuff_ref.h
index 0f3c58007488..9e49372ef1a0 100644
--- a/include/linux/skbuff_ref.h
+++ b/include/linux/skbuff_ref.h
@@ -17,7 +17,7 @@
*/
static inline void __skb_frag_ref(skb_frag_t *frag)
{
- get_page(skb_frag_page(frag));
+ get_netmem(skb_frag_netmem(frag));
}
/**
@@ -40,7 +40,7 @@ static inline void skb_page_unref(netmem_ref netmem, bool recycle)
if (recycle && napi_pp_put_page(netmem))
return;
#endif
- put_page(netmem_to_page(netmem));
+ put_netmem(netmem);
}
/**
diff --git a/include/linux/soc/mediatek/mtk_wed.h b/include/linux/soc/mediatek/mtk_wed.h
index a476648858a6..d8949a4ed0dc 100644
--- a/include/linux/soc/mediatek/mtk_wed.h
+++ b/include/linux/soc/mediatek/mtk_wed.h
@@ -192,7 +192,7 @@ struct mtk_wed_device {
};
struct mtk_wed_ops {
- int (*attach)(struct mtk_wed_device *dev);
+ int (*attach)(struct mtk_wed_device *dev) __releases(RCU);
int (*tx_ring_setup)(struct mtk_wed_device *dev, int ring,
void __iomem *regs, bool reset);
int (*rx_ring_setup)(struct mtk_wed_device *dev, int ring,
diff --git a/include/linux/socket.h b/include/linux/socket.h
index c3322eb3d686..3b262487ec06 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -168,7 +168,7 @@ static inline struct cmsghdr * cmsg_nxthdr (struct msghdr *__msg, struct cmsghdr
return __cmsg_nxthdr(__msg->msg_control, __msg->msg_controllen, __cmsg);
}
-static inline size_t msg_data_left(struct msghdr *msg)
+static inline size_t msg_data_left(const struct msghdr *msg)
{
return iov_iter_count(&msg->msg_iter);
}
diff --git a/include/linux/sony-laptop.h b/include/linux/sony-laptop.h
deleted file mode 100644
index 1e3c92feea6e..000000000000
--- a/include/linux/sony-laptop.h
+++ /dev/null
@@ -1,39 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _SONYLAPTOP_H_
-#define _SONYLAPTOP_H_
-
-#include <linux/types.h>
-
-#ifdef __KERNEL__
-
-/* used only for communication between v4l and sony-laptop */
-
-#define SONY_PIC_COMMAND_GETCAMERA 1 /* obsolete */
-#define SONY_PIC_COMMAND_SETCAMERA 2
-#define SONY_PIC_COMMAND_GETCAMERABRIGHTNESS 3 /* obsolete */
-#define SONY_PIC_COMMAND_SETCAMERABRIGHTNESS 4
-#define SONY_PIC_COMMAND_GETCAMERACONTRAST 5 /* obsolete */
-#define SONY_PIC_COMMAND_SETCAMERACONTRAST 6
-#define SONY_PIC_COMMAND_GETCAMERAHUE 7 /* obsolete */
-#define SONY_PIC_COMMAND_SETCAMERAHUE 8
-#define SONY_PIC_COMMAND_GETCAMERACOLOR 9 /* obsolete */
-#define SONY_PIC_COMMAND_SETCAMERACOLOR 10
-#define SONY_PIC_COMMAND_GETCAMERASHARPNESS 11 /* obsolete */
-#define SONY_PIC_COMMAND_SETCAMERASHARPNESS 12
-#define SONY_PIC_COMMAND_GETCAMERAPICTURE 13 /* obsolete */
-#define SONY_PIC_COMMAND_SETCAMERAPICTURE 14
-#define SONY_PIC_COMMAND_GETCAMERAAGC 15 /* obsolete */
-#define SONY_PIC_COMMAND_SETCAMERAAGC 16
-#define SONY_PIC_COMMAND_GETCAMERADIRECTION 17 /* obsolete */
-#define SONY_PIC_COMMAND_GETCAMERAROMVERSION 18 /* obsolete */
-#define SONY_PIC_COMMAND_GETCAMERAREVISION 19 /* obsolete */
-
-#if IS_ENABLED(CONFIG_SONY_LAPTOP)
-int sony_pic_camera_command(int command, u8 value);
-#else
-static inline int sony_pic_camera_command(int command, u8 value) { return 0; }
-#endif
-
-#endif /* __KERNEL__ */
-
-#endif /* _SONYLAPTOP_H_ */
diff --git a/include/linux/soundwire/sdw_intel.h b/include/linux/soundwire/sdw_intel.h
index 493d9de4e472..dc6ebaee3d18 100644
--- a/include/linux/soundwire/sdw_intel.h
+++ b/include/linux/soundwire/sdw_intel.h
@@ -365,7 +365,7 @@ struct sdw_intel_res {
* on e.g. which machine driver to select (I2S mode, HDaudio or
* SoundWire).
*/
-int sdw_intel_acpi_scan(acpi_handle *parent_handle,
+int sdw_intel_acpi_scan(acpi_handle parent_handle,
struct sdw_intel_acpi_info *info);
void sdw_intel_process_wakeen_event(struct sdw_intel_ctx *ctx);
diff --git a/include/linux/spi/sh_msiof.h b/include/linux/spi/sh_msiof.h
index f950d280461b..9fbef3fd4056 100644
--- a/include/linux/spi/sh_msiof.h
+++ b/include/linux/spi/sh_msiof.h
@@ -2,6 +2,131 @@
#ifndef __SPI_SH_MSIOF_H__
#define __SPI_SH_MSIOF_H__
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+
+#define SITMDR1 0x00 /* Transmit Mode Register 1 */
+#define SITMDR2 0x04 /* Transmit Mode Register 2 */
+#define SITMDR3 0x08 /* Transmit Mode Register 3 */
+#define SIRMDR1 0x10 /* Receive Mode Register 1 */
+#define SIRMDR2 0x14 /* Receive Mode Register 2 */
+#define SIRMDR3 0x18 /* Receive Mode Register 3 */
+#define SITSCR 0x20 /* Transmit Clock Select Register */
+#define SIRSCR 0x22 /* Receive Clock Select Register (SH, A1, APE6) */
+#define SICTR 0x28 /* Control Register */
+#define SIFCTR 0x30 /* FIFO Control Register */
+#define SISTR 0x40 /* Status Register */
+#define SIIER 0x44 /* Interrupt Enable Register */
+#define SITDR1 0x48 /* Transmit Control Data Register 1 (SH, A1) */
+#define SITDR2 0x4c /* Transmit Control Data Register 2 (SH, A1) */
+#define SITFDR 0x50 /* Transmit FIFO Data Register */
+#define SIRDR1 0x58 /* Receive Control Data Register 1 (SH, A1) */
+#define SIRDR2 0x5c /* Receive Control Data Register 2 (SH, A1) */
+#define SIRFDR 0x60 /* Receive FIFO Data Register */
+
+/* SITMDR1 and SIRMDR1 */
+#define SIMDR1_TRMD BIT(31) /* Transfer Mode (1 = Master mode) */
+#define SIMDR1_SYNCMD GENMASK(29, 28) /* SYNC Mode */
+#define SIMDR1_SYNCMD_PULSE 0U /* Frame start sync pulse */
+#define SIMDR1_SYNCMD_SPI 2U /* Level mode/SPI */
+#define SIMDR1_SYNCMD_LR 3U /* L/R mode */
+#define SIMDR1_SYNCAC BIT(25) /* Sync Polarity (1 = Active-low) */
+#define SIMDR1_BITLSB BIT(24) /* MSB/LSB First (1 = LSB first) */
+#define SIMDR1_DTDL GENMASK(22, 20) /* Data Pin Bit Delay for MSIOF_SYNC */
+#define SIMDR1_SYNCDL GENMASK(18, 16) /* Frame Sync Signal Timing Delay */
+#define SIMDR1_FLD GENMASK(3, 2) /* Frame Sync Signal Interval (0-3) */
+#define SIMDR1_XXSTP BIT(0) /* Transmission/Reception Stop on FIFO */
+/* SITMDR1 */
+#define SITMDR1_PCON BIT(30) /* Transfer Signal Connection */
+#define SITMDR1_SYNCCH GENMASK(27, 26) /* Sync Signal Channel Select */
+ /* 0=MSIOF_SYNC, 1=MSIOF_SS1, 2=MSIOF_SS2 */
+
+/* SITMDR2 and SIRMDR2 */
+#define SIMDR2_GRP GENMASK(31, 30) /* Group Count */
+#define SIMDR2_BITLEN1 GENMASK(28, 24) /* Data Size (8-32 bits) */
+#define SIMDR2_WDLEN1			GENMASK(23, 16) /* Word Count (1-64/256 (SH, A1)) */
+#define SIMDR2_GRPMASK GENMASK(3, 0) /* Group Output Mask 1-4 (SH, A1) */
+
+/* SITMDR3 and SIRMDR3 */
+#define SIMDR3_BITLEN2 GENMASK(28, 24) /* Data Size (8-32 bits) */
+#define SIMDR3_WDLEN2			GENMASK(23, 16) /* Word Count (1-64/256 (SH, A1)) */
+
+/* SITSCR and SIRSCR */
+#define SISCR_BRPS GENMASK(12, 8) /* Prescaler Setting (1-32) */
+#define SISCR_BRDV GENMASK(2, 0) /* Baud Rate Generator's Division Ratio */
+
+/* SICTR */
+#define SICTR_TSCKIZ GENMASK(31, 30) /* Transmit Clock I/O Polarity Select */
+#define SICTR_TSCKIZ_SCK BIT(31) /* Disable SCK when TX disabled */
+#define SICTR_TSCKIZ_POL BIT(30) /* Transmit Clock Polarity */
+#define SICTR_RSCKIZ GENMASK(29, 28) /* Receive Clock Polarity Select */
+#define SICTR_RSCKIZ_SCK BIT(29) /* Must match CTR_TSCKIZ_SCK */
+#define SICTR_RSCKIZ_POL BIT(28) /* Receive Clock Polarity */
+#define SICTR_TEDG BIT(27) /* Transmit Timing (1 = falling edge) */
+#define SICTR_REDG BIT(26) /* Receive Timing (1 = falling edge) */
+#define SICTR_TXDIZ GENMASK(23, 22) /* Pin Output When TX is Disabled */
+#define SICTR_TXDIZ_LOW 0U /* 0 */
+#define SICTR_TXDIZ_HIGH 1U /* 1 */
+#define SICTR_TXDIZ_HIZ 2U /* High-impedance */
+#define SICTR_TSCKE BIT(15) /* Transmit Serial Clock Output Enable */
+#define SICTR_TFSE BIT(14) /* Transmit Frame Sync Signal Output Enable */
+#define SICTR_TXE BIT(9) /* Transmit Enable */
+#define SICTR_RXE BIT(8) /* Receive Enable */
+#define SICTR_TXRST BIT(1) /* Transmit Reset */
+#define SICTR_RXRST BIT(0) /* Receive Reset */
+
+/* SIFCTR */
+#define SIFCTR_TFWM GENMASK(31, 29) /* Transmit FIFO Watermark */
+#define SIFCTR_TFWM_64 0U /* Transfer Request when 64 empty stages */
+#define SIFCTR_TFWM_32 1U /* Transfer Request when 32 empty stages */
+#define SIFCTR_TFWM_24 2U /* Transfer Request when 24 empty stages */
+#define SIFCTR_TFWM_16 3U /* Transfer Request when 16 empty stages */
+#define SIFCTR_TFWM_12 4U /* Transfer Request when 12 empty stages */
+#define SIFCTR_TFWM_8 5U /* Transfer Request when 8 empty stages */
+#define SIFCTR_TFWM_4 6U /* Transfer Request when 4 empty stages */
+#define SIFCTR_TFWM_1 7U /* Transfer Request when 1 empty stage */
+#define SIFCTR_TFUA GENMASK(28, 20) /* Transmit FIFO Usable Area */
+#define SIFCTR_RFWM GENMASK(15, 13) /* Receive FIFO Watermark */
+#define SIFCTR_RFWM_1			0U	/* Transfer Request when 1 valid stage */
+#define SIFCTR_RFWM_4 1U /* Transfer Request when 4 valid stages */
+#define SIFCTR_RFWM_8 2U /* Transfer Request when 8 valid stages */
+#define SIFCTR_RFWM_16 3U /* Transfer Request when 16 valid stages */
+#define SIFCTR_RFWM_32 4U /* Transfer Request when 32 valid stages */
+#define SIFCTR_RFWM_64 5U /* Transfer Request when 64 valid stages */
+#define SIFCTR_RFWM_128 6U /* Transfer Request when 128 valid stages */
+#define SIFCTR_RFWM_256 7U /* Transfer Request when 256 valid stages */
+#define SIFCTR_RFUA GENMASK(12, 4) /* Receive FIFO Usable Area (0x40 = full) */
+
+/* SISTR */
+#define SISTR_TFEMP BIT(29) /* Transmit FIFO Empty */
+#define SISTR_TDREQ BIT(28) /* Transmit Data Transfer Request */
+#define SISTR_TEOF BIT(23) /* Frame Transmission End */
+#define SISTR_TFSERR BIT(21) /* Transmit Frame Synchronization Error */
+#define SISTR_TFOVF BIT(20) /* Transmit FIFO Overflow */
+#define SISTR_TFUDF BIT(19) /* Transmit FIFO Underflow */
+#define SISTR_RFFUL BIT(13) /* Receive FIFO Full */
+#define SISTR_RDREQ BIT(12) /* Receive Data Transfer Request */
+#define SISTR_REOF BIT(7) /* Frame Reception End */
+#define SISTR_RFSERR BIT(5) /* Receive Frame Synchronization Error */
+#define SISTR_RFUDF BIT(4) /* Receive FIFO Underflow */
+#define SISTR_RFOVF BIT(3) /* Receive FIFO Overflow */
+
+/* SIIER */
+#define SIIER_TDMAE BIT(31) /* Transmit Data DMA Transfer Req. Enable */
+#define SIIER_TFEMPE BIT(29) /* Transmit FIFO Empty Enable */
+#define SIIER_TDREQE BIT(28) /* Transmit Data Transfer Request Enable */
+#define SIIER_TEOFE BIT(23) /* Frame Transmission End Enable */
+#define SIIER_TFSERRE BIT(21) /* Transmit Frame Sync Error Enable */
+#define SIIER_TFOVFE BIT(20) /* Transmit FIFO Overflow Enable */
+#define SIIER_TFUDFE BIT(19) /* Transmit FIFO Underflow Enable */
+#define SIIER_RDMAE BIT(15) /* Receive Data DMA Transfer Req. Enable */
+#define SIIER_RFFULE BIT(13) /* Receive FIFO Full Enable */
+#define SIIER_RDREQE BIT(12) /* Receive Data Transfer Request Enable */
+#define SIIER_REOFE BIT(7) /* Frame Reception End Enable */
+#define SIIER_RFSERRE BIT(5) /* Receive Frame Sync Error Enable */
+#define SIIER_RFUDFE BIT(4) /* Receive FIFO Underflow Enable */
+#define SIIER_RFOVFE BIT(3) /* Receive FIFO Overflow Enable */
+
enum {
MSIOF_SPI_HOST,
MSIOF_SPI_TARGET,
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index 0ba5e49bace4..4789f91dae94 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -136,13 +136,6 @@ extern void spi_transfer_cs_change_delay_exec(struct spi_message *msg,
* @max_speed_hz: Maximum clock rate to be used with this chip
* (on this board); may be changed by the device's driver.
* The spi_transfer.speed_hz can override this for each transfer.
- * @chip_select: Array of physical chipselect, spi->chipselect[i] gives
- * the corresponding physical CS for logical CS i.
- * @mode: The spi mode defines how data is clocked out and in.
- * This may be changed by the device's driver.
- * The "active low" default for chipselect mode can be overridden
- * (by specifying SPI_CS_HIGH) as can the "MSB first" default for
- * each word in a transfer (by specifying SPI_LSB_FIRST).
* @bits_per_word: Data transfers involve one or more words; word sizes
* like eight or 12 bits are common. In-memory wordsizes are
* powers of two bytes (e.g. 20 bit samples use 32 bits).
@@ -150,6 +143,11 @@ extern void spi_transfer_cs_change_delay_exec(struct spi_message *msg,
* default (0) indicating protocol words are eight bit bytes.
* The spi_transfer.bits_per_word can override this for each transfer.
* @rt: Make the pump thread real time priority.
+ * @mode: The spi mode defines how data is clocked out and in.
+ * This may be changed by the device's driver.
+ * The "active low" default for chipselect mode can be overridden
+ * (by specifying SPI_CS_HIGH) as can the "MSB first" default for
+ * each word in a transfer (by specifying SPI_LSB_FIRST).
* @irq: Negative, or the number passed to request_irq() to receive
* interrupts from this device.
* @controller_state: Controller's runtime state
@@ -162,8 +160,7 @@ extern void spi_transfer_cs_change_delay_exec(struct spi_message *msg,
* the device will bind to the named driver and only the named driver.
* Do not set directly, because core frees it; use driver_set_override() to
* set or clear it.
- * @cs_gpiod: Array of GPIO descriptors of the corresponding chipselect lines
- * (optional, NULL when not using a GPIO line)
+ * @pcpu_statistics: statistics for the spi_device
* @word_delay: delay to be inserted between consecutive
* words of a transfer
* @cs_setup: delay to be introduced by the controller after CS is asserted
@@ -171,8 +168,11 @@ extern void spi_transfer_cs_change_delay_exec(struct spi_message *msg,
* @cs_inactive: delay to be introduced by the controller after CS is
* deasserted. If @cs_change_delay is used from @spi_transfer, then the
* two delays will be added up.
- * @pcpu_statistics: statistics for the spi_device
+ * @chip_select: Array of physical chipselect, spi->chipselect[i] gives
+ * the corresponding physical CS for logical CS i.
* @cs_index_mask: Bit mask of the active chipselect(s) in the chipselect array
+ * @cs_gpiod: Array of GPIO descriptors of the corresponding chipselect lines
+ * (optional, NULL when not using a GPIO line)
*
* A @spi_device is used to interchange data between an SPI target device
* (usually a discrete chip) and CPU memory.
@@ -187,7 +187,6 @@ struct spi_device {
struct device dev;
struct spi_controller *controller;
u32 max_speed_hz;
- u8 chip_select[SPI_CS_CNT_MAX];
u8 bits_per_word;
bool rt;
#define SPI_NO_TX BIT(31) /* No transmit wire */
@@ -218,23 +217,29 @@ struct spi_device {
void *controller_data;
char modalias[SPI_NAME_SIZE];
const char *driver_override;
- struct gpio_desc *cs_gpiod[SPI_CS_CNT_MAX]; /* Chip select gpio desc */
+
+ /* The statistics */
+ struct spi_statistics __percpu *pcpu_statistics;
+
struct spi_delay word_delay; /* Inter-word delay */
+
/* CS delays */
struct spi_delay cs_setup;
struct spi_delay cs_hold;
struct spi_delay cs_inactive;
- /* The statistics */
- struct spi_statistics __percpu *pcpu_statistics;
+ u8 chip_select[SPI_CS_CNT_MAX];
- /* Bit mask of the chipselect(s) that the driver need to use from
- * the chipselect array.When the controller is capable to handle
+ /*
+	 * Bit mask of the chipselect(s) that the driver needs to use from
+	 * the chipselect array. When the controller is capable of handling
	 * multiple chip selects and memories are connected in parallel,
	 * then more than one bit needs to be set in cs_index_mask.
*/
u32 cs_index_mask : SPI_CS_CNT_MAX;
+ struct gpio_desc *cs_gpiod[SPI_CS_CNT_MAX]; /* Chip select gpio desc */
+
/*
* Likely need more hooks for more protocol options affecting how
* the controller talks to each chip, like:
@@ -249,10 +254,7 @@ struct spi_device {
static_assert((SPI_MODE_KERNEL_MASK & SPI_MODE_USER_MASK) == 0,
"SPI_MODE_USER_MASK & SPI_MODE_KERNEL_MASK must not overlap");
-static inline struct spi_device *to_spi_device(const struct device *dev)
-{
- return dev ? container_of(dev, struct spi_device, dev) : NULL;
-}
+#define to_spi_device(__dev) container_of_const(__dev, struct spi_device, dev)
/* Most drivers won't need to care about device refcounting */
static inline struct spi_device *spi_dev_get(struct spi_device *spi)
@@ -503,6 +505,8 @@ extern struct spi_device *spi_new_ancillary_device(struct spi_device *spi, u8 ch
* found.
* @put_offload: release the offload instance acquired by @get_offload.
* @mem_caps: controller capabilities for the handling of memory operations.
+ * @dtr_caps: true if the controller has DTR (single/dual transfer rate) capability.
+ *	QSPI-based controllers should fill this based on the controller's capability.
* @unprepare_message: undo any work done by prepare_message().
* @target_abort: abort the ongoing transfer request on an SPI target controller
* @cs_gpiods: Array of GPIO descriptors to use as chip select lines; one per CS
@@ -746,6 +750,9 @@ struct spi_controller {
const struct spi_controller_mem_ops *mem_ops;
const struct spi_controller_mem_caps *mem_caps;
+	/* SPI or QSPI controllers can set this to true if they support SDR/DDR transfer rates */
+ bool dtr_caps;
+
struct spi_offload *(*get_offload)(struct spi_device *spi,
const struct spi_offload_config *config);
void (*put_offload)(struct spi_offload *offload);
@@ -998,6 +1005,7 @@ struct spi_res {
* processed the word, i.e. the "pre" timestamp should be taken before
* transmitting the "pre" word, and the "post" timestamp after receiving
* transmit confirmation from the controller for the "post" word.
+ * @dtr_mode: true if the transfer uses double transfer rate (DTR).
* @timestamped: true if the transfer has been timestamped
* @error: Error status logged by SPI controller driver.
*
@@ -1049,6 +1057,9 @@ struct spi_res {
* two should both be set. User can set transfer mode with SPI_NBITS_SINGLE(1x)
* SPI_NBITS_DUAL(2x) and SPI_NBITS_QUAD(4x) to support these three transfer.
*
+ * Users may also set dtr_mode to true to use double transfer rate mode if
+ * desired; if not set, single transfer rate mode is used by default.
+ *
* The code that submits an spi_message (and its spi_transfers)
* to the lower layers is responsible for managing its memory.
* Zero-initialize every field you don't set up explicitly, to
@@ -1083,6 +1094,7 @@ struct spi_transfer {
unsigned tx_nbits:4;
unsigned rx_nbits:4;
unsigned timestamped:1;
+ bool dtr_mode;
#define SPI_NBITS_SINGLE 0x01 /* 1-bit transfer */
#define SPI_NBITS_DUAL 0x02 /* 2-bit transfer */
#define SPI_NBITS_QUAD 0x04 /* 4-bit transfer */
@@ -1326,6 +1338,32 @@ static inline bool spi_is_bpw_supported(struct spi_device *spi, u32 bpw)
}
/**
+ * spi_bpw_to_bytes - Convert bits per word to bytes
+ * @bpw: Bits per word
+ *
+ * This function converts the given @bpw to bytes. The result is always
+ * power-of-two, e.g.,
+ *
+ * =============== =================
+ * Input (in bits) Output (in bytes)
+ * =============== =================
+ * 5 1
+ * 9 2
+ * 21 4
+ * 37 8
+ * =============== =================
+ *
+ * It returns 0 when @bpw is 0.
+ *
+ * Returns:
+ * Bytes for the given @bpw.
+ */
+static inline u32 spi_bpw_to_bytes(u32 bpw)
+{
+ return roundup_pow_of_two(BITS_TO_BYTES(bpw));
+}
+
+/**
* spi_controller_xfer_timeout - Compute a suitable timeout value
* @ctlr: SPI device
* @xfer: Transfer descriptor
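A usage sketch for spi_bpw_to_bytes() (illustrative only; n_words is a
hypothetical count supplied by the caller): sizing an in-memory buffer where
each word occupies the next power-of-two number of bytes.

	size_t word_bytes = spi_bpw_to_bytes(spi->bits_per_word);	/* e.g. 12 -> 2 */
	void *buf = kcalloc(n_words, word_bytes, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;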
diff --git a/include/linux/stat.h b/include/linux/stat.h
index be7496a6a0dd..e3d00e7bb26d 100644
--- a/include/linux/stat.h
+++ b/include/linux/stat.h
@@ -57,6 +57,7 @@ struct kstat {
u32 dio_read_offset_align;
u32 atomic_write_unit_min;
u32 atomic_write_unit_max;
+ u32 atomic_write_unit_max_opt;
u32 atomic_write_segments_max;
};
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index c4ec8bb8144e..26ddf95d23f9 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -233,13 +233,14 @@ struct plat_stmmacenet_data {
u8 tx_sched_algorithm;
struct stmmac_rxq_cfg rx_queues_cfg[MTL_MAX_RX_QUEUES];
struct stmmac_txq_cfg tx_queues_cfg[MTL_MAX_TX_QUEUES];
+ void (*get_interfaces)(struct stmmac_priv *priv, void *bsp_priv,
+ unsigned long *interfaces);
int (*set_clk_tx_rate)(void *priv, struct clk *clk_tx_i,
phy_interface_t interface, int speed);
void (*fix_mac_speed)(void *priv, int speed, unsigned int mode);
int (*fix_soc_reset)(void *priv, void __iomem *ioaddr);
int (*serdes_powerup)(struct net_device *ndev, void *priv);
void (*serdes_powerdown)(struct net_device *ndev, void *priv);
- void (*speed_mode_2500)(struct net_device *ndev, void *priv);
int (*mac_finish)(struct net_device *ndev,
void *priv,
unsigned int mode,
@@ -276,7 +277,6 @@ struct plat_stmmacenet_data {
int mac_port_sel_speed;
int has_xgmac;
u8 vlan_fail_q;
- unsigned long eee_usecs_rate;
struct pci_dev *pdev;
int int_snapshot_num;
int msi_mac_vec;
diff --git a/include/linux/string_helpers.h b/include/linux/string_helpers.h
index e93fbb5b0c01..3fb88a1e9898 100644
--- a/include/linux/string_helpers.h
+++ b/include/linux/string_helpers.h
@@ -31,6 +31,7 @@ enum string_size_units {
int string_get_size(u64 size, u64 blk_size, const enum string_size_units units,
char *buf, int len);
+int parse_int_array(const char *buf, size_t count, int **array);
int parse_int_array_user(const char __user *from, size_t count, int **array);
#define UNESCAPE_SPACE BIT(0)
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index 74658cca0f38..48666b83fe68 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -119,14 +119,14 @@ void svc_destroy(struct svc_serv **svcp);
* Linux limit; someone who cares more about NFS/UDP performance
* can test a larger number.
*
- * For TCP transports we have more freedom. A size of 1MB is
- * chosen to match the client limit. Other OSes are known to
- * have larger limits, but those numbers are probably beyond
- * the point of diminishing returns.
+ * For non-UDP transports we have more freedom. A size of 4MB is
+ * chosen to accommodate clients that support larger I/O sizes.
*/
-#define RPCSVC_MAXPAYLOAD (1*1024*1024u)
-#define RPCSVC_MAXPAYLOAD_TCP RPCSVC_MAXPAYLOAD
-#define RPCSVC_MAXPAYLOAD_UDP (32*1024u)
+enum {
+ RPCSVC_MAXPAYLOAD = 4 * 1024 * 1024,
+ RPCSVC_MAXPAYLOAD_TCP = RPCSVC_MAXPAYLOAD,
+ RPCSVC_MAXPAYLOAD_UDP = 32 * 1024,
+};
extern u32 svc_max_payload(const struct svc_rqst *rqstp);
@@ -150,14 +150,24 @@ extern u32 svc_max_payload(const struct svc_rqst *rqstp);
* list. xdr_buf.tail points to the end of the first page.
* This assumes that the non-page part of an rpc reply will fit
* in a page - NFSd ensures this. lockd also has no trouble.
+ */
+
+/**
+ * svc_serv_maxpages - maximum count of pages needed for one RPC message
+ * @serv: RPC service context
+ *
+ * Returns a count of pages or vectors that can hold the maximum
+ * size RPC message for @serv.
*
- * Each request/reply pair can have at most one "payload", plus two pages,
- * one for the request, and one for the reply.
- * We using ->sendfile to return read data, we might need one extra page
- * if the request is not page-aligned. So add another '1'.
+ * Each request/reply pair can have at most one "payload", plus two
+ * pages, one for the request, and one for the reply.
+ * nfsd_splice_actor() might need an extra page when a READ payload
+ * is not page-aligned.
*/
-#define RPCSVC_MAXPAGES ((RPCSVC_MAXPAYLOAD+PAGE_SIZE-1)/PAGE_SIZE \
- + 2 + 1)
+static inline unsigned long svc_serv_maxpages(const struct svc_serv *serv)
+{
+ return DIV_ROUND_UP(serv->sv_max_mesg, PAGE_SIZE) + 2 + 1;
+}
/*
* The context of a single thread, including the request currently being
@@ -188,14 +198,14 @@ struct svc_rqst {
struct xdr_stream rq_res_stream;
struct page *rq_scratch_page;
struct xdr_buf rq_res;
- struct page *rq_pages[RPCSVC_MAXPAGES + 1];
+ unsigned long rq_maxpages; /* num of entries in rq_pages */
+ struct page * *rq_pages;
struct page * *rq_respages; /* points into rq_pages */
struct page * *rq_next_page; /* next reply page to use */
struct page * *rq_page_end; /* one past the last page */
struct folio_batch rq_fbatch;
- struct kvec rq_vec[RPCSVC_MAXPAGES]; /* generally useful.. */
- struct bio_vec rq_bvec[RPCSVC_MAXPAGES];
+ struct bio_vec *rq_bvec;
__be32 rq_xid; /* transmission id */
u32 rq_prog; /* program number */
@@ -452,8 +462,6 @@ const char * svc_proc_name(const struct svc_rqst *rqstp);
int svc_encode_result_payload(struct svc_rqst *rqstp,
unsigned int offset,
unsigned int length);
-unsigned int svc_fill_write_vector(struct svc_rqst *rqstp,
- struct xdr_buf *payload);
char *svc_fill_symlink_pathname(struct svc_rqst *rqstp,
struct kvec *first, void *p,
size_t total);
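A worked example of the new sizing (illustrative): with sv_max_mesg at the new
4 MB RPCSVC_MAXPAYLOAD and 4 KB pages, svc_serv_maxpages() yields
DIV_ROUND_UP(4 MB, 4 KB) + 2 + 1 = 1027 entries, so a thread's page array can
be sized at setup time instead of via the old RPCSVC_MAXPAGES constant. The
allocation below is a sketch, not the patch's actual setup code:

	unsigned long pages = svc_serv_maxpages(serv);

	rqstp->rq_pages = kcalloc(pages, sizeof(struct page *), GFP_KERNEL);
	if (!rqstp->rq_pages)
		return -ENOMEM;
	rqstp->rq_maxpages = pages;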
diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
index 619fc0bd837a..22704c2e5b9b 100644
--- a/include/linux/sunrpc/svc_rdma.h
+++ b/include/linux/sunrpc/svc_rdma.h
@@ -202,7 +202,8 @@ struct svc_rdma_recv_ctxt {
struct svc_rdma_pcl rc_reply_pcl;
unsigned int rc_page_count;
- struct page *rc_pages[RPCSVC_MAXPAGES];
+ unsigned long rc_maxpages;
+ struct page *rc_pages[] __counted_by(rc_maxpages);
};
/*
@@ -244,7 +245,8 @@ struct svc_rdma_send_ctxt {
void *sc_xprt_buf;
int sc_page_count;
int sc_cur_sge_no;
- struct page *sc_pages[RPCSVC_MAXPAGES];
+ unsigned long sc_maxpages;
+ struct page **sc_pages;
struct ib_sge sc_sges[];
};
diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h
index 72be60952579..369a89aea186 100644
--- a/include/linux/sunrpc/svc_xprt.h
+++ b/include/linux/sunrpc/svc_xprt.h
@@ -53,6 +53,7 @@ struct svc_xprt {
struct svc_xprt_class *xpt_class;
const struct svc_xprt_ops *xpt_ops;
struct kref xpt_ref;
+ ktime_t xpt_qtime;
struct list_head xpt_list;
struct lwq_node xpt_ready;
unsigned long xpt_flags;
diff --git a/include/linux/sunrpc/svcsock.h b/include/linux/sunrpc/svcsock.h
index bf45d9e8492a..963bbe251e52 100644
--- a/include/linux/sunrpc/svcsock.h
+++ b/include/linux/sunrpc/svcsock.h
@@ -40,7 +40,9 @@ struct svc_sock {
struct completion sk_handshake_done;
- struct page * sk_pages[RPCSVC_MAXPAGES]; /* received data */
+ /* received data */
+ unsigned long sk_maxpages;
+ struct page * sk_pages[] __counted_by(sk_maxpages);
};
static inline u32 svc_sock_reclen(struct svc_sock *svsk)
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index da6ebca3ff77..b1c76c8f2c82 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -298,6 +298,11 @@ static inline void s2idle_set_ops(const struct platform_s2idle_ops *ops) {}
static inline void s2idle_wake(void) {}
#endif /* !CONFIG_SUSPEND */
+static inline bool pm_suspend_in_progress(void)
+{
+ return pm_suspend_target_state != PM_SUSPEND_ON;
+}
+
/* struct pbe is used for creating lists of pages that should be restored
* atomically during the resume from disk, because the page frames they have
* occupied before the suspend are in use.
@@ -470,6 +475,8 @@ extern void pm_print_active_wakeup_sources(void);
extern unsigned int lock_system_sleep(void);
extern void unlock_system_sleep(unsigned int);
+extern bool pm_sleep_transition_in_progress(void);
+
#else /* !CONFIG_PM_SLEEP */
static inline int register_pm_notifier(struct notifier_block *nb)
@@ -498,6 +505,8 @@ static inline void pm_system_irq_wakeup(unsigned int irq_number) {}
static inline unsigned int lock_system_sleep(void) { return 0; }
static inline void unlock_system_sleep(unsigned int flags) {}
+static inline bool pm_sleep_transition_in_progress(void) { return false; }
+
#endif /* !CONFIG_PM_SLEEP */
#ifdef CONFIG_PM_SLEEP_DEBUG
diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
index 18f7e1fd093c..f418aae4f113 100644
--- a/include/linux/sysfs.h
+++ b/include/linux/sysfs.h
@@ -107,7 +107,7 @@ struct attribute_group {
int);
struct attribute **attrs;
union {
- struct bin_attribute **bin_attrs;
+ const struct bin_attribute *const *bin_attrs;
const struct bin_attribute *const *bin_attrs_new;
};
};
@@ -306,11 +306,11 @@ struct bin_attribute {
size_t size;
void *private;
struct address_space *(*f_mapping)(void);
- ssize_t (*read)(struct file *, struct kobject *, struct bin_attribute *,
+ ssize_t (*read)(struct file *, struct kobject *, const struct bin_attribute *,
char *, loff_t, size_t);
ssize_t (*read_new)(struct file *, struct kobject *, const struct bin_attribute *,
char *, loff_t, size_t);
- ssize_t (*write)(struct file *, struct kobject *, struct bin_attribute *,
+ ssize_t (*write)(struct file *, struct kobject *, const struct bin_attribute *,
char *, loff_t, size_t);
ssize_t (*write_new)(struct file *, struct kobject *,
const struct bin_attribute *, char *, loff_t, size_t);
@@ -332,28 +332,11 @@ struct bin_attribute {
*/
#define sysfs_bin_attr_init(bin_attr) sysfs_attr_init(&(bin_attr)->attr)
-typedef ssize_t __sysfs_bin_rw_handler_new(struct file *, struct kobject *,
- const struct bin_attribute *, char *, loff_t, size_t);
-
/* macros to create static binary attributes easier */
#define __BIN_ATTR(_name, _mode, _read, _write, _size) { \
.attr = { .name = __stringify(_name), .mode = _mode }, \
- .read = _Generic(_read, \
- __sysfs_bin_rw_handler_new * : NULL, \
- default : _read \
- ), \
- .read_new = _Generic(_read, \
- __sysfs_bin_rw_handler_new * : _read, \
- default : NULL \
- ), \
- .write = _Generic(_write, \
- __sysfs_bin_rw_handler_new * : NULL, \
- default : _write \
- ), \
- .write_new = _Generic(_write, \
- __sysfs_bin_rw_handler_new * : _write, \
- default : NULL \
- ), \
+ .read = _read, \
+ .write = _write, \
.size = _size, \
}
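With the _Generic dispatch removed, __BIN_ATTR() wires its handlers straight
into .read/.write, which now take a const struct bin_attribute *. A sketch with
a hypothetical EEPROM attribute:

	static ssize_t eeprom_read(struct file *file, struct kobject *kobj,
				   const struct bin_attribute *attr,
				   char *buf, loff_t off, size_t count);

	static const struct bin_attribute bin_attr_eeprom =
		__BIN_ATTR(eeprom, 0444, eeprom_read, NULL, 4096);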
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 1669d95bb0f9..29f59d50dc73 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -340,7 +340,7 @@ struct tcp_sock {
} rcv_rtt_est;
/* Receiver queue space */
struct {
- u32 space;
+ int space;
u32 seq;
u64 time;
} rcvq_space;
@@ -385,7 +385,8 @@ struct tcp_sock {
syn_fastopen:1, /* SYN includes Fast Open option */
syn_fastopen_exp:1,/* SYN includes Fast Open exp. option */
syn_fastopen_ch:1, /* Active TFO re-enabling probe */
- syn_data_acked:1;/* data in SYN is acked by SYN-ACK */
+ syn_data_acked:1,/* data in SYN is acked by SYN-ACK */
+ syn_fastopen_child:1; /* created TFO passive child socket */
u8 keepalive_probes; /* num of allowed keep alive probes */
u32 tcp_tx_delay; /* delay (in usec) added to TX packets */
diff --git a/include/linux/tfrc.h b/include/linux/tfrc.h
deleted file mode 100644
index a5acc768085d..000000000000
--- a/include/linux/tfrc.h
+++ /dev/null
@@ -1,51 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-#ifndef _LINUX_TFRC_H_
-#define _LINUX_TFRC_H_
-/*
- * TFRC - Data Structures for the TCP-Friendly Rate Control congestion
- * control mechanism as specified in RFC 3448.
- *
- * Copyright (c) 2005 The University of Waikato, Hamilton, New Zealand.
- * Copyright (c) 2005 Ian McDonald <iam4@cs.waikato.ac.nz>
- * Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@conectiva.com.br>
- * Copyright (c) 2003 Nils-Erik Mattsson, Joacim Haggmark, Magnus Erixzon
- */
-#include <linux/types.h>
-
-/** tfrc_rx_info - TFRC Receiver Data Structure
- *
- * @tfrcrx_x_recv: receiver estimate of sending rate (3.2.2)
- * @tfrcrx_rtt: round-trip-time (communicated by sender)
- * @tfrcrx_p: current estimate of loss event rate (3.2.2)
- */
-struct tfrc_rx_info {
- __u32 tfrcrx_x_recv;
- __u32 tfrcrx_rtt;
- __u32 tfrcrx_p;
-};
-
-/** tfrc_tx_info - TFRC Sender Data Structure
- *
- * @tfrctx_x: computed transmit rate (4.3 (4))
- * @tfrctx_x_recv: receiver estimate of send rate (4.3)
- * @tfrctx_x_calc: return value of throughput equation (3.1)
- * @tfrctx_rtt: (moving average) estimate of RTT (4.3)
- * @tfrctx_p: current loss event rate (5.4)
- * @tfrctx_rto: estimate of RTO, equals 4*RTT (4.3)
- * @tfrctx_ipi: inter-packet interval (4.6)
- *
- * Note: X and X_recv are both maintained in units of 64 * bytes/second. This
- * enables a finer resolution of sending rates and avoids problems with
- * integer arithmetic; u32 is not sufficient as scaling consumes 6 bits.
- */
-struct tfrc_tx_info {
- __u64 tfrctx_x;
- __u64 tfrctx_x_recv;
- __u32 tfrctx_x_calc;
- __u32 tfrctx_rtt;
- __u32 tfrctx_p;
- __u32 tfrctx_rto;
- __u32 tfrctx_ipi;
-};
-
-#endif /* _LINUX_TFRC_H_ */
diff --git a/include/linux/tick.h b/include/linux/tick.h
index b8ddc8e631a3..ac76ae9fa36d 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -195,12 +195,6 @@ static inline bool tick_nohz_full_enabled(void)
__ret; \
})
-static inline void tick_nohz_full_add_cpus_to(struct cpumask *mask)
-{
- if (tick_nohz_full_enabled())
- cpumask_or(mask, mask, tick_nohz_full_mask);
-}
-
extern void tick_nohz_dep_set(enum tick_dep_bits bit);
extern void tick_nohz_dep_clear(enum tick_dep_bits bit);
extern void tick_nohz_dep_set_cpu(int cpu, enum tick_dep_bits bit);
@@ -281,7 +275,6 @@ extern void __init tick_nohz_full_setup(cpumask_var_t cpumask);
#else
static inline bool tick_nohz_full_enabled(void) { return false; }
static inline bool tick_nohz_full_cpu(int cpu) { return false; }
-static inline void tick_nohz_full_add_cpus_to(struct cpumask *mask) { }
static inline void tick_nohz_dep_set_cpu(int cpu, enum tick_dep_bits bit) { }
static inline void tick_nohz_dep_clear_cpu(int cpu, enum tick_dep_bits bit) { }
diff --git a/include/linux/timer.h b/include/linux/timer.h
index 10596d7c3a34..f636f55c427d 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -67,44 +67,44 @@
/*
* LOCKDEP and DEBUG timer interfaces.
*/
-void init_timer_key(struct timer_list *timer,
+void timer_init_key(struct timer_list *timer,
void (*func)(struct timer_list *), unsigned int flags,
const char *name, struct lock_class_key *key);
#ifdef CONFIG_DEBUG_OBJECTS_TIMERS
-extern void init_timer_on_stack_key(struct timer_list *timer,
+extern void timer_init_key_on_stack(struct timer_list *timer,
void (*func)(struct timer_list *),
unsigned int flags, const char *name,
struct lock_class_key *key);
#else
-static inline void init_timer_on_stack_key(struct timer_list *timer,
+static inline void timer_init_key_on_stack(struct timer_list *timer,
void (*func)(struct timer_list *),
unsigned int flags,
const char *name,
struct lock_class_key *key)
{
- init_timer_key(timer, func, flags, name, key);
+ timer_init_key(timer, func, flags, name, key);
}
#endif
#ifdef CONFIG_LOCKDEP
-#define __init_timer(_timer, _fn, _flags) \
+#define __timer_init(_timer, _fn, _flags) \
do { \
static struct lock_class_key __key; \
- init_timer_key((_timer), (_fn), (_flags), #_timer, &__key);\
+ timer_init_key((_timer), (_fn), (_flags), #_timer, &__key);\
} while (0)
-#define __init_timer_on_stack(_timer, _fn, _flags) \
+#define __timer_init_on_stack(_timer, _fn, _flags) \
do { \
static struct lock_class_key __key; \
- init_timer_on_stack_key((_timer), (_fn), (_flags), \
+ timer_init_key_on_stack((_timer), (_fn), (_flags), \
#_timer, &__key); \
} while (0)
#else
-#define __init_timer(_timer, _fn, _flags) \
- init_timer_key((_timer), (_fn), (_flags), NULL, NULL)
-#define __init_timer_on_stack(_timer, _fn, _flags) \
- init_timer_on_stack_key((_timer), (_fn), (_flags), NULL, NULL)
+#define __timer_init(_timer, _fn, _flags) \
+ timer_init_key((_timer), (_fn), (_flags), NULL, NULL)
+#define __timer_init_on_stack(_timer, _fn, _flags) \
+ timer_init_key_on_stack((_timer), (_fn), (_flags), NULL, NULL)
#endif
/**
@@ -115,18 +115,18 @@ static inline void init_timer_on_stack_key(struct timer_list *timer,
*
* Regular timer initialization should use either DEFINE_TIMER() above,
* or timer_setup(). For timers on the stack, timer_setup_on_stack() must
- * be used and must be balanced with a call to destroy_timer_on_stack().
+ * be used and must be balanced with a call to timer_destroy_on_stack().
*/
#define timer_setup(timer, callback, flags) \
- __init_timer((timer), (callback), (flags))
+ __timer_init((timer), (callback), (flags))
#define timer_setup_on_stack(timer, callback, flags) \
- __init_timer_on_stack((timer), (callback), (flags))
+ __timer_init_on_stack((timer), (callback), (flags))
#ifdef CONFIG_DEBUG_OBJECTS_TIMERS
-extern void destroy_timer_on_stack(struct timer_list *timer);
+extern void timer_destroy_on_stack(struct timer_list *timer);
#else
-static inline void destroy_timer_on_stack(struct timer_list *timer) { }
+static inline void timer_destroy_on_stack(struct timer_list *timer) { }
#endif
#define from_timer(var, callback_timer, timer_fieldname) \
@@ -156,28 +156,26 @@ extern int timer_reduce(struct timer_list *timer, unsigned long expires);
* The jiffies value which is added to now, when there is no timer
* in the timer wheel:
*/
-#define NEXT_TIMER_MAX_DELTA ((1UL << 30) - 1)
+#define TIMER_NEXT_MAX_DELTA ((1UL << 30) - 1)
extern void add_timer(struct timer_list *timer);
extern void add_timer_local(struct timer_list *timer);
extern void add_timer_global(struct timer_list *timer);
-extern int try_to_del_timer_sync(struct timer_list *timer);
+extern int timer_delete_sync_try(struct timer_list *timer);
extern int timer_delete_sync(struct timer_list *timer);
extern int timer_delete(struct timer_list *timer);
extern int timer_shutdown_sync(struct timer_list *timer);
extern int timer_shutdown(struct timer_list *timer);
-extern void init_timers(void);
+extern void timers_init(void);
struct hrtimer;
extern enum hrtimer_restart it_real_fn(struct hrtimer *);
-unsigned long __round_jiffies(unsigned long j, int cpu);
unsigned long __round_jiffies_relative(unsigned long j, int cpu);
unsigned long round_jiffies(unsigned long j);
unsigned long round_jiffies_relative(unsigned long j);
-unsigned long __round_jiffies_up(unsigned long j, int cpu);
unsigned long __round_jiffies_up_relative(unsigned long j, int cpu);
unsigned long round_jiffies_up(unsigned long j);
unsigned long round_jiffies_up_relative(unsigned long j);
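An on-stack usage sketch under the new names (my_callback and the one-second
timeout are placeholders); the setup/destroy pair must stay balanced:

	struct timer_list t;

	timer_setup_on_stack(&t, my_callback, 0);
	mod_timer(&t, jiffies + HZ);
	/* ... wait for the timer to fire or become unneeded ... */
	timer_delete_sync(&t);
	timer_destroy_on_stack(&t);	/* balances timer_setup_on_stack() */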
diff --git a/include/linux/topology.h b/include/linux/topology.h
index 24e715f0f6d2..cd6b4bdc9cfd 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -332,4 +332,13 @@ sched_numa_hop_mask(unsigned int node, unsigned int hops)
!IS_ERR_OR_NULL(mask); \
__hops++)
+DECLARE_PER_CPU(unsigned long, cpu_scale);
+
+static inline unsigned long topology_get_cpu_scale(int cpu)
+{
+ return per_cpu(cpu_scale, cpu);
+}
+
+void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity);
+
#endif /* _LINUX_TOPOLOGY_H */
diff --git a/include/linux/tpm.h b/include/linux/tpm.h
index 6c3125300c00..a3d8305e88a5 100644
--- a/include/linux/tpm.h
+++ b/include/linux/tpm.h
@@ -224,7 +224,7 @@ enum tpm2_const {
enum tpm2_timeouts {
TPM2_TIMEOUT_A = 750,
- TPM2_TIMEOUT_B = 2000,
+ TPM2_TIMEOUT_B = 4000,
TPM2_TIMEOUT_C = 200,
TPM2_TIMEOUT_D = 30,
TPM2_DURATION_SHORT = 20,
@@ -257,6 +257,7 @@ enum tpm2_return_codes {
TPM2_RC_TESTING = 0x090A, /* RC_WARN */
TPM2_RC_REFERENCE_H0 = 0x0910,
TPM2_RC_RETRY = 0x0922,
+ TPM2_RC_SESSION_MEMORY = 0x0903,
};
enum tpm2_command_codes {
@@ -437,6 +438,24 @@ static inline u32 tpm2_rc_value(u32 rc)
return (rc & BIT(7)) ? rc & 0xbf : rc;
}
+/*
+ * Convert a return value from tpm_transmit_cmd() to POSIX error code.
+ */
+static inline ssize_t tpm_ret_to_err(ssize_t ret)
+{
+ if (ret < 0)
+ return ret;
+
+ switch (tpm2_rc_value(ret)) {
+ case TPM2_RC_SUCCESS:
+ return 0;
+ case TPM2_RC_SESSION_MEMORY:
+ return -ENOMEM;
+ default:
+ return -EFAULT;
+ }
+}
+
#if defined(CONFIG_TCG_TPM) || defined(CONFIG_TCG_TPM_MODULE)
extern int tpm_is_tpm2(struct tpm_chip *chip);
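A sketch of how a TPM core caller might use tpm_ret_to_err()
(tpm_transmit_cmd() is internal to the TPM core; chip, buf and rc are assumed
to be set up by the caller):

	rc = tpm_transmit_cmd(chip, &buf, 0, "example command");
	rc = tpm_ret_to_err(rc);
	if (rc)
		return rc;	/* 0, -ENOMEM, -EFAULT, or a transport error */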
diff --git a/include/linux/tpm_svsm.h b/include/linux/tpm_svsm.h
new file mode 100644
index 000000000000..38e341f9761a
--- /dev/null
+++ b/include/linux/tpm_svsm.h
@@ -0,0 +1,149 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2023 James.Bottomley@HansenPartnership.com
+ * Copyright (C) 2025 Red Hat, Inc. All Rights Reserved.
+ *
+ * Helpers for the SVSM_VTPM_CMD calls used by the vTPM protocol defined by the
+ * AMD SVSM spec [1].
+ *
+ * The vTPM protocol follows the Official TPM 2.0 Reference Implementation
+ * (originally by Microsoft, now part of the TCG) simulator protocol.
+ *
+ * [1] "Secure VM Service Module for SEV-SNP Guests"
+ * Publication # 58019 Revision: 1.00
+ */
+#ifndef _TPM_SVSM_H_
+#define _TPM_SVSM_H_
+
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+#define SVSM_VTPM_MAX_BUFFER 4096 /* max req/resp buffer size */
+
+/**
+ * struct svsm_vtpm_request - Generic request for single word command
+ * @cmd: The command to send
+ *
+ * Defined by AMD SVSM spec [1] in section "8.2 SVSM_VTPM_CMD Call" -
+ * Table 15: vTPM Common Request/Response Structure
+ * Byte      Size       In/Out    Description
+ * Offset    (Bytes)
+ * 0x000     4          In        Platform command
+ *                      Out       Platform command response size
+ */
+struct svsm_vtpm_request {
+ u32 cmd;
+};
+
+/**
+ * struct svsm_vtpm_response - Generic response
+ * @size: The response size (zero if nothing follows)
+ *
+ * Defined by AMD SVSM spec [1] in section "8.2 SVSM_VTPM_CMD Call" -
+ * Table 15: vTPM Common Request/Response Structure
+ * Byte      Size       In/Out    Description
+ * Offset    (Bytes)
+ * 0x000     4          In        Platform command
+ *                      Out       Platform command response size
+ *
+ * Note: most TCG Simulator commands simply return zero here with no indication
+ * of success or failure.
+ */
+struct svsm_vtpm_response {
+ u32 size;
+};
+
+/**
+ * struct svsm_vtpm_cmd_request - Structure for a TPM_SEND_COMMAND request
+ * @cmd: The command to send (must be TPM_SEND_COMMAND)
+ * @locality: The locality
+ * @buf_size: The size of the input buffer following
+ * @buf: A buffer of size buf_size
+ *
+ * Defined by AMD SVSM spec [1] in section "8.2 SVSM_VTPM_CMD Call" -
+ * Table 16: TPM_SEND_COMMAND Request Structure
+ * Byte      Size       Meaning
+ * Offset    (Bytes)
+ * 0x000     4          Platform command (8)
+ * 0x004     1          Locality (must-be-0)
+ * 0x005     4          TPM Command size (in bytes)
+ * 0x009     Variable   TPM Command
+ *
+ * Note: the TCG Simulator expects @buf_size to be equal to the size of the
+ * specific TPM command, otherwise a TPM_RC_COMMAND_SIZE error is returned.
+ */
+struct svsm_vtpm_cmd_request {
+ u32 cmd;
+ u8 locality;
+ u32 buf_size;
+ u8 buf[];
+} __packed;
+
+/**
+ * struct svsm_vtpm_cmd_response - Structure for a TPM_SEND_COMMAND response
+ * @buf_size: The size of the output buffer following
+ * @buf: A buffer of size buf_size
+ *
+ * Defined by AMD SVSM spec [1] in section "8.2 SVSM_VTPM_CMD Call" -
+ * Table 17: TPM_SEND_COMMAND Response Structure
+ * Byte      Size       Meaning
+ * Offset    (Bytes)
+ * 0x000     4          Response size (in bytes)
+ * 0x004     Variable   Response
+ */
+struct svsm_vtpm_cmd_response {
+ u32 buf_size;
+ u8 buf[];
+};
+
+/**
+ * svsm_vtpm_cmd_request_fill() - Fill a TPM_SEND_COMMAND request to be sent to SVSM
+ * @req: The struct svsm_vtpm_cmd_request to fill
+ * @locality: The locality
+ * @buf: The buffer from where to copy the payload of the command
+ * @len: The size of the buffer
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+static inline int
+svsm_vtpm_cmd_request_fill(struct svsm_vtpm_cmd_request *req, u8 locality,
+ const u8 *buf, size_t len)
+{
+ if (len > SVSM_VTPM_MAX_BUFFER - sizeof(*req))
+ return -EINVAL;
+
+ req->cmd = 8; /* TPM_SEND_COMMAND */
+ req->locality = locality;
+ req->buf_size = len;
+
+ memcpy(req->buf, buf, len);
+
+ return 0;
+}
+
+/**
+ * svsm_vtpm_cmd_response_parse() - Parse a TPM_SEND_COMMAND response received from SVSM
+ * @resp: The struct svsm_vtpm_cmd_response to parse
+ * @buf: The buffer where to copy the response
+ * @len: The size of the buffer
+ *
+ * Return: buffer size filled with the response on success, negative error
+ * code on failure.
+ */
+static inline int
+svsm_vtpm_cmd_response_parse(const struct svsm_vtpm_cmd_response *resp, u8 *buf,
+ size_t len)
+{
+ if (len < resp->buf_size)
+ return -E2BIG;
+
+ if (resp->buf_size > SVSM_VTPM_MAX_BUFFER - sizeof(*resp))
+ return -EINVAL; // Invalid response from the platform TPM
+
+ memcpy(buf, resp->buf, resp->buf_size);
+
+ return resp->buf_size;
+}
+
+#endif /* _TPM_SVSM_H_ */
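A round-trip sketch tying the two helpers together (svsm_vtpm_call() is a
hypothetical transport; request and response share one SVSM_VTPM_MAX_BUFFER
sized buffer, since the protocol reuses the same region for both directions):

	struct svsm_vtpm_cmd_request *req = buffer;
	struct svsm_vtpm_cmd_response *resp = buffer;
	int ret;

	ret = svsm_vtpm_cmd_request_fill(req, 0, cmd, cmd_len);
	if (ret)
		return ret;

	ret = svsm_vtpm_call(buffer);	/* hypothetical SVSM_VTPM_CMD invocation */
	if (ret)
		return ret;

	return svsm_vtpm_cmd_response_parse(resp, outbuf, outbuf_len);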
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index a351763e6965..826ce3f8e1f8 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -464,16 +464,30 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
#endif
#define DECLARE_TRACE(name, proto, args) \
- __DECLARE_TRACE(name, PARAMS(proto), PARAMS(args), \
+ __DECLARE_TRACE(name##_tp, PARAMS(proto), PARAMS(args), \
cpu_online(raw_smp_processor_id()), \
PARAMS(void *__data, proto))
#define DECLARE_TRACE_CONDITION(name, proto, args, cond) \
- __DECLARE_TRACE(name, PARAMS(proto), PARAMS(args), \
+ __DECLARE_TRACE(name##_tp, PARAMS(proto), PARAMS(args), \
cpu_online(raw_smp_processor_id()) && (PARAMS(cond)), \
PARAMS(void *__data, proto))
#define DECLARE_TRACE_SYSCALL(name, proto, args) \
+ __DECLARE_TRACE_SYSCALL(name##_tp, PARAMS(proto), PARAMS(args), \
+ PARAMS(void *__data, proto))
+
+#define DECLARE_TRACE_EVENT(name, proto, args) \
+ __DECLARE_TRACE(name, PARAMS(proto), PARAMS(args), \
+ cpu_online(raw_smp_processor_id()), \
+ PARAMS(void *__data, proto))
+
+#define DECLARE_TRACE_EVENT_CONDITION(name, proto, args, cond) \
+ __DECLARE_TRACE(name, PARAMS(proto), PARAMS(args), \
+ cpu_online(raw_smp_processor_id()) && (PARAMS(cond)), \
+ PARAMS(void *__data, proto))
+
+#define DECLARE_TRACE_EVENT_SYSCALL(name, proto, args) \
__DECLARE_TRACE_SYSCALL(name, PARAMS(proto), PARAMS(args), \
PARAMS(void *__data, proto))
@@ -591,32 +605,32 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
#define DECLARE_EVENT_CLASS(name, proto, args, tstruct, assign, print)
#define DEFINE_EVENT(template, name, proto, args) \
- DECLARE_TRACE(name, PARAMS(proto), PARAMS(args))
+ DECLARE_TRACE_EVENT(name, PARAMS(proto), PARAMS(args))
#define DEFINE_EVENT_FN(template, name, proto, args, reg, unreg)\
- DECLARE_TRACE(name, PARAMS(proto), PARAMS(args))
+ DECLARE_TRACE_EVENT(name, PARAMS(proto), PARAMS(args))
#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
- DECLARE_TRACE(name, PARAMS(proto), PARAMS(args))
+ DECLARE_TRACE_EVENT(name, PARAMS(proto), PARAMS(args))
#define DEFINE_EVENT_CONDITION(template, name, proto, \
args, cond) \
- DECLARE_TRACE_CONDITION(name, PARAMS(proto), \
+ DECLARE_TRACE_EVENT_CONDITION(name, PARAMS(proto), \
PARAMS(args), PARAMS(cond))
#define TRACE_EVENT(name, proto, args, struct, assign, print) \
- DECLARE_TRACE(name, PARAMS(proto), PARAMS(args))
+ DECLARE_TRACE_EVENT(name, PARAMS(proto), PARAMS(args))
#define TRACE_EVENT_FN(name, proto, args, struct, \
assign, print, reg, unreg) \
- DECLARE_TRACE(name, PARAMS(proto), PARAMS(args))
-#define TRACE_EVENT_FN_COND(name, proto, args, cond, struct, \
+ DECLARE_TRACE_EVENT(name, PARAMS(proto), PARAMS(args))
+#define TRACE_EVENT_FN_COND(name, proto, args, cond, struct, \
assign, print, reg, unreg) \
- DECLARE_TRACE_CONDITION(name, PARAMS(proto), \
+ DECLARE_TRACE_EVENT_CONDITION(name, PARAMS(proto), \
PARAMS(args), PARAMS(cond))
#define TRACE_EVENT_CONDITION(name, proto, args, cond, \
struct, assign, print) \
- DECLARE_TRACE_CONDITION(name, PARAMS(proto), \
+ DECLARE_TRACE_EVENT_CONDITION(name, PARAMS(proto), \
PARAMS(args), PARAMS(cond))
#define TRACE_EVENT_SYSCALL(name, proto, args, struct, assign, \
print, reg, unreg) \
- DECLARE_TRACE_SYSCALL(name, PARAMS(proto), PARAMS(args))
+ DECLARE_TRACE_EVENT_SYSCALL(name, PARAMS(proto), PARAMS(args))
#define TRACE_EVENT_FLAGS(event, flag)
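
The net effect of these hunks, sketched with a hypothetical tracepoint: a bare DECLARE_TRACE() now emits _tp-suffixed symbols, while TRACE_EVENT() and friends keep their existing names by routing through the new DECLARE_TRACE_EVENT() variants. The matching DEFINE_TRACE() in a .c file is omitted here.

#include <linux/tracepoint.h>

DECLARE_TRACE(foo_example,
	TP_PROTO(int cpu),
	TP_ARGS(cpu));

/* Callers now use the _tp-suffixed wrapper and _enabled() check. */
static inline void foo_hotpath(int cpu)
{
	if (trace_foo_example_tp_enabled())
		trace_foo_example_tp(cpu);
}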
diff --git a/include/linux/tsm-mr.h b/include/linux/tsm-mr.h
new file mode 100644
index 000000000000..50a521f4ac97
--- /dev/null
+++ b/include/linux/tsm-mr.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __TSM_MR_H
+#define __TSM_MR_H
+
+#include <crypto/hash_info.h>
+
+/**
+ * struct tsm_measurement_register - describes an architectural measurement
+ * register (MR)
+ * @mr_name: name of the MR
+ * @mr_value: buffer containing the current value of the MR
+ * @mr_size: size of the MR - typically the digest size of @mr_hash
+ * @mr_flags: bitwise OR of zero or more flags, detailed below
+ * @mr_hash: optional hash identifier defined in include/uapi/linux/hash_info.h
+ *
+ * A CC guest driver encloses an array of this structure in struct
+ * tsm_measurements to detail the measurement facility supported by the
+ * underlying CC hardware.
+ *
+ * @mr_name and @mr_value must stay valid until this structure is no longer in
+ * use.
+ *
+ * @mr_flags is the bitwise-OR of zero or more of the flags below.
+ *
+ * * %TSM_MR_F_READABLE - the sysfs attribute corresponding to this MR is readable.
+ * * %TSM_MR_F_WRITABLE - the sysfs attribute corresponding to this MR is writable.
+ * The semantics are typically to extend the MR, but may vary depending on the
+ * architecture and the MR.
+ * * %TSM_MR_F_LIVE - this MR's value may differ from the last value written,
+ * so it must be read back from the underlying CC hardware/firmware.
+ * * %TSM_MR_F_RTMR - bitwise-OR of %TSM_MR_F_LIVE and %TSM_MR_F_WRITABLE.
+ * * %TSM_MR_F_NOHASH - this MR does NOT have an associated hash algorithm.
+ * @mr_hash will be ignored when this flag is set.
+ */
+struct tsm_measurement_register {
+ const char *mr_name;
+ void *mr_value;
+ u32 mr_size;
+ u32 mr_flags;
+ enum hash_algo mr_hash;
+};
+
+#define TSM_MR_F_NOHASH 1
+#define TSM_MR_F_WRITABLE 2
+#define TSM_MR_F_READABLE 4
+#define TSM_MR_F_LIVE 8
+#define TSM_MR_F_RTMR (TSM_MR_F_LIVE | TSM_MR_F_WRITABLE)
+
+#define TSM_MR_(mr, hash) \
+ .mr_name = #mr, .mr_size = hash##_DIGEST_SIZE, \
+ .mr_hash = HASH_ALGO_##hash, .mr_flags = TSM_MR_F_READABLE
+
+/**
+ * struct tsm_measurements - defines the CC architecture-specific measurement
+ * facility and methods for updating measurement registers (MRs)
+ * @mrs: Array of MR definitions.
+ * @nr_mrs: Number of elements in @mrs.
+ * @refresh: Callback function to load/sync all MRs from TVM hardware/firmware
+ * into the kernel cache.
+ * @write: Callback function to write to the MR specified by the parameter @mr.
+ * Typically, writing to an MR extends the input buffer to that MR.
+ *
+ * The @refresh callback is invoked when an MR with %TSM_MR_F_LIVE set is being
+ * read and the cache is stale. It must reload all MRs with %TSM_MR_F_LIVE set.
+ * The function parameter @tm points back to this structure.
+ *
+ * The @write callback is invoked whenever an MR is being written. It takes two
+ * additional parameters besides @tm:
+ *
+ * * @mr - points to the MR (an element of @tm->mrs) being written.
+ * * @data - contains the bytes to write; its size is @mr->mr_size.
+ *
+ * Both @refresh and @write should return 0 on success and an appropriate error
+ * code on failure.
+ */
+struct tsm_measurements {
+ const struct tsm_measurement_register *mrs;
+ size_t nr_mrs;
+ int (*refresh)(const struct tsm_measurements *tm);
+ int (*write)(const struct tsm_measurements *tm,
+ const struct tsm_measurement_register *mr, const u8 *data);
+};
+
+const struct attribute_group *
+tsm_mr_create_attribute_group(const struct tsm_measurements *tm);
+void tsm_mr_free_attribute_group(const struct attribute_group *attr_grp);
+
+#endif
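
A sketch of a CC guest driver filling in this API, with hypothetical MR names, storage, and callbacks (SHA384_DIGEST_SIZE comes from <crypto/sha2.h>; the in-tree user is the TDX guest driver):

#include <crypto/sha2.h>
#include <linux/tsm-mr.h>

static u8 example_mrtd[SHA384_DIGEST_SIZE];
static u8 example_rtmr0[SHA384_DIGEST_SIZE];

static const struct tsm_measurement_register example_mrs[] = {
	/* Read-only MR, declared via the TSM_MR_() shorthand. */
	{ TSM_MR_(MRTD, SHA384), .mr_value = example_mrtd },
	/* Run-time MR: live value, and writes extend it. */
	{
		.mr_name  = "RTMR0",
		.mr_value = example_rtmr0,
		.mr_size  = SHA384_DIGEST_SIZE,
		.mr_hash  = HASH_ALGO_SHA384,
		.mr_flags = TSM_MR_F_READABLE | TSM_MR_F_RTMR,
	},
};

static int example_refresh(const struct tsm_measurements *tm)
{
	/* Reload every TSM_MR_F_LIVE MR from hardware/firmware. */
	return 0;
}

static int example_write(const struct tsm_measurements *tm,
			 const struct tsm_measurement_register *mr,
			 const u8 *data)
{
	/* Extend @mr with the @mr->mr_size bytes at @data. */
	return 0;
}

static const struct tsm_measurements example_tm = {
	.mrs     = example_mrs,
	.nr_mrs  = ARRAY_SIZE(example_mrs),
	.refresh = example_refresh,
	.write   = example_write,
};

The driver would then publish the group returned by tsm_mr_create_attribute_group(&example_tm) and release it with tsm_mr_free_attribute_group() on teardown.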
diff --git a/include/linux/tsm.h b/include/linux/tsm.h
index 11b0c525be30..431054810dca 100644
--- a/include/linux/tsm.h
+++ b/include/linux/tsm.h
@@ -6,17 +6,17 @@
#include <linux/types.h>
#include <linux/uuid.h>
-#define TSM_INBLOB_MAX 64
-#define TSM_OUTBLOB_MAX SZ_32K
+#define TSM_REPORT_INBLOB_MAX 64
+#define TSM_REPORT_OUTBLOB_MAX SZ_32K
/*
* Privilege level is a nested permission concept to allow confidential
 * guests to partition address space; four levels are supported.
*/
-#define TSM_PRIVLEVEL_MAX 3
+#define TSM_REPORT_PRIVLEVEL_MAX 3
/**
- * struct tsm_desc - option descriptor for generating tsm report blobs
+ * struct tsm_report_desc - option descriptor for generating tsm report blobs
* @privlevel: optional privilege level to associate with @outblob
* @inblob_len: sizeof @inblob
* @inblob: arbitrary input data
@@ -24,10 +24,10 @@
* @service_guid: optional service-provider service guid to attest
* @service_manifest_version: optional service-provider service manifest version requested
*/
-struct tsm_desc {
+struct tsm_report_desc {
unsigned int privlevel;
size_t inblob_len;
- u8 inblob[TSM_INBLOB_MAX];
+ u8 inblob[TSM_REPORT_INBLOB_MAX];
char *service_provider;
guid_t service_guid;
unsigned int service_manifest_version;
@@ -44,7 +44,7 @@ struct tsm_desc {
* @manifestblob: (optional) manifest data associated with the report
*/
struct tsm_report {
- struct tsm_desc desc;
+ struct tsm_report_desc desc;
size_t outblob_len;
u8 *outblob;
size_t auxblob_len;
@@ -88,7 +88,7 @@ enum tsm_bin_attr_index {
};
/**
- * struct tsm_ops - attributes and operations for tsm instances
+ * struct tsm_report_ops - attributes and operations for tsm_report instances
* @name: tsm id reflected in /sys/kernel/config/tsm/report/$report/provider
* @privlevel_floor: convey base privlevel for nested scenarios
* @report_new: Populate @report with the report blob and auxblob
@@ -99,7 +99,7 @@ enum tsm_bin_attr_index {
 * Implementation-specific ops; only one is expected to be registered at
 * a time, i.e. only one of "sev-guest", "tdx-guest", etc.
*/
-struct tsm_ops {
+struct tsm_report_ops {
const char *name;
unsigned int privlevel_floor;
int (*report_new)(struct tsm_report *report, void *data);
@@ -107,6 +107,6 @@ struct tsm_ops {
bool (*report_bin_attr_visible)(int n);
};
-int tsm_register(const struct tsm_ops *ops, void *priv);
-int tsm_unregister(const struct tsm_ops *ops);
+int tsm_report_register(const struct tsm_report_ops *ops, void *priv);
+int tsm_report_unregister(const struct tsm_report_ops *ops);
#endif /* __TSM_H */
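
After the rename, provider registration looks like the following sketch (hypothetical "example-guest" provider and placeholder callback; the in-tree users are sev-guest and tdx-guest):

#include <linux/tsm.h>

static int example_report_new(struct tsm_report *report, void *data)
{
	/* Produce report->outblob/outblob_len from report->desc
	 * (inblob, privlevel, ...) via the platform attestation call. */
	return -ENXIO;	/* placeholder */
}

static const struct tsm_report_ops example_report_ops = {
	.name       = "example-guest",
	.report_new = example_report_new,
};

static int __init example_init(void)
{
	return tsm_report_register(&example_report_ops, NULL);
}

static void __exit example_exit(void)
{
	tsm_report_unregister(&example_report_ops);
}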
diff --git a/include/linux/ubsan.h b/include/linux/ubsan.h
index d8219cbe09ff..3ab8d38aedb8 100644
--- a/include/linux/ubsan.h
+++ b/include/linux/ubsan.h
@@ -2,10 +2,10 @@
#ifndef _LINUX_UBSAN_H
#define _LINUX_UBSAN_H
-#ifdef CONFIG_UBSAN_TRAP
-const char *report_ubsan_failure(struct pt_regs *regs, u32 check_type);
+#if defined(CONFIG_UBSAN_TRAP) || defined(CONFIG_UBSAN_KVM_EL2)
+const char *report_ubsan_failure(u32 check_type);
#else
-static inline const char *report_ubsan_failure(struct pt_regs *regs, u32 check_type)
+static inline const char *report_ubsan_failure(u32 check_type)
{
return NULL;
}
diff --git a/include/linux/udp.h b/include/linux/udp.h
index 0807e21cfec9..4e1a672af4c5 100644
--- a/include/linux/udp.h
+++ b/include/linux/udp.h
@@ -101,6 +101,13 @@ struct udp_sock {
/* Cache friendly copy of sk->sk_peek_off >= 0 */
bool peeking_with_offset;
+
+ /*
+ * Accounting for the tunnel GRO fastpath.
+ * Not guarded by a config #ifdef, as it uses space already
+ * available in the last UDP socket cacheline.
+ */
+ struct hlist_node tunnel_list;
};
#define udp_test_bit(nr, sk) \
@@ -209,6 +216,9 @@ static inline void udp_allow_gso(struct sock *sk)
#define udp_portaddr_for_each_entry(__sk, list) \
hlist_for_each_entry(__sk, list, __sk_common.skc_portaddr_node)
+#define udp_portaddr_for_each_entry_from(__sk) \
+ hlist_for_each_entry_from(__sk, __sk_common.skc_portaddr_node)
+
#define udp_portaddr_for_each_entry_rcu(__sk, list) \
hlist_for_each_entry_rcu(__sk, list, __sk_common.skc_portaddr_node)
@@ -219,4 +229,13 @@ static inline void udp_allow_gso(struct sock *sk)
#define IS_UDPLITE(__sk) (__sk->sk_protocol == IPPROTO_UDPLITE)
+static inline struct sock *udp_tunnel_sk(const struct net *net, bool is_ipv6)
+{
+#if IS_ENABLED(CONFIG_NET_UDP_TUNNEL)
+ return rcu_dereference(net->ipv4.udp_tunnel_gro[is_ipv6].sk);
+#else
+ return NULL;
+#endif
+}
+
#endif /* _LINUX_UDP_H */
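
A sketch of the helper's intended use (hypothetical caller): since udp_tunnel_sk() uses rcu_dereference(), it must run under rcu_read_lock().

/* Check whether @sk is the cached tunnel socket for the GRO fastpath. */
static bool example_is_gro_tunnel_sk(const struct net *net,
				     const struct sock *sk, bool is_ipv6)
{
	return udp_tunnel_sk(net, is_ipv6) == sk;
}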
diff --git a/include/linux/uio.h b/include/linux/uio.h
index 49ece9e1888f..393d0622cc28 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -99,7 +99,13 @@ static inline const struct iovec *iter_iov(const struct iov_iter *iter)
}
#define iter_iov_addr(iter) (iter_iov(iter)->iov_base + (iter)->iov_offset)
-#define iter_iov_len(iter) (iter_iov(iter)->iov_len - (iter)->iov_offset)
+
+static inline size_t iter_iov_len(const struct iov_iter *i)
+{
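+ /*
+ * For ITER_UBUF, iter_iov()->iov_len aliases ->count (via the
+ * __ubuf_iovec union), which already reflects any advance, so
+ * subtracting ->iov_offset here would double-count consumed bytes.
+ */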
+ if (i->iter_type == ITER_UBUF)
+ return i->count;
+ return iter_iov(i)->iov_len - i->iov_offset;
+}
static inline enum iter_type iov_iter_type(const struct iov_iter *i)
{
diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
index 939ceabcaf06..335c360d4f9b 100644
--- a/include/linux/vermagic.h
+++ b/include/linux/vermagic.h
@@ -33,7 +33,6 @@
#define MODULE_VERMAGIC_MODVERSIONS ""
#endif
#ifdef RANDSTRUCT
-#include <generated/randstruct_hash.h>
#define MODULE_RANDSTRUCT "RANDSTRUCT_" RANDSTRUCT_HASHED_SEED
#else
#define MODULE_RANDSTRUCT
diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h
index 169c7d367fac..b3e1d30c765b 100644
--- a/include/linux/virtio_config.h
+++ b/include/linux/virtio_config.h
@@ -329,6 +329,8 @@ static inline
bool virtio_get_shm_region(struct virtio_device *vdev,
struct virtio_shm_region *region, u8 id)
{
+ if (!region->len)
+ return false;
if (!vdev->config->get_shm_region)
return false;
return vdev->config->get_shm_region(vdev, region, id);
diff --git a/include/linux/virtio_vsock.h b/include/linux/virtio_vsock.h
index 0387d64e2c66..36fb3edfa403 100644
--- a/include/linux/virtio_vsock.h
+++ b/include/linux/virtio_vsock.h
@@ -140,6 +140,7 @@ struct virtio_vsock_sock {
u32 last_fwd_cnt;
u32 rx_bytes;
u32 buf_alloc;
+ u32 buf_used;
struct sk_buff_head rx_queue;
u32 msg_count;
};
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 5ca8d4dd149d..fdc9aeb74a44 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -114,6 +114,14 @@ static inline unsigned long arch_vmap_pte_range_map_size(unsigned long addr, uns
}
#endif
+#ifndef arch_vmap_pte_range_unmap_size
+static inline unsigned long arch_vmap_pte_range_unmap_size(unsigned long addr,
+ pte_t *ptep)
+{
+ return PAGE_SIZE;
+}
+#endif
+
#ifndef arch_vmap_pte_supported_shift
static inline int arch_vmap_pte_supported_shift(unsigned long size)
{
@@ -169,8 +177,13 @@ void *__vmalloc_node_noprof(unsigned long size, unsigned long align, gfp_t gfp_m
int node, const void *caller) __alloc_size(1);
#define __vmalloc_node(...) alloc_hooks(__vmalloc_node_noprof(__VA_ARGS__))
-void *vmalloc_huge_noprof(unsigned long size, gfp_t gfp_mask) __alloc_size(1);
-#define vmalloc_huge(...) alloc_hooks(vmalloc_huge_noprof(__VA_ARGS__))
+void *vmalloc_huge_node_noprof(unsigned long size, gfp_t gfp_mask, int node) __alloc_size(1);
+#define vmalloc_huge_node(...) alloc_hooks(vmalloc_huge_node_noprof(__VA_ARGS__))
+
+static inline void *vmalloc_huge(unsigned long size, gfp_t gfp_mask)
+{
+ return vmalloc_huge_node(size, gfp_mask, NUMA_NO_NODE);
+}
extern void *__vmalloc_array_noprof(size_t n, size_t size, gfp_t flags) __alloc_size(1, 2);
#define __vmalloc_array(...) alloc_hooks(__vmalloc_array_noprof(__VA_ARGS__))
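
A usage sketch (hypothetical size and caller) of the new node-aware variant; vmalloc_huge() keeps its old behavior by delegating with NUMA_NO_NODE.

#include <linux/sizes.h>
#include <linux/vmalloc.h>

/* Allocate a large table near @node, allowing huge page mappings. */
static void *example_alloc_table(int node)
{
	return vmalloc_huge_node(SZ_64M, GFP_KERNEL, node);
}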
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index b0dc957c3e56..6e30f275da77 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -316,7 +316,7 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }
#define __INIT_DELAYED_WORK(_work, _func, _tflags) \
do { \
INIT_WORK(&(_work)->work, (_func)); \
- __init_timer(&(_work)->timer, \
+ __timer_init(&(_work)->timer, \
delayed_work_timer_fn, \
(_tflags) | TIMER_IRQSAFE); \
} while (0)
@@ -324,7 +324,7 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }
#define __INIT_DELAYED_WORK_ONSTACK(_work, _func, _tflags) \
do { \
INIT_WORK_ONSTACK(&(_work)->work, (_func)); \
- __init_timer_on_stack(&(_work)->timer, \
+ __timer_init_on_stack(&(_work)->timer, \
delayed_work_timer_fn, \
(_tflags) | TIMER_IRQSAFE); \
} while (0)
@@ -480,7 +480,7 @@ void workqueue_softirq_dead(unsigned int cpu);
* executing at most one work item for the workqueue.
*
* For unbound workqueues, @max_active limits the number of in-flight work items
- * for the whole system. e.g. @max_active of 16 indicates that that there can be
+ * for the whole system. e.g. @max_active of 16 indicates that there can be
* at most 16 work items executing for the workqueue in the whole system.
*
* As sharing the same active counter for an unbound workqueue across multiple