Diffstat (limited to 'include')
-rw-r--r--  include/acpi/acbuffer.h | 2
-rw-r--r--  include/acpi/acconfig.h | 2
-rw-r--r--  include/acpi/acexcep.h | 2
-rw-r--r--  include/acpi/acnames.h | 2
-rw-r--r--  include/acpi/acoutput.h | 2
-rw-r--r--  include/acpi/acpi.h | 2
-rw-r--r--  include/acpi/acpiosxf.h | 2
-rw-r--r--  include/acpi/acpixf.h | 4
-rw-r--r--  include/acpi/acrestyp.h | 2
-rw-r--r--  include/acpi/actbl.h | 8
-rw-r--r--  include/acpi/actbl1.h | 47
-rw-r--r--  include/acpi/actbl2.h | 335
-rw-r--r--  include/acpi/actbl3.h | 4
-rw-r--r--  include/acpi/actypes.h | 10
-rw-r--r--  include/acpi/acuuid.h | 2
-rw-r--r--  include/acpi/cppc_acpi.h | 30
-rw-r--r--  include/acpi/platform/acenv.h | 2
-rw-r--r--  include/acpi/platform/acenvex.h | 2
-rw-r--r--  include/acpi/platform/acgcc.h | 10
-rw-r--r--  include/acpi/platform/acgccex.h | 2
-rw-r--r--  include/acpi/platform/aclinux.h | 2
-rw-r--r--  include/acpi/platform/aclinuxex.h | 2
-rw-r--r--  include/acpi/platform/aczephyr.h | 2
-rw-r--r--  include/asm-generic/simd.h | 8
-rw-r--r--  include/asm-generic/vmlinux.lds.h | 4
-rw-r--r--  include/crypto/acompress.h | 109
-rw-r--r--  include/crypto/algapi.h | 37
-rw-r--r--  include/crypto/blake2b.h | 31
-rw-r--r--  include/crypto/chacha.h | 89
-rw-r--r--  include/crypto/ctr.h | 50
-rw-r--r--  include/crypto/ghash.h | 4
-rw-r--r--  include/crypto/hash.h | 176
-rw-r--r--  include/crypto/internal/acompress.h | 128
-rw-r--r--  include/crypto/internal/blake2b.h | 92
-rw-r--r--  include/crypto/internal/blockhash.h | 52
-rw-r--r--  include/crypto/internal/chacha.h | 43
-rw-r--r--  include/crypto/internal/engine.h | 5
-rw-r--r--  include/crypto/internal/geniv.h | 1
-rw-r--r--  include/crypto/internal/hash.h | 117
-rw-r--r--  include/crypto/internal/poly1305.h | 28
-rw-r--r--  include/crypto/internal/scompress.h | 17
-rw-r--r--  include/crypto/internal/sha2.h | 66
-rw-r--r--  include/crypto/internal/simd.h | 10
-rw-r--r--  include/crypto/internal/skcipher.h | 49
-rw-r--r--  include/crypto/md5.h | 3
-rw-r--r--  include/crypto/null.h | 3
-rw-r--r--  include/crypto/poly1305.h | 67
-rw-r--r--  include/crypto/polyval.h | 8
-rw-r--r--  include/crypto/rng.h | 8
-rw-r--r--  include/crypto/scatterwalk.h | 65
-rw-r--r--  include/crypto/sha1.h | 9
-rw-r--r--  include/crypto/sha1_base.h | 81
-rw-r--r--  include/crypto/sha2.h | 62
-rw-r--r--  include/crypto/sha256_base.h | 135
-rw-r--r--  include/crypto/sha3.h | 20
-rw-r--r--  include/crypto/sha512_base.h | 88
-rw-r--r--  include/crypto/sig.h | 2
-rw-r--r--  include/crypto/sm3.h | 4
-rw-r--r--  include/crypto/sm3_base.h | 92
-rw-r--r--  include/crypto/streebog.h | 5
-rw-r--r--  include/drm/Makefile | 2
-rw-r--r--  include/drm/bridge/analogix_dp.h | 7
-rw-r--r--  include/drm/display/drm_dp.h | 1
-rw-r--r--  include/drm/display/drm_dp_helper.h | 101
-rw-r--r--  include/drm/display/drm_hdmi_helper.h | 6
-rw-r--r--  include/drm/drm_atomic.h | 3
-rw-r--r--  include/drm/drm_bridge.h | 195
-rw-r--r--  include/drm/drm_bridge_helper.h | 12
-rw-r--r--  include/drm/drm_device.h | 41
-rw-r--r--  include/drm/drm_drv.h | 5
-rw-r--r--  include/drm/drm_edid.h | 2
-rw-r--r--  include/drm/drm_file.h | 3
-rw-r--r--  include/drm/drm_gem.h | 15
-rw-r--r--  include/drm/drm_gem_shmem_helper.h | 45
-rw-r--r--  include/drm/drm_kunit_helpers.h | 8
-rw-r--r--  include/drm/drm_mipi_dsi.h | 23
-rw-r--r--  include/drm/drm_mode_config.h | 10
-rw-r--r--  include/drm/drm_panel.h | 49
-rw-r--r--  include/drm/drm_panic.h | 12
-rw-r--r--  include/drm/drm_plane.h | 17
-rw-r--r--  include/drm/drm_print.h | 20
-rw-r--r--  include/drm/drm_probe_helper.h | 2
-rw-r--r--  include/drm/gpu_scheduler.h | 112
-rw-r--r--  include/drm/intel/intel-gtt.h | 2
-rw-r--r--  include/drm/intel/pciids.h | 4
-rw-r--r--  include/drm/ttm/ttm_bo.h | 2
-rw-r--r--  include/dt-bindings/power/mediatek,mt6893-power.h | 35
-rw-r--r--  include/dt-bindings/power/rockchip,rk3562-power.h | 35
-rw-r--r--  include/dt-bindings/sound/cs48l32.h | 20
-rw-r--r--  include/hyperv/hvgdk_mini.h | 2
-rw-r--r--  include/linux/acpi.h | 18
-rw-r--r--  include/linux/alloc_tag.h | 12
-rw-r--r--  include/linux/arch_topology.h | 8
-rw-r--r--  include/linux/binfmts.h | 1
-rw-r--r--  include/linux/bio.h | 25
-rw-r--r--  include/linux/blk-mq.h | 10
-rw-r--r--  include/linux/blk_types.h | 10
-rw-r--r--  include/linux/blkdev.h | 24
-rw-r--r--  include/linux/bpf-cgroup.h | 9
-rw-r--r--  include/linux/cgroup-defs.h | 100
-rw-r--r--  include/linux/cgroup.h | 50
-rw-r--r--  include/linux/cleanup.h | 19
-rw-r--r--  include/linux/codetag.h | 8
-rw-r--r--  include/linux/compiler-version.h | 30
-rw-r--r--  include/linux/compiler_types.h | 5
-rw-r--r--  include/linux/configfs.h | 8
-rw-r--r--  include/linux/coredump.h | 1
-rw-r--r--  include/linux/cpu.h | 2
-rw-r--r--  include/linux/cpufreq.h | 22
-rw-r--r--  include/linux/cpuhotplug.h | 1
-rw-r--r--  include/linux/cpumask.h | 75
-rw-r--r--  include/linux/crc16.h | 9
-rw-r--r--  include/linux/crc32.h | 5
-rw-r--r--  include/linux/crypto.h | 85
-rw-r--r--  include/linux/dcache.h | 4
-rw-r--r--  include/linux/device.h | 38
-rw-r--r--  include/linux/device/devres.h | 41
-rw-r--r--  include/linux/device_cgroup.h | 7
-rw-r--r--  include/linux/dma-buf.h | 27
-rw-r--r--  include/linux/dma-fence-unwrap.h | 2
-rw-r--r--  include/linux/dma-fence.h | 25
-rw-r--r--  include/linux/dma-map-ops.h | 54
-rw-r--r--  include/linux/dma-mapping.h | 85
-rw-r--r--  include/linux/dmapool.h | 21
-rw-r--r--  include/linux/energy_model.h | 2
-rw-r--r--  include/linux/entry-common.h | 43
-rw-r--r--  include/linux/file.h | 2
-rw-r--r--  include/linux/find.h | 25
-rw-r--r--  include/linux/fs.h | 48
-rw-r--r--  include/linux/fs_parser.h | 7
-rw-r--r--  include/linux/ftrace.h | 9
-rw-r--r--  include/linux/futex.h | 36
-rw-r--r--  include/linux/gpio/consumer.h | 11
-rw-r--r--  include/linux/gpio/driver.h | 5
-rw-r--r--  include/linux/highmem-internal.h | 13
-rw-r--r--  include/linux/highmem.h | 10
-rw-r--r--  include/linux/hugetlb.h | 5
-rw-r--r--  include/linux/ima.h | 3
-rw-r--r--  include/linux/interrupt.h | 2
-rw-r--r--  include/linux/io.h | 21
-rw-r--r--  include/linux/io_uring/cmd.h | 9
-rw-r--r--  include/linux/io_uring_types.h | 15
-rw-r--r--  include/linux/iommu.h | 4
-rw-r--r--  include/linux/irq.h | 28
-rw-r--r--  include/linux/irqchip/irq-msi-lib.h | 27
-rw-r--r--  include/linux/irqdomain.h | 499
-rw-r--r--  include/linux/jiffies.h | 2
-rw-r--r--  include/linux/kexec.h | 9
-rw-r--r--  include/linux/livepatch_sched.h | 14
-rw-r--r--  include/linux/mfd/max77759.h | 165
-rw-r--r--  include/linux/mm.h | 2
-rw-r--r--  include/linux/mm_types.h | 7
-rw-r--r--  include/linux/mman.h | 2
-rw-r--r--  include/linux/mmap_lock.h | 4
-rw-r--r--  include/linux/mmc/card.h | 1
-rw-r--r--  include/linux/mmc/slot-gpio.h | 4
-rw-r--r--  include/linux/mmzone.h | 1
-rw-r--r--  include/linux/mod_devicetable.h | 2
-rw-r--r--  include/linux/mount.h | 87
-rw-r--r--  include/linux/msi.h | 23
-rw-r--r--  include/linux/namei.h | 17
-rw-r--r--  include/linux/net.h | 4
-rw-r--r--  include/linux/nvme.h | 77
-rw-r--r--  include/linux/overflow.h | 23
-rw-r--r--  include/linux/page-flags.h | 7
-rw-r--r--  include/linux/panic.h | 2
-rw-r--r--  include/linux/part_stat.h | 2
-rw-r--r--  include/linux/pci-p2pdma.h | 85
-rw-r--r--  include/linux/pci.h | 4
-rw-r--r--  include/linux/pci_ids.h | 2
-rw-r--r--  include/linux/percpu-rwsem.h | 20
-rw-r--r--  include/linux/percpu.h | 4
-rw-r--r--  include/linux/perf_event.h | 298
-rw-r--r--  include/linux/pid.h | 2
-rw-r--r--  include/linux/pidfs.h | 8
-rw-r--r--  include/linux/pm_domain.h | 10
-rw-r--r--  include/linux/pm_opp.h | 32
-rw-r--r--  include/linux/pm_runtime.h | 4
-rw-r--r--  include/linux/power_supply.h | 1
-rw-r--r--  include/linux/psp-sev.h | 3
-rw-r--r--  include/linux/pwm.h | 10
-rw-r--r--  include/linux/ratelimit.h | 37
-rw-r--r--  include/linux/ratelimit_types.h | 5
-rw-r--r--  include/linux/rcuref.h | 22
-rw-r--r--  include/linux/regmap.h | 3
-rw-r--r--  include/linux/regulator/max8952.h | 2
-rw-r--r--  include/linux/regulator/pca9450.h | 5
-rw-r--r--  include/linux/resctrl.h | 38
-rw-r--r--  include/linux/resctrl_types.h | 16
-rw-r--r--  include/linux/restart_block.h | 2
-rw-r--r--  include/linux/sched.h | 21
-rw-r--r--  include/linux/sched/topology.h | 6
-rw-r--r--  include/linux/screen_info.h | 9
-rw-r--r--  include/linux/security.h | 12
-rw-r--r--  include/linux/shmem_fs.h | 7
-rw-r--r--  include/linux/spi/sh_msiof.h | 125
-rw-r--r--  include/linux/spi/spi.h | 78
-rw-r--r--  include/linux/stat.h | 1
-rw-r--r--  include/linux/string_helpers.h | 1
-rw-r--r--  include/linux/suspend.h | 9
-rw-r--r--  include/linux/tick.h | 7
-rw-r--r--  include/linux/timer.h | 42
-rw-r--r--  include/linux/topology.h | 9
-rw-r--r--  include/linux/tpm_svsm.h | 149
-rw-r--r--  include/linux/vermagic.h | 1
-rw-r--r--  include/linux/vmalloc.h | 9
-rw-r--r--  include/linux/workqueue.h | 6
-rw-r--r--  include/media/v4l2-common.h | 58
-rw-r--r--  include/media/vsp1.h | 4
-rw-r--r--  include/memory/renesas-rpc-if.h | 4
-rw-r--r--  include/scsi/scsi_host.h | 2
-rw-r--r--  include/sound/core.h | 1
-rw-r--r--  include/sound/cs-amp-lib.h | 2
-rw-r--r--  include/sound/cs35l56.h | 29
-rw-r--r--  include/sound/cs42l52.h | 29
-rw-r--r--  include/sound/cs42l56.h | 45
-rw-r--r--  include/sound/cs42l73.h | 19
-rw-r--r--  include/sound/cs48l32.h | 47
-rw-r--r--  include/sound/cs48l32_registers.h | 530
-rw-r--r--  include/sound/dmaengine_pcm.h | 2
-rw-r--r--  include/sound/gus.h | 22
-rw-r--r--  include/sound/hdaudio.h | 6
-rw-r--r--  include/sound/hdaudio_ext.h | 6
-rw-r--r--  include/sound/jack.h | 6
-rw-r--r--  include/sound/pcm.h | 2
-rw-r--r--  include/sound/sdca_asoc.h | 42
-rw-r--r--  include/sound/sdca_function.h | 71
-rw-r--r--  include/sound/snd_wavefront.h | 4
-rw-r--r--  include/sound/soc-acpi.h | 13
-rw-r--r--  include/sound/soc-dapm.h | 4
-rw-r--r--  include/sound/soc.h | 15
-rw-r--r--  include/sound/soc_sdw_utils.h | 5
-rw-r--r--  include/sound/sof.h | 1
-rw-r--r--  include/sound/tas2781-comlib-i2c.h | 37
-rw-r--r--  include/sound/tas2781.h | 81
-rw-r--r--  include/sound/tpa6130a2-plat.h | 17
-rw-r--r--  include/trace/events/block.h | 17
-rw-r--r--  include/trace/events/btrfs.h | 89
-rw-r--r--  include/trace/events/cgroup.h | 12
-rw-r--r--  include/trace/events/erofs.h | 2
-rw-r--r--  include/trace/events/exceptions.h | 43
-rw-r--r--  include/trace/events/io_uring.h | 2
-rw-r--r--  include/trace/events/sched.h | 34
-rw-r--r--  include/uapi/drm/amdgpu_drm.h | 317
-rw-r--r--  include/uapi/drm/asahi_drm.h | 1194
-rw-r--r--  include/uapi/drm/drm.h | 4
-rw-r--r--  include/uapi/drm/drm_fourcc.h | 45
-rw-r--r--  include/uapi/drm/nova_drm.h | 101
-rw-r--r--  include/uapi/drm/panthor_drm.h | 23
-rw-r--r--  include/uapi/drm/virtgpu_drm.h | 6
-rw-r--r--  include/uapi/drm/xe_drm.h | 6
-rw-r--r--  include/uapi/linux/blktrace_api.h | 2
-rw-r--r--  include/uapi/linux/cec-funcs.h | 40
-rw-r--r--  include/uapi/linux/fscrypt.h | 6
-rw-r--r--  include/uapi/linux/futex.h | 9
-rw-r--r--  include/uapi/linux/io_uring.h | 12
-rw-r--r--  include/uapi/linux/kfd_ioctl.h | 5
-rw-r--r--  include/uapi/linux/media/amlogic/c3-isp-config.h | 564
-rw-r--r--  include/uapi/linux/perf_event.h | 657
-rw-r--r--  include/uapi/linux/pidfd.h | 18
-rw-r--r--  include/uapi/linux/prctl.h | 7
-rw-r--r--  include/uapi/linux/stat.h | 8
-rw-r--r--  include/uapi/linux/taskstats.h | 47
-rw-r--r--  include/uapi/linux/ublk_cmd.h | 128
-rw-r--r--  include/uapi/linux/videodev2.h | 18
-rw-r--r--  include/uapi/linux/virtio_gpu.h | 3
-rw-r--r--  include/video/pixel_format.h | 41
267 files changed, 8168 insertions, 2658 deletions
diff --git a/include/acpi/acbuffer.h b/include/acpi/acbuffer.h
index 252b235dce5a..cbc9aeabcd99 100644
--- a/include/acpi/acbuffer.h
+++ b/include/acpi/acbuffer.h
@@ -3,7 +3,7 @@
*
* Name: acbuffer.h - Support for buffers returned by ACPI predefined names
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/include/acpi/acconfig.h b/include/acpi/acconfig.h
index 2da5f4a6e814..521d4bfa6ef0 100644
--- a/include/acpi/acconfig.h
+++ b/include/acpi/acconfig.h
@@ -3,7 +3,7 @@
*
* Name: acconfig.h - Global configuration constants
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/include/acpi/acexcep.h b/include/acpi/acexcep.h
index c5ecd0a0170c..53c98f5fe3c3 100644
--- a/include/acpi/acexcep.h
+++ b/include/acpi/acexcep.h
@@ -3,7 +3,7 @@
*
* Name: acexcep.h - Exception codes returned by the ACPI subsystem
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/include/acpi/acnames.h b/include/acpi/acnames.h
index 76aa6aa346ba..cb6a4dcc4e8e 100644
--- a/include/acpi/acnames.h
+++ b/include/acpi/acnames.h
@@ -3,7 +3,7 @@
*
* Name: acnames.h - Global names and strings
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/include/acpi/acoutput.h b/include/acpi/acoutput.h
index 5e0346142f98..3584f33e352c 100644
--- a/include/acpi/acoutput.h
+++ b/include/acpi/acoutput.h
@@ -3,7 +3,7 @@
*
* Name: acoutput.h -- debug output
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/include/acpi/acpi.h b/include/acpi/acpi.h
index 8b4a497c1300..92bf80937e5f 100644
--- a/include/acpi/acpi.h
+++ b/include/acpi/acpi.h
@@ -3,7 +3,7 @@
*
* Name: acpi.h - Master public include file used to interface to ACPICA
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/include/acpi/acpiosxf.h b/include/acpi/acpiosxf.h
index 914c029f64c9..65c5737b6286 100644
--- a/include/acpi/acpiosxf.h
+++ b/include/acpi/acpiosxf.h
@@ -5,7 +5,7 @@
* interfaces must be implemented by OSL to interface the
* ACPI components to the host operating system.
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index 78b24b090488..b49396aa4058 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -3,7 +3,7 @@
*
* Name: acpixf.h - External interfaces to the ACPI subsystem
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -12,7 +12,7 @@
/* Current ACPICA subsystem version in YYYYMMDD format */
-#define ACPI_CA_VERSION 0x20240827
+#define ACPI_CA_VERSION 0x20250404
#include <acpi/acconfig.h>
#include <acpi/actypes.h>
diff --git a/include/acpi/acrestyp.h b/include/acpi/acrestyp.h
index efef208b0324..842f932e2c2b 100644
--- a/include/acpi/acrestyp.h
+++ b/include/acpi/acrestyp.h
@@ -3,7 +3,7 @@
*
* Name: acrestyp.h - Defines, types, and structures for resource descriptors
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/include/acpi/actbl.h b/include/acpi/actbl.h
index 2fc89704be17..243097a3da63 100644
--- a/include/acpi/actbl.h
+++ b/include/acpi/actbl.h
@@ -3,7 +3,7 @@
*
* Name: actbl.h - Basic ACPI Table Definitions
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -66,12 +66,12 @@
******************************************************************************/
struct acpi_table_header {
- char signature[ACPI_NAMESEG_SIZE] __nonstring; /* ASCII table signature */
+ char signature[ACPI_NAMESEG_SIZE] ACPI_NONSTRING; /* ASCII table signature */
u32 length; /* Length of table in bytes, including this header */
u8 revision; /* ACPI Specification minor version number */
u8 checksum; /* To make sum of entire table == 0 */
- char oem_id[ACPI_OEM_ID_SIZE]; /* ASCII OEM identification */
- char oem_table_id[ACPI_OEM_TABLE_ID_SIZE]; /* ASCII OEM table identification */
+ char oem_id[ACPI_OEM_ID_SIZE] ACPI_NONSTRING; /* ASCII OEM identification */
+ char oem_table_id[ACPI_OEM_TABLE_ID_SIZE] ACPI_NONSTRING; /* ASCII OEM table identification */
u32 oem_revision; /* OEM revision number */
char asl_compiler_id[ACPI_NAMESEG_SIZE]; /* ASCII ASL compiler vendor ID */
u32 asl_compiler_revision; /* ASL compiler version */
diff --git a/include/acpi/actbl1.h b/include/acpi/actbl1.h
index 387fc821703a..99fd1588ff38 100644
--- a/include/acpi/actbl1.h
+++ b/include/acpi/actbl1.h
@@ -3,7 +3,7 @@
*
* Name: actbl1.h - Additional ACPI table definitions
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -155,6 +155,13 @@ struct acpi_aspt_acpi_mbox_regs {
u64 reserved2[2];
};
+/* Larger subtable header (when Length can exceed 255) */
+
+struct acpi_subtbl_hdr_16 {
+ u16 type;
+ u16 length;
+};
+
/*******************************************************************************
*
* ASF - Alert Standard Format table (Signature "ASF!")
@@ -819,7 +826,8 @@ enum acpi_dmar_type {
ACPI_DMAR_TYPE_HARDWARE_AFFINITY = 3,
ACPI_DMAR_TYPE_NAMESPACE = 4,
ACPI_DMAR_TYPE_SATC = 5,
- ACPI_DMAR_TYPE_RESERVED = 6 /* 6 and greater are reserved */
+ ACPI_DMAR_TYPE_SIDP = 6,
+ ACPI_DMAR_TYPE_RESERVED = 7 /* 7 and greater are reserved */
};
/* DMAR Device Scope structure */
@@ -827,7 +835,8 @@ enum acpi_dmar_type {
struct acpi_dmar_device_scope {
u8 entry_type;
u8 length;
- u16 reserved;
+ u8 flags;
+ u8 reserved;
u8 enumeration_id;
u8 bus;
};
@@ -923,6 +932,15 @@ struct acpi_dmar_satc {
u8 reserved;
u16 segment;
};
+
+/* 6: SoC Integrated Device Property Reporting Structure */
+
+struct acpi_dmar_sidp {
+ struct acpi_dmar_header header;
+ u16 reserved;
+ u16 segment;
+};
+
/*******************************************************************************
*
* DRTM - Dynamic Root of Trust for Measurement table
@@ -1024,17 +1042,18 @@ struct acpi_einj_entry {
/* Values for Action field above */
enum acpi_einj_actions {
- ACPI_EINJ_BEGIN_OPERATION = 0,
- ACPI_EINJ_GET_TRIGGER_TABLE = 1,
- ACPI_EINJ_SET_ERROR_TYPE = 2,
- ACPI_EINJ_GET_ERROR_TYPE = 3,
- ACPI_EINJ_END_OPERATION = 4,
- ACPI_EINJ_EXECUTE_OPERATION = 5,
- ACPI_EINJ_CHECK_BUSY_STATUS = 6,
- ACPI_EINJ_GET_COMMAND_STATUS = 7,
- ACPI_EINJ_SET_ERROR_TYPE_WITH_ADDRESS = 8,
- ACPI_EINJ_GET_EXECUTE_TIMINGS = 9,
- ACPI_EINJ_ACTION_RESERVED = 10, /* 10 and greater are reserved */
+ ACPI_EINJ_BEGIN_OPERATION = 0x0,
+ ACPI_EINJ_GET_TRIGGER_TABLE = 0x1,
+ ACPI_EINJ_SET_ERROR_TYPE = 0x2,
+ ACPI_EINJ_GET_ERROR_TYPE = 0x3,
+ ACPI_EINJ_END_OPERATION = 0x4,
+ ACPI_EINJ_EXECUTE_OPERATION = 0x5,
+ ACPI_EINJ_CHECK_BUSY_STATUS = 0x6,
+ ACPI_EINJ_GET_COMMAND_STATUS = 0x7,
+ ACPI_EINJ_SET_ERROR_TYPE_WITH_ADDRESS = 0x8,
+ ACPI_EINJ_GET_EXECUTE_TIMINGS = 0x9,
+ ACPI_EINJV2_GET_ERROR_TYPE = 0x11,
+ ACPI_EINJ_ACTION_RESERVED = 0x12, /* 0x12 and greater are reserved */
ACPI_EINJ_TRIGGER_ERROR = 0xFF /* Except for this value */
};
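
The struct acpi_subtbl_hdr_16 added above exists because the classic struct acpi_subtable_header stores Length in a u8, capping subtables at 255 bytes. A minimal sketch of how a parser might walk an array of such subtables (the helper name is illustrative, not part of this diff):

	/* Hypothetical walker for subtables using the 16-bit header;
	 * 'end' points one byte past the enclosing table. */
	static void walk_subtables_16(u8 *entry, u8 *end)
	{
		while (entry + sizeof(struct acpi_subtbl_hdr_16) <= end) {
			struct acpi_subtbl_hdr_16 *hdr = (void *)entry;

			if (hdr->length < sizeof(*hdr))
				break;		/* malformed subtable */
			/* dispatch on hdr->type here */
			entry += hdr->length;
		}
	}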
diff --git a/include/acpi/actbl2.h b/include/acpi/actbl2.h
index 2e917a8f8bca..048f5f47f8b8 100644
--- a/include/acpi/actbl2.h
+++ b/include/acpi/actbl2.h
@@ -1,9 +1,9 @@
/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
/******************************************************************************
*
- * Name: actbl2.h - ACPI Table Definitions (tables not in ACPI spec)
+ * Name: actbl2.h - ACPI Table Definitions
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -29,6 +29,7 @@
#define ACPI_SIG_BDAT "BDAT" /* BIOS Data ACPI Table */
#define ACPI_SIG_CCEL "CCEL" /* CC Event Log Table */
#define ACPI_SIG_CDAT "CDAT" /* Coherent Device Attribute Table */
+#define ACPI_SIG_ERDT "ERDT" /* Enhanced Resource Director Technology */
#define ACPI_SIG_IORT "IORT" /* IO Remapping Table */
#define ACPI_SIG_IVRS "IVRS" /* I/O Virtualization Reporting Structure */
#define ACPI_SIG_LPIT "LPIT" /* Low Power Idle Table */
@@ -37,6 +38,7 @@
#define ACPI_SIG_MCHI "MCHI" /* Management Controller Host Interface table */
#define ACPI_SIG_MPAM "MPAM" /* Memory System Resource Partitioning and Monitoring Table */
#define ACPI_SIG_MPST "MPST" /* Memory Power State Table */
+#define ACPI_SIG_MRRM "MRRM" /* Memory Range and Region Mapping table */
#define ACPI_SIG_MSDM "MSDM" /* Microsoft Data Management Table */
#define ACPI_SIG_NFIT "NFIT" /* NVDIMM Firmware Interface Table */
#define ACPI_SIG_NHLT "NHLT" /* Non HD Audio Link Table */
@@ -50,6 +52,7 @@
#define ACPI_SIG_RAS2 "RAS2" /* RAS2 Feature table */
#define ACPI_SIG_RGRT "RGRT" /* Regulatory Graphics Resource Table */
#define ACPI_SIG_RHCT "RHCT" /* RISC-V Hart Capabilities Table */
+#define ACPI_SIG_RIMT "RIMT" /* RISC-V IO Mapping Table */
#define ACPI_SIG_SBST "SBST" /* Smart Battery Specification Table */
#define ACPI_SIG_SDEI "SDEI" /* Software Delegated Exception Interface Table */
#define ACPI_SIG_SDEV "SDEV" /* Secure Devices table */
@@ -450,6 +453,195 @@ struct acpi_table_ccel {
/*******************************************************************************
*
+ * ERDT - Enhanced Resource Director Technology (ERDT) table
+ *
+ * Conforms to "Intel Resource Director Technology Architecture Specification"
+ * Version 1.1, January 2025
+ *
+ ******************************************************************************/
+
+struct acpi_table_erdt {
+ struct acpi_table_header header; /* Common ACPI table header */
+ u32 max_clos; /* Maximum classes of service */
+ u8 reserved[24];
+ u8 erdt_substructures[];
+};
+
+/* Values for subtable type in struct acpi_subtbl_hdr_16 */
+
+enum acpi_erdt_type {
+ ACPI_ERDT_TYPE_RMDD = 0,
+ ACPI_ERDT_TYPE_CACD = 1,
+ ACPI_ERDT_TYPE_DACD = 2,
+ ACPI_ERDT_TYPE_CMRC = 3,
+ ACPI_ERDT_TYPE_MMRC = 4,
+ ACPI_ERDT_TYPE_MARC = 5,
+ ACPI_ERDT_TYPE_CARC = 6,
+ ACPI_ERDT_TYPE_CMRD = 7,
+ ACPI_ERDT_TYPE_IBRD = 8,
+ ACPI_ERDT_TYPE_IBAD = 9,
+ ACPI_ERDT_TYPE_CARD = 10,
+ ACPI_ERDT_TYPE_RESERVED = 11 /* 11 and above are reserved */
+};
+
+/*
+ * ERDT Subtables, correspond to Type in struct acpi_subtbl_hdr_16
+ */
+
+/* 0: RMDD - Resource Management Domain Description */
+
+struct acpi_erdt_rmdd {
+ struct acpi_subtbl_hdr_16 header;
+ u16 flags;
+ u16 IO_l3_slices; /* Number of slices in IO cache */
+ u8 IO_l3_sets; /* Number of sets in IO cache */
+ u8 IO_l3_ways; /* Number of ways in IO cache */
+ u64 reserved;
+ u16 domain_id; /* Unique domain ID */
+ u32 max_rmid; /* Maximum RMID supported */
+ u64 creg_base; /* Control Register Base Address */
+ u16 creg_size; /* Control Register Size (4K pages) */
+ u8 rmdd_structs[];
+};
+
+/* 1: CACD - CPU Agent Collection Description */
+
+struct acpi_erdt_cacd {
+ struct acpi_subtbl_hdr_16 header;
+ u16 reserved;
+ u16 domain_id; /* Unique domain ID */
+ u32 X2APICIDS[];
+};
+
+/* 2: DACD - Device Agent Collection Description */
+
+struct acpi_erdt_dacd {
+ struct acpi_subtbl_hdr_16 header;
+ u16 reserved;
+ u16 domain_id; /* Unique domain ID */
+ u8 dev_paths[];
+};
+
+struct acpi_erdt_dacd_dev_paths {
+ struct acpi_subtable_header header;
+ u16 segment;
+ u8 reserved;
+ u8 start_bus;
+ u8 path[];
+};
+
+/* 3: CMRC - Cache Monitoring Registers for CPU Agents */
+
+struct acpi_erdt_cmrc {
+ struct acpi_subtbl_hdr_16 header;
+ u32 reserved1;
+ u32 flags;
+ u8 index_fn;
+ u8 reserved2[11];
+ u64 cmt_reg_base;
+ u32 cmt_reg_size;
+ u16 clump_size;
+ u16 clump_stride;
+ u64 up_scale;
+};
+
+/* 4: MMRC - Memory-bandwidth Monitoring Registers for CPU Agents */
+
+struct acpi_erdt_mmrc {
+ struct acpi_subtbl_hdr_16 header;
+ u32 reserved1;
+ u32 flags;
+ u8 index_fn;
+ u8 reserved2[11];
+ u64 reg_base;
+ u32 reg_size;
+ u8 counter_width;
+ u64 up_scale;
+ u8 reserved3[7];
+ u32 corr_factor_list_len;
+ u32 corr_factor_list[];
+};
+
+/* 5: MARC - Memory-bandwidth Allocation Registers for CPU Agents */
+
+struct acpi_erdt_marc {
+ struct acpi_subtbl_hdr_16 header;
+ u16 reserved1;
+ u16 flags;
+ u8 index_fn;
+ u8 reserved2[7];
+ u64 reg_base_opt;
+ u64 reg_base_min;
+ u64 reg_base_max;
+ u32 mba_reg_size;
+ u32 mba_ctrl_range;
+};
+
+/* 6: CARC - Cache Allocation Registers for CPU Agents */
+
+struct acpi_erdt_carc {
+ struct acpi_subtbl_hdr_16 header;
+};
+
+/* 7: CMRD - Cache Monitoring Registers for Device Agents */
+
+struct acpi_erdt_cmrd {
+ struct acpi_subtbl_hdr_16 header;
+ u32 reserved1;
+ u32 flags;
+ u8 index_fn;
+ u8 reserved2[11];
+ u64 reg_base;
+ u32 reg_size;
+ u16 cmt_reg_off;
+ u16 cmt_clump_size;
+ u64 up_scale;
+};
+
+/* 8: IBRD - IO Bandwidth Monitoring Registers for Device Agents */
+
+struct acpi_erdt_ibrd {
+ struct acpi_subtbl_hdr_16 header;
+ u32 reserved1;
+ u32 flags;
+ u8 index_fn;
+ u8 reserved2[11];
+ u64 reg_base;
+ u32 reg_size;
+ u16 total_bw_offset;
+ u16 Iomiss_bw_offset;
+ u16 total_bw_clump;
+ u16 Iomiss_bw_clump;
+ u8 reserved3[7];
+ u8 counter_width;
+ u64 up_scale;
+ u32 corr_factor_list_len;
+ u32 corr_factor_list[];
+};
+
+/* 9: IBAD - IO Bandwidth Allocation Registers for Device Agents */
+
+struct acpi_erdt_ibad {
+ struct acpi_subtbl_hdr_16 header;
+};
+
+/* 10: CARD - Cache Allocation Registers for Device Agents */
+
+struct acpi_erdt_card {
+ struct acpi_subtbl_hdr_16 header;
+ u32 reserved1;
+ u32 flags;
+ u32 contention_mask;
+ u8 index_fn;
+ u8 reserved2[7];
+ u64 reg_base;
+ u32 reg_size;
+ u16 cat_reg_offset;
+ u16 cat_reg_block_size;
+};
+
+/*******************************************************************************
+ *
* IORT - IO Remapping Table
*
* Conforms to "IO Remapping Table System Software on ARM Platforms",
@@ -1738,6 +1930,47 @@ struct acpi_msct_proximity {
/*******************************************************************************
*
+ * MRRM - Memory Range and Region Mapping (MRRM) table
+ * Conforms to "Intel Resource Director Technology Architecture Specification"
+ * Version 1.1, January 2025
+ *
+ ******************************************************************************/
+
+struct acpi_table_mrrm {
+ struct acpi_table_header header; /* Common ACPI table header */
+ u8 max_mem_region; /* Max Memory Regions supported */
+ u8 flags; /* Region assignment type */
+ u8 reserved[26];
+ u8 memory_range_entry[];
+};
+
+/* Flags */
+#define ACPI_MRRM_FLAGS_REGION_ASSIGNMENT_OS (1<<0)
+
+/*******************************************************************************
+ *
+ * Memory Range entry - Memory Range entry in MRRM table
+ *
+ ******************************************************************************/
+
+struct acpi_mrrm_mem_range_entry {
+ struct acpi_subtbl_hdr_16 header;
+ u32 reserved0; /* Reserved */
+ u64 addr_base; /* Base addr of the mem range */
+ u64 addr_len; /* Length of the mem range */
+ u16 region_id_flags; /* Valid local or remote Region-ID */
+ u8 local_region_id; /* Platform-assigned static local Region-ID */
+ u8 remote_region_id; /* Platform-assigned static remote Region-ID */
+ u32 reserved1; /* Reserved */
+ /* Region-ID Programming Registers[] */
+};
+
+/* Values for region_id_flags above */
+#define ACPI_MRRM_VALID_REGION_ID_FLAGS_LOCAL (1<<0)
+#define ACPI_MRRM_VALID_REGION_ID_FLAGS_REMOTE (1<<1)
+
+/*******************************************************************************
+ *
* MSDM - Microsoft Data Management table
*
* Conforms to "Microsoft Software Licensing Tables (SLIC and MSDM)",
@@ -2802,15 +3035,15 @@ struct acpi_ras2_pcc_desc {
/* RAS2 Platform Communication Channel Shared Memory Region */
-struct acpi_ras2_shared_memory {
+struct acpi_ras2_shmem {
u32 signature;
u16 command;
u16 status;
u16 version;
u8 features[16];
- u8 set_capabilities[16];
- u16 num_parameter_blocks;
- u32 set_capabilities_status;
+ u8 set_caps[16];
+ u16 num_param_blks;
+ u32 set_caps_status;
};
/* RAS2 Parameter Block Structure for PATROL_SCRUB */
@@ -2823,11 +3056,11 @@ struct acpi_ras2_parameter_block {
/* RAS2 Parameter Block Structure for PATROL_SCRUB */
-struct acpi_ras2_patrol_scrub_parameter {
+struct acpi_ras2_patrol_scrub_param {
struct acpi_ras2_parameter_block header;
- u16 patrol_scrub_command;
- u64 requested_address_range[2];
- u64 actual_address_range[2];
+ u16 command;
+ u64 req_addr_range[2];
+ u64 actl_addr_range[2];
u32 flags;
u32 scrub_params_out;
u32 scrub_params_in;
@@ -3004,6 +3237,88 @@ struct acpi_rhct_hart_info {
/*******************************************************************************
*
+ * RIMT - RISC-V IO Remapping Table
+ *
+ * https://github.com/riscv-non-isa/riscv-acpi-rimt
+ *
+ ******************************************************************************/
+
+struct acpi_table_rimt {
+ struct acpi_table_header header; /* Common ACPI table header */
+ u32 num_nodes; /* Number of RIMT Nodes */
+ u32 node_offset; /* Offset to RIMT Node Array */
+ u32 reserved;
+};
+
+struct acpi_rimt_node {
+ u8 type;
+ u8 revision;
+ u16 length;
+ u16 reserved;
+ u16 id;
+ char node_data[];
+};
+
+enum acpi_rimt_node_type {
+ ACPI_RIMT_NODE_TYPE_IOMMU = 0x0,
+ ACPI_RIMT_NODE_TYPE_PCIE_ROOT_COMPLEX = 0x1,
+ ACPI_RIMT_NODE_TYPE_PLAT_DEVICE = 0x2,
+};
+
+struct acpi_rimt_iommu {
+ u8 hardware_id[8]; /* Hardware ID */
+ u64 base_address; /* Base Address */
+ u32 flags; /* Flags */
+ u32 proximity_domain; /* Proximity Domain */
+ u16 pcie_segment_number; /* PCIe Segment number */
+ u16 pcie_bdf; /* PCIe B/D/F */
+ u16 num_interrupt_wires; /* Number of interrupt wires */
+ u16 interrupt_wire_offset; /* Interrupt wire array offset */
+ u64 interrupt_wire[]; /* Interrupt wire array */
+};
+
+/* IOMMU Node Flags */
+#define ACPI_RIMT_IOMMU_FLAGS_PCIE (1)
+#define ACPI_RIMT_IOMMU_FLAGS_PXM_VALID (1 << 1)
+
+/* Interrupt Wire Structure */
+struct acpi_rimt_iommu_wire_gsi {
+ u32 irq_num; /* Interrupt Number */
+ u32 flags; /* Flags */
+};
+
+/* Interrupt Wire Flags */
+#define ACPI_RIMT_GSI_LEVEL_TRIGGERRED (1)
+#define ACPI_RIMT_GSI_ACTIVE_HIGH (1 << 1)
+
+struct acpi_rimt_id_mapping {
+ u32 source_id_base; /* Source ID Base */
+ u32 num_ids; /* Number of IDs */
+ u32 dest_id_base; /* Destination Device ID Base */
+ u32 dest_offset; /* Destination IOMMU Offset */
+ u32 flags; /* Flags */
+};
+
+struct acpi_rimt_pcie_rc {
+ u32 flags; /* Flags */
+ u16 reserved; /* Reserved */
+ u16 pcie_segment_number; /* PCIe Segment number */
+ u16 id_mapping_offset; /* ID mapping array offset */
+ u16 num_id_mappings; /* Number of ID mappings */
+};
+
+/* PCIe Root Complex Node Flags */
+#define ACPI_RIMT_PCIE_ATS_SUPPORTED (1)
+#define ACPI_RIMT_PCIE_PRI_SUPPORTED (1 << 1)
+
+struct acpi_rimt_platform_device {
+ u16 id_mapping_offset; /* ID Mapping array offset */
+ u16 num_id_mappings; /* Number of ID mappings */
+ char device_name[]; /* Device Object Name */
+};
+
+/*******************************************************************************
+ *
* SBST - Smart Battery Specification Table
* Version 1
*
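
The RIMT ID-mapping structure added above mirrors the windowed scheme used by ARM's IORT: a requester ID falling in [source_id_base, source_id_base + num_ids) translates linearly into the destination ID space of the IOMMU found at dest_offset. A hedged sketch of that translation (the helper is illustrative, not a kernel API):

	static int rimt_map_id(const struct acpi_rimt_id_mapping *map,
			       u32 rid, u32 *dev_id)
	{
		if (rid < map->source_id_base ||
		    rid - map->source_id_base >= map->num_ids)
			return -ENOENT;
		*dev_id = map->dest_id_base + (rid - map->source_id_base);
		return 0;
	}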
diff --git a/include/acpi/actbl3.h b/include/acpi/actbl3.h
index a97b1dbab975..79d3aa5a4bad 100644
--- a/include/acpi/actbl3.h
+++ b/include/acpi/actbl3.h
@@ -3,7 +3,7 @@
*
* Name: actbl3.h - ACPI Table Definitions
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -269,7 +269,7 @@ struct acpi_srat_gicc_affinity {
#define ACPI_SRAT_GICC_ENABLED (1) /* 00: Use affinity structure */
-/* 4: GCC ITS Affinity (ACPI 6.2) */
+/* 4: GIC ITS Affinity (ACPI 6.2) */
struct acpi_srat_gic_its_affinity {
struct acpi_subtable_header header;
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
index 80767e8bf3ad..8fe893d776dd 100644
--- a/include/acpi/actypes.h
+++ b/include/acpi/actypes.h
@@ -3,7 +3,7 @@
*
* Name: actypes.h - Common data types for the entire ACPI subsystem
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -522,12 +522,12 @@ typedef u64 acpi_integer;
#define ACPI_COPY_NAMESEG(dest,src) (*ACPI_CAST_PTR (u32, (dest)) = *ACPI_CAST_PTR (u32, (src)))
#else
#define ACPI_COMPARE_NAMESEG(a,b) (!strncmp (ACPI_CAST_PTR (char, (a)), ACPI_CAST_PTR (char, (b)), ACPI_NAMESEG_SIZE))
-#define ACPI_COPY_NAMESEG(dest,src) (strncpy (ACPI_CAST_PTR (char, (dest)), ACPI_CAST_PTR (char, (src)), ACPI_NAMESEG_SIZE))
+#define ACPI_COPY_NAMESEG(dest,src) (memcpy (ACPI_CAST_PTR (char, (dest)), ACPI_CAST_PTR (char, (src)), ACPI_NAMESEG_SIZE))
#endif
/* Support for the special RSDP signature (8 characters) */
-#define ACPI_VALIDATE_RSDP_SIG(a) (!strncmp (ACPI_CAST_PTR (char, (a)), ACPI_SIG_RSDP, 8))
+#define ACPI_VALIDATE_RSDP_SIG(a) (!strncmp (ACPI_CAST_PTR (char, (a)), ACPI_SIG_RSDP, (sizeof(a) < 8) ? ACPI_NAMESEG_SIZE : 8))
#define ACPI_MAKE_RSDP_SIG(dest) (memcpy (ACPI_CAST_PTR (char, (dest)), ACPI_SIG_RSDP, 8))
/* Support for OEMx signature (x can be any character) */
@@ -1327,4 +1327,8 @@ typedef enum {
#define ACPI_FLEX_ARRAY(TYPE, NAME) TYPE NAME[0]
#endif
+#ifndef ACPI_NONSTRING
+#define ACPI_NONSTRING /* No terminating NUL character */
+#endif
+
#endif /* __ACTYPES_H__ */
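
The reworked ACPI_VALIDATE_RSDP_SIG uses sizeof(a) to avoid reading past 4-byte signature buffers; note this only works when the argument is an actual array, since sizeof on a pointer would also be 8 on 64-bit builds. A sketch of the two cases:

	struct acpi_table_header hdr;	/* hdr.signature is char[4] */
	char rsdp[8];

	ACPI_VALIDATE_RSDP_SIG(hdr.signature);	/* compares 4 bytes only */
	ACPI_VALIDATE_RSDP_SIG(rsdp);		/* compares all 8 ("RSD PTR ") */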
diff --git a/include/acpi/acuuid.h b/include/acpi/acuuid.h
index 52a84523bfac..25dd3e998727 100644
--- a/include/acpi/acuuid.h
+++ b/include/acpi/acuuid.h
@@ -3,7 +3,7 @@
*
* Name: acuuid.h - ACPI-related UUID/GUID definitions
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/include/acpi/cppc_acpi.h b/include/acpi/cppc_acpi.h
index 62d368bcd9ec..325e9543e08f 100644
--- a/include/acpi/cppc_acpi.h
+++ b/include/acpi/cppc_acpi.h
@@ -32,6 +32,15 @@
#define CMD_READ 0
#define CMD_WRITE 1
+#define CPPC_AUTO_ACT_WINDOW_SIG_BIT_SIZE (7)
+#define CPPC_AUTO_ACT_WINDOW_EXP_BIT_SIZE (3)
+#define CPPC_AUTO_ACT_WINDOW_MAX_SIG ((1 << CPPC_AUTO_ACT_WINDOW_SIG_BIT_SIZE) - 1)
+#define CPPC_AUTO_ACT_WINDOW_MAX_EXP ((1 << CPPC_AUTO_ACT_WINDOW_EXP_BIT_SIZE) - 1)
+/* CPPC_AUTO_ACT_WINDOW_MAX_SIG is 127, so 128 and 129 will decay to 127 when writing */
+#define CPPC_AUTO_ACT_WINDOW_SIG_CARRY_THRESH 129
+
+#define CPPC_ENERGY_PERF_MAX (0xFF)
+
/* Each register has the following format. */
struct cpc_reg {
u8 descriptor;
@@ -159,7 +168,10 @@ extern int cpc_read_ffh(int cpunum, struct cpc_reg *reg, u64 *val);
extern int cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val);
extern int cppc_get_epp_perf(int cpunum, u64 *epp_perf);
extern int cppc_set_epp_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls, bool enable);
-extern int cppc_get_auto_sel_caps(int cpunum, struct cppc_perf_caps *perf_caps);
+extern int cppc_set_epp(int cpu, u64 epp_val);
+extern int cppc_get_auto_act_window(int cpu, u64 *auto_act_window);
+extern int cppc_set_auto_act_window(int cpu, u64 auto_act_window);
+extern int cppc_get_auto_sel(int cpu, bool *enable);
extern int cppc_set_auto_sel(int cpu, bool enable);
extern int amd_get_highest_perf(unsigned int cpu, u32 *highest_perf);
extern int amd_get_boost_ratio_numerator(unsigned int cpu, u64 *numerator);
@@ -229,11 +241,23 @@ static inline int cppc_get_epp_perf(int cpunum, u64 *epp_perf)
{
return -EOPNOTSUPP;
}
-static inline int cppc_set_auto_sel(int cpu, bool enable)
+static inline int cppc_set_epp(int cpu, u64 epp_val)
{
return -EOPNOTSUPP;
}
-static inline int cppc_get_auto_sel_caps(int cpunum, struct cppc_perf_caps *perf_caps)
+static inline int cppc_get_auto_act_window(int cpu, u64 *auto_act_window)
+{
+ return -EOPNOTSUPP;
+}
+static inline int cppc_set_auto_act_window(int cpu, u64 auto_act_window)
+{
+ return -EOPNOTSUPP;
+}
+static inline int cppc_get_auto_sel(int cpu, bool *enable)
+{
+ return -EOPNOTSUPP;
+}
+static inline int cppc_set_auto_sel(int cpu, bool enable)
{
return -EOPNOTSUPP;
}
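
Per the CPPC definition of the autonomous activity window, the register packs a 7-bit significand and a 3-bit exponent, encoding significand * 10^exponent microseconds; the carry threshold reflects that 128 and 129 round down to the 127 maximum rather than bumping the exponent. A hedged sketch of the encoding these constants imply (the helper and its rounding policy are assumptions, not a kernel API):

	static u64 encode_auto_act_window(u64 usec)
	{
		unsigned int exp = 0;

		while (usec > CPPC_AUTO_ACT_WINDOW_SIG_CARRY_THRESH) {
			usec = DIV_ROUND_UP(usec, 10);	/* rounding is an assumption */
			exp++;
		}
		usec = min_t(u64, usec, CPPC_AUTO_ACT_WINDOW_MAX_SIG);
		exp = min_t(unsigned int, exp, CPPC_AUTO_ACT_WINDOW_MAX_EXP);
		return (exp << CPPC_AUTO_ACT_WINDOW_SIG_BIT_SIZE) | usec;
	}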
diff --git a/include/acpi/platform/acenv.h b/include/acpi/platform/acenv.h
index 3f31df09a9d6..a11fa83955f8 100644
--- a/include/acpi/platform/acenv.h
+++ b/include/acpi/platform/acenv.h
@@ -3,7 +3,7 @@
*
* Name: acenv.h - Host and compiler configuration
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/include/acpi/platform/acenvex.h b/include/acpi/platform/acenvex.h
index 7e67e3503f7b..8ffc4e1c87cf 100644
--- a/include/acpi/platform/acenvex.h
+++ b/include/acpi/platform/acenvex.h
@@ -3,7 +3,7 @@
*
* Name: acenvex.h - Extra host and compiler configuration
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/include/acpi/platform/acgcc.h b/include/acpi/platform/acgcc.h
index 04b4bf620517..8e4cf2f6b383 100644
--- a/include/acpi/platform/acgcc.h
+++ b/include/acpi/platform/acgcc.h
@@ -3,7 +3,7 @@
*
* Name: acgcc.h - GCC specific defines, etc.
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -72,4 +72,12 @@
TYPE NAME[]; \
}
+/*
+ * Explicitly mark strings that lack a terminating NUL character so
+ * that ACPICA can be built with -Wunterminated-string-initialization.
+ */
+#if __has_attribute(__nonstring__)
+#define ACPI_NONSTRING __attribute__((__nonstring__))
+#endif
+
#endif /* __ACGCC_H__ */
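
For context, this is the initializer pattern the attribute is meant to bless: with GCC 15's -Wunterminated-string-initialization, a 4-character literal that exactly fills a char[4] warns unless the array is marked nonstring. A sketch:

	/* Exactly fills the array; deliberately no terminating NUL. */
	static const char sig[ACPI_NAMESEG_SIZE] ACPI_NONSTRING = "DSDT";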
diff --git a/include/acpi/platform/acgccex.h b/include/acpi/platform/acgccex.h
index 7c9f10e9633a..4a3c019a4d03 100644
--- a/include/acpi/platform/acgccex.h
+++ b/include/acpi/platform/acgccex.h
@@ -3,7 +3,7 @@
*
* Name: acgccex.h - Extra GCC specific defines, etc.
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/include/acpi/platform/aclinux.h b/include/acpi/platform/aclinux.h
index f3249b7df5cb..edbbc9061d1e 100644
--- a/include/acpi/platform/aclinux.h
+++ b/include/acpi/platform/aclinux.h
@@ -3,7 +3,7 @@
*
* Name: aclinux.h - OS specific defines, etc. for Linux
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/include/acpi/platform/aclinuxex.h b/include/acpi/platform/aclinuxex.h
index eeff40295b4b..73265650f46b 100644
--- a/include/acpi/platform/aclinuxex.h
+++ b/include/acpi/platform/aclinuxex.h
@@ -3,7 +3,7 @@
*
* Name: aclinuxex.h - Extra OS specific defines, etc. for Linux
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/include/acpi/platform/aczephyr.h b/include/acpi/platform/aczephyr.h
index 703db4dc740d..03d9a4a39c80 100644
--- a/include/acpi/platform/aczephyr.h
+++ b/include/acpi/platform/aczephyr.h
@@ -3,7 +3,7 @@
*
* Module Name: aczephyr.h - OS specific defines, etc.
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/include/asm-generic/simd.h b/include/asm-generic/simd.h
index d0343d58a74a..ac29a22eb7cf 100644
--- a/include/asm-generic/simd.h
+++ b/include/asm-generic/simd.h
@@ -1,6 +1,10 @@
/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_SIMD_H
+#define _ASM_GENERIC_SIMD_H
-#include <linux/hardirq.h>
+#include <linux/compiler_attributes.h>
+#include <linux/preempt.h>
+#include <linux/types.h>
/*
* may_use_simd - whether it is allowable at this time to issue SIMD
@@ -13,3 +17,5 @@ static __must_check inline bool may_use_simd(void)
{
return !in_interrupt();
}
+
+#endif /* _ASM_GENERIC_SIMD_H */
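
Callers typically use may_use_simd() to pick between a SIMD fast path and a scalar fallback; a sketch using the x86 naming for the FPU-save region (other architectures use different begin/end helpers):

	if (may_use_simd()) {
		kernel_fpu_begin();	/* x86-specific naming */
		/* ... SIMD implementation ... */
		kernel_fpu_end();
	} else {
		/* ... scalar fallback ... */
	}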
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 58a635a6d5bd..66409bc3a4e0 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -108,13 +108,13 @@ defined(CONFIG_AUTOFDO_CLANG) || defined(CONFIG_PROPELLER_CLANG)
#define TEXT_MAIN .text
#endif
#if defined(CONFIG_LD_DEAD_CODE_DATA_ELIMINATION) || defined(CONFIG_LTO_CLANG)
-#define DATA_MAIN .data .data.[0-9a-zA-Z_]* .data..L* .data..compoundliteral* .data.$__unnamed_* .data.$L*
+#define DATA_MAIN .data .data.[0-9a-zA-Z_]* .data.rel.* .data..L* .data..compoundliteral* .data.$__unnamed_* .data.$L*
#define SDATA_MAIN .sdata .sdata.[0-9a-zA-Z_]*
#define RODATA_MAIN .rodata .rodata.[0-9a-zA-Z_]* .rodata..L*
#define BSS_MAIN .bss .bss.[0-9a-zA-Z_]* .bss..L* .bss..compoundliteral*
#define SBSS_MAIN .sbss .sbss.[0-9a-zA-Z_]*
#else
-#define DATA_MAIN .data
+#define DATA_MAIN .data .data.rel .data.rel.local
#define SDATA_MAIN .sdata
#define RODATA_MAIN .rodata
#define BSS_MAIN .bss
diff --git a/include/crypto/acompress.h b/include/crypto/acompress.h
index c497c73baf13..9eacb9fa375d 100644
--- a/include/crypto/acompress.h
+++ b/include/crypto/acompress.h
@@ -32,30 +32,28 @@
/* Set this bit if the virtual address destination cannot be used for DMA. */
#define CRYPTO_ACOMP_REQ_DST_NONDMA 0x00000010
-/* Set this bit if source is a folio. */
-#define CRYPTO_ACOMP_REQ_SRC_FOLIO 0x00000020
-
-/* Set this bit if destination is a folio. */
-#define CRYPTO_ACOMP_REQ_DST_FOLIO 0x00000040
+/* Private flags that should not be touched by the user. */
+#define CRYPTO_ACOMP_REQ_PRIVATE \
+ (CRYPTO_ACOMP_REQ_SRC_VIRT | CRYPTO_ACOMP_REQ_SRC_NONDMA | \
+ CRYPTO_ACOMP_REQ_DST_VIRT | CRYPTO_ACOMP_REQ_DST_NONDMA)
#define CRYPTO_ACOMP_DST_MAX 131072
#define MAX_SYNC_COMP_REQSIZE 0
-#define ACOMP_REQUEST_ALLOC(name, tfm, gfp) \
+#define ACOMP_REQUEST_ON_STACK(name, tfm) \
char __##name##_req[sizeof(struct acomp_req) + \
MAX_SYNC_COMP_REQSIZE] CRYPTO_MINALIGN_ATTR; \
struct acomp_req *name = acomp_request_on_stack_init( \
- __##name##_req, (tfm), (gfp), false)
+ __##name##_req, (tfm))
+
+#define ACOMP_REQUEST_CLONE(name, gfp) \
+ acomp_request_clone(name, sizeof(__##name##_req), gfp)
struct acomp_req;
struct folio;
struct acomp_req_chain {
- struct list_head head;
- struct acomp_req *req0;
- struct acomp_req *cur;
- int (*op)(struct acomp_req *req);
crypto_completion_t compl;
void *data;
struct scatterlist ssg;
@@ -68,8 +66,6 @@ struct acomp_req_chain {
u8 *dst;
struct folio *dfolio;
};
- size_t soff;
- size_t doff;
u32 flags;
};
@@ -81,10 +77,6 @@ struct acomp_req_chain {
* @dst: Destination scatterlist
* @svirt: Source virtual address
* @dvirt: Destination virtual address
- * @sfolio: Source folio
- * @soff: Source folio offset
- * @dfolio: Destination folio
- * @doff: Destination folio offset
* @slen: Size of the input buffer
* @dlen: Size of the output buffer and number of bytes produced
* @chain: Private API code data, do not use
@@ -95,15 +87,11 @@ struct acomp_req {
union {
struct scatterlist *src;
const u8 *svirt;
- struct folio *sfolio;
};
union {
struct scatterlist *dst;
u8 *dvirt;
- struct folio *dfolio;
};
- size_t soff;
- size_t doff;
unsigned int slen;
unsigned int dlen;
@@ -126,18 +114,11 @@ struct crypto_acomp {
int (*compress)(struct acomp_req *req);
int (*decompress)(struct acomp_req *req);
unsigned int reqsize;
- struct crypto_acomp *fb;
struct crypto_tfm base;
};
-struct crypto_acomp_stream {
- spinlock_t lock;
- void *ctx;
-};
-
#define COMP_ALG_COMMON { \
struct crypto_alg base; \
- struct crypto_acomp_stream __percpu *stream; \
}
struct comp_alg_common COMP_ALG_COMMON;
@@ -213,7 +194,7 @@ static inline unsigned int crypto_acomp_reqsize(struct crypto_acomp *tfm)
static inline void acomp_request_set_tfm(struct acomp_req *req,
struct crypto_acomp *tfm)
{
- req->base.tfm = crypto_acomp_tfm(tfm);
+ crypto_request_set_tfm(&req->base, crypto_acomp_tfm(tfm));
}
static inline bool acomp_is_async(struct crypto_acomp *tfm)
@@ -310,6 +291,11 @@ static inline void *acomp_request_extra(struct acomp_req *req)
return (void *)((char *)req + len);
}
+static inline bool acomp_req_on_stack(struct acomp_req *req)
+{
+ return crypto_req_on_stack(&req->base);
+}
+
/**
* acomp_request_free() -- zeroize and free asynchronous (de)compression
* request as well as the output buffer if allocated
@@ -319,7 +305,7 @@ static inline void *acomp_request_extra(struct acomp_req *req)
*/
static inline void acomp_request_free(struct acomp_req *req)
{
- if (!req || (req->base.flags & CRYPTO_TFM_REQ_ON_STACK))
+ if (!req || acomp_req_on_stack(req))
return;
kfree_sensitive(req);
}
@@ -340,17 +326,9 @@ static inline void acomp_request_set_callback(struct acomp_req *req,
crypto_completion_t cmpl,
void *data)
{
- u32 keep = CRYPTO_ACOMP_REQ_SRC_VIRT | CRYPTO_ACOMP_REQ_SRC_NONDMA |
- CRYPTO_ACOMP_REQ_DST_VIRT | CRYPTO_ACOMP_REQ_DST_NONDMA |
- CRYPTO_ACOMP_REQ_SRC_FOLIO | CRYPTO_ACOMP_REQ_DST_FOLIO |
- CRYPTO_TFM_REQ_ON_STACK;
-
- req->base.complete = cmpl;
- req->base.data = data;
- req->base.flags &= keep;
- req->base.flags |= flgs & ~keep;
-
- crypto_reqchain_init(&req->base);
+ flgs &= ~CRYPTO_ACOMP_REQ_PRIVATE;
+ flgs |= req->base.flags & CRYPTO_ACOMP_REQ_PRIVATE;
+ crypto_request_set_callback(&req->base, flgs, cmpl, data);
}
/**
@@ -379,8 +357,6 @@ static inline void acomp_request_set_params(struct acomp_req *req,
req->base.flags &= ~(CRYPTO_ACOMP_REQ_SRC_VIRT |
CRYPTO_ACOMP_REQ_SRC_NONDMA |
- CRYPTO_ACOMP_REQ_SRC_FOLIO |
- CRYPTO_ACOMP_REQ_DST_FOLIO |
CRYPTO_ACOMP_REQ_DST_VIRT |
CRYPTO_ACOMP_REQ_DST_NONDMA);
}
@@ -403,7 +379,6 @@ static inline void acomp_request_set_src_sg(struct acomp_req *req,
req->base.flags &= ~CRYPTO_ACOMP_REQ_SRC_NONDMA;
req->base.flags &= ~CRYPTO_ACOMP_REQ_SRC_VIRT;
- req->base.flags &= ~CRYPTO_ACOMP_REQ_SRC_FOLIO;
}
/**
@@ -423,7 +398,6 @@ static inline void acomp_request_set_src_dma(struct acomp_req *req,
req->slen = slen;
req->base.flags &= ~CRYPTO_ACOMP_REQ_SRC_NONDMA;
- req->base.flags &= ~CRYPTO_ACOMP_REQ_SRC_FOLIO;
req->base.flags |= CRYPTO_ACOMP_REQ_SRC_VIRT;
}
@@ -444,7 +418,6 @@ static inline void acomp_request_set_src_nondma(struct acomp_req *req,
req->svirt = src;
req->slen = slen;
- req->base.flags &= ~CRYPTO_ACOMP_REQ_SRC_FOLIO;
req->base.flags |= CRYPTO_ACOMP_REQ_SRC_NONDMA;
req->base.flags |= CRYPTO_ACOMP_REQ_SRC_VIRT;
}
@@ -463,13 +436,9 @@ static inline void acomp_request_set_src_folio(struct acomp_req *req,
struct folio *folio, size_t off,
unsigned int len)
{
- req->sfolio = folio;
- req->soff = off;
- req->slen = len;
-
- req->base.flags &= ~CRYPTO_ACOMP_REQ_SRC_NONDMA;
- req->base.flags &= ~CRYPTO_ACOMP_REQ_SRC_VIRT;
- req->base.flags |= CRYPTO_ACOMP_REQ_SRC_FOLIO;
+ sg_init_table(&req->chain.ssg, 1);
+ sg_set_folio(&req->chain.ssg, folio, len, off);
+ acomp_request_set_src_sg(req, &req->chain.ssg, len);
}
/**
@@ -490,7 +459,6 @@ static inline void acomp_request_set_dst_sg(struct acomp_req *req,
req->base.flags &= ~CRYPTO_ACOMP_REQ_DST_NONDMA;
req->base.flags &= ~CRYPTO_ACOMP_REQ_DST_VIRT;
- req->base.flags &= ~CRYPTO_ACOMP_REQ_DST_FOLIO;
}
/**
@@ -510,7 +478,6 @@ static inline void acomp_request_set_dst_dma(struct acomp_req *req,
req->dlen = dlen;
req->base.flags &= ~CRYPTO_ACOMP_REQ_DST_NONDMA;
- req->base.flags &= ~CRYPTO_ACOMP_REQ_DST_FOLIO;
req->base.flags |= CRYPTO_ACOMP_REQ_DST_VIRT;
}
@@ -530,7 +497,6 @@ static inline void acomp_request_set_dst_nondma(struct acomp_req *req,
req->dvirt = dst;
req->dlen = dlen;
- req->base.flags &= ~CRYPTO_ACOMP_REQ_DST_FOLIO;
req->base.flags |= CRYPTO_ACOMP_REQ_DST_NONDMA;
req->base.flags |= CRYPTO_ACOMP_REQ_DST_VIRT;
}
@@ -549,19 +515,9 @@ static inline void acomp_request_set_dst_folio(struct acomp_req *req,
struct folio *folio, size_t off,
unsigned int len)
{
- req->dfolio = folio;
- req->doff = off;
- req->dlen = len;
-
- req->base.flags &= ~CRYPTO_ACOMP_REQ_DST_NONDMA;
- req->base.flags &= ~CRYPTO_ACOMP_REQ_DST_VIRT;
- req->base.flags |= CRYPTO_ACOMP_REQ_DST_FOLIO;
-}
-
-static inline void acomp_request_chain(struct acomp_req *req,
- struct acomp_req *head)
-{
- crypto_request_chain(&req->base, &head->base);
+ sg_init_table(&req->chain.dsg, 1);
+ sg_set_folio(&req->chain.dsg, folio, len, off);
+ acomp_request_set_dst_sg(req, &req->chain.dsg, len);
}
/**
@@ -587,18 +543,15 @@ int crypto_acomp_compress(struct acomp_req *req);
int crypto_acomp_decompress(struct acomp_req *req);
static inline struct acomp_req *acomp_request_on_stack_init(
- char *buf, struct crypto_acomp *tfm, gfp_t gfp, bool stackonly)
+ char *buf, struct crypto_acomp *tfm)
{
- struct acomp_req *req;
-
- if (!stackonly && (req = acomp_request_alloc(tfm, gfp)))
- return req;
-
- req = (void *)buf;
- acomp_request_set_tfm(req, tfm->fb);
- req->base.flags = CRYPTO_TFM_REQ_ON_STACK;
+ struct acomp_req *req = (void *)buf;
+ crypto_stack_request_init(&req->base, crypto_acomp_tfm(tfm));
return req;
}
+struct acomp_req *acomp_request_clone(struct acomp_req *req,
+ size_t total, gfp_t gfp);
+
#endif
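
The renamed ACOMP_REQUEST_ON_STACK now always initializes an on-stack request rather than first attempting a heap allocation. A hedged sketch of synchronous use (error handling elided; assumes a sync-capable "deflate" transform and already-prepared scatterlists):

	struct crypto_acomp *tfm = crypto_alloc_acomp("deflate", 0, 0);
	ACOMP_REQUEST_ON_STACK(req, tfm);

	acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
	acomp_request_set_params(req, src_sg, dst_sg, src_len, dst_len);
	if (crypto_acomp_compress(req) == 0)
		/* req->dlen now holds the number of bytes produced */;
	crypto_free_acomp(tfm);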
diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
index 6e07bbc04089..188eface0a11 100644
--- a/include/crypto/algapi.h
+++ b/include/crypto/algapi.h
@@ -68,16 +68,17 @@ struct crypto_instance {
struct crypto_spawn *spawns;
};
- struct work_struct free_work;
-
void *__ctx[] CRYPTO_MINALIGN_ATTR;
};
struct crypto_template {
struct list_head list;
struct hlist_head instances;
+ struct hlist_head dead;
struct module *module;
+ struct work_struct free_work;
+
int (*create)(struct crypto_template *tmpl, struct rtattr **tb);
char name[CRYPTO_MAX_ALG_NAME];
@@ -106,18 +107,6 @@ struct crypto_queue {
unsigned int max_qlen;
};
-struct scatter_walk {
- /* Must be the first member, see struct skcipher_walk. */
- union {
- void *const addr;
-
- /* Private API field, do not touch. */
- union crypto_no_such_thing *__addr;
- };
- struct scatterlist *sg;
- unsigned int offset;
-};
-
struct crypto_attr_alg {
char name[CRYPTO_MAX_ALG_NAME];
};
@@ -157,8 +146,16 @@ void *crypto_spawn_tfm2(struct crypto_spawn *spawn);
struct crypto_attr_type *crypto_get_attr_type(struct rtattr **tb);
int crypto_check_attr_type(struct rtattr **tb, u32 type, u32 *mask_ret);
const char *crypto_attr_alg_name(struct rtattr *rta);
-int crypto_inst_setname(struct crypto_instance *inst, const char *name,
- struct crypto_alg *alg);
+int __crypto_inst_setname(struct crypto_instance *inst, const char *name,
+ const char *driver, struct crypto_alg *alg);
+
+#define crypto_inst_setname(inst, name, ...) \
+ CONCATENATE(crypto_inst_setname_, COUNT_ARGS(__VA_ARGS__))( \
+ inst, name, ##__VA_ARGS__)
+#define crypto_inst_setname_1(inst, name, alg) \
+ __crypto_inst_setname(inst, name, name, alg)
+#define crypto_inst_setname_2(inst, name, driver, alg) \
+ __crypto_inst_setname(inst, name, driver, alg)
void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen);
int crypto_enqueue_request(struct crypto_queue *queue,
@@ -266,14 +263,14 @@ static inline u32 crypto_tfm_alg_type(struct crypto_tfm *tfm)
return tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK;
}
-static inline bool crypto_request_chained(struct crypto_async_request *req)
+static inline bool crypto_tfm_req_virt(struct crypto_tfm *tfm)
{
- return !list_empty(&req->list);
+ return tfm->__crt_alg->cra_flags & CRYPTO_ALG_REQ_VIRT;
}
-static inline bool crypto_tfm_req_chain(struct crypto_tfm *tfm)
+static inline u32 crypto_request_flags(struct crypto_async_request *req)
{
- return tfm->__crt_alg->cra_flags & CRYPTO_ALG_REQ_CHAIN;
+ return req->flags & ~CRYPTO_TFM_REQ_ON_STACK;
}
#endif /* _CRYPTO_ALGAPI_H */
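
The crypto_inst_setname() wrapper dispatches on argument count via COUNT_ARGS()/CONCATENATE(), so existing two-argument callers keep working while new callers can pass a distinct driver name. Expansion sketch (the "cbc-foo" driver string is illustrative):

	crypto_inst_setname(inst, "cbc", alg);
	/* -> crypto_inst_setname_1 -> __crypto_inst_setname(inst, "cbc", "cbc", alg) */

	crypto_inst_setname(inst, "cbc", "cbc-foo", alg);
	/* -> crypto_inst_setname_2 -> __crypto_inst_setname(inst, "cbc", "cbc-foo", alg) */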
diff --git a/include/crypto/blake2b.h b/include/crypto/blake2b.h
index 0c0176285349..dd7694477e50 100644
--- a/include/crypto/blake2b.h
+++ b/include/crypto/blake2b.h
@@ -7,10 +7,20 @@
#include <linux/types.h>
#include <linux/string.h>
+struct blake2b_state {
+ /* 'h', 't', and 'f' are used in assembly code, so keep them as-is. */
+ u64 h[8];
+ u64 t[2];
+ /* The true state ends here. The rest is temporary storage. */
+ u64 f[2];
+};
+
enum blake2b_lengths {
BLAKE2B_BLOCK_SIZE = 128,
BLAKE2B_HASH_SIZE = 64,
BLAKE2B_KEY_SIZE = 64,
+ BLAKE2B_STATE_SIZE = offsetof(struct blake2b_state, f),
+ BLAKE2B_DESC_SIZE = sizeof(struct blake2b_state),
BLAKE2B_160_HASH_SIZE = 20,
BLAKE2B_256_HASH_SIZE = 32,
@@ -18,16 +28,6 @@ enum blake2b_lengths {
BLAKE2B_512_HASH_SIZE = 64,
};
-struct blake2b_state {
- /* 'h', 't', and 'f' are used in assembly code, so keep them as-is. */
- u64 h[8];
- u64 t[2];
- u64 f[2];
- u8 buf[BLAKE2B_BLOCK_SIZE];
- unsigned int buflen;
- unsigned int outlen;
-};
-
enum blake2b_iv {
BLAKE2B_IV0 = 0x6A09E667F3BCC908ULL,
BLAKE2B_IV1 = 0xBB67AE8584CAA73BULL,
@@ -40,7 +40,7 @@ enum blake2b_iv {
};
static inline void __blake2b_init(struct blake2b_state *state, size_t outlen,
- const void *key, size_t keylen)
+ size_t keylen)
{
state->h[0] = BLAKE2B_IV0 ^ (0x01010000 | keylen << 8 | outlen);
state->h[1] = BLAKE2B_IV1;
@@ -52,15 +52,6 @@ static inline void __blake2b_init(struct blake2b_state *state, size_t outlen,
state->h[7] = BLAKE2B_IV7;
state->t[0] = 0;
state->t[1] = 0;
- state->f[0] = 0;
- state->f[1] = 0;
- state->buflen = 0;
- state->outlen = outlen;
- if (keylen) {
- memcpy(state->buf, key, keylen);
- memset(&state->buf[keylen], 0, BLAKE2B_BLOCK_SIZE - keylen);
- state->buflen = BLAKE2B_BLOCK_SIZE;
- }
}
#endif /* _CRYPTO_BLAKE2B_H */
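
The slimmed-down __blake2b_init() drops the key-block buffering but keeps the BLAKE2b parameter-word folding: byte 0 of the word is the digest length, byte 1 the key length, and bytes 2-3 the fanout and depth (0x01 each in sequential mode). Worked example for an unkeyed 256-bit digest:

	/* outlen = 32, keylen = 0:
	 *   param word = 0x01010000 | (0 << 8) | 32 = 0x01010020
	 *   h[0] = 0x6A09E667F3BCC908 ^ 0x01010020 = 0x6A09E667F2BCC928
	 */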
diff --git a/include/crypto/chacha.h b/include/crypto/chacha.h
index f8cc073bba41..91f6b4cf561c 100644
--- a/include/crypto/chacha.h
+++ b/include/crypto/chacha.h
@@ -16,6 +16,7 @@
#define _CRYPTO_CHACHA_H
#include <linux/unaligned.h>
+#include <linux/string.h>
#include <linux/types.h>
/* 32-bit stream position, then 96-bit nonce (RFC7539 convention) */
@@ -25,21 +26,32 @@
#define CHACHA_BLOCK_SIZE 64
#define CHACHAPOLY_IV_SIZE 12
-#define CHACHA_STATE_WORDS (CHACHA_BLOCK_SIZE / sizeof(u32))
+#define CHACHA_KEY_WORDS 8
+#define CHACHA_STATE_WORDS 16
+#define HCHACHA_OUT_WORDS 8
/* 192-bit nonce, then 64-bit stream position */
#define XCHACHA_IV_SIZE 32
-void chacha_block_generic(u32 *state, u8 *stream, int nrounds);
-static inline void chacha20_block(u32 *state, u8 *stream)
+struct chacha_state {
+ u32 x[CHACHA_STATE_WORDS];
+};
+
+void chacha_block_generic(struct chacha_state *state,
+ u8 out[CHACHA_BLOCK_SIZE], int nrounds);
+static inline void chacha20_block(struct chacha_state *state,
+ u8 out[CHACHA_BLOCK_SIZE])
{
- chacha_block_generic(state, stream, 20);
+ chacha_block_generic(state, out, 20);
}
-void hchacha_block_arch(const u32 *state, u32 *out, int nrounds);
-void hchacha_block_generic(const u32 *state, u32 *out, int nrounds);
+void hchacha_block_arch(const struct chacha_state *state,
+ u32 out[HCHACHA_OUT_WORDS], int nrounds);
+void hchacha_block_generic(const struct chacha_state *state,
+ u32 out[HCHACHA_OUT_WORDS], int nrounds);
-static inline void hchacha_block(const u32 *state, u32 *out, int nrounds)
+static inline void hchacha_block(const struct chacha_state *state,
+ u32 out[HCHACHA_OUT_WORDS], int nrounds)
{
if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_CHACHA))
hchacha_block_arch(state, out, nrounds);
@@ -54,37 +66,40 @@ enum chacha_constants { /* expand 32-byte k */
CHACHA_CONSTANT_TE_K = 0x6b206574U
};
-static inline void chacha_init_consts(u32 *state)
+static inline void chacha_init_consts(struct chacha_state *state)
{
- state[0] = CHACHA_CONSTANT_EXPA;
- state[1] = CHACHA_CONSTANT_ND_3;
- state[2] = CHACHA_CONSTANT_2_BY;
- state[3] = CHACHA_CONSTANT_TE_K;
+ state->x[0] = CHACHA_CONSTANT_EXPA;
+ state->x[1] = CHACHA_CONSTANT_ND_3;
+ state->x[2] = CHACHA_CONSTANT_2_BY;
+ state->x[3] = CHACHA_CONSTANT_TE_K;
}
-static inline void chacha_init(u32 *state, const u32 *key, const u8 *iv)
+static inline void chacha_init(struct chacha_state *state,
+ const u32 key[CHACHA_KEY_WORDS],
+ const u8 iv[CHACHA_IV_SIZE])
{
chacha_init_consts(state);
- state[4] = key[0];
- state[5] = key[1];
- state[6] = key[2];
- state[7] = key[3];
- state[8] = key[4];
- state[9] = key[5];
- state[10] = key[6];
- state[11] = key[7];
- state[12] = get_unaligned_le32(iv + 0);
- state[13] = get_unaligned_le32(iv + 4);
- state[14] = get_unaligned_le32(iv + 8);
- state[15] = get_unaligned_le32(iv + 12);
+ state->x[4] = key[0];
+ state->x[5] = key[1];
+ state->x[6] = key[2];
+ state->x[7] = key[3];
+ state->x[8] = key[4];
+ state->x[9] = key[5];
+ state->x[10] = key[6];
+ state->x[11] = key[7];
+ state->x[12] = get_unaligned_le32(iv + 0);
+ state->x[13] = get_unaligned_le32(iv + 4);
+ state->x[14] = get_unaligned_le32(iv + 8);
+ state->x[15] = get_unaligned_le32(iv + 12);
}
-void chacha_crypt_arch(u32 *state, u8 *dst, const u8 *src,
+void chacha_crypt_arch(struct chacha_state *state, u8 *dst, const u8 *src,
unsigned int bytes, int nrounds);
-void chacha_crypt_generic(u32 *state, u8 *dst, const u8 *src,
+void chacha_crypt_generic(struct chacha_state *state, u8 *dst, const u8 *src,
unsigned int bytes, int nrounds);
-static inline void chacha_crypt(u32 *state, u8 *dst, const u8 *src,
+static inline void chacha_crypt(struct chacha_state *state,
+ u8 *dst, const u8 *src,
unsigned int bytes, int nrounds)
{
if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_CHACHA))
@@ -93,10 +108,24 @@ static inline void chacha_crypt(u32 *state, u8 *dst, const u8 *src,
chacha_crypt_generic(state, dst, src, bytes, nrounds);
}
-static inline void chacha20_crypt(u32 *state, u8 *dst, const u8 *src,
- unsigned int bytes)
+static inline void chacha20_crypt(struct chacha_state *state,
+ u8 *dst, const u8 *src, unsigned int bytes)
{
chacha_crypt(state, dst, src, bytes, 20);
}
+static inline void chacha_zeroize_state(struct chacha_state *state)
+{
+ memzero_explicit(state, sizeof(*state));
+}
+
+#if IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_CHACHA)
+bool chacha_is_arch_optimized(void);
+#else
+static inline bool chacha_is_arch_optimized(void)
+{
+ return false;
+}
+#endif
+
#endif /* _CRYPTO_CHACHA_H */
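
Typical call sequence for the struct-based API introduced above (a sketch; key and IV contents are caller-supplied):

	struct chacha_state state;
	u32 key[CHACHA_KEY_WORDS];	/* 8 words = 256-bit key */
	u8 iv[CHACHA_IV_SIZE];		/* 32-bit counter + 96-bit nonce */

	chacha_init(&state, key, iv);
	chacha20_crypt(&state, dst, src, len);
	chacha_zeroize_state(&state);	/* wipe key material when done */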
diff --git a/include/crypto/ctr.h b/include/crypto/ctr.h
index da1ee73e9ce9..06984a26c8cf 100644
--- a/include/crypto/ctr.h
+++ b/include/crypto/ctr.h
@@ -8,58 +8,8 @@
#ifndef _CRYPTO_CTR_H
#define _CRYPTO_CTR_H
-#include <crypto/algapi.h>
-#include <crypto/internal/skcipher.h>
-#include <linux/string.h>
-#include <linux/types.h>
-
#define CTR_RFC3686_NONCE_SIZE 4
#define CTR_RFC3686_IV_SIZE 8
#define CTR_RFC3686_BLOCK_SIZE 16
-static inline int crypto_ctr_encrypt_walk(struct skcipher_request *req,
- void (*fn)(struct crypto_skcipher *,
- const u8 *, u8 *))
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- int blocksize = crypto_skcipher_chunksize(tfm);
- u8 buf[MAX_CIPHER_BLOCKSIZE];
- struct skcipher_walk walk;
- int err;
-
- /* avoid integer division due to variable blocksize parameter */
- if (WARN_ON_ONCE(!is_power_of_2(blocksize)))
- return -EINVAL;
-
- err = skcipher_walk_virt(&walk, req, false);
-
- while (walk.nbytes > 0) {
- const u8 *src = walk.src.virt.addr;
- u8 *dst = walk.dst.virt.addr;
- int nbytes = walk.nbytes;
- int tail = 0;
-
- if (nbytes < walk.total) {
- tail = walk.nbytes & (blocksize - 1);
- nbytes -= tail;
- }
-
- do {
- int bsize = min(nbytes, blocksize);
-
- fn(tfm, walk.iv, buf);
-
- crypto_xor_cpy(dst, src, buf, bsize);
- crypto_inc(walk.iv, blocksize);
-
- dst += bsize;
- src += bsize;
- nbytes -= bsize;
- } while (nbytes > 0);
-
- err = skcipher_walk_done(&walk, tail);
- }
- return err;
-}
-
#endif /* _CRYPTO_CTR_H */
diff --git a/include/crypto/ghash.h b/include/crypto/ghash.h
index f832c9f2aca3..043d938e9a2c 100644
--- a/include/crypto/ghash.h
+++ b/include/crypto/ghash.h
@@ -7,18 +7,18 @@
#define __CRYPTO_GHASH_H__
#include <linux/types.h>
-#include <crypto/gf128mul.h>
#define GHASH_BLOCK_SIZE 16
#define GHASH_DIGEST_SIZE 16
+struct gf128mul_4k;
+
struct ghash_ctx {
struct gf128mul_4k *gf128;
};
struct ghash_desc_ctx {
u8 buffer[GHASH_BLOCK_SIZE];
- u32 bytes;
};
#endif
diff --git a/include/crypto/hash.h b/include/crypto/hash.h
index a67988316d06..6f6b9de12cd3 100644
--- a/include/crypto/hash.h
+++ b/include/crypto/hash.h
@@ -8,14 +8,17 @@
#ifndef _CRYPTO_HASH_H
#define _CRYPTO_HASH_H
-#include <linux/atomic.h>
#include <linux/crypto.h>
+#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/string.h>
/* Set this bit for virtual address instead of SG list. */
#define CRYPTO_AHASH_REQ_VIRT 0x00000001
+#define CRYPTO_AHASH_REQ_PRIVATE \
+ CRYPTO_AHASH_REQ_VIRT
+
struct crypto_ahash;
/**
@@ -62,6 +65,10 @@ struct ahash_request {
};
u8 *result;
+ struct scatterlist sg_head[2];
+ crypto_completion_t saved_complete;
+ void *saved_data;
+
void *__ctx[] CRYPTO_MINALIGN_ATTR;
};
@@ -82,6 +89,8 @@ struct ahash_request {
* transformation object. Data processing can happen synchronously
* [SHASH] or asynchronously [AHASH] at this point. Driver must not use
* req->result.
+ * For block-only algorithms, @update must return the number
+ * of bytes to store in the API partial block buffer.
* @final: **[mandatory]** Retrieve result from the driver. This function finalizes the
* transformation and retrieves the resulting hash from the driver and
* pushes it back to upper layers. No data processing happens at this
@@ -124,6 +133,10 @@ struct ahash_request {
* data so the transformation can continue from this point onward. No
* data processing happens at this point. Driver must not use
* req->result.
+ * @export_core: Export partial state without partial block. Only defined
+ * for algorithms that are not block-only.
+ * @import_core: Import partial state without partial block. Only defined
+ * for algorithms that are not block-only.
* @init_tfm: Initialize the cryptographic transformation object.
* This function is called only once at the instantiation
* time, right after the transformation context was
@@ -136,7 +149,6 @@ struct ahash_request {
* This is a counterpart to @init_tfm, used to remove
* various changes set in @init_tfm.
* @clone_tfm: Copy transform into new object, may allocate memory.
- * @reqsize: Size of the request context.
* @halg: see struct hash_alg_common
*/
struct ahash_alg {
@@ -147,14 +159,14 @@ struct ahash_alg {
int (*digest)(struct ahash_request *req);
int (*export)(struct ahash_request *req, void *out);
int (*import)(struct ahash_request *req, const void *in);
+ int (*export_core)(struct ahash_request *req, void *out);
+ int (*import_core)(struct ahash_request *req, const void *in);
int (*setkey)(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen);
int (*init_tfm)(struct crypto_ahash *tfm);
void (*exit_tfm)(struct crypto_ahash *tfm);
int (*clone_tfm)(struct crypto_ahash *dst, struct crypto_ahash *src);
- unsigned int reqsize;
-
struct hash_alg_common halg;
};
@@ -165,17 +177,31 @@ struct shash_desc {
#define HASH_MAX_DIGESTSIZE 64
+/* Worst case is sha3-224. */
+#define HASH_MAX_STATESIZE (200 + 144 + 1)
+
/*
- * Worst case is hmac(sha3-224-generic). Its context is a nested 'shash_desc'
- * containing a 'struct sha3_state'.
+ * Worst case is hmac(sha3-224-s390). Its context is a nested 'shash_desc'
+ * containing a 'struct s390_sha_ctx'.
*/
#define HASH_MAX_DESCSIZE (sizeof(struct shash_desc) + 360)
+#define MAX_SYNC_HASH_REQSIZE (sizeof(struct ahash_request) + \
+ HASH_MAX_DESCSIZE)
#define SHASH_DESC_ON_STACK(shash, ctx) \
char __##shash##_desc[sizeof(struct shash_desc) + HASH_MAX_DESCSIZE] \
__aligned(__alignof__(struct shash_desc)); \
struct shash_desc *shash = (struct shash_desc *)__##shash##_desc
+#define HASH_REQUEST_ON_STACK(name, _tfm) \
+ char __##name##_req[sizeof(struct ahash_request) + \
+ MAX_SYNC_HASH_REQSIZE] CRYPTO_MINALIGN_ATTR; \
+ struct ahash_request *name = \
+ ahash_request_on_stack_init(__##name##_req, (_tfm))
+
+#define HASH_REQUEST_CLONE(name, gfp) \
+ ahash_request_clone(name, sizeof(__##name##_req), gfp)
+
/**
* struct shash_alg - synchronous message digest definition
* @init: see struct ahash_alg
@@ -185,6 +211,8 @@ struct shash_desc {
* @digest: see struct ahash_alg
* @export: see struct ahash_alg
* @import: see struct ahash_alg
+ * @export_core: see struct ahash_alg
+ * @import_core: see struct ahash_alg
* @setkey: see struct ahash_alg
* @init_tfm: Initialize the cryptographic transformation object.
* This function is called only once at the instantiation
@@ -215,6 +243,8 @@ struct shash_alg {
unsigned int len, u8 *out);
int (*export)(struct shash_desc *desc, void *out);
int (*import)(struct shash_desc *desc, const void *in);
+ int (*export_core)(struct shash_desc *desc, void *out);
+ int (*import_core)(struct shash_desc *desc, const void *in);
int (*setkey)(struct crypto_shash *tfm, const u8 *key,
unsigned int keylen);
int (*init_tfm)(struct crypto_shash *tfm);
@@ -238,7 +268,6 @@ struct crypto_ahash {
};
struct crypto_shash {
- unsigned int descsize;
struct crypto_tfm base;
};
@@ -252,6 +281,11 @@ struct crypto_shash {
* CRYPTO_ALG_TYPE_SKCIPHER API applies here as well.
*/
+static inline bool ahash_req_on_stack(struct ahash_request *req)
+{
+ return crypto_req_on_stack(&req->base);
+}
+
static inline struct crypto_ahash *__crypto_ahash_cast(struct crypto_tfm *tfm)
{
return container_of(tfm, struct crypto_ahash, base);
@@ -459,7 +493,11 @@ int crypto_ahash_finup(struct ahash_request *req);
* -EBUSY if queue is full and request should be resubmitted later;
* other < 0 if an error occurred
*/
-int crypto_ahash_final(struct ahash_request *req);
+static inline int crypto_ahash_final(struct ahash_request *req)
+{
+ req->nbytes = 0;
+ return crypto_ahash_finup(req);
+}
/**
* crypto_ahash_digest() - calculate message digest for a buffer
@@ -548,7 +586,7 @@ int crypto_ahash_update(struct ahash_request *req);
static inline void ahash_request_set_tfm(struct ahash_request *req,
struct crypto_ahash *tfm)
{
- req->base.tfm = crypto_ahash_tfm(tfm);
+ crypto_request_set_tfm(&req->base, crypto_ahash_tfm(tfm));
}
/**
@@ -582,9 +620,12 @@ static inline struct ahash_request *ahash_request_alloc_noprof(
* ahash_request_free() - zeroize and free the request data structure
* @req: request data structure cipher handle to be freed
*/
-static inline void ahash_request_free(struct ahash_request *req)
+void ahash_request_free(struct ahash_request *req);
+
+static inline void ahash_request_zero(struct ahash_request *req)
{
- kfree_sensitive(req);
+ memzero_explicit(req, sizeof(*req) +
+ crypto_ahash_reqsize(crypto_ahash_reqtfm(req)));
}
static inline struct ahash_request *ahash_request_cast(
@@ -623,14 +664,9 @@ static inline void ahash_request_set_callback(struct ahash_request *req,
crypto_completion_t compl,
void *data)
{
- u32 keep = CRYPTO_AHASH_REQ_VIRT;
-
- req->base.complete = compl;
- req->base.data = data;
- flags &= ~keep;
- req->base.flags &= keep;
- req->base.flags |= flags;
- crypto_reqchain_init(&req->base);
+ flags &= ~CRYPTO_AHASH_REQ_PRIVATE;
+ flags |= req->base.flags & CRYPTO_AHASH_REQ_PRIVATE;
+ crypto_request_set_callback(&req->base, flags, compl, data);
}
/**
@@ -679,12 +715,6 @@ static inline void ahash_request_set_virt(struct ahash_request *req,
req->base.flags |= CRYPTO_AHASH_REQ_VIRT;
}
-static inline void ahash_request_chain(struct ahash_request *req,
- struct ahash_request *head)
-{
- crypto_request_chain(&req->base, &head->base);
-}
-
/**
* DOC: Synchronous Message Digest API
*
@@ -820,7 +850,7 @@ static inline void crypto_shash_clear_flags(struct crypto_shash *tfm, u32 flags)
*/
static inline unsigned int crypto_shash_descsize(struct crypto_shash *tfm)
{
- return tfm->descsize;
+ return crypto_shash_alg(tfm)->descsize;
}
static inline void *shash_desc_ctx(struct shash_desc *desc)
@@ -838,7 +868,7 @@ static inline void *shash_desc_ctx(struct shash_desc *desc)
* cipher handle must point to a keyed message digest cipher in order for this
* function to succeed.
*
- * Context: Any context.
+ * Context: Softirq or process context.
* Return: 0 if the setting of the key was successful; < 0 if an error occurred
*/
int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key,
@@ -855,7 +885,7 @@ int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key,
* crypto_shash_update and crypto_shash_final. The parameters have the same
* meaning as discussed for those separate three functions.
*
- * Context: Any context.
+ * Context: Softirq or process context.
* Return: 0 if the message digest creation was successful; < 0 if an error
* occurred
*/
@@ -875,12 +905,15 @@ int crypto_shash_digest(struct shash_desc *desc, const u8 *data,
* directly, and it allocates a hash descriptor on the stack internally.
* Note that this stack allocation may be fairly large.
*
- * Context: Any context.
+ * Context: Softirq or process context.
* Return: 0 on success; < 0 if an error occurred.
*/
int crypto_shash_tfm_digest(struct crypto_shash *tfm, const u8 *data,
unsigned int len, u8 *out);
+int crypto_hash_digest(struct crypto_ahash *tfm, const u8 *data,
+ unsigned int len, u8 *out);
+
/**
* crypto_shash_export() - extract operational state for message digest
* @desc: reference to the operational state handle whose state is exported
@@ -890,7 +923,7 @@ int crypto_shash_tfm_digest(struct crypto_shash *tfm, const u8 *data,
* caller-allocated output buffer out which must have sufficient size (e.g. by
* calling crypto_shash_statesize).
*
- * Context: Any context.
+ * Context: Softirq or process context.
* Return: 0 if the export creation was successful; < 0 if an error occurred
*/
int crypto_shash_export(struct shash_desc *desc, void *out);
@@ -904,7 +937,7 @@ int crypto_shash_export(struct shash_desc *desc, void *out);
* the input buffer. That buffer should have been generated with the
* crypto_shash_export function.
*
- * Context: Any context.
+ * Context: Softirq or process context.
* Return: 0 if the import was successful; < 0 if an error occurred
*/
int crypto_shash_import(struct shash_desc *desc, const void *in);
@@ -917,19 +950,29 @@ int crypto_shash_import(struct shash_desc *desc, const void *in);
* operational state handle. Any potentially existing state created by
* previous operations is discarded.
*
- * Context: Any context.
+ * Context: Softirq or process context.
* Return: 0 if the message digest initialization was successful; < 0 if an
* error occurred
*/
-static inline int crypto_shash_init(struct shash_desc *desc)
-{
- struct crypto_shash *tfm = desc->tfm;
+int crypto_shash_init(struct shash_desc *desc);
- if (crypto_shash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
- return -ENOKEY;
-
- return crypto_shash_alg(tfm)->init(desc);
-}
+/**
+ * crypto_shash_finup() - calculate message digest of buffer
+ * @desc: see crypto_shash_final()
+ * @data: see crypto_shash_update()
+ * @len: see crypto_shash_update()
+ * @out: see crypto_shash_final()
+ *
+ * This function is a "short-hand" for the function calls of
+ * crypto_shash_update and crypto_shash_final. The parameters have the same
+ * meaning as discussed for those separate functions.
+ *
+ * Context: Softirq or process context.
+ * Return: 0 if the message digest creation was successful; < 0 if an error
+ * occurred
+ */
+int crypto_shash_finup(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out);
/**
* crypto_shash_update() - add data to message digest for processing
@@ -939,12 +982,15 @@ static inline int crypto_shash_init(struct shash_desc *desc)
*
* Updates the message digest state of the operational state handle.
*
- * Context: Any context.
+ * Context: Softirq or process context.
* Return: 0 if the message digest update was successful; < 0 if an error
* occurred
*/
-int crypto_shash_update(struct shash_desc *desc, const u8 *data,
- unsigned int len);
+static inline int crypto_shash_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
+{
+ return crypto_shash_finup(desc, data, len, NULL);
+}
/**
* crypto_shash_final() - calculate message digest
@@ -956,29 +1002,14 @@ int crypto_shash_update(struct shash_desc *desc, const u8 *data,
* into the output buffer. The caller must ensure that the output buffer is
* large enough by using crypto_shash_digestsize.
*
- * Context: Any context.
- * Return: 0 if the message digest creation was successful; < 0 if an error
- * occurred
- */
-int crypto_shash_final(struct shash_desc *desc, u8 *out);
-
-/**
- * crypto_shash_finup() - calculate message digest of buffer
- * @desc: see crypto_shash_final()
- * @data: see crypto_shash_update()
- * @len: see crypto_shash_update()
- * @out: see crypto_shash_final()
- *
- * This function is a "short-hand" for the function calls of
- * crypto_shash_update and crypto_shash_final. The parameters have the same
- * meaning as discussed for those separate functions.
- *
- * Context: Any context.
+ * Context: Softirq or process context.
* Return: 0 if the message digest creation was successful; < 0 if an error
* occurred
*/
-int crypto_shash_finup(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *out);
+static inline int crypto_shash_final(struct shash_desc *desc, u8 *out)
+{
+ return crypto_shash_finup(desc, NULL, 0, out);
+}
static inline void shash_desc_zero(struct shash_desc *desc)
{
@@ -986,14 +1017,25 @@ static inline void shash_desc_zero(struct shash_desc *desc)
sizeof(*desc) + crypto_shash_descsize(desc->tfm));
}
-static inline int ahash_request_err(struct ahash_request *req)
+static inline bool ahash_is_async(struct crypto_ahash *tfm)
{
- return req->base.err;
+ return crypto_tfm_is_async(&tfm->base);
}
-static inline bool ahash_is_async(struct crypto_ahash *tfm)
+static inline struct ahash_request *ahash_request_on_stack_init(
+ char *buf, struct crypto_ahash *tfm)
{
- return crypto_tfm_is_async(&tfm->base);
+ struct ahash_request *req = (void *)buf;
+
+ crypto_stack_request_init(&req->base, crypto_ahash_tfm(tfm));
+ return req;
+}
+
+static inline struct ahash_request *ahash_request_clone(
+ struct ahash_request *req, size_t total, gfp_t gfp)
+{
+ return container_of(crypto_request_clone(&req->base, total, gfp),
+ struct ahash_request, base);
}
#endif /* _CRYPTO_HASH_H */
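A rough sketch of the resulting shash call flow, in which update and final are now thin wrappers around crypto_shash_finup(); tfm allocation and error handling are left to the caller:

/* Sketch: incremental hashing with an already-allocated shash tfm. */
static int example_shash(struct crypto_shash *tfm,
			 const u8 *data, unsigned int len, u8 *out)
{
	SHASH_DESC_ON_STACK(desc, tfm);
	int err;

	desc->tfm = tfm;
	err = crypto_shash_init(desc) ?:
	      crypto_shash_update(desc, data, len) ?:	/* finup(data, len, NULL) */
	      crypto_shash_final(desc, out);		/* finup(NULL, 0, out) */
	shash_desc_zero(desc);
	return err;
}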
diff --git a/include/crypto/internal/acompress.h b/include/crypto/internal/acompress.h
index aaf59f3236fa..ffffd88bbbad 100644
--- a/include/crypto/internal/acompress.h
+++ b/include/crypto/internal/acompress.h
@@ -11,12 +11,17 @@
#include <crypto/acompress.h>
#include <crypto/algapi.h>
+#include <crypto/scatterwalk.h>
+#include <linux/compiler_types.h>
+#include <linux/cpumask_types.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue_types.h>
-#define ACOMP_REQUEST_ON_STACK(name, tfm) \
+#define ACOMP_FBREQ_ON_STACK(name, req) \
char __##name##_req[sizeof(struct acomp_req) + \
MAX_SYNC_COMP_REQSIZE] CRYPTO_MINALIGN_ATTR; \
- struct acomp_req *name = acomp_request_on_stack_init( \
- __##name##_req, (tfm), 0, true)
+ struct acomp_req *name = acomp_fbreq_on_stack_init( \
+ __##name##_req, (req))
/**
* struct acomp_alg - asynchronous compression algorithm
@@ -35,9 +40,7 @@
* counterpart to @init, used to remove various changes set in
* @init.
*
- * @reqsize: Context size for (de)compression requests
* @base: Common crypto API algorithm data structure
- * @stream: Per-cpu memory for algorithm
* @calg: Common algorithm data structure shared with scomp
*/
struct acomp_alg {
@@ -46,14 +49,61 @@ struct acomp_alg {
int (*init)(struct crypto_acomp *tfm);
void (*exit)(struct crypto_acomp *tfm);
- unsigned int reqsize;
-
union {
struct COMP_ALG_COMMON;
struct comp_alg_common calg;
};
};
+struct crypto_acomp_stream {
+ spinlock_t lock;
+ void *ctx;
+};
+
+struct crypto_acomp_streams {
+ /* These must come first because of struct scomp_alg. */
+ void *(*alloc_ctx)(void);
+ union {
+ void (*free_ctx)(void *);
+ void (*cfree_ctx)(const void *);
+ };
+
+ struct crypto_acomp_stream __percpu *streams;
+ struct work_struct stream_work;
+ cpumask_t stream_want;
+};
+
+struct acomp_walk {
+ union {
+ /* Virtual address of the source. */
+ struct {
+ struct {
+ const void *const addr;
+ } virt;
+ } src;
+
+ /* Private field for the API, do not use. */
+ struct scatter_walk in;
+ };
+
+ union {
+ /* Virtual address of the destination. */
+ struct {
+ struct {
+ void *const addr;
+ } virt;
+ } dst;
+
+ /* Private field for the API, do not use. */
+ struct scatter_walk out;
+ };
+
+ unsigned int slen;
+ unsigned int dlen;
+
+ int flags;
+};
+
/*
* Transform internal helpers.
*/
@@ -98,17 +148,10 @@ void crypto_unregister_acomp(struct acomp_alg *alg);
int crypto_register_acomps(struct acomp_alg *algs, int count);
void crypto_unregister_acomps(struct acomp_alg *algs, int count);
-static inline bool acomp_request_chained(struct acomp_req *req)
-{
- return crypto_request_chained(&req->base);
-}
-
static inline bool acomp_request_issg(struct acomp_req *req)
{
return !(req->base.flags & (CRYPTO_ACOMP_REQ_SRC_VIRT |
- CRYPTO_ACOMP_REQ_DST_VIRT |
- CRYPTO_ACOMP_REQ_SRC_FOLIO |
- CRYPTO_ACOMP_REQ_DST_FOLIO));
+ CRYPTO_ACOMP_REQ_DST_VIRT));
}
static inline bool acomp_request_src_isvirt(struct acomp_req *req)
@@ -143,19 +186,62 @@ static inline bool acomp_request_isnondma(struct acomp_req *req)
CRYPTO_ACOMP_REQ_DST_NONDMA);
}
-static inline bool acomp_request_src_isfolio(struct acomp_req *req)
+static inline bool crypto_acomp_req_virt(struct crypto_acomp *tfm)
+{
+ return crypto_tfm_req_virt(&tfm->base);
+}
+
+void crypto_acomp_free_streams(struct crypto_acomp_streams *s);
+int crypto_acomp_alloc_streams(struct crypto_acomp_streams *s);
+
+struct crypto_acomp_stream *crypto_acomp_lock_stream_bh(
+ struct crypto_acomp_streams *s) __acquires(stream);
+
+static inline void crypto_acomp_unlock_stream_bh(
+ struct crypto_acomp_stream *stream) __releases(stream)
+{
+ spin_unlock_bh(&stream->lock);
+}
+
+void acomp_walk_done_src(struct acomp_walk *walk, int used);
+void acomp_walk_done_dst(struct acomp_walk *walk, int used);
+int acomp_walk_next_src(struct acomp_walk *walk);
+int acomp_walk_next_dst(struct acomp_walk *walk);
+int acomp_walk_virt(struct acomp_walk *__restrict walk,
+ struct acomp_req *__restrict req, bool atomic);
+
+static inline bool acomp_walk_more_src(const struct acomp_walk *walk, int cur)
+{
+ return walk->slen != cur;
+}
+
+static inline u32 acomp_request_flags(struct acomp_req *req)
{
- return req->base.flags & CRYPTO_ACOMP_REQ_SRC_FOLIO;
+ return crypto_request_flags(&req->base) & ~CRYPTO_ACOMP_REQ_PRIVATE;
}
-static inline bool acomp_request_dst_isfolio(struct acomp_req *req)
+static inline struct crypto_acomp *crypto_acomp_fb(struct crypto_acomp *tfm)
{
- return req->base.flags & CRYPTO_ACOMP_REQ_DST_FOLIO;
+ return __crypto_acomp_tfm(crypto_acomp_tfm(tfm)->fb);
}
-static inline bool crypto_acomp_req_chain(struct crypto_acomp *tfm)
+static inline struct acomp_req *acomp_fbreq_on_stack_init(
+ char *buf, struct acomp_req *old)
{
- return crypto_tfm_req_chain(&tfm->base);
+ struct crypto_acomp *tfm = crypto_acomp_reqtfm(old);
+ struct acomp_req *req = (void *)buf;
+
+ crypto_stack_request_init(&req->base,
+ crypto_acomp_tfm(crypto_acomp_fb(tfm)));
+ acomp_request_set_callback(req, acomp_request_flags(old), NULL, NULL);
+ req->base.flags &= ~CRYPTO_ACOMP_REQ_PRIVATE;
+ req->base.flags |= old->base.flags & CRYPTO_ACOMP_REQ_PRIVATE;
+ req->src = old->src;
+ req->dst = old->dst;
+ req->slen = old->slen;
+ req->dlen = old->dlen;
+
+ return req;
}
#endif
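A sketch of the per-cpu stream helpers above as an implementation might use them; do_compress() is hypothetical and stands in for the actual algorithm work on the stream's scratch context:

/* Sketch: borrow a per-cpu scratch context under the stream lock. */
static int example_use_stream(struct crypto_acomp_streams *s)
{
	struct crypto_acomp_stream *stream;
	int err;

	stream = crypto_acomp_lock_stream_bh(s);
	err = do_compress(stream->ctx);
	crypto_acomp_unlock_stream_bh(stream);
	return err;
}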
diff --git a/include/crypto/internal/blake2b.h b/include/crypto/internal/blake2b.h
index 982fe5e8471c..3e09e2485306 100644
--- a/include/crypto/internal/blake2b.h
+++ b/include/crypto/internal/blake2b.h
@@ -7,65 +7,36 @@
#ifndef _CRYPTO_INTERNAL_BLAKE2B_H
#define _CRYPTO_INTERNAL_BLAKE2B_H
+#include <asm/byteorder.h>
#include <crypto/blake2b.h>
#include <crypto/internal/hash.h>
+#include <linux/array_size.h>
+#include <linux/build_bug.h>
+#include <linux/compiler.h>
+#include <linux/errno.h>
+#include <linux/math.h>
#include <linux/string.h>
-
-void blake2b_compress_generic(struct blake2b_state *state,
- const u8 *block, size_t nblocks, u32 inc);
+#include <linux/types.h>
static inline void blake2b_set_lastblock(struct blake2b_state *state)
{
state->f[0] = -1;
+ state->f[1] = 0;
}
-typedef void (*blake2b_compress_t)(struct blake2b_state *state,
- const u8 *block, size_t nblocks, u32 inc);
-
-static inline void __blake2b_update(struct blake2b_state *state,
- const u8 *in, size_t inlen,
- blake2b_compress_t compress)
+static inline void blake2b_set_nonlast(struct blake2b_state *state)
{
- const size_t fill = BLAKE2B_BLOCK_SIZE - state->buflen;
-
- if (unlikely(!inlen))
- return;
- if (inlen > fill) {
- memcpy(state->buf + state->buflen, in, fill);
- (*compress)(state, state->buf, 1, BLAKE2B_BLOCK_SIZE);
- state->buflen = 0;
- in += fill;
- inlen -= fill;
- }
- if (inlen > BLAKE2B_BLOCK_SIZE) {
- const size_t nblocks = DIV_ROUND_UP(inlen, BLAKE2B_BLOCK_SIZE);
- /* Hash one less (full) block than strictly possible */
- (*compress)(state, in, nblocks - 1, BLAKE2B_BLOCK_SIZE);
- in += BLAKE2B_BLOCK_SIZE * (nblocks - 1);
- inlen -= BLAKE2B_BLOCK_SIZE * (nblocks - 1);
- }
- memcpy(state->buf + state->buflen, in, inlen);
- state->buflen += inlen;
+ state->f[0] = 0;
+ state->f[1] = 0;
}
-static inline void __blake2b_final(struct blake2b_state *state, u8 *out,
- blake2b_compress_t compress)
-{
- int i;
-
- blake2b_set_lastblock(state);
- memset(state->buf + state->buflen, 0,
- BLAKE2B_BLOCK_SIZE - state->buflen); /* Padding */
- (*compress)(state, state->buf, 1, state->buflen);
- for (i = 0; i < ARRAY_SIZE(state->h); i++)
- __cpu_to_le64s(&state->h[i]);
- memcpy(out, state->h, state->outlen);
-}
+typedef void (*blake2b_compress_t)(struct blake2b_state *state,
+ const u8 *block, size_t nblocks, u32 inc);
/* Helper functions for shash implementations of BLAKE2b */
struct blake2b_tfm_ctx {
- u8 key[BLAKE2B_KEY_SIZE];
+ u8 key[BLAKE2B_BLOCK_SIZE];
unsigned int keylen;
};
@@ -74,10 +45,13 @@ static inline int crypto_blake2b_setkey(struct crypto_shash *tfm,
{
struct blake2b_tfm_ctx *tctx = crypto_shash_ctx(tfm);
- if (keylen == 0 || keylen > BLAKE2B_KEY_SIZE)
+ if (keylen > BLAKE2B_KEY_SIZE)
return -EINVAL;
+ BUILD_BUG_ON(BLAKE2B_KEY_SIZE > BLAKE2B_BLOCK_SIZE);
+
memcpy(tctx->key, key, keylen);
+ memset(tctx->key + keylen, 0, BLAKE2B_BLOCK_SIZE - keylen);
tctx->keylen = keylen;
return 0;
@@ -89,26 +63,38 @@ static inline int crypto_blake2b_init(struct shash_desc *desc)
struct blake2b_state *state = shash_desc_ctx(desc);
unsigned int outlen = crypto_shash_digestsize(desc->tfm);
- __blake2b_init(state, outlen, tctx->key, tctx->keylen);
- return 0;
+ __blake2b_init(state, outlen, tctx->keylen);
+ return tctx->keylen ?
+ crypto_shash_update(desc, tctx->key, BLAKE2B_BLOCK_SIZE) : 0;
}
-static inline int crypto_blake2b_update(struct shash_desc *desc,
- const u8 *in, unsigned int inlen,
- blake2b_compress_t compress)
+static inline int crypto_blake2b_update_bo(struct shash_desc *desc,
+ const u8 *in, unsigned int inlen,
+ blake2b_compress_t compress)
{
struct blake2b_state *state = shash_desc_ctx(desc);
- __blake2b_update(state, in, inlen, compress);
- return 0;
+ blake2b_set_nonlast(state);
+ compress(state, in, inlen / BLAKE2B_BLOCK_SIZE, BLAKE2B_BLOCK_SIZE);
+ return inlen - round_down(inlen, BLAKE2B_BLOCK_SIZE);
}
-static inline int crypto_blake2b_final(struct shash_desc *desc, u8 *out,
+static inline int crypto_blake2b_finup(struct shash_desc *desc, const u8 *in,
+ unsigned int inlen, u8 *out,
blake2b_compress_t compress)
{
struct blake2b_state *state = shash_desc_ctx(desc);
+ u8 buf[BLAKE2B_BLOCK_SIZE];
+ int i;
- __blake2b_final(state, out, compress);
+ memcpy(buf, in, inlen);
+ memset(buf + inlen, 0, BLAKE2B_BLOCK_SIZE - inlen);
+ blake2b_set_lastblock(state);
+ compress(state, buf, 1, inlen);
+ for (i = 0; i < ARRAY_SIZE(state->h); i++)
+ __cpu_to_le64s(&state->h[i]);
+ memcpy(out, state->h, crypto_shash_digestsize(desc->tfm));
+ memzero_explicit(buf, sizeof(buf));
return 0;
}
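How an implementation might wire these helpers into its shash ops, assuming a compression routine my_blake2b_compress() of type blake2b_compress_t (a hypothetical name): update_bo() hashes whole blocks and returns the leftover byte count for the API's partial-block buffer, while finup() pads and hashes the buffered tail:

void my_blake2b_compress(struct blake2b_state *state,
			 const u8 *block, size_t nblocks, u32 inc);

/* Sketch: block-only shash callbacks built on the helpers above. */
static int example_blake2b_update(struct shash_desc *desc,
				  const u8 *in, unsigned int inlen)
{
	return crypto_blake2b_update_bo(desc, in, inlen,
					my_blake2b_compress);
}

static int example_blake2b_finup(struct shash_desc *desc, const u8 *in,
				 unsigned int inlen, u8 *out)
{
	return crypto_blake2b_finup(desc, in, inlen, out,
				    my_blake2b_compress);
}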
diff --git a/include/crypto/internal/blockhash.h b/include/crypto/internal/blockhash.h
new file mode 100644
index 000000000000..52d9d4c82493
--- /dev/null
+++ b/include/crypto/internal/blockhash.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Handle partial blocks for block hash.
+ *
+ * Copyright (c) 2015 Linaro Ltd <ard.biesheuvel@linaro.org>
+ * Copyright (c) 2025 Herbert Xu <herbert@gondor.apana.org.au>
+ */
+
+#ifndef _CRYPTO_INTERNAL_BLOCKHASH_H
+#define _CRYPTO_INTERNAL_BLOCKHASH_H
+
+#include <linux/string.h>
+#include <linux/types.h>
+
+#define BLOCK_HASH_UPDATE_BASE(block_fn, state, src, nbytes, bs, dv, \
+ buf, buflen) \
+ ({ \
+ typeof(block_fn) *_block_fn = &(block_fn); \
+ typeof(state + 0) _state = (state); \
+ unsigned int _buflen = (buflen); \
+ size_t _nbytes = (nbytes); \
+ unsigned int _bs = (bs); \
+ const u8 *_src = (src); \
+ u8 *_buf = (buf); \
+ while ((_buflen + _nbytes) >= _bs) { \
+ const u8 *data = _src; \
+ size_t len = _nbytes; \
+ size_t blocks; \
+ int remain; \
+ if (_buflen) { \
+ remain = _bs - _buflen; \
+ memcpy(_buf + _buflen, _src, remain); \
+ data = _buf; \
+ len = _bs; \
+ } \
+ remain = len % _bs; \
+ blocks = (len - remain) / (dv); \
+ (*_block_fn)(_state, data, blocks); \
+ _src += len - remain - _buflen; \
+ _nbytes -= len - remain - _buflen; \
+ _buflen = 0; \
+ } \
+ memcpy(_buf + _buflen, _src, _nbytes); \
+ _buflen += _nbytes; \
+ })
+
+#define BLOCK_HASH_UPDATE(block, state, src, nbytes, bs, buf, buflen) \
+ BLOCK_HASH_UPDATE_BASE(block, state, src, nbytes, bs, 1, buf, buflen)
+#define BLOCK_HASH_UPDATE_BLOCKS(block, state, src, nbytes, bs, buf, buflen) \
+ BLOCK_HASH_UPDATE_BASE(block, state, src, nbytes, bs, bs, buf, buflen)
+
+#endif /* _CRYPTO_INTERNAL_BLOCKHASH_H */
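For illustration, a sketch of the _BLOCKS variant in a hand-rolled update path; my_ctx, my_blocks() and the 64-byte block size are assumptions, not part of this header. Note the plain BLOCK_HASH_UPDATE variant passes a byte count rather than a block count to the block function:

void my_blocks(u32 *state, const u8 *data, size_t nblocks);

struct my_ctx {
	u32 state[8];
	u8 buf[64];
	unsigned int buflen;
};

/* Sketch: buffer partial blocks, feed whole blocks to my_blocks(). */
static void my_update(struct my_ctx *ctx, const u8 *data, size_t len)
{
	/* The statement expression evaluates to the new partial length. */
	ctx->buflen = BLOCK_HASH_UPDATE_BLOCKS(my_blocks, ctx->state,
					       data, len, 64,
					       ctx->buf, ctx->buflen);
}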
diff --git a/include/crypto/internal/chacha.h b/include/crypto/internal/chacha.h
deleted file mode 100644
index b085dc1ac151..000000000000
--- a/include/crypto/internal/chacha.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-
-#ifndef _CRYPTO_INTERNAL_CHACHA_H
-#define _CRYPTO_INTERNAL_CHACHA_H
-
-#include <crypto/chacha.h>
-#include <crypto/internal/skcipher.h>
-#include <linux/crypto.h>
-
-struct chacha_ctx {
- u32 key[8];
- int nrounds;
-};
-
-static inline int chacha_setkey(struct crypto_skcipher *tfm, const u8 *key,
- unsigned int keysize, int nrounds)
-{
- struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
- int i;
-
- if (keysize != CHACHA_KEY_SIZE)
- return -EINVAL;
-
- for (i = 0; i < ARRAY_SIZE(ctx->key); i++)
- ctx->key[i] = get_unaligned_le32(key + i * sizeof(u32));
-
- ctx->nrounds = nrounds;
- return 0;
-}
-
-static inline int chacha20_setkey(struct crypto_skcipher *tfm, const u8 *key,
- unsigned int keysize)
-{
- return chacha_setkey(tfm, key, keysize, 20);
-}
-
-static inline int chacha12_setkey(struct crypto_skcipher *tfm, const u8 *key,
- unsigned int keysize)
-{
- return chacha_setkey(tfm, key, keysize, 12);
-}
-
-#endif /* _CRYPTO_CHACHA_H */
diff --git a/include/crypto/internal/engine.h b/include/crypto/internal/engine.h
index fbf4be56cf12..b6a4ea2240fc 100644
--- a/include/crypto/internal/engine.h
+++ b/include/crypto/internal/engine.h
@@ -27,10 +27,10 @@ struct device;
* @retry_support: indication that the hardware allows re-execution
* of a failed backlog request; the crypto-engine requeues such a
* request in head position to keep order
+ * @rt: whether this queue is set to run as a realtime task
* @list: link with the global crypto engine list
* @queue_lock: spinlock to synchronise access to request queue
* @queue: the crypto queue of the engine
- * @rt: whether this queue is set to run as a realtime task
* @prepare_crypt_hardware: a request will soon arrive from the queue
* so the subsystem requests the driver to prepare the hardware
* by issuing this call
@@ -51,14 +51,13 @@ struct crypto_engine {
bool running;
bool retry_support;
+ bool rt;
struct list_head list;
spinlock_t queue_lock;
struct crypto_queue queue;
struct device *dev;
- bool rt;
-
int (*prepare_crypt_hardware)(struct crypto_engine *engine);
int (*unprepare_crypt_hardware)(struct crypto_engine *engine);
int (*do_batch_requests)(struct crypto_engine *engine);
diff --git a/include/crypto/internal/geniv.h b/include/crypto/internal/geniv.h
index 7fd7126f593a..012f5fb22d43 100644
--- a/include/crypto/internal/geniv.h
+++ b/include/crypto/internal/geniv.h
@@ -15,7 +15,6 @@
struct aead_geniv_ctx {
spinlock_t lock;
struct crypto_aead *child;
- struct crypto_sync_skcipher *sknull;
u8 salt[] __attribute__ ((aligned(__alignof__(u32))));
};
diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h
index 052ac7924af3..0f85c543f80b 100644
--- a/include/crypto/internal/hash.h
+++ b/include/crypto/internal/hash.h
@@ -11,6 +11,24 @@
#include <crypto/algapi.h>
#include <crypto/hash.h>
+/* Set this bit to handle partial blocks in the API. */
+#define CRYPTO_AHASH_ALG_BLOCK_ONLY 0x01000000
+
+/* Set this bit if final requires at least one byte. */
+#define CRYPTO_AHASH_ALG_FINAL_NONZERO 0x02000000
+
+/* Set this bit if finup can deal with multiple blocks. */
+#define CRYPTO_AHASH_ALG_FINUP_MAX 0x04000000
+
+/* This bit is set by the Crypto API if export_core is not supported. */
+#define CRYPTO_AHASH_ALG_NO_EXPORT_CORE 0x08000000
+
+#define HASH_FBREQ_ON_STACK(name, req) \
+ char __##name##_req[sizeof(struct ahash_request) + \
+ MAX_SYNC_HASH_REQSIZE] CRYPTO_MINALIGN_ATTR; \
+ struct ahash_request *name = ahash_fbreq_on_stack_init( \
+ __##name##_req, (req))
+
struct ahash_request;
struct ahash_instance {
@@ -49,6 +67,7 @@ int crypto_register_ahashes(struct ahash_alg *algs, int count);
void crypto_unregister_ahashes(struct ahash_alg *algs, int count);
int ahash_register_instance(struct crypto_template *tmpl,
struct ahash_instance *inst);
+void ahash_free_singlespawn_instance(struct ahash_instance *inst);
int shash_no_setkey(struct crypto_shash *tfm, const u8 *key,
unsigned int keylen);
@@ -58,12 +77,20 @@ static inline bool crypto_shash_alg_has_setkey(struct shash_alg *alg)
return alg->setkey != shash_no_setkey;
}
+bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg);
+
static inline bool crypto_shash_alg_needs_key(struct shash_alg *alg)
{
return crypto_shash_alg_has_setkey(alg) &&
!(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY);
}
+static inline bool crypto_hash_alg_needs_key(struct hash_alg_common *alg)
+{
+ return crypto_hash_alg_has_setkey(alg) &&
+ !(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY);
+}
+
int crypto_grab_ahash(struct crypto_ahash_spawn *spawn,
struct crypto_instance *inst,
const char *name, u32 type, u32 mask);
@@ -187,7 +214,7 @@ static inline void ahash_request_complete(struct ahash_request *req, int err)
static inline u32 ahash_request_flags(struct ahash_request *req)
{
- return req->base.flags;
+ return crypto_request_flags(&req->base) & ~CRYPTO_AHASH_REQ_PRIVATE;
}
static inline struct crypto_ahash *crypto_spawn_ahash(
@@ -247,20 +274,96 @@ static inline struct crypto_shash *__crypto_shash_cast(struct crypto_tfm *tfm)
return container_of(tfm, struct crypto_shash, base);
}
-static inline bool ahash_request_chained(struct ahash_request *req)
+static inline bool ahash_request_isvirt(struct ahash_request *req)
{
- return false;
+ return req->base.flags & CRYPTO_AHASH_REQ_VIRT;
}
-static inline bool ahash_request_isvirt(struct ahash_request *req)
+static inline bool crypto_ahash_req_virt(struct crypto_ahash *tfm)
{
- return req->base.flags & CRYPTO_AHASH_REQ_VIRT;
+ return crypto_tfm_req_virt(&tfm->base);
}
-static inline bool crypto_ahash_req_chain(struct crypto_ahash *tfm)
+static inline struct crypto_ahash *crypto_ahash_fb(struct crypto_ahash *tfm)
{
- return crypto_tfm_req_chain(&tfm->base);
+ return __crypto_ahash_cast(crypto_ahash_tfm(tfm)->fb);
}
+static inline struct ahash_request *ahash_fbreq_on_stack_init(
+ char *buf, struct ahash_request *old)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(old);
+ struct ahash_request *req = (void *)buf;
+
+ crypto_stack_request_init(&req->base,
+ crypto_ahash_tfm(crypto_ahash_fb(tfm)));
+ ahash_request_set_callback(req, ahash_request_flags(old), NULL, NULL);
+ req->base.flags &= ~CRYPTO_AHASH_REQ_PRIVATE;
+ req->base.flags |= old->base.flags & CRYPTO_AHASH_REQ_PRIVATE;
+ req->src = old->src;
+ req->result = old->result;
+ req->nbytes = old->nbytes;
+
+ return req;
+}
+
+/* Return the state size without partial block for block-only algorithms. */
+static inline unsigned int crypto_shash_coresize(struct crypto_shash *tfm)
+{
+ return crypto_shash_statesize(tfm) - crypto_shash_blocksize(tfm) - 1;
+}
+
+/* This can only be used if the request was never cloned. */
+#define HASH_REQUEST_ZERO(name) \
+ memzero_explicit(__##name##_req, sizeof(__##name##_req))
+
+/**
+ * crypto_ahash_export_core() - extract core state for message digest
+ * @req: reference to the ahash_request handle whose state is exported
+ * @out: output buffer of sufficient size that can hold the hash state
+ *
+ * Export the hash state without the partial block buffer.
+ *
+ * Context: Softirq or process context.
+ * Return: 0 if the export creation was successful; < 0 if an error occurred
+ */
+int crypto_ahash_export_core(struct ahash_request *req, void *out);
+
+/**
+ * crypto_ahash_import_core() - import core state
+ * @req: reference to ahash_request handle the state is imported into
+ * @in: buffer holding the state
+ *
+ * Import the hash state without the partial block buffer.
+ *
+ * Context: Softirq or process context.
+ * Return: 0 if the import was successful; < 0 if an error occurred
+ */
+int crypto_ahash_import_core(struct ahash_request *req, const void *in);
+
+/**
+ * crypto_shash_export_core() - extract core state for message digest
+ * @desc: reference to the operational state handle whose state is exported
+ * @out: output buffer of sufficient size that can hold the hash state
+ *
+ * Export the hash state without the partial block buffer.
+ *
+ * Context: Softirq or process context.
+ * Return: 0 if the export creation was successful; < 0 if an error occurred
+ */
+int crypto_shash_export_core(struct shash_desc *desc, void *out);
+
+/**
+ * crypto_shash_import_core() - import core state
+ * @desc: reference to the operational state handle the state is imported into
+ * @in: buffer holding the state
+ *
+ * Import the hash state without the partial block buffer.
+ *
+ * Context: Softirq or process context.
+ * Return: 0 if the import was successful; < 0 if an error occurred
+ */
+int crypto_shash_import_core(struct shash_desc *desc, const void *in);
+
#endif /* _CRYPTO_INTERNAL_HASH_H */
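A sketch of the core export/import pair; the transfer buffer only needs crypto_shash_coresize() bytes since the partial block is excluded:

/* Sketch: copy the core hash state from one descriptor to another. */
static int example_copy_core(struct crypto_shash *tfm,
			     struct shash_desc *src, struct shash_desc *dst)
{
	u8 buf[HASH_MAX_STATESIZE];

	if (crypto_shash_coresize(tfm) > sizeof(buf))
		return -EINVAL;
	return crypto_shash_export_core(src, buf) ?:
	       crypto_shash_import_core(dst, buf);
}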
diff --git a/include/crypto/internal/poly1305.h b/include/crypto/internal/poly1305.h
index e614594f88c1..c60315f47562 100644
--- a/include/crypto/internal/poly1305.h
+++ b/include/crypto/internal/poly1305.h
@@ -6,9 +6,8 @@
#ifndef _CRYPTO_INTERNAL_POLY1305_H
#define _CRYPTO_INTERNAL_POLY1305_H
-#include <linux/unaligned.h>
-#include <linux/types.h>
#include <crypto/poly1305.h>
+#include <linux/types.h>
/*
* Poly1305 core functions. These only accept whole blocks; the caller must
@@ -31,4 +30,29 @@ void poly1305_core_blocks(struct poly1305_state *state,
void poly1305_core_emit(const struct poly1305_state *state, const u32 nonce[4],
void *dst);
+void poly1305_block_init_arch(struct poly1305_block_state *state,
+ const u8 raw_key[POLY1305_BLOCK_SIZE]);
+void poly1305_block_init_generic(struct poly1305_block_state *state,
+ const u8 raw_key[POLY1305_BLOCK_SIZE]);
+void poly1305_blocks_arch(struct poly1305_block_state *state, const u8 *src,
+ unsigned int len, u32 padbit);
+
+static inline void poly1305_blocks_generic(struct poly1305_block_state *state,
+ const u8 *src, unsigned int len,
+ u32 padbit)
+{
+ poly1305_core_blocks(&state->h, &state->core_r, src,
+ len / POLY1305_BLOCK_SIZE, padbit);
+}
+
+void poly1305_emit_arch(const struct poly1305_state *state,
+ u8 digest[POLY1305_DIGEST_SIZE], const u32 nonce[4]);
+
+static inline void poly1305_emit_generic(const struct poly1305_state *state,
+ u8 digest[POLY1305_DIGEST_SIZE],
+ const u32 nonce[4])
+{
+ poly1305_core_emit(state, nonce, digest);
+}
+
#endif
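A sketch of the split block state on the generic path; deriving the nonce (the s half of the one-time key) is left to the caller here, and len is assumed to be a whole number of blocks:

/* Sketch: whole-block Poly1305 with the generic helpers. */
static void example_poly1305_core(const u8 raw_r[POLY1305_BLOCK_SIZE],
				  const u32 nonce[4],
				  const u8 *src, unsigned int len,
				  u8 digest[POLY1305_DIGEST_SIZE])
{
	struct poly1305_block_state state;

	poly1305_block_init_generic(&state, raw_r);
	poly1305_blocks_generic(&state, src, len, 1);	/* len % 16 == 0 */
	poly1305_emit_generic(&state.h, digest, nonce);
}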
diff --git a/include/crypto/internal/scompress.h b/include/crypto/internal/scompress.h
index f25aa2ea3b48..533d6c16a491 100644
--- a/include/crypto/internal/scompress.h
+++ b/include/crypto/internal/scompress.h
@@ -9,10 +9,7 @@
#ifndef _CRYPTO_SCOMP_INT_H
#define _CRYPTO_SCOMP_INT_H
-#include <crypto/acompress.h>
-#include <crypto/algapi.h>
-
-struct acomp_req;
+#include <crypto/internal/acompress.h>
struct crypto_scomp {
struct crypto_tfm base;
@@ -26,12 +23,10 @@ struct crypto_scomp {
* @compress: Function performs a compress operation
* @decompress: Function performs a de-compress operation
* @base: Common crypto API algorithm data structure
- * @stream: Per-cpu memory for algorithm
+ * @streams: Per-cpu memory for algorithm
* @calg: Common algorithm data structure shared with acomp
*/
struct scomp_alg {
- void *(*alloc_ctx)(void);
- void (*free_ctx)(void *ctx);
int (*compress)(struct crypto_scomp *tfm, const u8 *src,
unsigned int slen, u8 *dst, unsigned int *dlen,
void *ctx);
@@ -40,6 +35,14 @@ struct scomp_alg {
void *ctx);
union {
+ struct {
+ void *(*alloc_ctx)(void);
+ void (*free_ctx)(void *ctx);
+ };
+ struct crypto_acomp_streams streams;
+ };
+
+ union {
struct COMP_ALG_COMMON;
struct comp_alg_common calg;
};
diff --git a/include/crypto/internal/sha2.h b/include/crypto/internal/sha2.h
new file mode 100644
index 000000000000..b9bccd3ff57f
--- /dev/null
+++ b/include/crypto/internal/sha2.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef _CRYPTO_INTERNAL_SHA2_H
+#define _CRYPTO_INTERNAL_SHA2_H
+
+#include <crypto/internal/simd.h>
+#include <crypto/sha2.h>
+#include <linux/compiler_attributes.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/unaligned.h>
+
+#if IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_SHA256)
+bool sha256_is_arch_optimized(void);
+#else
+static inline bool sha256_is_arch_optimized(void)
+{
+ return false;
+}
+#endif
+void sha256_blocks_generic(u32 state[SHA256_STATE_WORDS],
+ const u8 *data, size_t nblocks);
+void sha256_blocks_arch(u32 state[SHA256_STATE_WORDS],
+ const u8 *data, size_t nblocks);
+void sha256_blocks_simd(u32 state[SHA256_STATE_WORDS],
+ const u8 *data, size_t nblocks);
+
+static inline void sha256_choose_blocks(
+ u32 state[SHA256_STATE_WORDS], const u8 *data, size_t nblocks,
+ bool force_generic, bool force_simd)
+{
+ if (!IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_SHA256) || force_generic)
+ sha256_blocks_generic(state, data, nblocks);
+ else if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_SHA256_SIMD) &&
+ (force_simd || crypto_simd_usable()))
+ sha256_blocks_simd(state, data, nblocks);
+ else
+ sha256_blocks_arch(state, data, nblocks);
+}
+
+static __always_inline void sha256_finup(
+ struct crypto_sha256_state *sctx, u8 buf[SHA256_BLOCK_SIZE],
+ size_t len, u8 out[SHA256_DIGEST_SIZE], size_t digest_size,
+ bool force_generic, bool force_simd)
+{
+ const size_t bit_offset = SHA256_BLOCK_SIZE - 8;
+ __be64 *bits = (__be64 *)&buf[bit_offset];
+ int i;
+
+ buf[len++] = 0x80;
+ if (len > bit_offset) {
+ memset(&buf[len], 0, SHA256_BLOCK_SIZE - len);
+ sha256_choose_blocks(sctx->state, buf, 1, force_generic,
+ force_simd);
+ len = 0;
+ }
+
+ memset(&buf[len], 0, bit_offset - len);
+ *bits = cpu_to_be64(sctx->count << 3);
+ sha256_choose_blocks(sctx->state, buf, 1, force_generic, force_simd);
+
+ for (i = 0; i < digest_size; i += 4)
+ put_unaligned_be32(sctx->state[i / 4], out + i);
+}
+
+#endif /* _CRYPTO_INTERNAL_SHA2_H */
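A sketch of finalization for a short message (under one block); longer inputs would first push whole blocks through sha256_choose_blocks() and keep only the tail in buf:

/* Sketch: digest a message shorter than SHA256_BLOCK_SIZE bytes. */
static void example_sha256_short(const u8 *data, size_t len,
				 u8 out[SHA256_DIGEST_SIZE])
{
	struct crypto_sha256_state sctx;
	u8 buf[SHA256_BLOCK_SIZE];

	sha256_block_init(&sctx);	/* from <crypto/sha2.h> */
	sctx.count = len;		/* total message length in bytes */
	memcpy(buf, data, len);
	sha256_finup(&sctx, buf, len, out, SHA256_DIGEST_SIZE,
		     false, false);
}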
diff --git a/include/crypto/internal/simd.h b/include/crypto/internal/simd.h
index be97b97a75dd..7e7f1ac3b7fd 100644
--- a/include/crypto/internal/simd.h
+++ b/include/crypto/internal/simd.h
@@ -6,6 +6,7 @@
#ifndef _CRYPTO_INTERNAL_SIMD_H
#define _CRYPTO_INTERNAL_SIMD_H
+#include <asm/simd.h>
#include <linux/percpu.h>
#include <linux/types.h>
@@ -43,14 +44,9 @@ void simd_unregister_aeads(struct aead_alg *algs, int count,
*
* This delegates to may_use_simd(), except that this also returns false if SIMD
* in crypto code has been temporarily disabled on this CPU by the crypto
- * self-tests, in order to test the no-SIMD fallback code. This override is
- * currently limited to configurations where the extra self-tests are enabled,
- * because it might be a bit too invasive to be part of the regular self-tests.
- *
- * This is a macro so that <asm/simd.h>, which some architectures don't have,
- * doesn't have to be included directly here.
+ * self-tests, in order to test the no-SIMD fallback code.
*/
-#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
+#ifdef CONFIG_CRYPTO_SELFTESTS
DECLARE_PER_CPU(bool, crypto_simd_disabled_for_test);
#define crypto_simd_usable() \
(may_use_simd() && !this_cpu_read(crypto_simd_disabled_for_test))
diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h
index a958ab0636ad..d5aa535263f6 100644
--- a/include/crypto/internal/skcipher.h
+++ b/include/crypto/internal/skcipher.h
@@ -10,6 +10,7 @@
#include <crypto/algapi.h>
#include <crypto/internal/cipher.h>
+#include <crypto/scatterwalk.h>
#include <crypto/skcipher.h>
#include <linux/types.h>
@@ -54,48 +55,6 @@ struct crypto_lskcipher_spawn {
struct crypto_spawn base;
};
-struct skcipher_walk {
- union {
- /* Virtual address of the source. */
- struct {
- struct {
- const void *const addr;
- } virt;
- } src;
-
- /* Private field for the API, do not use. */
- struct scatter_walk in;
- };
-
- unsigned int nbytes;
-
- union {
- /* Virtual address of the destination. */
- struct {
- struct {
- void *const addr;
- } virt;
- } dst;
-
- /* Private field for the API, do not use. */
- struct scatter_walk out;
- };
-
- unsigned int total;
-
- u8 *page;
- u8 *buffer;
- u8 *oiv;
- void *iv;
-
- unsigned int ivsize;
-
- int flags;
- unsigned int blocksize;
- unsigned int stride;
- unsigned int alignmask;
-};
-
static inline struct crypto_instance *skcipher_crypto_instance(
struct skcipher_instance *inst)
{
@@ -212,7 +171,6 @@ void crypto_unregister_lskciphers(struct lskcipher_alg *algs, int count);
int lskcipher_register_instance(struct crypto_template *tmpl,
struct lskcipher_instance *inst);
-int skcipher_walk_done(struct skcipher_walk *walk, int res);
int skcipher_walk_virt(struct skcipher_walk *__restrict walk,
struct skcipher_request *__restrict req,
bool atomic);
@@ -223,11 +181,6 @@ int skcipher_walk_aead_decrypt(struct skcipher_walk *__restrict walk,
struct aead_request *__restrict req,
bool atomic);
-static inline void skcipher_walk_abort(struct skcipher_walk *walk)
-{
- skcipher_walk_done(walk, -ECANCELED);
-}
-
static inline void *crypto_skcipher_ctx(struct crypto_skcipher *tfm)
{
return crypto_tfm_ctx(&tfm->base);
diff --git a/include/crypto/md5.h b/include/crypto/md5.h
index cf9e9dec3d21..198b5d69b92f 100644
--- a/include/crypto/md5.h
+++ b/include/crypto/md5.h
@@ -8,6 +8,7 @@
#define MD5_HMAC_BLOCK_SIZE 64
#define MD5_BLOCK_WORDS 16
#define MD5_HASH_WORDS 4
+#define MD5_STATE_SIZE 24
#define MD5_H0 0x67452301UL
#define MD5_H1 0xefcdab89UL
@@ -18,8 +19,8 @@ extern const u8 md5_zero_message_hash[MD5_DIGEST_SIZE];
struct md5_state {
u32 hash[MD5_HASH_WORDS];
- u32 block[MD5_BLOCK_WORDS];
u64 byte_count;
+ u32 block[MD5_BLOCK_WORDS];
};
#endif
diff --git a/include/crypto/null.h b/include/crypto/null.h
index 0ef577cc00e3..1c66abf9de3b 100644
--- a/include/crypto/null.h
+++ b/include/crypto/null.h
@@ -9,7 +9,4 @@
#define NULL_DIGEST_SIZE 0
#define NULL_IV_SIZE 0
-struct crypto_sync_skcipher *crypto_get_default_null_skcipher(void);
-void crypto_put_default_null_skcipher(void);
-
#endif
diff --git a/include/crypto/poly1305.h b/include/crypto/poly1305.h
index 090692ec3bc7..e54abda8cfe9 100644
--- a/include/crypto/poly1305.h
+++ b/include/crypto/poly1305.h
@@ -7,7 +7,6 @@
#define _CRYPTO_POLY1305_H
#include <linux/types.h>
-#include <linux/crypto.h>
#define POLY1305_BLOCK_SIZE 16
#define POLY1305_KEY_SIZE 32
@@ -38,17 +37,8 @@ struct poly1305_state {
};
};
-struct poly1305_desc_ctx {
- /* partial buffer */
- u8 buf[POLY1305_BLOCK_SIZE];
- /* bytes used in partial buffer */
- unsigned int buflen;
- /* how many keys have been set in r[] */
- unsigned short rset;
- /* whether s[] has been set */
- bool sset;
- /* finalize key */
- u32 s[4];
+/* Combined state for block function. */
+struct poly1305_block_state {
/* accumulator */
struct poly1305_state h;
/* key */
@@ -58,42 +48,29 @@ struct poly1305_desc_ctx {
};
};
-void poly1305_init_arch(struct poly1305_desc_ctx *desc,
- const u8 key[POLY1305_KEY_SIZE]);
-void poly1305_init_generic(struct poly1305_desc_ctx *desc,
- const u8 key[POLY1305_KEY_SIZE]);
-
-static inline void poly1305_init(struct poly1305_desc_ctx *desc, const u8 *key)
-{
- if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_POLY1305))
- poly1305_init_arch(desc, key);
- else
- poly1305_init_generic(desc, key);
-}
-
-void poly1305_update_arch(struct poly1305_desc_ctx *desc, const u8 *src,
- unsigned int nbytes);
-void poly1305_update_generic(struct poly1305_desc_ctx *desc, const u8 *src,
- unsigned int nbytes);
-
-static inline void poly1305_update(struct poly1305_desc_ctx *desc,
- const u8 *src, unsigned int nbytes)
-{
- if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_POLY1305))
- poly1305_update_arch(desc, src, nbytes);
- else
- poly1305_update_generic(desc, src, nbytes);
-}
+struct poly1305_desc_ctx {
+ /* partial buffer */
+ u8 buf[POLY1305_BLOCK_SIZE];
+ /* bytes used in partial buffer */
+ unsigned int buflen;
+ /* finalize key */
+ u32 s[4];
+ struct poly1305_block_state state;
+};
-void poly1305_final_arch(struct poly1305_desc_ctx *desc, u8 *digest);
-void poly1305_final_generic(struct poly1305_desc_ctx *desc, u8 *digest);
+void poly1305_init(struct poly1305_desc_ctx *desc,
+ const u8 key[POLY1305_KEY_SIZE]);
+void poly1305_update(struct poly1305_desc_ctx *desc,
+ const u8 *src, unsigned int nbytes);
+void poly1305_final(struct poly1305_desc_ctx *desc, u8 *digest);
-static inline void poly1305_final(struct poly1305_desc_ctx *desc, u8 *digest)
+#if IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_POLY1305)
+bool poly1305_is_arch_optimized(void);
+#else
+static inline bool poly1305_is_arch_optimized(void)
{
- if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_POLY1305))
- poly1305_final_arch(desc, digest);
- else
- poly1305_final_generic(desc, digest);
+ return false;
}
+#endif
#endif
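A caller-side sketch of the streaming interface above; the context holds key material, so it should not be reused after final:

/* Sketch: one-shot MAC via the streaming Poly1305 interface. */
static void example_poly1305(const u8 key[POLY1305_KEY_SIZE],
			     const u8 *data, unsigned int len,
			     u8 digest[POLY1305_DIGEST_SIZE])
{
	struct poly1305_desc_ctx desc;

	poly1305_init(&desc, key);
	poly1305_update(&desc, data, len);
	poly1305_final(&desc, digest);
}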
diff --git a/include/crypto/polyval.h b/include/crypto/polyval.h
index 1d630f371f77..d2e63743e592 100644
--- a/include/crypto/polyval.h
+++ b/include/crypto/polyval.h
@@ -8,15 +8,7 @@
#ifndef _CRYPTO_POLYVAL_H
#define _CRYPTO_POLYVAL_H
-#include <linux/types.h>
-#include <linux/crypto.h>
-
#define POLYVAL_BLOCK_SIZE 16
#define POLYVAL_DIGEST_SIZE 16
-void polyval_mul_non4k(u8 *op1, const u8 *op2);
-
-void polyval_update_non4k(const u8 *key, const u8 *in,
- size_t nblocks, u8 *accumulator);
-
#endif
diff --git a/include/crypto/rng.h b/include/crypto/rng.h
index 5ac4388f50e1..f8224cc390f8 100644
--- a/include/crypto/rng.h
+++ b/include/crypto/rng.h
@@ -102,12 +102,10 @@ static inline struct rng_alg *__crypto_rng_alg(struct crypto_alg *alg)
}
/**
- * crypto_rng_alg - obtain name of RNG
- * @tfm: cipher handle
- *
- * Return the generic name (cra_name) of the initialized random number generator
+ * crypto_rng_alg() - obtain 'struct rng_alg' pointer from RNG handle
+ * @tfm: RNG handle
*
- * Return: generic name string
+ * Return: Pointer to 'struct rng_alg', derived from @tfm RNG handle
*/
static inline struct rng_alg *crypto_rng_alg(struct crypto_rng *tfm)
{
diff --git a/include/crypto/scatterwalk.h b/include/crypto/scatterwalk.h
index 94a8585f26b2..15ab743f68c8 100644
--- a/include/crypto/scatterwalk.h
+++ b/include/crypto/scatterwalk.h
@@ -11,11 +11,64 @@
#ifndef _CRYPTO_SCATTERWALK_H
#define _CRYPTO_SCATTERWALK_H
-#include <crypto/algapi.h>
-
+#include <linux/errno.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
+#include <linux/types.h>
+
+struct scatter_walk {
+ /* Must be the first member, see struct skcipher_walk. */
+ union {
+ void *const addr;
+
+ /* Private API field, do not touch. */
+ union crypto_no_such_thing *__addr;
+ };
+ struct scatterlist *sg;
+ unsigned int offset;
+};
+
+struct skcipher_walk {
+ union {
+ /* Virtual address of the source. */
+ struct {
+ struct {
+ const void *const addr;
+ } virt;
+ } src;
+
+ /* Private field for the API, do not use. */
+ struct scatter_walk in;
+ };
+
+ union {
+ /* Virtual address of the destination. */
+ struct {
+ struct {
+ void *const addr;
+ } virt;
+ } dst;
+
+ /* Private field for the API, do not use. */
+ struct scatter_walk out;
+ };
+
+ unsigned int nbytes;
+ unsigned int total;
+
+ u8 *page;
+ u8 *buffer;
+ u8 *oiv;
+ void *iv;
+
+ unsigned int ivsize;
+
+ int flags;
+ unsigned int blocksize;
+ unsigned int stride;
+ unsigned int alignmask;
+};
static inline void scatterwalk_crypto_chain(struct scatterlist *head,
struct scatterlist *sg, int num)
@@ -243,4 +296,12 @@ struct scatterlist *scatterwalk_ffwd(struct scatterlist dst[2],
struct scatterlist *src,
unsigned int len);
+int skcipher_walk_first(struct skcipher_walk *walk, bool atomic);
+int skcipher_walk_done(struct skcipher_walk *walk, int res);
+
+static inline void skcipher_walk_abort(struct skcipher_walk *walk)
+{
+ skcipher_walk_done(walk, -ECANCELED);
+}
+
#endif /* _CRYPTO_SCATTERWALK_H */
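A sketch of the walker loop now anchored in this header (skcipher_walk_virt itself stays in crypto/internal/skcipher.h; process_chunk() is hypothetical and stands in for the cipher work):

/* Sketch: the canonical walk loop over an skcipher request. */
static int example_walk(struct skcipher_request *req)
{
	struct skcipher_walk walk;
	int err;

	err = skcipher_walk_virt(&walk, req, false);
	while (walk.nbytes) {
		process_chunk(walk.dst.virt.addr, walk.src.virt.addr,
			      walk.nbytes, walk.iv);
		err = skcipher_walk_done(&walk, 0);	/* 0 bytes left */
	}
	return err;
}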
diff --git a/include/crypto/sha1.h b/include/crypto/sha1.h
index 044ecea60ac8..f48230b1413c 100644
--- a/include/crypto/sha1.h
+++ b/include/crypto/sha1.h
@@ -10,6 +10,7 @@
#define SHA1_DIGEST_SIZE 20
#define SHA1_BLOCK_SIZE 64
+#define SHA1_STATE_SIZE offsetof(struct sha1_state, buffer)
#define SHA1_H0 0x67452301UL
#define SHA1_H1 0xefcdab89UL
@@ -25,14 +26,6 @@ struct sha1_state {
u8 buffer[SHA1_BLOCK_SIZE];
};
-struct shash_desc;
-
-extern int crypto_sha1_update(struct shash_desc *desc, const u8 *data,
- unsigned int len);
-
-extern int crypto_sha1_finup(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *hash);
-
/*
* An implementation of SHA-1's compression function. Don't use in new code!
* You shouldn't be using SHA-1, and even if you *have* to use SHA-1, this isn't
diff --git a/include/crypto/sha1_base.h b/include/crypto/sha1_base.h
index 0c342ed0d038..62701d136c79 100644
--- a/include/crypto/sha1_base.h
+++ b/include/crypto/sha1_base.h
@@ -10,10 +10,9 @@
#include <crypto/internal/hash.h>
#include <crypto/sha1.h>
-#include <linux/crypto.h>
-#include <linux/module.h>
+#include <linux/math.h>
#include <linux/string.h>
-
+#include <linux/types.h>
#include <linux/unaligned.h>
typedef void (sha1_block_fn)(struct sha1_state *sst, u8 const *src, int blocks);
@@ -32,63 +31,38 @@ static inline int sha1_base_init(struct shash_desc *desc)
return 0;
}
-static inline int sha1_base_do_update(struct shash_desc *desc,
- const u8 *data,
- unsigned int len,
- sha1_block_fn *block_fn)
+static inline int sha1_base_do_update_blocks(struct shash_desc *desc,
+ const u8 *data,
+ unsigned int len,
+ sha1_block_fn *block_fn)
{
+ unsigned int remain = len - round_down(len, SHA1_BLOCK_SIZE);
struct sha1_state *sctx = shash_desc_ctx(desc);
- unsigned int partial = sctx->count % SHA1_BLOCK_SIZE;
-
- sctx->count += len;
-
- if (unlikely((partial + len) >= SHA1_BLOCK_SIZE)) {
- int blocks;
-
- if (partial) {
- int p = SHA1_BLOCK_SIZE - partial;
-
- memcpy(sctx->buffer + partial, data, p);
- data += p;
- len -= p;
-
- block_fn(sctx, sctx->buffer, 1);
- }
- blocks = len / SHA1_BLOCK_SIZE;
- len %= SHA1_BLOCK_SIZE;
-
- if (blocks) {
- block_fn(sctx, data, blocks);
- data += blocks * SHA1_BLOCK_SIZE;
- }
- partial = 0;
- }
- if (len)
- memcpy(sctx->buffer + partial, data, len);
-
- return 0;
+ sctx->count += len - remain;
+ block_fn(sctx, data, len / SHA1_BLOCK_SIZE);
+ return remain;
}
-static inline int sha1_base_do_finalize(struct shash_desc *desc,
- sha1_block_fn *block_fn)
+static inline int sha1_base_do_finup(struct shash_desc *desc,
+ const u8 *src, unsigned int len,
+ sha1_block_fn *block_fn)
{
- const int bit_offset = SHA1_BLOCK_SIZE - sizeof(__be64);
+ unsigned int bit_offset = SHA1_BLOCK_SIZE / 8 - 1;
struct sha1_state *sctx = shash_desc_ctx(desc);
- __be64 *bits = (__be64 *)(sctx->buffer + bit_offset);
- unsigned int partial = sctx->count % SHA1_BLOCK_SIZE;
-
- sctx->buffer[partial++] = 0x80;
- if (partial > bit_offset) {
- memset(sctx->buffer + partial, 0x0, SHA1_BLOCK_SIZE - partial);
- partial = 0;
-
- block_fn(sctx, sctx->buffer, 1);
- }
-
- memset(sctx->buffer + partial, 0x0, bit_offset - partial);
- *bits = cpu_to_be64(sctx->count << 3);
- block_fn(sctx, sctx->buffer, 1);
+ union {
+ __be64 b64[SHA1_BLOCK_SIZE / 4];
+ u8 u8[SHA1_BLOCK_SIZE * 2];
+ } block = {};
+
+ if (len >= bit_offset * 8)
+ bit_offset += SHA1_BLOCK_SIZE / 8;
+ memcpy(&block, src, len);
+ block.u8[len] = 0x80;
+ sctx->count += len;
+ block.b64[bit_offset] = cpu_to_be64(sctx->count << 3);
+ block_fn(sctx, block.u8, (bit_offset + 1) * 8 / SHA1_BLOCK_SIZE);
+ memzero_explicit(&block, sizeof(block));
return 0;
}
@@ -102,7 +76,6 @@ static inline int sha1_base_finish(struct shash_desc *desc, u8 *out)
for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(__be32); i++)
put_unaligned_be32(sctx->state[i], digest++);
- memzero_explicit(sctx, sizeof(*sctx));
return 0;
}
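A sketch of a block-only SHA-1 shash built on these helpers; sha1_block() is a hypothetical compression routine with the sha1_block_fn signature:

void sha1_block(struct sha1_state *sst, u8 const *src, int blocks);

/* Sketch: update returns the tail length for the API's partial buffer. */
static int example_sha1_update(struct shash_desc *desc, const u8 *data,
			       unsigned int len)
{
	return sha1_base_do_update_blocks(desc, data, len, sha1_block);
}

static int example_sha1_finup(struct shash_desc *desc, const u8 *data,
			      unsigned int len, u8 *out)
{
	sha1_base_do_finup(desc, data, len, sha1_block);
	return sha1_base_finish(desc, out);
}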
diff --git a/include/crypto/sha2.h b/include/crypto/sha2.h
index b9e9281d76c9..4912572578dc 100644
--- a/include/crypto/sha2.h
+++ b/include/crypto/sha2.h
@@ -13,12 +13,14 @@
#define SHA256_DIGEST_SIZE 32
#define SHA256_BLOCK_SIZE 64
+#define SHA256_STATE_WORDS 8
#define SHA384_DIGEST_SIZE 48
#define SHA384_BLOCK_SIZE 128
#define SHA512_DIGEST_SIZE 64
#define SHA512_BLOCK_SIZE 128
+#define SHA512_STATE_SIZE 80
#define SHA224_H0 0xc1059ed8UL
#define SHA224_H1 0x367cd507UL
@@ -64,9 +66,19 @@ extern const u8 sha384_zero_message_hash[SHA384_DIGEST_SIZE];
extern const u8 sha512_zero_message_hash[SHA512_DIGEST_SIZE];
-struct sha256_state {
- u32 state[SHA256_DIGEST_SIZE / 4];
+struct crypto_sha256_state {
+ u32 state[SHA256_STATE_WORDS];
u64 count;
+};
+
+struct sha256_state {
+ union {
+ struct crypto_sha256_state ctx;
+ struct {
+ u32 state[SHA256_STATE_WORDS];
+ u64 count;
+ };
+ };
u8 buf[SHA256_BLOCK_SIZE];
};
@@ -76,31 +88,7 @@ struct sha512_state {
u8 buf[SHA512_BLOCK_SIZE];
};
-struct shash_desc;
-
-extern int crypto_sha256_update(struct shash_desc *desc, const u8 *data,
- unsigned int len);
-
-extern int crypto_sha256_finup(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *hash);
-
-extern int crypto_sha512_update(struct shash_desc *desc, const u8 *data,
- unsigned int len);
-
-extern int crypto_sha512_finup(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *hash);
-
-/*
- * Stand-alone implementation of the SHA256 algorithm. It is designed to
- * have as little dependencies as possible so it can be used in the
- * kexec_file purgatory. In other cases you should generally use the
- * hash APIs from include/crypto/hash.h. Especially when hashing large
- * amounts of data as those APIs may be hw-accelerated.
- *
- * For details see lib/crypto/sha256.c
- */
-
-static inline void sha256_init(struct sha256_state *sctx)
+static inline void sha256_block_init(struct crypto_sha256_state *sctx)
{
sctx->state[0] = SHA256_H0;
sctx->state[1] = SHA256_H1;
@@ -112,11 +100,16 @@ static inline void sha256_init(struct sha256_state *sctx)
sctx->state[7] = SHA256_H7;
sctx->count = 0;
}
-void sha256_update(struct sha256_state *sctx, const u8 *data, unsigned int len);
-void sha256_final(struct sha256_state *sctx, u8 *out);
-void sha256(const u8 *data, unsigned int len, u8 *out);
-static inline void sha224_init(struct sha256_state *sctx)
+static inline void sha256_init(struct sha256_state *sctx)
+{
+ sha256_block_init(&sctx->ctx);
+}
+void sha256_update(struct sha256_state *sctx, const u8 *data, size_t len);
+void sha256_final(struct sha256_state *sctx, u8 out[SHA256_DIGEST_SIZE]);
+void sha256(const u8 *data, size_t len, u8 out[SHA256_DIGEST_SIZE]);
+
+static inline void sha224_block_init(struct crypto_sha256_state *sctx)
{
sctx->state[0] = SHA224_H0;
sctx->state[1] = SHA224_H1;
@@ -128,7 +121,12 @@ static inline void sha224_init(struct sha256_state *sctx)
sctx->state[7] = SHA224_H7;
sctx->count = 0;
}
+
+static inline void sha224_init(struct sha256_state *sctx)
+{
+ sha224_block_init(&sctx->ctx);
+}
/* Simply use sha256_update as it is equivalent to sha224_update. */
-void sha224_final(struct sha256_state *sctx, u8 *out);
+void sha224_final(struct sha256_state *sctx, u8 out[SHA224_DIGEST_SIZE]);
#endif /* _CRYPTO_SHA2_H */
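
For reference, a minimal usage sketch of the library interface kept by this header; the demo function is hypothetical:

/* Sketch: one-shot and incremental use of the sha256 library API, with
 * the fixed-size digest arrays matching the new prototypes above.
 */
static void demo_sha256(const u8 *data, size_t len)
{
	struct sha256_state sctx;
	u8 d1[SHA256_DIGEST_SIZE], d2[SHA256_DIGEST_SIZE];

	sha256(data, len, d1);			/* one-shot */

	sha256_init(&sctx);			/* incremental */
	sha256_update(&sctx, data, len);
	sha256_final(&sctx, d2);
}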
diff --git a/include/crypto/sha256_base.h b/include/crypto/sha256_base.h
deleted file mode 100644
index e0418818d63c..000000000000
--- a/include/crypto/sha256_base.h
+++ /dev/null
@@ -1,135 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * sha256_base.h - core logic for SHA-256 implementations
- *
- * Copyright (C) 2015 Linaro Ltd <ard.biesheuvel@linaro.org>
- */
-
-#ifndef _CRYPTO_SHA256_BASE_H
-#define _CRYPTO_SHA256_BASE_H
-
-#include <asm/byteorder.h>
-#include <linux/unaligned.h>
-#include <crypto/internal/hash.h>
-#include <crypto/sha2.h>
-#include <linux/string.h>
-#include <linux/types.h>
-
-typedef void (sha256_block_fn)(struct sha256_state *sst, u8 const *src,
- int blocks);
-
-static inline int sha224_base_init(struct shash_desc *desc)
-{
- struct sha256_state *sctx = shash_desc_ctx(desc);
-
- sha224_init(sctx);
- return 0;
-}
-
-static inline int sha256_base_init(struct shash_desc *desc)
-{
- struct sha256_state *sctx = shash_desc_ctx(desc);
-
- sha256_init(sctx);
- return 0;
-}
-
-static inline int lib_sha256_base_do_update(struct sha256_state *sctx,
- const u8 *data,
- unsigned int len,
- sha256_block_fn *block_fn)
-{
- unsigned int partial = sctx->count % SHA256_BLOCK_SIZE;
-
- sctx->count += len;
-
- if (unlikely((partial + len) >= SHA256_BLOCK_SIZE)) {
- int blocks;
-
- if (partial) {
- int p = SHA256_BLOCK_SIZE - partial;
-
- memcpy(sctx->buf + partial, data, p);
- data += p;
- len -= p;
-
- block_fn(sctx, sctx->buf, 1);
- }
-
- blocks = len / SHA256_BLOCK_SIZE;
- len %= SHA256_BLOCK_SIZE;
-
- if (blocks) {
- block_fn(sctx, data, blocks);
- data += blocks * SHA256_BLOCK_SIZE;
- }
- partial = 0;
- }
- if (len)
- memcpy(sctx->buf + partial, data, len);
-
- return 0;
-}
-
-static inline int sha256_base_do_update(struct shash_desc *desc,
- const u8 *data,
- unsigned int len,
- sha256_block_fn *block_fn)
-{
- struct sha256_state *sctx = shash_desc_ctx(desc);
-
- return lib_sha256_base_do_update(sctx, data, len, block_fn);
-}
-
-static inline int lib_sha256_base_do_finalize(struct sha256_state *sctx,
- sha256_block_fn *block_fn)
-{
- const int bit_offset = SHA256_BLOCK_SIZE - sizeof(__be64);
- __be64 *bits = (__be64 *)(sctx->buf + bit_offset);
- unsigned int partial = sctx->count % SHA256_BLOCK_SIZE;
-
- sctx->buf[partial++] = 0x80;
- if (partial > bit_offset) {
- memset(sctx->buf + partial, 0x0, SHA256_BLOCK_SIZE - partial);
- partial = 0;
-
- block_fn(sctx, sctx->buf, 1);
- }
-
- memset(sctx->buf + partial, 0x0, bit_offset - partial);
- *bits = cpu_to_be64(sctx->count << 3);
- block_fn(sctx, sctx->buf, 1);
-
- return 0;
-}
-
-static inline int sha256_base_do_finalize(struct shash_desc *desc,
- sha256_block_fn *block_fn)
-{
- struct sha256_state *sctx = shash_desc_ctx(desc);
-
- return lib_sha256_base_do_finalize(sctx, block_fn);
-}
-
-static inline int lib_sha256_base_finish(struct sha256_state *sctx, u8 *out,
- unsigned int digest_size)
-{
- __be32 *digest = (__be32 *)out;
- int i;
-
- for (i = 0; digest_size > 0; i++, digest_size -= sizeof(__be32))
- put_unaligned_be32(sctx->state[i], digest++);
-
- memzero_explicit(sctx, sizeof(*sctx));
- return 0;
-}
-
-static inline int sha256_base_finish(struct shash_desc *desc, u8 *out)
-{
- unsigned int digest_size = crypto_shash_digestsize(desc->tfm);
- struct sha256_state *sctx = shash_desc_ctx(desc);
-
- return lib_sha256_base_finish(sctx, out, digest_size);
-}
-
-#endif /* _CRYPTO_SHA256_BASE_H */
diff --git a/include/crypto/sha3.h b/include/crypto/sha3.h
index 080f60c2e6b1..41e1b83a6d91 100644
--- a/include/crypto/sha3.h
+++ b/include/crypto/sha3.h
@@ -5,30 +5,32 @@
#ifndef __CRYPTO_SHA3_H__
#define __CRYPTO_SHA3_H__
+#include <linux/types.h>
+
#define SHA3_224_DIGEST_SIZE (224 / 8)
#define SHA3_224_BLOCK_SIZE (200 - 2 * SHA3_224_DIGEST_SIZE)
+#define SHA3_224_EXPORT_SIZE SHA3_STATE_SIZE + SHA3_224_BLOCK_SIZE + 1
#define SHA3_256_DIGEST_SIZE (256 / 8)
#define SHA3_256_BLOCK_SIZE (200 - 2 * SHA3_256_DIGEST_SIZE)
+#define SHA3_256_EXPORT_SIZE SHA3_STATE_SIZE + SHA3_256_BLOCK_SIZE + 1
#define SHA3_384_DIGEST_SIZE (384 / 8)
#define SHA3_384_BLOCK_SIZE (200 - 2 * SHA3_384_DIGEST_SIZE)
+#define SHA3_384_EXPORT_SIZE SHA3_STATE_SIZE + SHA3_384_BLOCK_SIZE + 1
#define SHA3_512_DIGEST_SIZE (512 / 8)
#define SHA3_512_BLOCK_SIZE (200 - 2 * SHA3_512_DIGEST_SIZE)
+#define SHA3_512_EXPORT_SIZE SHA3_STATE_SIZE + SHA3_512_BLOCK_SIZE + 1
-struct sha3_state {
- u64 st[25];
- unsigned int rsiz;
- unsigned int rsizw;
+#define SHA3_STATE_SIZE 200
- unsigned int partial;
- u8 buf[SHA3_224_BLOCK_SIZE];
+struct shash_desc;
+
+struct sha3_state {
+ u64 st[SHA3_STATE_SIZE / 8];
};
int crypto_sha3_init(struct shash_desc *desc);
-int crypto_sha3_update(struct shash_desc *desc, const u8 *data,
- unsigned int len);
-int crypto_sha3_final(struct shash_desc *desc, u8 *out);
#endif
diff --git a/include/crypto/sha512_base.h b/include/crypto/sha512_base.h
index 679916a84cb2..aa814bab442d 100644
--- a/include/crypto/sha512_base.h
+++ b/include/crypto/sha512_base.h
@@ -10,10 +10,10 @@
#include <crypto/internal/hash.h>
#include <crypto/sha2.h>
-#include <linux/crypto.h>
-#include <linux/module.h>
+#include <linux/compiler.h>
+#include <linux/math.h>
#include <linux/string.h>
-
+#include <linux/types.h>
#include <linux/unaligned.h>
typedef void (sha512_block_fn)(struct sha512_state *sst, u8 const *src,
@@ -53,66 +53,51 @@ static inline int sha512_base_init(struct shash_desc *desc)
return 0;
}
-static inline int sha512_base_do_update(struct shash_desc *desc,
- const u8 *data,
- unsigned int len,
- sha512_block_fn *block_fn)
+static inline int sha512_base_do_update_blocks(struct shash_desc *desc,
+ const u8 *data,
+ unsigned int len,
+ sha512_block_fn *block_fn)
{
+ unsigned int remain = len - round_down(len, SHA512_BLOCK_SIZE);
struct sha512_state *sctx = shash_desc_ctx(desc);
- unsigned int partial = sctx->count[0] % SHA512_BLOCK_SIZE;
+ len -= remain;
sctx->count[0] += len;
if (sctx->count[0] < len)
sctx->count[1]++;
-
- if (unlikely((partial + len) >= SHA512_BLOCK_SIZE)) {
- int blocks;
-
- if (partial) {
- int p = SHA512_BLOCK_SIZE - partial;
-
- memcpy(sctx->buf + partial, data, p);
- data += p;
- len -= p;
-
- block_fn(sctx, sctx->buf, 1);
- }
-
- blocks = len / SHA512_BLOCK_SIZE;
- len %= SHA512_BLOCK_SIZE;
-
- if (blocks) {
- block_fn(sctx, data, blocks);
- data += blocks * SHA512_BLOCK_SIZE;
- }
- partial = 0;
- }
- if (len)
- memcpy(sctx->buf + partial, data, len);
-
- return 0;
+ block_fn(sctx, data, len / SHA512_BLOCK_SIZE);
+ return remain;
}
-static inline int sha512_base_do_finalize(struct shash_desc *desc,
- sha512_block_fn *block_fn)
+static inline int sha512_base_do_finup(struct shash_desc *desc, const u8 *src,
+ unsigned int len,
+ sha512_block_fn *block_fn)
{
- const int bit_offset = SHA512_BLOCK_SIZE - sizeof(__be64[2]);
+ unsigned int bit_offset = SHA512_BLOCK_SIZE / 8 - 2;
struct sha512_state *sctx = shash_desc_ctx(desc);
- __be64 *bits = (__be64 *)(sctx->buf + bit_offset);
- unsigned int partial = sctx->count[0] % SHA512_BLOCK_SIZE;
+ union {
+ __be64 b64[SHA512_BLOCK_SIZE / 4];
+ u8 u8[SHA512_BLOCK_SIZE * 2];
+ } block = {};
- sctx->buf[partial++] = 0x80;
- if (partial > bit_offset) {
- memset(sctx->buf + partial, 0x0, SHA512_BLOCK_SIZE - partial);
- partial = 0;
+ if (len >= SHA512_BLOCK_SIZE) {
+ int remain;
- block_fn(sctx, sctx->buf, 1);
+ remain = sha512_base_do_update_blocks(desc, src, len, block_fn);
+ src += len - remain;
+ len = remain;
}
- memset(sctx->buf + partial, 0x0, bit_offset - partial);
- bits[0] = cpu_to_be64(sctx->count[1] << 3 | sctx->count[0] >> 61);
- bits[1] = cpu_to_be64(sctx->count[0] << 3);
- block_fn(sctx, sctx->buf, 1);
+ if (len >= bit_offset * 8)
+ bit_offset += SHA512_BLOCK_SIZE / 8;
+ memcpy(&block, src, len);
+ block.u8[len] = 0x80;
+ sctx->count[0] += len;
+ block.b64[bit_offset] = cpu_to_be64(sctx->count[1] << 3 |
+ sctx->count[0] >> 61);
+ block.b64[bit_offset + 1] = cpu_to_be64(sctx->count[0] << 3);
+ block_fn(sctx, block.u8, (bit_offset + 2) * 8 / SHA512_BLOCK_SIZE);
+ memzero_explicit(&block, sizeof(block));
return 0;
}
@@ -126,9 +111,10 @@ static inline int sha512_base_finish(struct shash_desc *desc, u8 *out)
for (i = 0; digest_size > 0; i++, digest_size -= sizeof(__be64))
put_unaligned_be64(sctx->state[i], digest++);
-
- memzero_explicit(sctx, sizeof(*sctx));
return 0;
}
+void sha512_generic_block_fn(struct sha512_state *sst, u8 const *src,
+ int blocks);
+
#endif /* _CRYPTO_SHA512_BASE_H */
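
The do_finup() above stores the message length in bits as a 128-bit big-endian value. A small illustration of that arithmetic, with a hypothetical helper name:

/* Illustration only: with the byte count held in count[1]:count[0],
 * shifting the 128-bit value left by 3 converts bytes to bits. The top
 * three bits of count[0] carry into the high word, hence the >> 61.
 */
static inline void sha512_len_to_bits(const u64 count[2], u64 *hi, u64 *lo)
{
	*hi = count[1] << 3 | count[0] >> 61;
	*lo = count[0] << 3;
}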
diff --git a/include/crypto/sig.h b/include/crypto/sig.h
index 11024708c069..fa6dafafab3f 100644
--- a/include/crypto/sig.h
+++ b/include/crypto/sig.h
@@ -128,7 +128,7 @@ static inline void crypto_free_sig(struct crypto_sig *tfm)
/**
* crypto_sig_keysize() - Get key size
*
- * Function returns the key size in bytes.
+ * Function returns the key size in bits.
* Function assumes that the key is already set in the transformation. If this
* function is called without a setkey or with a failed setkey, you may end up
* in a NULL dereference.
diff --git a/include/crypto/sm3.h b/include/crypto/sm3.h
index 1f021ad0533f..c8d02c86c298 100644
--- a/include/crypto/sm3.h
+++ b/include/crypto/sm3.h
@@ -14,6 +14,7 @@
#define SM3_DIGEST_SIZE 32
#define SM3_BLOCK_SIZE 64
+#define SM3_STATE_SIZE 40
#define SM3_T1 0x79CC4519
#define SM3_T2 0x7A879D8A
@@ -58,7 +59,6 @@ static inline void sm3_init(struct sm3_state *sctx)
sctx->count = 0;
}
-void sm3_update(struct sm3_state *sctx, const u8 *data, unsigned int len);
-void sm3_final(struct sm3_state *sctx, u8 *out);
+void sm3_block_generic(struct sm3_state *sctx, u8 const *data, int blocks);
#endif
diff --git a/include/crypto/sm3_base.h b/include/crypto/sm3_base.h
index b33ed39c2bce..7c53570bc05e 100644
--- a/include/crypto/sm3_base.h
+++ b/include/crypto/sm3_base.h
@@ -11,87 +11,59 @@
#include <crypto/internal/hash.h>
#include <crypto/sm3.h>
-#include <linux/crypto.h>
+#include <linux/math.h>
#include <linux/module.h>
#include <linux/string.h>
+#include <linux/types.h>
#include <linux/unaligned.h>
typedef void (sm3_block_fn)(struct sm3_state *sst, u8 const *src, int blocks);
static inline int sm3_base_init(struct shash_desc *desc)
{
- struct sm3_state *sctx = shash_desc_ctx(desc);
-
- sctx->state[0] = SM3_IVA;
- sctx->state[1] = SM3_IVB;
- sctx->state[2] = SM3_IVC;
- sctx->state[3] = SM3_IVD;
- sctx->state[4] = SM3_IVE;
- sctx->state[5] = SM3_IVF;
- sctx->state[6] = SM3_IVG;
- sctx->state[7] = SM3_IVH;
- sctx->count = 0;
-
+ sm3_init(shash_desc_ctx(desc));
return 0;
}
-static inline int sm3_base_do_update(struct shash_desc *desc,
- const u8 *data,
- unsigned int len,
- sm3_block_fn *block_fn)
+static inline int sm3_base_do_update_blocks(struct shash_desc *desc,
+ const u8 *data, unsigned int len,
+ sm3_block_fn *block_fn)
{
+ unsigned int remain = len - round_down(len, SM3_BLOCK_SIZE);
struct sm3_state *sctx = shash_desc_ctx(desc);
- unsigned int partial = sctx->count % SM3_BLOCK_SIZE;
-
- sctx->count += len;
-
- if (unlikely((partial + len) >= SM3_BLOCK_SIZE)) {
- int blocks;
-
- if (partial) {
- int p = SM3_BLOCK_SIZE - partial;
-
- memcpy(sctx->buffer + partial, data, p);
- data += p;
- len -= p;
- block_fn(sctx, sctx->buffer, 1);
- }
-
- blocks = len / SM3_BLOCK_SIZE;
- len %= SM3_BLOCK_SIZE;
-
- if (blocks) {
- block_fn(sctx, data, blocks);
- data += blocks * SM3_BLOCK_SIZE;
- }
- partial = 0;
- }
- if (len)
- memcpy(sctx->buffer + partial, data, len);
-
- return 0;
+ sctx->count += len - remain;
+ block_fn(sctx, data, len / SM3_BLOCK_SIZE);
+ return remain;
}
-static inline int sm3_base_do_finalize(struct shash_desc *desc,
- sm3_block_fn *block_fn)
+static inline int sm3_base_do_finup(struct shash_desc *desc,
+ const u8 *src, unsigned int len,
+ sm3_block_fn *block_fn)
{
- const int bit_offset = SM3_BLOCK_SIZE - sizeof(__be64);
+ unsigned int bit_offset = SM3_BLOCK_SIZE / 8 - 1;
struct sm3_state *sctx = shash_desc_ctx(desc);
- __be64 *bits = (__be64 *)(sctx->buffer + bit_offset);
- unsigned int partial = sctx->count % SM3_BLOCK_SIZE;
+ union {
+ __be64 b64[SM3_BLOCK_SIZE / 4];
+ u8 u8[SM3_BLOCK_SIZE * 2];
+ } block = {};
- sctx->buffer[partial++] = 0x80;
- if (partial > bit_offset) {
- memset(sctx->buffer + partial, 0x0, SM3_BLOCK_SIZE - partial);
- partial = 0;
+ if (len >= SM3_BLOCK_SIZE) {
+ int remain;
- block_fn(sctx, sctx->buffer, 1);
+ remain = sm3_base_do_update_blocks(desc, src, len, block_fn);
+ src += len - remain;
+ len = remain;
}
- memset(sctx->buffer + partial, 0x0, bit_offset - partial);
- *bits = cpu_to_be64(sctx->count << 3);
- block_fn(sctx, sctx->buffer, 1);
+ if (len >= bit_offset * 8)
+ bit_offset += SM3_BLOCK_SIZE / 8;
+ memcpy(&block, src, len);
+ block.u8[len] = 0x80;
+ sctx->count += len;
+ block.b64[bit_offset] = cpu_to_be64(sctx->count << 3);
+ block_fn(sctx, block.u8, (bit_offset + 1) * 8 / SM3_BLOCK_SIZE);
+ memzero_explicit(&block, sizeof(block));
return 0;
}
@@ -104,8 +76,6 @@ static inline int sm3_base_finish(struct shash_desc *desc, u8 *out)
for (i = 0; i < SM3_DIGEST_SIZE / sizeof(__be32); i++)
put_unaligned_be32(sctx->state[i], digest++);
-
- memzero_explicit(sctx, sizeof(*sctx));
return 0;
}
diff --git a/include/crypto/streebog.h b/include/crypto/streebog.h
index cae1b4a01971..570f720a113b 100644
--- a/include/crypto/streebog.h
+++ b/include/crypto/streebog.h
@@ -23,15 +23,10 @@ struct streebog_uint512 {
};
struct streebog_state {
- union {
- u8 buffer[STREEBOG_BLOCK_SIZE];
- struct streebog_uint512 m;
- };
struct streebog_uint512 hash;
struct streebog_uint512 h;
struct streebog_uint512 N;
struct streebog_uint512 Sigma;
- size_t fillsize;
};
#endif /* !_CRYPTO_STREEBOG_H_ */
diff --git a/include/drm/Makefile b/include/drm/Makefile
index a7bd15d2803e..1df6962556ef 100644
--- a/include/drm/Makefile
+++ b/include/drm/Makefile
@@ -11,7 +11,7 @@ always-$(CONFIG_DRM_HEADER_TEST) += \
quiet_cmd_hdrtest = HDRTEST $(patsubst %.hdrtest,%.h,$@)
cmd_hdrtest = \
$(CC) $(c_flags) -fsyntax-only -x c /dev/null -include $< -include $<; \
- $(srctree)/scripts/kernel-doc -none $(if $(CONFIG_WERROR)$(CONFIG_DRM_WERROR),-Werror) $<; \
+ PYTHONDONTWRITEBYTECODE=1 $(KERNELDOC) -none $(if $(CONFIG_WERROR)$(CONFIG_DRM_WERROR),-Werror) $<; \
touch $@
$(obj)/%.hdrtest: $(src)/%.h FORCE
diff --git a/include/drm/bridge/analogix_dp.h b/include/drm/bridge/analogix_dp.h
index 6002c5666031..cf17646c1310 100644
--- a/include/drm/bridge/analogix_dp.h
+++ b/include/drm/bridge/analogix_dp.h
@@ -10,16 +10,18 @@
#include <drm/drm_crtc.h>
struct analogix_dp_device;
+struct drm_dp_aux;
enum analogix_dp_devtype {
EXYNOS_DP,
RK3288_DP,
RK3399_EDP,
+ RK3588_EDP,
};
static inline bool is_rockchip(enum analogix_dp_devtype type)
{
- return type == RK3288_DP || type == RK3399_EDP;
+ return type == RK3288_DP || type == RK3399_EDP || type == RK3588_EDP;
}
struct analogix_dp_plat_data {
@@ -48,4 +50,7 @@ void analogix_dp_unbind(struct analogix_dp_device *dp);
int analogix_dp_start_crc(struct drm_connector *connector);
int analogix_dp_stop_crc(struct drm_connector *connector);
+struct analogix_dp_plat_data *analogix_dp_aux_to_plat_data(struct drm_dp_aux *aux);
+struct drm_dp_aux *analogix_dp_get_aux(struct analogix_dp_device *dp);
+
#endif /* _ANALOGIX_DP_H_ */
diff --git a/include/drm/display/drm_dp.h b/include/drm/display/drm_dp.h
index c413ef68f9a3..3001c0b6e7bb 100644
--- a/include/drm/display/drm_dp.h
+++ b/include/drm/display/drm_dp.h
@@ -1025,6 +1025,7 @@
#define DP_EDP_GENERAL_CAP_2 0x703
# define DP_EDP_OVERDRIVE_ENGINE_ENABLED (1 << 0)
# define DP_EDP_PANEL_LUMINANCE_CONTROL_CAPABLE (1 << 4)
+# define DP_EDP_SMOOTH_BRIGHTNESS_CAPABLE (1 << 6) /* eDP 2.0 */
#define DP_EDP_GENERAL_CAP_3 0x704 /* eDP 1.4 */
# define DP_EDP_X_REGION_CAP_MASK (0xf << 0)
diff --git a/include/drm/display/drm_dp_helper.h b/include/drm/display/drm_dp_helper.h
index 5ae4241959f2..e4ca35143ff9 100644
--- a/include/drm/display/drm_dp_helper.h
+++ b/include/drm/display/drm_dp_helper.h
@@ -518,6 +518,11 @@ struct drm_dp_aux {
* @powered_down: If true then the remote endpoint is powered down.
*/
bool powered_down;
+
+ /**
+ * @no_zero_sized: If the hw can't use zero sized transfers (NVIDIA)
+ */
+ bool no_zero_sized;
};
int drm_dp_dpcd_probe(struct drm_dp_aux *aux, unsigned int offset);
@@ -528,13 +533,72 @@ ssize_t drm_dp_dpcd_write(struct drm_dp_aux *aux, unsigned int offset,
void *buffer, size_t size);
/**
+ * drm_dp_dpcd_read_data() - read a series of bytes from the DPCD
+ * @aux: DisplayPort AUX channel (SST or MST)
+ * @offset: address of the (first) register to read
+ * @buffer: buffer to store the register values
+ * @size: number of bytes in @buffer
+ *
+ * Returns zero (0) on success, or a negative error
+ * code on failure. -EIO is returned if the request was NAKed by the sink or
+ * if the retry count was exceeded. If not all bytes were transferred, this
+ * function returns -EPROTO. Errors from the underlying AUX channel transfer
+ * function, with the exception of -EBUSY (which causes the transaction to
+ * be retried), are propagated to the caller.
+ */
+static inline int drm_dp_dpcd_read_data(struct drm_dp_aux *aux,
+ unsigned int offset,
+ void *buffer, size_t size)
+{
+ int ret;
+
+ ret = drm_dp_dpcd_read(aux, offset, buffer, size);
+ if (ret < 0)
+ return ret;
+ if (ret < size)
+ return -EPROTO;
+
+ return 0;
+}
+
+/**
+ * drm_dp_dpcd_write_data() - write a series of bytes to the DPCD
+ * @aux: DisplayPort AUX channel (SST or MST)
+ * @offset: address of the (first) register to write
+ * @buffer: buffer containing the values to write
+ * @size: number of bytes in @buffer
+ *
+ * Returns zero (0) on success, or a negative error
+ * code on failure. -EIO is returned if the request was NAKed by the sink or
+ * if the retry count was exceeded. If not all bytes were transferred, this
+ * function returns -EPROTO. Errors from the underlying AUX channel transfer
+ * function, with the exception of -EBUSY (which causes the transaction to
+ * be retried), are propagated to the caller.
+ */
+static inline int drm_dp_dpcd_write_data(struct drm_dp_aux *aux,
+ unsigned int offset,
+ void *buffer, size_t size)
+{
+ int ret;
+
+ ret = drm_dp_dpcd_write(aux, offset, buffer, size);
+ if (ret < 0)
+ return ret;
+ if (ret < size)
+ return -EPROTO;
+
+ return 0;
+}
+
+/**
* drm_dp_dpcd_readb() - read a single byte from the DPCD
* @aux: DisplayPort AUX channel
* @offset: address of the register to read
* @valuep: location where the value of the register will be stored
*
* Returns the number of bytes transferred (1) on success, or a negative
- * error code on failure.
+ * error code on failure. In most of the cases you should be using
+ * drm_dp_dpcd_read_byte() instead.
*/
static inline ssize_t drm_dp_dpcd_readb(struct drm_dp_aux *aux,
unsigned int offset, u8 *valuep)
@@ -549,7 +613,8 @@ static inline ssize_t drm_dp_dpcd_readb(struct drm_dp_aux *aux,
* @value: value to write to the register
*
* Returns the number of bytes transferred (1) on success, or a negative
- * error code on failure.
+ * error code on failure. In most cases you should be using
+ * drm_dp_dpcd_write_byte() instead.
*/
static inline ssize_t drm_dp_dpcd_writeb(struct drm_dp_aux *aux,
unsigned int offset, u8 value)
@@ -557,6 +622,34 @@ static inline ssize_t drm_dp_dpcd_writeb(struct drm_dp_aux *aux,
return drm_dp_dpcd_write(aux, offset, &value, 1);
}
+/**
+ * drm_dp_dpcd_read_byte() - read a single byte from the DPCD
+ * @aux: DisplayPort AUX channel
+ * @offset: address of the register to read
+ * @valuep: location where the value of the register will be stored
+ *
+ * Returns zero (0) on success, or a negative error code on failure.
+ */
+static inline int drm_dp_dpcd_read_byte(struct drm_dp_aux *aux,
+ unsigned int offset, u8 *valuep)
+{
+ return drm_dp_dpcd_read_data(aux, offset, valuep, 1);
+}
+
+/**
+ * drm_dp_dpcd_write_byte() - write a single byte to the DPCD
+ * @aux: DisplayPort AUX channel
+ * @offset: address of the register to write
+ * @value: value to write to the register
+ *
+ * Returns zero (0) on success, or a negative error code on failure.
+ */
+static inline int drm_dp_dpcd_write_byte(struct drm_dp_aux *aux,
+ unsigned int offset, u8 value)
+{
+ return drm_dp_dpcd_write_data(aux, offset, &value, 1);
+}
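+
+A usage sketch contrasting the new 0/-errno convention with the older
+byte-count one; the demo function is hypothetical, the DPCD register
+macros are the existing ones from drm_dp.h:
+
+/* Sketch: the *_byte() helpers return 0 or a negative errno, so error
+ * handling no longer needs the "bytes transferred" convention of the
+ * older *b() variants.
+ */
+static int demo_dp_wake_sink(struct drm_dp_aux *aux)
+{
+	u8 rev;
+	int ret;
+
+	ret = drm_dp_dpcd_read_byte(aux, DP_DPCD_REV, &rev);
+	if (ret < 0)
+		return ret;
+
+	return drm_dp_dpcd_write_byte(aux, DP_SET_POWER, DP_SET_POWER_D0);
+}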
+
int drm_dp_read_dpcd_caps(struct drm_dp_aux *aux,
u8 dpcd[DP_RECEIVER_CAP_SIZE]);
@@ -566,6 +659,8 @@ int drm_dp_dpcd_read_link_status(struct drm_dp_aux *aux,
int drm_dp_dpcd_read_phy_link_status(struct drm_dp_aux *aux,
enum drm_dp_phy dp_phy,
u8 link_status[DP_LINK_STATUS_SIZE]);
+int drm_dp_link_power_up(struct drm_dp_aux *aux, unsigned char revision);
+int drm_dp_link_power_down(struct drm_dp_aux *aux, unsigned char revision);
int drm_dp_dpcd_write_payload(struct drm_dp_aux *aux,
int vcpid, u8 start_time_slot, u8 time_slot_count);
@@ -881,5 +976,7 @@ int drm_dp_bw_channel_coding_efficiency(bool is_uhbr);
int drm_dp_max_dprx_data_rate(int max_link_rate, int max_lanes);
ssize_t drm_dp_vsc_sdp_pack(const struct drm_dp_vsc_sdp *vsc, struct dp_sdp *sdp);
+int drm_dp_link_symbol_cycles(int lane_count, int pixels, int dsc_slice_count,
+ int bpp_x16, int symbol_size, bool is_mst);
#endif /* _DRM_DP_HELPER_H_ */
diff --git a/include/drm/display/drm_hdmi_helper.h b/include/drm/display/drm_hdmi_helper.h
index 57e3b18c15ec..09145c9ee9fc 100644
--- a/include/drm/display/drm_hdmi_helper.h
+++ b/include/drm/display/drm_hdmi_helper.h
@@ -28,4 +28,10 @@ unsigned long long
drm_hdmi_compute_mode_clock(const struct drm_display_mode *mode,
unsigned int bpc, enum hdmi_colorspace fmt);
+void
+drm_hdmi_acr_get_n_cts(unsigned long long tmds_char_rate,
+ unsigned int sample_rate,
+ unsigned int *out_n,
+ unsigned int *out_cts);
+
#endif
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
index 4c673f0698fe..38636a593c9d 100644
--- a/include/drm/drm_atomic.h
+++ b/include/drm/drm_atomic.h
@@ -625,6 +625,9 @@ drm_atomic_get_old_connector_for_encoder(const struct drm_atomic_state *state,
struct drm_connector *
drm_atomic_get_new_connector_for_encoder(const struct drm_atomic_state *state,
struct drm_encoder *encoder);
+struct drm_connector *
+drm_atomic_get_connector_for_encoder(const struct drm_encoder *encoder,
+ struct drm_modeset_acquire_ctx *ctx);
struct drm_crtc *
drm_atomic_get_old_crtc_for_encoder(struct drm_atomic_state *state,
diff --git a/include/drm/drm_bridge.h b/include/drm/drm_bridge.h
index d4c75d59fa12..4e418a29a9ff 100644
--- a/include/drm/drm_bridge.h
+++ b/include/drm/drm_bridge.h
@@ -73,7 +73,7 @@ struct drm_bridge_funcs {
*
* Zero on success, error code on failure.
*/
- int (*attach)(struct drm_bridge *bridge,
+ int (*attach)(struct drm_bridge *bridge, struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags);
/**
@@ -681,8 +681,10 @@ struct drm_bridge_funcs {
/**
* @hdmi_audio_startup:
*
- * Called when ASoC starts an audio stream setup. The
- * @hdmi_audio_startup() is optional.
+ * Called when ASoC starts an audio stream setup.
+ *
+ * This callback is optional; it can be implemented by bridges that
+ * set the @DRM_BRIDGE_OP_HDMI_AUDIO flag in their &drm_bridge->ops.
*
* Returns:
* 0 on success, a negative error code otherwise
@@ -693,8 +695,10 @@ struct drm_bridge_funcs {
/**
* @hdmi_audio_prepare:
* Configures HDMI-encoder for audio stream. Can be called multiple
- * times for each setup. Mandatory if HDMI audio is enabled in the
- * bridge's configuration.
+ * times for each setup.
+ *
+ * This callback is optional but it must be implemented by bridges that
+ * set the @DRM_BRIDGE_OP_HDMI_AUDIO flag in their &drm_bridge->ops.
*
* Returns:
* 0 on success, a negative error code otherwise
@@ -707,8 +711,10 @@ struct drm_bridge_funcs {
/**
* @hdmi_audio_shutdown:
*
- * Shut down the audio stream. Mandatory if HDMI audio is enabled in
- * the bridge's configuration.
+ * Shut down the audio stream.
+ *
+ * This callback is optional but it must be implemented by bridges that
+ * set the @DRM_BRIDGE_OP_HDMI_AUDIO flag in their &drm_bridge->ops.
*
* Returns:
* 0 on success, a negative error code otherwise
@@ -719,8 +725,10 @@ struct drm_bridge_funcs {
/**
* @hdmi_audio_mute_stream:
*
- * Mute/unmute HDMI audio stream. The @hdmi_audio_mute_stream callback
- * is optional.
+ * Mute/unmute HDMI audio stream.
+ *
+ * This callback is optional; it can be implemented by bridges that
+ * set the @DRM_BRIDGE_OP_HDMI_AUDIO flag in their &drm_bridge->ops.
*
* Returns:
* 0 on success, a negative error code otherwise
@@ -730,6 +738,65 @@ struct drm_bridge_funcs {
bool enable, int direction);
/**
+ * @dp_audio_startup:
+ *
+ * Called when ASoC starts a DisplayPort audio stream setup.
+ *
+ * This callback is optional; it can be implemented by bridges that
+ * set the @DRM_BRIDGE_OP_DP_AUDIO flag in their &drm_bridge->ops.
+ *
+ * Returns:
+ * 0 on success, a negative error code otherwise
+ */
+ int (*dp_audio_startup)(struct drm_connector *connector,
+ struct drm_bridge *bridge);
+
+ /**
+ * @dp_audio_prepare:
+ * Configures DisplayPort audio stream. Can be called multiple
+ * times for each setup.
+ *
+ * This callback is optional but it must be implemented by bridges that
+ * set the @DRM_BRIDGE_OP_DP_AUDIO flag in their &drm_bridge->ops.
+ *
+ * Returns:
+ * 0 on success, a negative error code otherwise
+ */
+ int (*dp_audio_prepare)(struct drm_connector *connector,
+ struct drm_bridge *bridge,
+ struct hdmi_codec_daifmt *fmt,
+ struct hdmi_codec_params *hparms);
+
+ /**
+ * @dp_audio_shutdown:
+ *
+ * Shut down the DisplayPort audio stream.
+ *
+ * This callback is optional but it must be implemented by bridges that
+ * set the @DRM_BRIDGE_OP_DP_AUDIO flag in their &drm_bridge->ops.
+ */
+ void (*dp_audio_shutdown)(struct drm_connector *connector,
+ struct drm_bridge *bridge);
+
+ /**
+ * @dp_audio_mute_stream:
+ *
+ * Mute/unmute DisplayPort audio stream.
+ *
+ * This callback is optional; it can be implemented by bridges that
+ * set the @DRM_BRIDGE_OP_DP_AUDIO flag in their &drm_bridge->ops.
+ *
+ * Returns:
+ * 0 on success, a negative error code otherwise
+ */
+ int (*dp_audio_mute_stream)(struct drm_connector *connector,
+ struct drm_bridge *bridge,
+ bool enable, int direction);
+
+ /**
* @debugfs_init:
*
* Allows bridges to create bridge-specific debugfs files.
@@ -814,6 +881,32 @@ enum drm_bridge_ops {
* drivers.
*/
DRM_BRIDGE_OP_HDMI = BIT(4),
+ /**
+ * @DRM_BRIDGE_OP_HDMI_AUDIO: The bridge provides HDMI audio operations.
+ * Bridges that set this flag must implement the
+ * &drm_bridge_funcs->hdmi_audio_prepare and
+ * &drm_bridge_funcs->hdmi_audio_shutdown callbacks.
+ *
+ * Note: currently there can be at most one bridge in a chain that sets
+ * this bit. This is to simplify corresponding glue code in connector
+ * drivers. Also it is not possible to have a bridge in the chain that
+ * sets @DRM_BRIDGE_OP_DP_AUDIO if there is a bridge that sets this
+ * flag.
+ */
+ DRM_BRIDGE_OP_HDMI_AUDIO = BIT(5),
+ /**
+ * @DRM_BRIDGE_OP_DP_AUDIO: The bridge provides DisplayPort audio operations.
+ * Bridges that set this flag must implement the
+ * &drm_bridge_funcs->dp_audio_prepare and
+ * &drm_bridge_funcs->dp_audio_shutdown callbacks.
+ *
+ * Note: currently there can be at most one bridge in a chain that sets
+ * this bit. This is to simplify corresponding glue code in connector
+ * drivers. Also it is not possible to have a bridge in the chain that
+ * sets @DRM_BRIDGE_OP_HDMI_AUDIO if there is a bridge that sets this
+ * flag.
+ */
+ DRM_BRIDGE_OP_DP_AUDIO = BIT(6),
};
/**
@@ -840,6 +933,18 @@ struct drm_bridge {
const struct drm_bridge_timings *timings;
/** @funcs: control functions */
const struct drm_bridge_funcs *funcs;
+
+ /**
+ * @container: Pointer to the private driver struct embedding this
+ * &struct drm_bridge.
+ */
+ void *container;
+
+ /**
+ * @refcount: reference count of users referencing this bridge.
+ */
+ struct kref refcount;
+
/** @driver_private: pointer to the bridge driver's internal context */
void *driver_private;
/** @ops: bitmask of operations supported by the bridge */
@@ -914,23 +1019,28 @@ struct drm_bridge {
unsigned int max_bpc;
/**
- * @hdmi_audio_dev: device to be used as a parent for the HDMI Codec
+ * @hdmi_audio_dev: device to be used as a parent for the HDMI Codec if
+ * either of @DRM_BRIDGE_OP_HDMI_AUDIO or @DRM_BRIDGE_OP_DP_AUDIO is set.
*/
struct device *hdmi_audio_dev;
/**
* @hdmi_audio_max_i2s_playback_channels: maximum number of playback
- * I2S channels for the HDMI codec
+ * I2S channels if @DRM_BRIDGE_OP_HDMI_AUDIO or
+ * @DRM_BRIDGE_OP_DP_AUDIO is set.
*/
int hdmi_audio_max_i2s_playback_channels;
/**
- * @hdmi_audio_spdif_playback: set if HDMI codec has S/PDIF playback port
+ * @hdmi_audio_spdif_playback: set if this bridge has S/PDIF playback
+ * port for @DRM_BRIDGE_OP_HDMI_AUDIO or @DRM_BRIDGE_OP_DP_AUDIO.
*/
unsigned int hdmi_audio_spdif_playback : 1;
/**
- * @hdmi_audio_dai_port: sound DAI port, -1 if it is not enabled
+ * @hdmi_audio_dai_port: sound DAI port for either of
+ * @DRM_BRIDGE_OP_HDMI_AUDIO or @DRM_BRIDGE_OP_DP_AUDIO; -1 if it is
+ * not used.
*/
int hdmi_audio_dai_port;
};
@@ -941,6 +1051,30 @@ drm_priv_to_bridge(struct drm_private_obj *priv)
return container_of(priv, struct drm_bridge, base);
}
+struct drm_bridge *drm_bridge_get(struct drm_bridge *bridge);
+void drm_bridge_put(struct drm_bridge *bridge);
+
+void *__devm_drm_bridge_alloc(struct device *dev, size_t size, size_t offset,
+ const struct drm_bridge_funcs *funcs);
+
+/**
+ * devm_drm_bridge_alloc - Allocate and initialize a bridge
+ * @dev: struct device of the bridge device
+ * @type: the type of the struct which contains struct &drm_bridge
+ * @member: the name of the &drm_bridge within @type
+ * @funcs: callbacks for this bridge
+ *
+ * The reference count of the returned bridge is initialized to 1. This
+ * reference will be automatically dropped via devm (by calling
+ * drm_bridge_put()) when @dev is removed.
+ *
+ * Returns:
+ * Pointer to new bridge, or ERR_PTR on failure.
+ */
+#define devm_drm_bridge_alloc(dev, type, member, funcs) \
+ ((type *)__devm_drm_bridge_alloc(dev, sizeof(type), \
+ offsetof(type, member), funcs))
+
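+A minimal usage sketch; struct my_bridge, my_bridge_probe() and
+my_bridge_funcs are hypothetical driver code (usual driver includes
+omitted):
+
+/* Sketch: the allocation returns the container struct; the embedded
+ * bridge is initialized with @funcs and a refcount of 1, which devm
+ * drops via drm_bridge_put() on device removal.
+ */
+struct my_bridge {
+	struct drm_bridge bridge;
+	/* driver-private state */
+};
+
+static int my_bridge_probe(struct platform_device *pdev)
+{
+	struct my_bridge *mb;
+
+	mb = devm_drm_bridge_alloc(&pdev->dev, struct my_bridge, bridge,
+				   &my_bridge_funcs);
+	if (IS_ERR(mb))
+		return PTR_ERR(mb);
+
+	drm_bridge_add(&mb->bridge);
+	return 0;
+}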
void drm_bridge_add(struct drm_bridge *bridge);
int devm_drm_bridge_add(struct device *dev, struct drm_bridge *bridge);
void drm_bridge_remove(struct drm_bridge *bridge);
@@ -958,6 +1092,38 @@ static inline struct drm_bridge *of_drm_find_bridge(struct device_node *np)
#endif
/**
+ * drm_bridge_get_current_state() - Get the current bridge state
+ * @bridge: bridge object
+ *
+ * This function must be called with the modeset lock held.
+ *
+ * RETURNS:
+ *
+ * The current bridge state, or NULL if there is none.
+ */
+static inline struct drm_bridge_state *
+drm_bridge_get_current_state(struct drm_bridge *bridge)
+{
+ if (!bridge)
+ return NULL;
+
+ /*
+ * Only atomic bridges will have bridge->base initialized by
+ * drm_atomic_private_obj_init(), so we need to make sure we're
+ * working with one before we try to use the lock.
+ */
+ if (!bridge->funcs || !bridge->funcs->atomic_reset)
+ return NULL;
+
+ drm_modeset_lock_assert_held(&bridge->base.lock);
+
+ if (!bridge->base.state)
+ return NULL;
+
+ return drm_priv_to_bridge_state(bridge->base.state);
+}
+
+/**
* drm_bridge_get_next_bridge() - Get the next bridge in the chain
* @bridge: bridge object
*
@@ -1108,4 +1274,7 @@ static inline struct drm_bridge *drmm_of_get_bridge(struct drm_device *drm,
}
#endif
+void drm_bridge_debugfs_params(struct dentry *root);
+void drm_bridge_debugfs_encoder_params(struct dentry *root, struct drm_encoder *encoder);
+
#endif
diff --git a/include/drm/drm_bridge_helper.h b/include/drm/drm_bridge_helper.h
new file mode 100644
index 000000000000..6c35b479ec2a
--- /dev/null
+++ b/include/drm/drm_bridge_helper.h
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#ifndef __DRM_BRIDGE_HELPER_H_
+#define __DRM_BRIDGE_HELPER_H_
+
+struct drm_bridge;
+struct drm_modeset_acquire_ctx;
+
+int drm_bridge_helper_reset_crtc(struct drm_bridge *bridge,
+ struct drm_modeset_acquire_ctx *ctx);
+
+#endif // __DRM_BRIDGE_HELPER_H_
diff --git a/include/drm/drm_device.h b/include/drm/drm_device.h
index 6ea54a578cda..e2f894f1b90a 100644
--- a/include/drm/drm_device.h
+++ b/include/drm/drm_device.h
@@ -65,6 +65,28 @@ struct drm_device {
struct device *dev;
/**
+ * @dma_dev:
+ *
+ * Device for DMA operations. Only required if the device @dev
+ * cannot perform DMA by itself. Should be NULL otherwise. Call
+ * drm_dev_dma_dev() to get the DMA device instead of using this
+ * field directly. Call drm_dev_set_dma_dev() to set this field.
+ *
+ * DRM devices are sometimes bound to virtual devices that cannot
+ * perform DMA by themselves. Drivers should set this field to the
+ * respective DMA controller.
+ *
+ * Devices on USB and other peripheral buses also cannot perform
+ * DMA by themselves. The @dma_dev field should point to the bus
+ * controller that does DMA on behalf of such a device. Required
+ * for importing buffers via dma-buf.
+ *
+ * If set, the DRM core automatically releases the reference on the
+ * device.
+ */
+ struct device *dma_dev;
+
+ /**
* @managed:
*
* Managed resources linked to the lifetime of this &drm_device as
@@ -327,4 +349,23 @@ struct drm_device {
struct dentry *debugfs_root;
};
+void drm_dev_set_dma_dev(struct drm_device *dev, struct device *dma_dev);
+
+/**
+ * drm_dev_dma_dev - returns the DMA device for a DRM device
+ * @dev: DRM device
+ *
+ * Returns the DMA device of the given DRM device. By default, this is
+ * the DRM device's parent. See drm_dev_set_dma_dev().
+ *
+ * Returns:
+ * A DMA-capable device for the DRM device.
+ */
+static inline struct device *drm_dev_dma_dev(struct drm_device *dev)
+{
+ if (dev->dma_dev)
+ return dev->dma_dev;
+ return dev->dev;
+}
+
#endif
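
A usage sketch of the new DMA-device plumbing; the demo functions are hypothetical:

/* Sketch: a virtual KMS device whose DMA is performed by a separate
 * controller. After drm_dev_set_dma_dev(), importers resolve the
 * device via drm_dev_dma_dev(), which falls back to drm->dev.
 */
static void demo_bind_dma(struct drm_device *drm, struct device *dma_dev)
{
	drm_dev_set_dma_dev(drm, dma_dev);	/* core manages the ref */
}

static struct device *demo_dma_dev(struct drm_device *drm)
{
	return drm_dev_dma_dev(drm);
}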
diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h
index a43d707b5f36..63b51942d606 100644
--- a/include/drm/drm_drv.h
+++ b/include/drm/drm_drv.h
@@ -473,6 +473,11 @@ drmm_cgroup_register_region(struct drm_device *dev,
struct drm_device *drm_dev_alloc(const struct drm_driver *driver,
struct device *parent);
+
+void *__drm_dev_alloc(struct device *parent,
+ const struct drm_driver *driver,
+ size_t size, size_t offset);
+
int drm_dev_register(struct drm_device *dev, unsigned long flags);
void drm_dev_unregister(struct drm_device *dev);
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index eaac5e665892..b38409670868 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -437,7 +437,7 @@ bool drm_detect_monitor_audio(const struct edid *edid);
enum hdmi_quantization_range
drm_default_rgb_quant_range(const struct drm_display_mode *mode);
int drm_add_modes_noedid(struct drm_connector *connector,
- int hdisplay, int vdisplay);
+ unsigned int hdisplay, unsigned int vdisplay);
int drm_edid_header_is_valid(const void *edid);
bool drm_edid_is_valid(struct edid *edid);
diff --git a/include/drm/drm_file.h b/include/drm/drm_file.h
index 94d365b22505..5c3b2aa3e69d 100644
--- a/include/drm/drm_file.h
+++ b/include/drm/drm_file.h
@@ -446,6 +446,9 @@ static inline bool drm_is_accel_client(const struct drm_file *file_priv)
return file_priv->minor->type == DRM_MINOR_ACCEL;
}
+__printf(2, 3)
+void drm_file_err(struct drm_file *file_priv, const char *fmt, ...);
+
void drm_file_update_pid(struct drm_file *);
struct drm_minor *drm_minor_acquire(struct xarray *minors_xa, unsigned int minor_id);
diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h
index bcd54020d6ba..a3133a08267c 100644
--- a/include/drm/drm_gem.h
+++ b/include/drm/drm_gem.h
@@ -159,7 +159,8 @@ struct drm_gem_object_funcs {
* @vmap:
*
* Returns a virtual address for the buffer. Used by the
- * drm_gem_dmabuf_vmap() helper.
+ * drm_gem_dmabuf_vmap() helper. Called with a held GEM reservation
+ * lock.
*
* This callback is optional.
*/
@@ -169,7 +170,8 @@ struct drm_gem_object_funcs {
* @vunmap:
*
* Releases the address previously returned by @vmap. Used by the
- * drm_gem_dmabuf_vunmap() helper.
+ * drm_gem_dmabuf_vunmap() helper. Called with a held GEM reservation
+ * lock.
*
* This callback is optional.
*/
@@ -192,7 +194,8 @@ struct drm_gem_object_funcs {
* @evict:
*
* Evicts gem object out from memory. Used by the drm_gem_object_evict()
- * helper. Returns 0 on success, -errno otherwise.
+ * helper. Returns 0 on success, -errno otherwise. Called with a held
+ * GEM reservation lock.
*
* This callback is optional.
*/
@@ -537,8 +540,8 @@ void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
void drm_gem_lock(struct drm_gem_object *obj);
void drm_gem_unlock(struct drm_gem_object *obj);
-int drm_gem_vmap_unlocked(struct drm_gem_object *obj, struct iosys_map *map);
-void drm_gem_vunmap_unlocked(struct drm_gem_object *obj, struct iosys_map *map);
+int drm_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map);
+void drm_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map);
int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles,
int count, struct drm_gem_object ***objs_out);
@@ -561,7 +564,7 @@ unsigned long drm_gem_lru_scan(struct drm_gem_lru *lru,
unsigned long *remaining,
bool (*shrink)(struct drm_gem_object *obj));
-int drm_gem_evict(struct drm_gem_object *obj);
+int drm_gem_evict_locked(struct drm_gem_object *obj);
/**
* drm_gem_object_is_shared_for_memory_stats - helper for shared memory stats
diff --git a/include/drm/drm_gem_shmem_helper.h b/include/drm/drm_gem_shmem_helper.h
index cef5a6b5a4d6..b4f993da3cae 100644
--- a/include/drm/drm_gem_shmem_helper.h
+++ b/include/drm/drm_gem_shmem_helper.h
@@ -37,7 +37,18 @@ struct drm_gem_shmem_object {
* Reference count on the pages table.
* The pages are put when the count reaches zero.
*/
- unsigned int pages_use_count;
+ refcount_t pages_use_count;
+
+ /**
+ * @pages_pin_count:
+ *
+ * Reference count on the pinned pages table.
+ *
+ * Pages are hard-pinned and reside in memory if count
+ * greater than zero. Otherwise, when count is zero, the pages are
+ * allowed to be evicted and purged by memory shrinker.
+ */
+ refcount_t pages_pin_count;
/**
* @madv: State for madvise
@@ -71,7 +82,7 @@ struct drm_gem_shmem_object {
* Reference count on the virtual address.
* The address are un-mapped when the count reaches zero.
*/
- unsigned int vmap_use_count;
+ refcount_t vmap_use_count;
/**
* @pages_mark_dirty_on_put:
@@ -102,28 +113,28 @@ struct drm_gem_shmem_object *drm_gem_shmem_create_with_mnt(struct drm_device *de
struct vfsmount *gemfs);
void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem);
-void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem);
+void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem);
int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem);
void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem);
-int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem,
- struct iosys_map *map);
-void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem,
- struct iosys_map *map);
+int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
+ struct iosys_map *map);
+void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem,
+ struct iosys_map *map);
int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct *vma);
int drm_gem_shmem_pin_locked(struct drm_gem_shmem_object *shmem);
void drm_gem_shmem_unpin_locked(struct drm_gem_shmem_object *shmem);
-int drm_gem_shmem_madvise(struct drm_gem_shmem_object *shmem, int madv);
+int drm_gem_shmem_madvise_locked(struct drm_gem_shmem_object *shmem, int madv);
static inline bool drm_gem_shmem_is_purgeable(struct drm_gem_shmem_object *shmem)
{
return (shmem->madv > 0) &&
- !shmem->vmap_use_count && shmem->sgt &&
+ !refcount_read(&shmem->pages_pin_count) && shmem->sgt &&
!shmem->base.dma_buf && !drm_gem_is_imported(&shmem->base);
}
-void drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem);
+void drm_gem_shmem_purge_locked(struct drm_gem_shmem_object *shmem);
struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_shmem_object *shmem);
struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_shmem_object *shmem);
@@ -214,12 +225,12 @@ static inline struct sg_table *drm_gem_shmem_object_get_sg_table(struct drm_gem_
}
/*
- * drm_gem_shmem_object_vmap - GEM object function for drm_gem_shmem_vmap()
+ * drm_gem_shmem_object_vmap - GEM object function for drm_gem_shmem_vmap_locked()
* @obj: GEM object
* @map: Returns the kernel virtual address of the SHMEM GEM object's backing store.
*
- * This function wraps drm_gem_shmem_vmap(). Drivers that employ the shmem helpers should
- * use it as their &drm_gem_object_funcs.vmap handler.
+ * This function wraps drm_gem_shmem_vmap_locked(). Drivers that employ the shmem
+ * helpers should use it as their &drm_gem_object_funcs.vmap handler.
*
* Returns:
* 0 on success or a negative error code on failure.
@@ -229,7 +240,7 @@ static inline int drm_gem_shmem_object_vmap(struct drm_gem_object *obj,
{
struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
- return drm_gem_shmem_vmap(shmem, map);
+ return drm_gem_shmem_vmap_locked(shmem, map);
}
/*
@@ -237,15 +248,15 @@ static inline int drm_gem_shmem_object_vmap(struct drm_gem_object *obj,
* @obj: GEM object
* @map: Kernel virtual address where the SHMEM GEM object was mapped
*
- * This function wraps drm_gem_shmem_vunmap(). Drivers that employ the shmem helpers should
- * use it as their &drm_gem_object_funcs.vunmap handler.
+ * This function wraps drm_gem_shmem_vunmap_locked(). Drivers that employ the shmem
+ * helpers should use it as their &drm_gem_object_funcs.vunmap handler.
*/
static inline void drm_gem_shmem_object_vunmap(struct drm_gem_object *obj,
struct iosys_map *map)
{
struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
- drm_gem_shmem_vunmap(shmem, map);
+ drm_gem_shmem_vunmap_locked(shmem, map);
}
/**
diff --git a/include/drm/drm_kunit_helpers.h b/include/drm/drm_kunit_helpers.h
index 1c62d1d4458c..4948379237e9 100644
--- a/include/drm/drm_kunit_helpers.h
+++ b/include/drm/drm_kunit_helpers.h
@@ -9,6 +9,7 @@
#include <kunit/test.h>
+struct drm_connector;
struct drm_crtc_funcs;
struct drm_crtc_helper_funcs;
struct drm_device;
@@ -118,6 +119,13 @@ drm_kunit_helper_create_crtc(struct kunit *test,
const struct drm_crtc_funcs *funcs,
const struct drm_crtc_helper_funcs *helper_funcs);
+int drm_kunit_helper_enable_crtc_connector(struct kunit *test,
+ struct drm_device *drm,
+ struct drm_crtc *crtc,
+ struct drm_connector *connector,
+ const struct drm_display_mode *mode,
+ struct drm_modeset_acquire_ctx *ctx);
+
int drm_kunit_add_mode_destroy_action(struct kunit *test,
struct drm_display_mode *mode);
diff --git a/include/drm/drm_mipi_dsi.h b/include/drm/drm_mipi_dsi.h
index bd40a443385c..b37860f4a895 100644
--- a/include/drm/drm_mipi_dsi.h
+++ b/include/drm/drm_mipi_dsi.h
@@ -293,6 +293,7 @@ void mipi_dsi_generic_write_multi(struct mipi_dsi_multi_context *ctx,
const void *payload, size_t size);
ssize_t mipi_dsi_generic_read(struct mipi_dsi_device *dsi, const void *params,
size_t num_params, void *data, size_t size);
+u32 drm_mipi_dsi_get_input_bus_fmt(enum mipi_dsi_pixel_format dsi_format);
#define mipi_dsi_msleep(ctx, delay) \
do { \
@@ -417,28 +418,6 @@ void mipi_dsi_dcs_set_tear_off_multi(struct mipi_dsi_multi_context *ctx);
} while (0)
/**
- * mipi_dsi_dcs_write_seq - transmit a DCS command with payload
- *
- * This macro will print errors for you and will RETURN FROM THE CALLING
- * FUNCTION (yes this is non-intuitive) upon error.
- *
- * Because of the non-intuitive return behavior, THIS MACRO IS DEPRECATED.
- * Please replace calls of it with mipi_dsi_dcs_write_seq_multi().
- *
- * @dsi: DSI peripheral device
- * @cmd: Command
- * @seq: buffer containing data to be transmitted
- */
-#define mipi_dsi_dcs_write_seq(dsi, cmd, seq...) \
- do { \
- static const u8 d[] = { cmd, seq }; \
- int ret; \
- ret = mipi_dsi_dcs_write_buffer_chatty(dsi, d, ARRAY_SIZE(d)); \
- if (ret < 0) \
- return ret; \
- } while (0)
-
-/**
* mipi_dsi_dcs_write_seq_multi - transmit a DCS command with payload
*
* This macro will print errors for you and error handling is optimized for
diff --git a/include/drm/drm_mode_config.h b/include/drm/drm_mode_config.h
index 271765e2e9f2..9e524b51a001 100644
--- a/include/drm/drm_mode_config.h
+++ b/include/drm/drm_mode_config.h
@@ -532,8 +532,8 @@ struct drm_mode_config {
*/
struct list_head privobj_list;
- int min_width, min_height;
- int max_width, max_height;
+ unsigned int min_width, min_height;
+ unsigned int max_width, max_height;
const struct drm_mode_config_funcs *funcs;
/* output poll support */
@@ -937,6 +937,12 @@ struct drm_mode_config {
struct drm_property *modifiers_property;
/**
+ * @async_modifiers_property: Plane property to list supported
+ * modifier/format combinations for asynchronous flips.
+ */
+ struct drm_property *async_modifiers_property;
+
+ /**
* @size_hints_property: Plane SIZE_HINTS property.
*/
struct drm_property *size_hints_property;
diff --git a/include/drm/drm_panel.h b/include/drm/drm_panel.h
index a9c042c8dea1..843fb756a295 100644
--- a/include/drm/drm_panel.h
+++ b/include/drm/drm_panel.h
@@ -28,6 +28,7 @@
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/mutex.h>
+#include <linux/kref.h>
struct backlight_device;
struct dentry;
@@ -266,20 +267,60 @@ struct drm_panel {
* If true then the panel has been enabled.
*/
bool enabled;
+
+ /**
+ * @container: Pointer to the private driver struct embedding this
+ * &struct drm_panel.
+ */
+ void *container;
+
+ /**
+ * @refcount: reference count of users referencing this panel.
+ */
+ struct kref refcount;
};
+void *__devm_drm_panel_alloc(struct device *dev, size_t size, size_t offset,
+ const struct drm_panel_funcs *funcs,
+ int connector_type);
+
+/**
+ * devm_drm_panel_alloc - Allocate and initialize a refcounted panel.
+ *
+ * @dev: struct device of the panel device
+ * @type: the type of the struct which contains struct &drm_panel
+ * @member: the name of the &drm_panel within @type
+ * @funcs: callbacks for this panel
+ * @connector_type: the connector type (DRM_MODE_CONNECTOR_*) corresponding to
+ * the panel interface
+ *
+ * The reference count of the returned panel is initialized to 1. This
+ * reference will be automatically dropped via devm (by calling
+ * drm_panel_put()) when @dev is removed.
+ *
+ * Returns:
+ * Pointer to container structure embedding the panel, ERR_PTR on failure.
+ */
+#define devm_drm_panel_alloc(dev, type, member, funcs, connector_type) \
+ ((type *)__devm_drm_panel_alloc(dev, sizeof(type), \
+ offsetof(type, member), funcs, \
+ connector_type))
+
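+A minimal usage sketch mirroring devm_drm_bridge_alloc(); struct
+my_panel, my_panel_probe() and my_panel_funcs are hypothetical driver
+code:
+
+/* Sketch: container-first allocation; the embedded panel starts with a
+ * refcount of 1, dropped via devm through drm_panel_put().
+ */
+struct my_panel {
+	struct drm_panel panel;
+	/* driver-private state */
+};
+
+static int my_panel_probe(struct mipi_dsi_device *dsi)
+{
+	struct my_panel *p;
+
+	p = devm_drm_panel_alloc(&dsi->dev, struct my_panel, panel,
+				 &my_panel_funcs, DRM_MODE_CONNECTOR_DSI);
+	if (IS_ERR(p))
+		return PTR_ERR(p);
+
+	drm_panel_add(&p->panel);
+	return 0;
+}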
void drm_panel_init(struct drm_panel *panel, struct device *dev,
const struct drm_panel_funcs *funcs,
int connector_type);
+struct drm_panel *drm_panel_get(struct drm_panel *panel);
+void drm_panel_put(struct drm_panel *panel);
+
void drm_panel_add(struct drm_panel *panel);
void drm_panel_remove(struct drm_panel *panel);
-int drm_panel_prepare(struct drm_panel *panel);
-int drm_panel_unprepare(struct drm_panel *panel);
+void drm_panel_prepare(struct drm_panel *panel);
+void drm_panel_unprepare(struct drm_panel *panel);
-int drm_panel_enable(struct drm_panel *panel);
-int drm_panel_disable(struct drm_panel *panel);
+void drm_panel_enable(struct drm_panel *panel);
+void drm_panel_disable(struct drm_panel *panel);
int drm_panel_get_modes(struct drm_panel *panel, struct drm_connector *connector);
diff --git a/include/drm/drm_panic.h b/include/drm/drm_panic.h
index ff78d00c3da5..310c88c4d336 100644
--- a/include/drm/drm_panic.h
+++ b/include/drm/drm_panic.h
@@ -40,6 +40,16 @@ struct drm_scanout_buffer {
struct iosys_map map[DRM_FORMAT_MAX_PLANES];
/**
+ * @pages: Optional. If the scanout buffer is not mapped, set this field
+ * to the array of pages of the scanout buffer. The panic code will use
+ * kmap_local_page_try_from_panic() to map one page at a time to write
+ * all the pixels. This array shouldn't be allocated from the
+ * get_scanout_buffer() callback.
+ * The scanout buffer should be in linear format.
+ */
+ struct page **pages;
+
+ /**
* @width: Width of the scanout buffer, in pixels.
*/
unsigned int width;
@@ -57,7 +67,7 @@ struct drm_scanout_buffer {
/**
* @set_pixel: Optional function, to set a pixel color on the
* framebuffer. It allows to handle special tiling format inside the
- * driver.
+ * driver. It takes precedence over the @map and @pages fields.
*/
void (*set_pixel)(struct drm_scanout_buffer *sb, unsigned int x,
unsigned int y, u32 color);
diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h
index dd718c62ac31..01479dd94e76 100644
--- a/include/drm/drm_plane.h
+++ b/include/drm/drm_plane.h
@@ -549,6 +549,23 @@ struct drm_plane_funcs {
*/
bool (*format_mod_supported)(struct drm_plane *plane, uint32_t format,
uint64_t modifier);
+ /**
+ * @format_mod_supported_async:
+ *
+ * This optional hook is used by the DRM core to determine whether the
+ * given format/modifier combination is valid for asynchronous flips on
+ * the plane. This allows the DRM core to generate the correct format
+ * bitmask (which formats apply to which modifier), and to validate
+ * modifiers at atomic_check time.
+ *
+ * Returns:
+ *
+ * True if the given modifier is valid for that format on the plane.
+ * False otherwise.
+ */
+ bool (*format_mod_supported_async)(struct drm_plane *plane,
+ u32 format, u64 modifier);
+
};
/**
diff --git a/include/drm/drm_print.h b/include/drm/drm_print.h
index f31eba1c7cab..ab017b05e175 100644
--- a/include/drm/drm_print.h
+++ b/include/drm/drm_print.h
@@ -345,6 +345,26 @@ drm_coredump_printer(struct drm_print_iterator *iter)
}
/**
+ * drm_coredump_printer_is_full() - DRM coredump printer output is full
+ * @p: DRM coredump printer
+ *
+ * Checks whether the DRM coredump printer's output buffer is full, which is
+ * useful to short-circuit coredump printing once the printer is full.
+ *
+ * RETURNS:
+ * True if DRM coredump printer output buffer is full, False otherwise
+ */
+static inline bool drm_coredump_printer_is_full(struct drm_printer *p)
+{
+ struct drm_print_iterator *iterator = p->arg;
+
+ if (p->printfn != __drm_printfn_coredump)
+ return true;
+
+ return !iterator->remain;
+}
+
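+A usage sketch; struct my_gpu and my_dump_ring() are hypothetical
+driver code:
+
+/* Sketch: bail out of an expensive dump loop once the caller-supplied
+ * coredump buffer is exhausted.
+ */
+static void demo_coredump(struct drm_printer *p, struct my_gpu *gpu)
+{
+	unsigned int i;
+
+	for (i = 0; i < gpu->num_rings; i++) {
+		if (drm_coredump_printer_is_full(p))
+			return;
+		my_dump_ring(p, &gpu->rings[i]);
+	}
+}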
+/**
* drm_seq_file_printer - construct a &drm_printer that outputs to &seq_file
* @f: the &struct seq_file to output to
*
diff --git a/include/drm/drm_probe_helper.h b/include/drm/drm_probe_helper.h
index d6ce7b218b77..840ae5f798c2 100644
--- a/include/drm/drm_probe_helper.h
+++ b/include/drm/drm_probe_helper.h
@@ -17,7 +17,7 @@ int drm_helper_probe_detect(struct drm_connector *connector,
struct drm_modeset_acquire_ctx *ctx,
bool force);
-int drmm_kms_helper_poll_init(struct drm_device *dev);
+void drmm_kms_helper_poll_init(struct drm_device *dev);
void drm_kms_helper_poll_init(struct drm_device *dev);
void drm_kms_helper_poll_fini(struct drm_device *dev);
bool drm_helper_hpd_irq_event(struct drm_device *dev);
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index 50928a7ae98e..1a7e377d4cbb 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -383,8 +383,15 @@ struct drm_sched_job {
struct xarray dependencies;
};
+/**
+ * enum drm_gpu_sched_stat - the scheduler's status
+ *
+ * @DRM_GPU_SCHED_STAT_NONE: Reserved. Do not use.
+ * @DRM_GPU_SCHED_STAT_NOMINAL: Operation succeeded.
+ * @DRM_GPU_SCHED_STAT_ENODEV: Error: Device is not available anymore.
+ */
enum drm_gpu_sched_stat {
- DRM_GPU_SCHED_STAT_NONE, /* Reserve 0 */
+ DRM_GPU_SCHED_STAT_NONE,
DRM_GPU_SCHED_STAT_NOMINAL,
DRM_GPU_SCHED_STAT_ENODEV,
};
@@ -410,10 +417,36 @@ struct drm_sched_backend_ops {
struct drm_sched_entity *s_entity);
/**
- * @run_job: Called to execute the job once all of the dependencies
- * have been resolved. This may be called multiple times, if
- * timedout_job() has happened and drm_sched_job_recovery()
- * decides to try it again.
+ * @run_job: Called to execute the job once all of the dependencies
+ * have been resolved.
+ *
+ * @sched_job: the job to run
+ *
+ * The deprecated drm_sched_resubmit_jobs() (called by &struct
+ * drm_sched_backend_ops.timedout_job) can invoke this again with the
+ * same parameters. Using this is discouraged because it violates
+ * dma_fence rules, notably dma_fence_init() has to be called a second
+ * time on fences that were already initialized. Moreover, this is
+ * dangerous because attempts to allocate memory might deadlock with
+ * memory management code waiting for the reset to complete.
+ *
+ * TODO: Document what drivers should do / use instead.
+ *
+ * This method is called in a workqueue context - either from the
+ * submit_wq the driver passed through drm_sched_init(), or, if the
+ * driver passed NULL, a separate, ordered workqueue the scheduler
+ * allocated.
+ *
+ * Note that the scheduler expects to 'inherit' its own reference to
+ * this fence from the callback. It does not invoke an extra
+ * dma_fence_get() on it. Consequently, this callback must take a
+ * reference for the scheduler, and additional ones for the driver's
+ * respective needs.
+ *
+ * Return:
+ * * On success: dma_fence the driver must signal once the hardware has
+ * completed the job ("hardware fence").
+ * * On failure: NULL or an ERR_PTR.
*/
struct dma_fence *(*run_job)(struct drm_sched_job *sched_job);
@@ -421,43 +454,52 @@ struct drm_sched_backend_ops {
* @timedout_job: Called when a job has taken too long to execute,
* to trigger GPU recovery.
*
- * This method is called in a workqueue context.
+ * @sched_job: The job that has timed out
+ *
+ * Drivers typically issue a reset to recover from GPU hangs.
+ * This procedure looks very different depending on whether a firmware
+ * or a hardware scheduler is being used.
+ *
+ * For a FIRMWARE SCHEDULER, each ring has one scheduler, and each
+ * scheduler has one entity. Hence, the steps taken typically look as
+ * follows:
*
- * Drivers typically issue a reset to recover from GPU hangs, and this
- * procedure usually follows the following workflow:
+ * 1. Stop the scheduler using drm_sched_stop(). This will pause the
+ * scheduler workqueues and cancel the timeout work, guaranteeing
+ * that nothing is queued while the ring is being removed.
+ * 2. Remove the ring. The firmware will make sure that the
+ * corresponding parts of the hardware are reset, and that other
+ * rings are not impacted.
+ * 3. Kill the entity and the associated scheduler.
*
- * 1. Stop the scheduler using drm_sched_stop(). This will park the
- * scheduler thread and cancel the timeout work, guaranteeing that
- * nothing is queued while we reset the hardware queue
- * 2. Try to gracefully stop non-faulty jobs (optional)
- * 3. Issue a GPU reset (driver-specific)
- * 4. Re-submit jobs using drm_sched_resubmit_jobs()
- * 5. Restart the scheduler using drm_sched_start(). At that point, new
- * jobs can be queued, and the scheduler thread is unblocked
+ *
+ * For a HARDWARE SCHEDULER, a scheduler instance schedules jobs from
+ * one or more entities to one ring. This implies that all entities
+ * associated with the affected scheduler cannot be torn down, because
+ * this would effectively also affect innocent userspace processes which
+ * did not submit any faulty jobs.
+ *
+ * Consequently, the procedure to recover with a hardware scheduler
+ * should look like this:
+ *
+ * 1. Stop all schedulers impacted by the reset using drm_sched_stop().
+ * 2. Kill the entity the faulty job stems from.
+ * 3. Issue a GPU reset on all faulty rings (driver-specific).
+ * 4. Re-submit jobs on all schedulers impacted by the reset to the
+ * entities which are still alive.
+ * 5. Restart all schedulers that were stopped in step #1 using
+ * drm_sched_start().
*
* Note that some GPUs have distinct hardware queues but need to reset
* the GPU globally, which requires extra synchronization between the
- * timeout handler of the different &drm_gpu_scheduler. One way to
- * achieve this synchronization is to create an ordered workqueue
- * (using alloc_ordered_workqueue()) at the driver level, and pass this
- * queue to drm_sched_init(), to guarantee that timeout handlers are
- * executed sequentially. The above workflow needs to be slightly
- * adjusted in that case:
- *
- * 1. Stop all schedulers impacted by the reset using drm_sched_stop()
- * 2. Try to gracefully stop non-faulty jobs on all queues impacted by
- * the reset (optional)
- * 3. Issue a GPU reset on all faulty queues (driver-specific)
- * 4. Re-submit jobs on all schedulers impacted by the reset using
- * drm_sched_resubmit_jobs()
- * 5. Restart all schedulers that were stopped in step #1 using
- * drm_sched_start()
+ * timeout handlers of different schedulers. One way to achieve this
+ * synchronization is to create an ordered workqueue (using
+ * alloc_ordered_workqueue()) at the driver level, and pass this queue
+ * as drm_sched_init()'s @timeout_wq parameter. This will guarantee
+ * that timeout handlers are executed sequentially.
*
- * Return DRM_GPU_SCHED_STAT_NOMINAL, when all is normal,
- * and the underlying driver has started or completed recovery.
+ * Return: The scheduler's status, defined by &enum drm_gpu_sched_stat
*
- * Return DRM_GPU_SCHED_STAT_ENODEV, if the device is no longer
- * available, i.e. has been unplugged.
*/
enum drm_gpu_sched_stat (*timedout_job)(struct drm_sched_job *sched_job);
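For illustration, a minimal sketch of a timedout_job handler following the hardware-scheduler steps above. All foo_* names are hypothetical, and steps 2-4 are driver-specific, so this is an outline under stated assumptions rather than the canonical implementation:

static enum drm_gpu_sched_stat foo_timedout_job(struct drm_sched_job *bad)
{
	struct foo_device *fdev = foo_device_from_sched(bad->sched);
	unsigned int i;

	/* 1. Stop all schedulers impacted by the reset. */
	for (i = 0; i < fdev->num_rings; i++)
		drm_sched_stop(&fdev->rings[i].sched, bad);

	/* 2. Kill the entity the faulty job stems from (driver-specific). */
	foo_kill_entity(fdev, bad);

	/* 3. Issue a GPU reset on all faulty rings (driver-specific). */
	foo_reset_hw(fdev);

	/* 4. Re-submit pending jobs to the entities which are still alive. */
	foo_resubmit_jobs(fdev);

	/* 5. Restart the schedulers stopped in step 1. */
	for (i = 0; i < fdev->num_rings; i++)
		drm_sched_start(&fdev->rings[i].sched, 0);

	return DRM_GPU_SCHED_STAT_NOMINAL;
}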
diff --git a/include/drm/intel/intel-gtt.h b/include/drm/intel/intel-gtt.h
index cb0d5b7200c7..f53bcff01f22 100644
--- a/include/drm/intel/intel-gtt.h
+++ b/include/drm/intel/intel-gtt.h
@@ -28,6 +28,8 @@ void intel_gmch_gtt_insert_sg_entries(struct sg_table *st,
unsigned int pg_start,
unsigned int flags);
void intel_gmch_gtt_clear_range(unsigned int first_entry, unsigned int num_entries);
+dma_addr_t intel_gmch_gtt_read_entry(unsigned int pg,
+ bool *is_present, bool *is_local);
/* Special gtt memory types */
#define AGP_DCACHE_MEMORY 1
diff --git a/include/drm/intel/pciids.h b/include/drm/intel/pciids.h
index d212848d07f3..a7ce9523c50d 100644
--- a/include/drm/intel/pciids.h
+++ b/include/drm/intel/pciids.h
@@ -861,6 +861,10 @@
MACRO__(0xB081, ## __VA_ARGS__), \
MACRO__(0xB082, ## __VA_ARGS__), \
MACRO__(0xB083, ## __VA_ARGS__), \
+ MACRO__(0xB084, ## __VA_ARGS__), \
+ MACRO__(0xB085, ## __VA_ARGS__), \
+ MACRO__(0xB086, ## __VA_ARGS__), \
+ MACRO__(0xB087, ## __VA_ARGS__), \
MACRO__(0xB08F, ## __VA_ARGS__), \
MACRO__(0xB090, ## __VA_ARGS__), \
MACRO__(0xB0A0, ## __VA_ARGS__), \
diff --git a/include/drm/ttm/ttm_bo.h b/include/drm/ttm/ttm_bo.h
index 903cd1030110..cf027558b6db 100644
--- a/include/drm/ttm/ttm_bo.h
+++ b/include/drm/ttm/ttm_bo.h
@@ -172,7 +172,6 @@ struct ttm_bo_kmap_obj {
* @gfp_retry_mayfail: Set the __GFP_RETRY_MAYFAIL when allocation pages.
* @allow_res_evict: Allow eviction of reserved BOs. Can be used when multiple
* BOs share the same reservation object.
- * @force_alloc: Don't check the memory account during suspend or CPU page
* faults. Should only be used by TTM internally.
* @resv: Reservation object to allow reserved evictions with.
* @bytes_moved: Statistics on how many bytes have been moved.
@@ -185,7 +184,6 @@ struct ttm_operation_ctx {
bool no_wait_gpu;
bool gfp_retry_mayfail;
bool allow_res_evict;
- bool force_alloc;
struct dma_resv *resv;
uint64_t bytes_moved;
};
diff --git a/include/dt-bindings/power/mediatek,mt6893-power.h b/include/dt-bindings/power/mediatek,mt6893-power.h
new file mode 100644
index 000000000000..aeab51bb2ad8
--- /dev/null
+++ b/include/dt-bindings/power/mediatek,mt6893-power.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Copyright (c) 2025 Collabora Ltd
+ * AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#ifndef _DT_BINDINGS_POWER_MT6893_POWER_H
+#define _DT_BINDINGS_POWER_MT6893_POWER_H
+
+#define MT6893_POWER_DOMAIN_CONN 0
+#define MT6893_POWER_DOMAIN_MFG0 1
+#define MT6893_POWER_DOMAIN_MFG1 2
+#define MT6893_POWER_DOMAIN_MFG2 3
+#define MT6893_POWER_DOMAIN_MFG3 4
+#define MT6893_POWER_DOMAIN_MFG4 5
+#define MT6893_POWER_DOMAIN_MFG5 6
+#define MT6893_POWER_DOMAIN_MFG6 7
+#define MT6893_POWER_DOMAIN_ISP 8
+#define MT6893_POWER_DOMAIN_ISP2 9
+#define MT6893_POWER_DOMAIN_IPE 10
+#define MT6893_POWER_DOMAIN_VDEC0 11
+#define MT6893_POWER_DOMAIN_VDEC1 12
+#define MT6893_POWER_DOMAIN_VENC0 13
+#define MT6893_POWER_DOMAIN_VENC1 14
+#define MT6893_POWER_DOMAIN_MDP 15
+#define MT6893_POWER_DOMAIN_DISP 16
+#define MT6893_POWER_DOMAIN_AUDIO 17
+#define MT6893_POWER_DOMAIN_ADSP 18
+#define MT6893_POWER_DOMAIN_CAM 19
+#define MT6893_POWER_DOMAIN_CAM_RAWA 20
+#define MT6893_POWER_DOMAIN_CAM_RAWB 21
+#define MT6893_POWER_DOMAIN_CAM_RAWC 22
+#define MT6893_POWER_DOMAIN_DP_TX 23
+
+#endif /* _DT_BINDINGS_POWER_MT6893_POWER_H */
diff --git a/include/dt-bindings/power/rockchip,rk3562-power.h b/include/dt-bindings/power/rockchip,rk3562-power.h
new file mode 100644
index 000000000000..5182c2427a55
--- /dev/null
+++ b/include/dt-bindings/power/rockchip,rk3562-power.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2022-2024 Rockchip Electronics Co., Ltd.
+ */
+#ifndef __DT_BINDINGS_POWER_RK3562_POWER_H__
+#define __DT_BINDINGS_POWER_RK3562_POWER_H__
+
+/* VD_CORE */
+#define RK3562_PD_CPU_0 0
+#define RK3562_PD_CPU_1 1
+#define RK3562_PD_CPU_2 2
+#define RK3562_PD_CPU_3 3
+#define RK3562_PD_CORE_ALIVE 4
+
+/* VD_PMU */
+#define RK3562_PD_PMU 5
+#define RK3562_PD_PMU_ALIVE 6
+
+/* VD_NPU */
+#define RK3562_PD_NPU 7
+
+/* VD_GPU */
+#define RK3562_PD_GPU 8
+
+/* VD_LOGIC */
+#define RK3562_PD_DDR 9
+#define RK3562_PD_VEPU 10
+#define RK3562_PD_VDPU 11
+#define RK3562_PD_VI 12
+#define RK3562_PD_VO 13
+#define RK3562_PD_RGA 14
+#define RK3562_PD_PHP 15
+#define RK3562_PD_LOGIC_ALIVE 16
+
+#endif
diff --git a/include/dt-bindings/sound/cs48l32.h b/include/dt-bindings/sound/cs48l32.h
new file mode 100644
index 000000000000..4e82260fff67
--- /dev/null
+++ b/include/dt-bindings/sound/cs48l32.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+/*
+ * Device Tree defines for CS48L32 DSP.
+ *
+ * Copyright (C) 2016-2018, 2022, 2025 Cirrus Logic, Inc. and
+ * Cirrus Logic International Semiconductor Ltd.
+ */
+
+#ifndef DT_BINDINGS_SOUND_CS48L32_H
+#define DT_BINDINGS_SOUND_CS48L32_H
+
+/* Values for cirrus,in-type */
+#define CS48L32_IN_TYPE_DIFF 0
+#define CS48L32_IN_TYPE_SE 1
+
+/* Values for cirrus,pdm-sup */
+#define CS48L32_PDM_SUP_VOUT_MIC 0
+#define CS48L32_PDM_SUP_MICBIAS1 1
+
+#endif
diff --git a/include/hyperv/hvgdk_mini.h b/include/hyperv/hvgdk_mini.h
index abf0bd76e370..68606fa5fe73 100644
--- a/include/hyperv/hvgdk_mini.h
+++ b/include/hyperv/hvgdk_mini.h
@@ -1013,7 +1013,7 @@ enum hv_register_name {
/*
* To support arch-generic code calling hv_set/get_register:
- * - On x86, HV_MSR_ indicates an MSR accessed via rdmsrl/wrmsrl
+ * - On x86, HV_MSR_ indicates an MSR accessed via rdmsrq/wrmsrq
* - On ARM, HV_MSR_ indicates a VP register accessed via hypercall
*/
#define HV_MSR_CRASH_P0 (HV_X64_MSR_CRASH_P0)
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 3f2e93ed9730..f4b3d442b7df 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -772,6 +772,10 @@ int acpi_get_local_u64_address(acpi_handle handle, u64 *addr);
int acpi_get_local_address(acpi_handle handle, u32 *addr);
const char *acpi_get_subsystem_id(acpi_handle handle);
+#ifdef CONFIG_ACPI_MRRM
+int acpi_mrrm_max_mem_region(void);
+#endif
+
#else /* !CONFIG_ACPI */
#define acpi_disabled 1
@@ -1092,6 +1096,11 @@ static inline acpi_handle acpi_get_processor_handle(int cpu)
return NULL;
}
+static inline int acpi_mrrm_max_mem_region(void)
+{
+ return 1;
+}
+
#endif /* !CONFIG_ACPI */
#ifdef CONFIG_ACPI_HMAT
@@ -1125,13 +1134,13 @@ void acpi_os_set_prepare_extended_sleep(int (*func)(u8 sleep_state,
acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state,
u32 val_a, u32 val_b);
-#if defined(CONFIG_SUSPEND) && defined(CONFIG_X86)
struct acpi_s2idle_dev_ops {
struct list_head list_node;
void (*prepare)(void);
void (*check)(void);
void (*restore)(void);
};
+#if defined(CONFIG_SUSPEND) && defined(CONFIG_X86)
int acpi_register_lps0_dev(struct acpi_s2idle_dev_ops *arg);
void acpi_unregister_lps0_dev(struct acpi_s2idle_dev_ops *arg);
int acpi_get_lps0_constraint(struct acpi_device *adev);
@@ -1140,6 +1149,13 @@ static inline int acpi_get_lps0_constraint(struct device *dev)
{
return ACPI_STATE_UNKNOWN;
}
+static inline int acpi_register_lps0_dev(struct acpi_s2idle_dev_ops *arg)
+{
+ return -ENODEV;
+}
+static inline void acpi_unregister_lps0_dev(struct acpi_s2idle_dev_ops *arg)
+{
+}
#endif /* CONFIG_SUSPEND && CONFIG_X86 */
void arch_reserve_mem_area(acpi_physical_address addr, size_t size);
#else
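With the struct definition now visible on all configurations and stubs added for the !(CONFIG_SUSPEND && CONFIG_X86) case, a driver can register unconditionally. A sketch with hypothetical foo_* names:

static void foo_s2idle_check(void)
{
	/* Runs in the s2idle loop while devices are suspended. */
}

static struct acpi_s2idle_dev_ops foo_s2idle_ops = {
	.check = foo_s2idle_check,
};

static int foo_init(void)
{
	return acpi_register_lps0_dev(&foo_s2idle_ops); /* -ENODEV if stubbed */
}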
diff --git a/include/linux/alloc_tag.h b/include/linux/alloc_tag.h
index a946e0203e6d..8f7931eb7d16 100644
--- a/include/linux/alloc_tag.h
+++ b/include/linux/alloc_tag.h
@@ -104,6 +104,16 @@ DECLARE_PER_CPU(struct alloc_tag_counters, _shared_alloc_tag);
#else /* ARCH_NEEDS_WEAK_PER_CPU */
+#ifdef MODULE
+
+#define DEFINE_ALLOC_TAG(_alloc_tag) \
+ static struct alloc_tag _alloc_tag __used __aligned(8) \
+ __section(ALLOC_TAG_SECTION_NAME) = { \
+ .ct = CODE_TAG_INIT, \
+ .counters = NULL };
+
+#else /* MODULE */
+
#define DEFINE_ALLOC_TAG(_alloc_tag) \
static DEFINE_PER_CPU(struct alloc_tag_counters, _alloc_tag_cntr); \
static struct alloc_tag _alloc_tag __used __aligned(8) \
@@ -111,6 +121,8 @@ DECLARE_PER_CPU(struct alloc_tag_counters, _shared_alloc_tag);
.ct = CODE_TAG_INIT, \
.counters = &_alloc_tag_cntr };
+#endif /* MODULE */
+
#endif /* ARCH_NEEDS_WEAK_PER_CPU */
DECLARE_STATIC_KEY_MAYBE(CONFIG_MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT,
diff --git a/include/linux/arch_topology.h b/include/linux/arch_topology.h
index 2222e8b03ff4..d72d6e5aa200 100644
--- a/include/linux/arch_topology.h
+++ b/include/linux/arch_topology.h
@@ -14,14 +14,6 @@ int topology_update_cpu_topology(void);
struct device_node;
bool topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu);
-DECLARE_PER_CPU(unsigned long, cpu_scale);
-
-static inline unsigned long topology_get_cpu_scale(int cpu)
-{
- return per_cpu(cpu_scale, cpu);
-}
-
-void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity);
DECLARE_PER_CPU(unsigned long, capacity_freq_ref);
diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
index 1625c8529e70..65abd5ab8836 100644
--- a/include/linux/binfmts.h
+++ b/include/linux/binfmts.h
@@ -90,7 +90,6 @@ struct linux_binfmt {
struct list_head lh;
struct module *module;
int (*load_binary)(struct linux_binprm *);
- int (*load_shlib)(struct file *);
#ifdef CONFIG_COREDUMP
int (*core_dump)(struct coredump_params *cprm);
unsigned long min_coredump; /* minimal dump size */
diff --git a/include/linux/bio.h b/include/linux/bio.h
index b786ec5bcc81..9c37c66ef9ca 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -403,7 +403,6 @@ static inline int bio_iov_vecs_to_alloc(struct iov_iter *iter, int max_segs)
struct request_queue;
-extern int submit_bio_wait(struct bio *bio);
void bio_init(struct bio *bio, struct block_device *bdev, struct bio_vec *table,
unsigned short max_vecs, blk_opf_t opf);
extern void bio_uninit(struct bio *);
@@ -418,6 +417,30 @@ void __bio_add_page(struct bio *bio, struct page *page,
unsigned int len, unsigned int off);
void bio_add_folio_nofail(struct bio *bio, struct folio *folio, size_t len,
size_t off);
+void bio_add_virt_nofail(struct bio *bio, void *vaddr, unsigned len);
+
+/**
+ * bio_add_max_vecs - number of bio_vecs needed to add data to a bio
+ * @kaddr: kernel virtual address to add
+ * @len: length in bytes to add
+ *
+ * Calculate how many bio_vecs need to be allocated to add the kernel virtual
+ * address range in [@kaddr:@len] in the worst case.
+ */
+static inline unsigned int bio_add_max_vecs(void *kaddr, unsigned int len)
+{
+ if (is_vmalloc_addr(kaddr))
+ return DIV_ROUND_UP(offset_in_page(kaddr) + len, PAGE_SIZE);
+ return 1;
+}
+
+unsigned int bio_add_vmalloc_chunk(struct bio *bio, void *vaddr, unsigned len);
+bool bio_add_vmalloc(struct bio *bio, void *vaddr, unsigned int len);
+
+int submit_bio_wait(struct bio *bio);
+int bdev_rw_virt(struct block_device *bdev, sector_t sector, void *data,
+ size_t len, enum req_op op);
+
int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter);
void bio_iov_bvec_set(struct bio *bio, const struct iov_iter *iter);
void __bio_release_pages(struct bio *bio, bool mark_dirty);
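As a usage sketch for the helpers added above: bdev_rw_virt() wraps the whole build/submit/wait cycle for a kernel buffer, while bio_add_max_vecs() sizes a hand-built bio for the same range. The wrapper below is illustrative only:

/*
 * Synchronously write a kernel buffer to a block device. A hand-built
 * bio would need bio_add_max_vecs(buf, len) bio_vecs (more than one only
 * for vmalloc addresses); bdev_rw_virt() hides all of that.
 */
static int example_write_buf(struct block_device *bdev, sector_t sector,
			     void *buf, size_t len)
{
	return bdev_rw_virt(bdev, sector, buf, len, REQ_OP_WRITE);
}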
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 8eb9b3310167..de8c85a03bb7 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -9,6 +9,7 @@
#include <linux/prefetch.h>
#include <linux/srcu.h>
#include <linux/rw_hint.h>
+#include <linux/rwsem.h>
struct blk_mq_tags;
struct blk_flush_queue;
@@ -506,6 +507,9 @@ enum hctx_type {
* request_queue.tag_set_list.
* @srcu: Use as lock when type of the request queue is blocking
* (BLK_MQ_F_BLOCKING).
+ * @update_nr_hwq_lock:
+ * Synchronize updating nr_hw_queues with add/del disk &
+ * switching elevator.
*/
struct blk_mq_tag_set {
const struct blk_mq_ops *ops;
@@ -527,6 +531,8 @@ struct blk_mq_tag_set {
struct mutex tag_list_lock;
struct list_head tag_list;
struct srcu_struct *srcu;
+
+ struct rw_semaphore update_nr_hwq_lock;
};
/**
@@ -1031,8 +1037,8 @@ int blk_rq_map_user_io(struct request *, struct rq_map_data *,
int blk_rq_map_user_iov(struct request_queue *, struct request *,
struct rq_map_data *, const struct iov_iter *, gfp_t);
int blk_rq_unmap_user(struct bio *);
-int blk_rq_map_kern(struct request_queue *, struct request *, void *,
- unsigned int, gfp_t);
+int blk_rq_map_kern(struct request *rq, void *kbuf, unsigned int len,
+ gfp_t gfp);
int blk_rq_append_bio(struct request *rq, struct bio *bio);
void blk_execute_rq_nowait(struct request *rq, bool at_head);
blk_status_t blk_execute_rq(struct request *rq, bool at_head);
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index dce7615c35e7..3d1577f07c1c 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -220,6 +220,7 @@ struct bio {
unsigned short bi_flags; /* BIO_* below */
unsigned short bi_ioprio;
enum rw_hint bi_write_hint;
+ u8 bi_write_stream;
blk_status_t bi_status;
atomic_t __bi_remaining;
@@ -286,7 +287,6 @@ struct bio {
enum {
BIO_PAGE_PINNED, /* Unpin pages in bio_release_pages() */
BIO_CLONED, /* doesn't own data */
- BIO_BOUNCED, /* bio is a bounce bio */
BIO_QUIET, /* Make BIO Quiet */
BIO_CHAIN, /* chained bio, ->bi_remaining in effect */
BIO_REFFED, /* bio has elevated ->bi_cnt */
@@ -296,6 +296,14 @@ enum {
* of this bio. */
BIO_CGROUP_ACCT, /* has been accounted to a cgroup */
BIO_QOS_THROTTLED, /* bio went through rq_qos throttle path */
+ /*
+ * This bio has completed bps throttling at the single tg granularity,
+ * which is different from BIO_BPS_THROTTLED. When the bio is enqueued
+ * into the sq->queued of the upper tg, or is about to be dispatched,
+ * this flag needs to be cleared. Since blk-throttle and rq_qos are not
+ * on the same hierarchical level, reuse the value.
+ */
+ BIO_TG_BPS_THROTTLED = BIO_QOS_THROTTLED,
BIO_QOS_MERGED, /* but went through rq_qos merge path */
BIO_REMAPPED,
BIO_ZONE_WRITE_PLUGGING, /* bio handled through zone write plugging */
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 9a1f0ee40b56..332b56f323d9 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -182,7 +182,6 @@ struct gendisk {
struct list_head slave_bdevs;
#endif
struct timer_rand_state *random;
- atomic_t sync_io; /* RAID */
struct disk_events *ev;
#ifdef CONFIG_BLK_DEV_ZONED
@@ -218,6 +217,8 @@ struct gendisk {
* devices that do not have multiple independent access ranges.
*/
struct blk_independent_access_ranges *ia_ranges;
+
+ struct mutex rqos_state_mutex; /* rqos state change mutex */
};
/**
@@ -331,9 +332,6 @@ typedef unsigned int __bitwise blk_features_t;
/* skip this queue in blk_mq_(un)quiesce_tagset */
#define BLK_FEAT_SKIP_TAGSET_QUIESCE ((__force blk_features_t)(1u << 13))
-/* bounce all highmem pages */
-#define BLK_FEAT_BOUNCE_HIGH ((__force blk_features_t)(1u << 14))
-
/* undocumented magic for bcache */
#define BLK_FEAT_RAID_PARTIAL_STRIPES_EXPENSIVE \
((__force blk_features_t)(1u << 15))
@@ -347,7 +345,7 @@ typedef unsigned int __bitwise blk_features_t;
*/
#define BLK_FEAT_INHERIT_MASK \
(BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA | BLK_FEAT_ROTATIONAL | \
- BLK_FEAT_STABLE_WRITES | BLK_FEAT_ZONED | BLK_FEAT_BOUNCE_HIGH | \
+ BLK_FEAT_STABLE_WRITES | BLK_FEAT_ZONED | \
BLK_FEAT_RAID_PARTIAL_STRIPES_EXPENSIVE)
/* internal flags in queue_limits.flags */
@@ -405,6 +403,9 @@ struct queue_limits {
unsigned short max_integrity_segments;
unsigned short max_discard_segments;
+ unsigned short max_write_streams;
+ unsigned int write_stream_granularity;
+
unsigned int max_open_zones;
unsigned int max_active_zones;
@@ -644,6 +645,8 @@ enum {
QUEUE_FLAG_RQ_ALLOC_TIME, /* record rq->alloc_time_ns */
QUEUE_FLAG_HCTX_ACTIVE, /* at least one blk-mq hctx is active */
QUEUE_FLAG_SQ_SCHED, /* single queue style io dispatch */
+ QUEUE_FLAG_DISABLE_WBT_DEF, /* for sched to disable/enable wbt */
+ QUEUE_FLAG_NO_ELV_SWITCH, /* can't switch elevator any more */
QUEUE_FLAG_MAX
};
@@ -679,6 +682,10 @@ void blk_queue_flag_clear(unsigned int flag, struct request_queue *q);
#define blk_queue_sq_sched(q) test_bit(QUEUE_FLAG_SQ_SCHED, &(q)->queue_flags)
#define blk_queue_skip_tagset_quiesce(q) \
((q)->limits.features & BLK_FEAT_SKIP_TAGSET_QUIESCE)
+#define blk_queue_disable_wbt(q) \
+ test_bit(QUEUE_FLAG_DISABLE_WBT_DEF, &(q)->queue_flags)
+#define blk_queue_no_elv_switch(q) \
+ test_bit(QUEUE_FLAG_NO_ELV_SWITCH, &(q)->queue_flags)
extern void blk_set_pm_only(struct request_queue *q);
extern void blk_clear_pm_only(struct request_queue *q);
@@ -1288,6 +1295,13 @@ static inline unsigned int bdev_max_segments(struct block_device *bdev)
return queue_max_segments(bdev_get_queue(bdev));
}
+static inline unsigned short bdev_max_write_streams(struct block_device *bdev)
+{
+ if (bdev_is_partition(bdev))
+ return 0;
+ return bdev_limits(bdev)->max_write_streams;
+}
+
static inline unsigned queue_logical_block_size(const struct request_queue *q)
{
return q->limits.logical_block_size;
diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h
index 9de7adb68294..60d1511b4f4d 100644
--- a/include/linux/bpf-cgroup.h
+++ b/include/linux/bpf-cgroup.h
@@ -114,8 +114,7 @@ struct bpf_prog_list {
u32 flags;
};
-int cgroup_bpf_inherit(struct cgroup *cgrp);
-void cgroup_bpf_offline(struct cgroup *cgrp);
+void __init cgroup_bpf_lifetime_notifier_init(void);
int __cgroup_bpf_run_filter_skb(struct sock *sk,
struct sk_buff *skb,
@@ -431,8 +430,10 @@ const struct bpf_func_proto *
cgroup_current_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog);
#else
-static inline int cgroup_bpf_inherit(struct cgroup *cgrp) { return 0; }
-static inline void cgroup_bpf_offline(struct cgroup *cgrp) {}
+static inline void cgroup_bpf_lifetime_notifier_init(void)
+{
+ return;
+}
static inline int cgroup_bpf_prog_attach(const union bpf_attr *attr,
enum bpf_prog_type ptype,
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 5bc8f55c8cca..e61687d5e496 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -170,6 +170,23 @@ struct cgroup_subsys_state {
struct percpu_ref refcnt;
/*
+ * Depending on the context, this field is initialized
+ * via css_rstat_init() at different places:
+ *
+ * when css is associated with cgroup::self
+ * when css->cgroup is the root cgroup
+ * performed in cgroup_init()
+ * when css->cgroup is not the root cgroup
+ * performed in cgroup_create()
+ * when css is associated with a subsystem
+ * when css->cgroup is the root cgroup
+ * performed in cgroup_init_subsys() in the non-early path
+ * when css->cgroup is not the root cgroup
+ * performed in css_create()
+ */
+ struct css_rstat_cpu __percpu *rstat_cpu;
+
+ /*
* siblings list anchored at the parent's ->children
*
* linkage is protected by cgroup_mutex or RCU
@@ -177,9 +194,6 @@ struct cgroup_subsys_state {
struct list_head sibling;
struct list_head children;
- /* flush target list anchored at cgrp->rstat_css_list */
- struct list_head rstat_css_node;
-
/*
* PI: Subsys-unique ID. 0 is unused and root is always 1. The
* matching css can be looked up using css_from_id().
@@ -219,6 +233,16 @@ struct cgroup_subsys_state {
* Protected by cgroup_mutex.
*/
int nr_descendants;
+
+ /*
+ * A singly-linked list of css structures to be rstat flushed.
+ * This is a scratch field to be used exclusively by
+ * css_rstat_flush().
+ *
+ * Protected by rstat_base_lock when css is cgroup::self.
+ * Protected by css->ss->rstat_ss_lock otherwise.
+ */
+ struct cgroup_subsys_state *rstat_flush_next;
};
/*
@@ -329,10 +353,10 @@ struct cgroup_base_stat {
/*
* rstat - cgroup scalable recursive statistics. Accounting is done
- * per-cpu in cgroup_rstat_cpu which is then lazily propagated up the
+ * per-cpu in css_rstat_cpu which is then lazily propagated up the
* hierarchy on reads.
*
- * When a stat gets updated, the cgroup_rstat_cpu and its ancestors are
+ * When a stat gets updated, the css_rstat_cpu and its ancestors are
* linked into the updated tree. On the following read, propagation only
* considers and consumes the updated tree. This makes reading O(the
* number of descendants which have been active since last read) instead of
@@ -344,10 +368,29 @@ struct cgroup_base_stat {
* frequency decreases the cost of each read.
*
* This struct hosts both the fields which implement the above -
- * updated_children and updated_next - and the fields which track basic
- * resource statistics on top of it - bsync, bstat and last_bstat.
+ * updated_children and updated_next.
*/
-struct cgroup_rstat_cpu {
+struct css_rstat_cpu {
+ /*
+ * Child cgroups with stat updates on this cpu since the last read
+ * are linked on the parent's ->updated_children through
+ * ->updated_next. updated_children is terminated by its container css.
+ *
+ * In addition to being more compact, singly-linked list pointing to
+ * the css makes it unnecessary for each per-cpu struct to point back
+ * to the associated css.
+ *
+ * Protected by per-cpu css->ss->rstat_ss_cpu_lock.
+ */
+ struct cgroup_subsys_state *updated_children;
+ struct cgroup_subsys_state *updated_next; /* NULL if not on the list */
+};
+
+/*
+ * This struct hosts the fields which track basic resource statistics on
+ * top of the rstat machinery - bsync, bstat and last_bstat.
+ */
+struct cgroup_rstat_base_cpu {
/*
* ->bsync protects ->bstat. These are the only fields which get
* updated in the hot path.
@@ -374,20 +417,6 @@ struct cgroup_rstat_cpu {
* deltas to propagate to the per-cpu subtree_bstat.
*/
struct cgroup_base_stat last_subtree_bstat;
-
- /*
- * Child cgroups with stat updates on this cpu since the last read
- * are linked on the parent's ->updated_children through
- * ->updated_next.
- *
- * In addition to being more compact, singly-linked list pointing
- * to the cgroup makes it unnecessary for each per-cpu struct to
- * point back to the associated cgroup.
- *
- * Protected by per-cpu cgroup_rstat_cpu_lock.
- */
- struct cgroup *updated_children; /* terminated by self cgroup */
- struct cgroup *updated_next; /* NULL iff not on the list */
};
struct cgroup_freezer_state {
@@ -516,23 +545,23 @@ struct cgroup {
struct cgroup *dom_cgrp;
struct cgroup *old_dom_cgrp; /* used while enabling threaded */
- /* per-cpu recursive resource statistics */
- struct cgroup_rstat_cpu __percpu *rstat_cpu;
- struct list_head rstat_css_list;
-
/*
- * Add padding to separate the read mostly rstat_cpu and
- * rstat_css_list into a different cacheline from the following
- * rstat_flush_next and *bstat fields which can have frequent updates.
+ * Depending on the context, this field is initialized via
+ * css_rstat_init() at different places:
+ *
+ * when cgroup is the root cgroup
+ * performed in cgroup_setup_root()
+ * otherwise
+ * performed in cgroup_create()
*/
- CACHELINE_PADDING(_pad_);
+ struct cgroup_rstat_base_cpu __percpu *rstat_base_cpu;
/*
- * A singly-linked list of cgroup structures to be rstat flushed.
- * This is a scratch field to be used exclusively by
- * cgroup_rstat_flush_locked() and protected by cgroup_rstat_lock.
+ * Add padding to keep the read mostly rstat per-cpu pointer on a
+ * different cacheline than the following *bstat fields which can have
+ * frequent updates.
*/
- struct cgroup *rstat_flush_next;
+ CACHELINE_PADDING(_pad_);
/* cgroup basic resource statistics */
struct cgroup_base_stat last_bstat;
@@ -790,6 +819,9 @@ struct cgroup_subsys {
* specifies the mask of subsystems that this one depends on.
*/
unsigned int depends_on;
+
+ spinlock_t rstat_ss_lock;
+ raw_spinlock_t __percpu *rstat_ss_cpu_lock;
};
extern struct percpu_rw_semaphore cgroup_threadgroup_rwsem;
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index e7da3c3b098b..b18fb5fcb38e 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -19,6 +19,7 @@
#include <linux/kernfs.h>
#include <linux/jump_label.h>
#include <linux/types.h>
+#include <linux/notifier.h>
#include <linux/ns_common.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
@@ -40,7 +41,7 @@ struct kernel_clone_args;
#ifdef CONFIG_CGROUPS
-enum {
+enum css_task_iter_flags {
CSS_TASK_ITER_PROCS = (1U << 0), /* walk only threadgroup leaders */
CSS_TASK_ITER_THREADED = (1U << 1), /* walk all threaded css_sets in the domain */
CSS_TASK_ITER_SKIPPED = (1U << 16), /* internal flags */
@@ -66,10 +67,16 @@ struct css_task_iter {
struct list_head iters_node; /* css_set->task_iters */
};
+enum cgroup_lifetime_events {
+ CGROUP_LIFETIME_ONLINE,
+ CGROUP_LIFETIME_OFFLINE,
+};
+
extern struct file_system_type cgroup_fs_type;
extern struct cgroup_root cgrp_dfl_root;
extern struct css_set init_css_set;
extern spinlock_t css_set_lock;
+extern struct blocking_notifier_head cgroup_lifetime_notifier;
#define SUBSYS(_x) extern struct cgroup_subsys _x ## _cgrp_subsys;
#include <linux/cgroup_subsys.h>
@@ -347,6 +354,17 @@ static inline bool css_is_dying(struct cgroup_subsys_state *css)
return css->flags & CSS_DYING;
}
+static inline bool css_is_self(struct cgroup_subsys_state *css)
+{
+ if (css == &css->cgroup->self) {
+ /* cgroup::self should not have subsystem association */
+ WARN_ON(css->ss != NULL);
+ return true;
+ }
+
+ return false;
+}
+
static inline void cgroup_get(struct cgroup *cgrp)
{
css_get(&cgrp->self);
@@ -688,8 +706,8 @@ static inline void cgroup_path_from_kernfs_id(u64 id, char *buf, size_t buflen)
/*
* cgroup scalable recursive statistics.
*/
-void cgroup_rstat_updated(struct cgroup *cgrp, int cpu);
-void cgroup_rstat_flush(struct cgroup *cgrp);
+void css_rstat_updated(struct cgroup_subsys_state *css, int cpu);
+void css_rstat_flush(struct cgroup_subsys_state *css);
/*
* Basic resource stats.
@@ -785,6 +803,17 @@ struct cgroup_namespace *copy_cgroup_ns(unsigned long flags,
int cgroup_path_ns(struct cgroup *cgrp, char *buf, size_t buflen,
struct cgroup_namespace *ns);
+static inline void get_cgroup_ns(struct cgroup_namespace *ns)
+{
+ refcount_inc(&ns->ns.count);
+}
+
+static inline void put_cgroup_ns(struct cgroup_namespace *ns)
+{
+ if (refcount_dec_and_test(&ns->ns.count))
+ free_cgroup_ns(ns);
+}
+
#else /* !CONFIG_CGROUPS */
static inline void free_cgroup_ns(struct cgroup_namespace *ns) { }
@@ -795,19 +824,10 @@ copy_cgroup_ns(unsigned long flags, struct user_namespace *user_ns,
return old_ns;
}
-#endif /* !CONFIG_CGROUPS */
+static inline void get_cgroup_ns(struct cgroup_namespace *ns) { }
+static inline void put_cgroup_ns(struct cgroup_namespace *ns) { }
-static inline void get_cgroup_ns(struct cgroup_namespace *ns)
-{
- if (ns)
- refcount_inc(&ns->ns.count);
-}
-
-static inline void put_cgroup_ns(struct cgroup_namespace *ns)
-{
- if (ns && refcount_dec_and_test(&ns->ns.count))
- free_cgroup_ns(ns);
-}
+#endif /* !CONFIG_CGROUPS */
#ifdef CONFIG_CGROUPS
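A sketch of how a controller would use the renamed per-css rstat API; the declarations are taken from the hunk above, the surrounding controller code is hypothetical:

/* Mark @css updated on @cpu; deltas propagate lazily on the next read. */
static void example_charge(struct cgroup_subsys_state *css, int cpu)
{
	css_rstat_updated(css, cpu);
}

static void example_read(struct cgroup_subsys_state *css)
{
	css_rstat_flush(css);	/* consume the updated tree for this css */
}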
diff --git a/include/linux/cleanup.h b/include/linux/cleanup.h
index 7e57047e1564..7093e1d08af0 100644
--- a/include/linux/cleanup.h
+++ b/include/linux/cleanup.h
@@ -216,6 +216,25 @@ const volatile void * __must_check_fn(const volatile void *val)
#define return_ptr(p) return no_free_ptr(p)
+/*
+ * Only for situations where an allocation is handed in to another function
+ * and consumed by that function on success.
+ *
+ * struct foo *f __free(kfree) = kzalloc(sizeof(*f), GFP_KERNEL);
+ *
+ * setup(f);
+ * if (some_condition)
+ * return -EINVAL;
+ * ....
+ * ret = bar(f);
+ * if (!ret)
+ * retain_and_null_ptr(f);
+ * return ret;
+ *
+ * After retain_and_null_ptr(f) the variable f is NULL and cannot be
+ * dereferenced anymore.
+ */
+#define retain_and_null_ptr(p) ((void)__get_and_null(p, NULL))
/*
* DEFINE_CLASS(name, type, exit, init, init_args...):
diff --git a/include/linux/codetag.h b/include/linux/codetag.h
index d14dbd26b370..0ee4c21c6dbc 100644
--- a/include/linux/codetag.h
+++ b/include/linux/codetag.h
@@ -36,10 +36,10 @@ union codetag_ref {
struct codetag_type_desc {
const char *section;
size_t tag_size;
- void (*module_load)(struct codetag_type *cttype,
- struct codetag_module *cmod);
- void (*module_unload)(struct codetag_type *cttype,
- struct codetag_module *cmod);
+ void (*module_load)(struct module *mod,
+ struct codetag *start, struct codetag *end);
+ void (*module_unload)(struct module *mod,
+ struct codetag *start, struct codetag *end);
#ifdef CONFIG_MODULES
void (*module_replaced)(struct module *mod, struct module *new_mod);
bool (*needs_section_mem)(struct module *mod, unsigned long size);
diff --git a/include/linux/compiler-version.h b/include/linux/compiler-version.h
index 573fa85b6c0c..ac1665a98a15 100644
--- a/include/linux/compiler-version.h
+++ b/include/linux/compiler-version.h
@@ -12,3 +12,33 @@
* and add dependency on include/config/CC_VERSION_TEXT, which is touched
* by Kconfig when the version string from the compiler changes.
*/
+
+/* Additional tree-wide dependencies start here. */
+
+/*
+ * If any of the GCC plugins change, we need to rebuild everything that
+ * was built with them, as they may have changed their behavior and those
+ * behaviors may need to be synchronized across all translation units.
+ */
+#ifdef GCC_PLUGINS
+#include <generated/gcc-plugins.h>
+#endif
+
+/*
+ * If the randstruct seed itself changes (whether for GCC plugins or
+ * Clang), the entire tree needs to be rebuilt, since otherwise the
+ * randomization of structures may differ between compilation units.
+ */
+#ifdef RANDSTRUCT
+#include <generated/randstruct_hash.h>
+#endif
+
+/*
+ * If any external changes affect Clang's integer wrapping sanitizer
+ * behavior, a full rebuild is needed as the coverage for wrapping types
+ * may have changed, which may impact the expected behaviors that should
+ * not differ between compilation units.
+ */
+#ifdef INTEGER_WRAP
+#include <generated/integer-wrap.h>
+#endif
diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h
index 501cffddc2f4..20881cc761fa 100644
--- a/include/linux/compiler_types.h
+++ b/include/linux/compiler_types.h
@@ -449,6 +449,11 @@ struct ftrace_likely_data {
/*
* When the size of an allocated object is needed, use the best available
* mechanism to find it. (For cases where sizeof() cannot be used.)
+ *
+ * Optional: only supported since gcc >= 12
+ *
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Object-Size-Checking.html
+ * clang: https://clang.llvm.org/docs/LanguageExtensions.html#evaluating-object-size
*/
#if __has_builtin(__builtin_dynamic_object_size)
#define __struct_size(p) __builtin_dynamic_object_size(p, 0)
diff --git a/include/linux/configfs.h b/include/linux/configfs.h
index c771e9d0d0b9..698520b1bfdb 100644
--- a/include/linux/configfs.h
+++ b/include/linux/configfs.h
@@ -120,15 +120,19 @@ struct configfs_attribute {
ssize_t (*store)(struct config_item *, const char *, size_t);
};
-#define CONFIGFS_ATTR(_pfx, _name) \
+#define CONFIGFS_ATTR_PERM(_pfx, _name, _perm) \
static struct configfs_attribute _pfx##attr_##_name = { \
.ca_name = __stringify(_name), \
- .ca_mode = S_IRUGO | S_IWUSR, \
+ .ca_mode = _perm, \
.ca_owner = THIS_MODULE, \
.show = _pfx##_name##_show, \
.store = _pfx##_name##_store, \
}
+#define CONFIGFS_ATTR(_pfx, _name) CONFIGFS_ATTR_PERM( \
+ _pfx, _name, S_IRUGO | S_IWUSR \
+)
+
#define CONFIGFS_ATTR_RO(_pfx, _name) \
static struct configfs_attribute _pfx##attr_##_name = { \
.ca_name = __stringify(_name), \
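A usage sketch for CONFIGFS_ATTR_PERM(); the mygroup_* names are hypothetical. Like CONFIGFS_ATTR(), the macro expects _pfx##_name##_show/_store handlers:

static ssize_t mygroup_secret_show(struct config_item *item, char *page)
{
	return sysfs_emit(page, "%d\n", 42);
}

static ssize_t mygroup_secret_store(struct config_item *item,
				    const char *page, size_t count)
{
	return count;
}

/* Same as CONFIGFS_ATTR() but root-only: 0600 instead of S_IRUGO | S_IWUSR. */
CONFIGFS_ATTR_PERM(mygroup_, secret, 0600);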
diff --git a/include/linux/coredump.h b/include/linux/coredump.h
index 77e6e195d1d6..76e41805b92d 100644
--- a/include/linux/coredump.h
+++ b/include/linux/coredump.h
@@ -28,6 +28,7 @@ struct coredump_params {
int vma_count;
size_t vma_data_size;
struct core_vma_metadata *vma_meta;
+ struct pid *pid;
};
extern unsigned int core_file_note_size_limit;
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 3aa955102b34..e6089abc28e2 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -78,6 +78,8 @@ extern ssize_t cpu_show_gds(struct device *dev,
extern ssize_t cpu_show_reg_file_data_sampling(struct device *dev,
struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_ghostwrite(struct device *dev, struct device_attribute *attr, char *buf);
+extern ssize_t cpu_show_old_microcode(struct device *dev,
+ struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_indirect_target_selection(struct device *dev,
struct device_attribute *attr, char *buf);
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 7a5b391dcc01..95f3807c8c55 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -170,6 +170,12 @@ struct cpufreq_policy {
struct notifier_block nb_max;
};
+DEFINE_GUARD(cpufreq_policy_write, struct cpufreq_policy *,
+ down_write(&_T->rwsem), up_write(&_T->rwsem))
+
+DEFINE_GUARD(cpufreq_policy_read, struct cpufreq_policy *,
+ down_read(&_T->rwsem), up_read(&_T->rwsem))
+
/*
* Used for passing new cpufreq policy data to the cpufreq driver's ->verify()
* callback for sanitization. That callback is only expected to modify the min
@@ -235,9 +241,6 @@ void disable_cpufreq(void);
u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy);
-struct cpufreq_policy *cpufreq_cpu_acquire(unsigned int cpu);
-void cpufreq_cpu_release(struct cpufreq_policy *policy);
-int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu);
void refresh_frequency_limits(struct cpufreq_policy *policy);
void cpufreq_update_policy(unsigned int cpu);
void cpufreq_update_limits(unsigned int cpu);
@@ -395,7 +398,7 @@ struct cpufreq_driver {
unsigned int (*get)(unsigned int cpu);
/* Called to update policy limits on firmware notifications. */
- void (*update_limits)(unsigned int cpu);
+ void (*update_limits)(struct cpufreq_policy *policy);
/* optional */
int (*bios_limit)(int cpu, unsigned int *limit);
@@ -647,6 +650,15 @@ module_exit(__governor##_exit)
struct cpufreq_governor *cpufreq_default_governor(void);
struct cpufreq_governor *cpufreq_fallback_governor(void);
+#ifdef CONFIG_CPU_FREQ_GOV_SCHEDUTIL
+bool sugov_is_governor(struct cpufreq_policy *policy);
+#else
+static inline bool sugov_is_governor(struct cpufreq_policy *policy)
+{
+ return false;
+}
+#endif
+
static inline void cpufreq_policy_apply_limits(struct cpufreq_policy *policy)
{
if (policy->max < policy->cur)
@@ -1225,6 +1237,8 @@ void cpufreq_generic_init(struct cpufreq_policy *policy,
struct cpufreq_frequency_table *table,
unsigned int transition_latency);
+bool cpufreq_ready_for_eas(const struct cpumask *cpu_mask);
+
static inline void cpufreq_register_em_with_opp(struct cpufreq_policy *policy)
{
dev_pm_opp_of_register_em(get_cpu_device(policy->cpu),
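The new guards plug into the scope-based locking from <linux/cleanup.h>; a minimal sketch (the helper function itself is hypothetical):

static unsigned int example_read_min(struct cpufreq_policy *policy)
{
	guard(cpufreq_policy_read)(policy);	/* down_read(&policy->rwsem) */

	return policy->min;			/* up_read() runs on scope exit */
}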
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index 1987400000b4..df366ee15456 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -60,7 +60,6 @@ enum cpuhp_state {
/* PREPARE section invoked on a control CPU */
CPUHP_OFFLINE = 0,
CPUHP_CREATE_THREADS,
- CPUHP_PERF_PREPARE,
CPUHP_PERF_X86_PREPARE,
CPUHP_PERF_X86_AMD_UNCORE_PREP,
CPUHP_PERF_POWER,
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index f9a868384083..6a569c7534db 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -179,6 +179,19 @@ unsigned int cpumask_first_and(const struct cpumask *srcp1, const struct cpumask
}
/**
+ * cpumask_first_andnot - return the first cpu from *srcp1 & ~*srcp2
+ * @srcp1: the first input
+ * @srcp2: the second input
+ *
+ * Return: >= nr_cpu_ids if no such cpu found.
+ */
+static __always_inline
+unsigned int cpumask_first_andnot(const struct cpumask *srcp1, const struct cpumask *srcp2)
+{
+ return find_first_andnot_bit(cpumask_bits(srcp1), cpumask_bits(srcp2), small_cpumask_bits);
+}
+
+/**
* cpumask_first_and_and - return the first cpu from *srcp1 & *srcp2 & *srcp3
* @srcp1: the first input
* @srcp2: the second input
@@ -285,6 +298,25 @@ unsigned int cpumask_next_and(int n, const struct cpumask *src1p,
}
/**
+ * cpumask_next_andnot - get the next cpu in *src1p & ~*src2p
+ * @n: the cpu prior to the place to search (i.e. return will be > @n)
+ * @src1p: the first cpumask pointer
+ * @src2p: the second cpumask pointer
+ *
+ * Return: >= nr_cpu_ids if no further cpus set in *src1p & ~*src2p.
+ */
+static __always_inline
+unsigned int cpumask_next_andnot(int n, const struct cpumask *src1p,
+ const struct cpumask *src2p)
+{
+ /* -1 is a legal arg here. */
+ if (n != -1)
+ cpumask_check(n);
+ return find_next_andnot_bit(cpumask_bits(src1p), cpumask_bits(src2p),
+ small_cpumask_bits, n + 1);
+}
+
+/**
* cpumask_next_and_wrap - get the next cpu in *src1p & *src2p, starting from
* @n+1. If nothing found, wrap around and start from
* the beginning
@@ -413,14 +445,18 @@ unsigned int cpumask_next_wrap(int n, const struct cpumask *src)
* @cpu: the cpu to ignore.
*
* Often used to find any cpu but smp_processor_id() in a mask.
+ * If @cpu == -1, the function is equivalent to cpumask_any().
* Return: >= nr_cpu_ids if no cpus set.
*/
static __always_inline
-unsigned int cpumask_any_but(const struct cpumask *mask, unsigned int cpu)
+unsigned int cpumask_any_but(const struct cpumask *mask, int cpu)
{
unsigned int i;
- cpumask_check(cpu);
+ /* -1 is a legal arg here. */
+ if (cpu != -1)
+ cpumask_check(cpu);
+
for_each_cpu(i, mask)
if (i != cpu)
break;
@@ -433,16 +469,20 @@ unsigned int cpumask_any_but(const struct cpumask *mask, unsigned int cpu)
* @mask2: the second input cpumask
* @cpu: the cpu to ignore
*
+ * If @cpu == -1, the function is equivalent to cpumask_any_and().
* Returns >= nr_cpu_ids if no cpus set.
*/
static __always_inline
unsigned int cpumask_any_and_but(const struct cpumask *mask1,
const struct cpumask *mask2,
- unsigned int cpu)
+ int cpu)
{
unsigned int i;
- cpumask_check(cpu);
+ /* -1 is a legal arg here. */
+ if (cpu != -1)
+ cpumask_check(cpu);
+
i = cpumask_first_and(mask1, mask2);
if (i != cpu)
return i;
@@ -451,6 +491,33 @@ unsigned int cpumask_any_and_but(const struct cpumask *mask1,
}
/**
+ * cpumask_any_andnot_but - pick an arbitrary cpu from *mask1 & ~*mask2, but not this one.
+ * @mask1: the first input cpumask
+ * @mask2: the second input cpumask
+ * @cpu: the cpu to ignore
+ *
+ * If @cpu == -1, the function returns the first matching cpu.
+ * Returns >= nr_cpu_ids if no cpus set.
+ */
+static __always_inline
+unsigned int cpumask_any_andnot_but(const struct cpumask *mask1,
+ const struct cpumask *mask2,
+ int cpu)
+{
+ unsigned int i;
+
+ /* -1 is a legal arg here. */
+ if (cpu != -1)
+ cpumask_check(cpu);
+
+ i = cpumask_first_andnot(mask1, mask2);
+ if (i != cpu)
+ return i;
+
+ return cpumask_next_andnot(cpu, mask1, mask2);
+}
+
+/**
* cpumask_nth - get the Nth cpu in a cpumask
* @srcp: the cpumask pointer
* @cpu: the Nth cpu to find, starting from 0
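A sketch of the new andnot helpers; example_pick() is hypothetical:

/* Pick any online CPU not in @reserved, excluding @self (@self may be -1). */
static unsigned int example_pick(const struct cpumask *reserved, int self)
{
	return cpumask_any_andnot_but(cpu_online_mask, reserved, self);
}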
diff --git a/include/linux/crc16.h b/include/linux/crc16.h
index 9fa74529b317..b861d969b161 100644
--- a/include/linux/crc16.h
+++ b/include/linux/crc16.h
@@ -15,14 +15,7 @@
#include <linux/types.h>
-extern u16 const crc16_table[256];
-
-extern u16 crc16(u16 crc, const u8 *buffer, size_t len);
-
-static inline u16 crc16_byte(u16 crc, const u8 data)
-{
- return (crc >> 8) ^ crc16_table[(crc ^ data) & 0xff];
-}
+u16 crc16(u16 crc, const u8 *p, size_t len);
#endif /* __CRC16_H */
diff --git a/include/linux/crc32.h b/include/linux/crc32.h
index 69c2e8bb3782..569dc13f139f 100644
--- a/include/linux/crc32.h
+++ b/include/linux/crc32.h
@@ -1,7 +1,4 @@
-/*
- * crc32.h
- * See linux/lib/crc32.c for license and changes
- */
+/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef _LINUX_CRC32_H
#define _LINUX_CRC32_H
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index 1e3809d28abd..b50f1954d1bb 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -14,8 +14,7 @@
#include <linux/completion.h>
#include <linux/errno.h>
-#include <linux/list.h>
-#include <linux/refcount.h>
+#include <linux/refcount_types.h>
#include <linux/slab.h>
#include <linux/types.h>
@@ -51,6 +50,15 @@
#define CRYPTO_ALG_NEED_FALLBACK 0x00000100
/*
+ * Set if the algorithm data structure should be duplicated into
+ * kmalloc memory before registration. This is useful for hardware
+ * that can be disconnected at will. Do not use this if the data
+ * structure is embedded into a bigger one. Duplicate the overall
+ * data structure in the driver in that case.
+ */
+#define CRYPTO_ALG_DUP_FIRST 0x00000200
+
+/*
* Set if the algorithm has passed automated run-time testing. Note that
* if there is no run-time testing for a given algorithm it is considered
* to have passed.
@@ -125,8 +133,10 @@
*/
#define CRYPTO_ALG_FIPS_INTERNAL 0x00020000
-/* Set if the algorithm supports request chains and virtual addresses. */
-#define CRYPTO_ALG_REQ_CHAIN 0x00040000
+/* Set if the algorithm supports virtual addresses. */
+#define CRYPTO_ALG_REQ_VIRT 0x00040000
+
+/* The high bits 0xff000000 are reserved for type-specific flags. */
/*
* Transform masks and values (for crt_flags).
@@ -179,7 +189,6 @@ struct crypto_async_request {
struct crypto_tfm *tfm;
u32 flags;
- int err;
};
/**
@@ -278,6 +287,7 @@ struct cipher_alg {
* to the alignmask of the algorithm being used, in order to
* avoid the API having to realign them. Note: the alignmask is
* not supported for hash algorithms and is always 0 for them.
+ * @cra_reqsize: Size of the request context for this algorithm.
* @cra_priority: Priority of this transformation implementation. In case
* multiple transformations with same @cra_name are available to
* the Crypto API, the kernel will use the one with highest
@@ -302,17 +312,8 @@ struct cipher_alg {
* by @cra_type and @cra_flags above, the associated structure must be
* filled with callbacks. This field might be empty. This is the case
* for ahash, shash.
- * @cra_init: Initialize the cryptographic transformation object. This function
- * is used to initialize the cryptographic transformation object.
- * This function is called only once at the instantiation time, right
- * after the transformation context was allocated. In case the
- * cryptographic hardware has some special requirements which need to
- * be handled by software, this function shall check for the precise
- * requirement of the transformation and put any software fallbacks
- * in place.
- * @cra_exit: Deinitialize the cryptographic transformation object. This is a
- * counterpart to @cra_init, used to remove various changes set in
- * @cra_init.
+ * @cra_init: Deprecated, do not use.
+ * @cra_exit: Deprecated, do not use.
* @cra_u.cipher: Union member which contains a single-block symmetric cipher
* definition. See @struct @cipher_alg.
* @cra_module: Owner of this transformation implementation. Set to THIS_MODULE
@@ -333,6 +334,7 @@ struct crypto_alg {
unsigned int cra_blocksize;
unsigned int cra_ctxsize;
unsigned int cra_alignmask;
+ unsigned int cra_reqsize;
int cra_priority;
refcount_t cra_refcnt;
@@ -409,9 +411,11 @@ struct crypto_tfm {
u32 crt_flags;
int node;
-
+
+ struct crypto_tfm *fb;
+
void (*exit)(struct crypto_tfm *tfm);
-
+
struct crypto_alg *__crt_alg;
void *__crt_ctx[] CRYPTO_MINALIGN_ATTR;
@@ -452,6 +456,11 @@ static inline unsigned int crypto_tfm_alg_alignmask(struct crypto_tfm *tfm)
return tfm->__crt_alg->cra_alignmask;
}
+static inline unsigned int crypto_tfm_alg_reqsize(struct crypto_tfm *tfm)
+{
+ return tfm->__crt_alg->cra_reqsize;
+}
+
static inline u32 crypto_tfm_get_flags(struct crypto_tfm *tfm)
{
return tfm->crt_flags;
@@ -473,22 +482,44 @@ static inline unsigned int crypto_tfm_ctx_alignment(void)
return __alignof__(tfm->__crt_ctx);
}
-static inline void crypto_reqchain_init(struct crypto_async_request *req)
+static inline bool crypto_tfm_is_async(struct crypto_tfm *tfm)
{
- req->err = -EINPROGRESS;
- INIT_LIST_HEAD(&req->list);
+ return tfm->__crt_alg->cra_flags & CRYPTO_ALG_ASYNC;
}
-static inline void crypto_request_chain(struct crypto_async_request *req,
- struct crypto_async_request *head)
+static inline bool crypto_req_on_stack(struct crypto_async_request *req)
{
- req->err = -EINPROGRESS;
- list_add_tail(&req->list, &head->list);
+ return req->flags & CRYPTO_TFM_REQ_ON_STACK;
}
-static inline bool crypto_tfm_is_async(struct crypto_tfm *tfm)
+static inline void crypto_request_set_callback(
+ struct crypto_async_request *req, u32 flags,
+ crypto_completion_t compl, void *data)
{
- return tfm->__crt_alg->cra_flags & CRYPTO_ALG_ASYNC;
+ u32 keep = CRYPTO_TFM_REQ_ON_STACK;
+
+ req->complete = compl;
+ req->data = data;
+ req->flags &= keep;
+ req->flags |= flags & ~keep;
+}
+
+static inline void crypto_request_set_tfm(struct crypto_async_request *req,
+ struct crypto_tfm *tfm)
+{
+ req->tfm = tfm;
+ req->flags &= ~CRYPTO_TFM_REQ_ON_STACK;
+}
+
+struct crypto_async_request *crypto_request_clone(
+ struct crypto_async_request *req, size_t total, gfp_t gfp);
+
+static inline void crypto_stack_request_init(struct crypto_async_request *req,
+ struct crypto_tfm *tfm)
+{
+ req->flags = 0;
+ crypto_request_set_tfm(req, tfm);
+ req->flags |= CRYPTO_TFM_REQ_ON_STACK;
}
#endif /* _LINUX_CRYPTO_H */
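A sketch of the on-stack request helpers above; real users would normally go through type-specific wrappers (e.g. for ahash or acomp), so this generic form is illustrative only:

static void example_stack_init(struct crypto_async_request *req,
			       struct crypto_tfm *tfm)
{
	/* On-stack requests only make sense for synchronous tfms. */
	WARN_ON(crypto_tfm_is_async(tfm));

	crypto_stack_request_init(req, tfm);	/* sets CRYPTO_TFM_REQ_ON_STACK */
	crypto_request_set_callback(req, 0, NULL, NULL);
}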
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index e9f07e37dd6f..e29823c701ac 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -57,7 +57,8 @@ struct qstr {
};
#define QSTR_INIT(n,l) { { { .len = l } }, .name = n }
-#define QSTR(n) (struct qstr)QSTR_INIT(n, strlen(n))
+#define QSTR_LEN(n,l) (struct qstr)QSTR_INIT(n,l)
+#define QSTR(n) QSTR_LEN(n, strlen(n))
extern const struct qstr empty_name;
extern const struct qstr slash_name;
@@ -281,7 +282,6 @@ extern void d_exchange(struct dentry *, struct dentry *);
extern struct dentry *d_ancestor(struct dentry *, struct dentry *);
extern struct dentry *d_lookup(const struct dentry *, const struct qstr *);
-extern struct dentry *d_hash_and_lookup(struct dentry *, struct qstr *);
static inline unsigned d_count(const struct dentry *dentry)
{
diff --git a/include/linux/device.h b/include/linux/device.h
index 79e49fe494b7..4940db137fff 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -281,44 +281,6 @@ int __must_check device_create_bin_file(struct device *dev,
void device_remove_bin_file(struct device *dev,
const struct bin_attribute *attr);
-/* allows to add/remove a custom action to devres stack */
-int devm_remove_action_nowarn(struct device *dev, void (*action)(void *), void *data);
-
-/**
- * devm_remove_action() - removes previously added custom action
- * @dev: Device that owns the action
- * @action: Function implementing the action
- * @data: Pointer to data passed to @action implementation
- *
- * Removes instance of @action previously added by devm_add_action().
- * Both action and data should match one of the existing entries.
- */
-static inline
-void devm_remove_action(struct device *dev, void (*action)(void *), void *data)
-{
- WARN_ON(devm_remove_action_nowarn(dev, action, data));
-}
-
-void devm_release_action(struct device *dev, void (*action)(void *), void *data);
-
-int __devm_add_action(struct device *dev, void (*action)(void *), void *data, const char *name);
-#define devm_add_action(dev, action, data) \
- __devm_add_action(dev, action, data, #action)
-
-static inline int __devm_add_action_or_reset(struct device *dev, void (*action)(void *),
- void *data, const char *name)
-{
- int ret;
-
- ret = __devm_add_action(dev, action, data, name);
- if (ret)
- action(data);
-
- return ret;
-}
-#define devm_add_action_or_reset(dev, action, data) \
- __devm_add_action_or_reset(dev, action, data, #action)
-
/**
* devm_alloc_percpu - Resource-managed alloc_percpu
* @dev: Device to allocate per-cpu memory for
diff --git a/include/linux/device/devres.h b/include/linux/device/devres.h
index 9b49f9915850..ae696d10faff 100644
--- a/include/linux/device/devres.h
+++ b/include/linux/device/devres.h
@@ -8,6 +8,7 @@
#include <linux/overflow.h>
#include <linux/stdarg.h>
#include <linux/types.h>
+#include <asm/bug.h>
struct device;
struct device_node;
@@ -126,4 +127,44 @@ void __iomem *devm_of_iomap(struct device *dev, struct device_node *node, int in
#endif
+/* allows to add/remove a custom action to devres stack */
+int devm_remove_action_nowarn(struct device *dev, void (*action)(void *), void *data);
+
+/**
+ * devm_remove_action() - removes previously added custom action
+ * @dev: Device that owns the action
+ * @action: Function implementing the action
+ * @data: Pointer to data passed to @action implementation
+ *
+ * Removes instance of @action previously added by devm_add_action().
+ * Both action and data should match one of the existing entries.
+ */
+static inline
+void devm_remove_action(struct device *dev, void (*action)(void *), void *data)
+{
+ WARN_ON(devm_remove_action_nowarn(dev, action, data));
+}
+
+void devm_release_action(struct device *dev, void (*action)(void *), void *data);
+
+int __devm_add_action(struct device *dev, void (*action)(void *), void *data, const char *name);
+#define devm_add_action(dev, action, data) \
+ __devm_add_action(dev, action, data, #action)
+
+static inline int __devm_add_action_or_reset(struct device *dev, void (*action)(void *),
+ void *data, const char *name)
+{
+ int ret;
+
+ ret = __devm_add_action(dev, action, data, name);
+ if (ret)
+ action(data);
+
+ return ret;
+}
+#define devm_add_action_or_reset(dev, action, data) \
+ __devm_add_action_or_reset(dev, action, data, #action)
+
+bool devm_is_action_added(struct device *dev, void (*action)(void *), void *data);
+
#endif /* _DEVICE_DEVRES_H_ */
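A usage sketch for the action helpers now hosted here (foo_* names hypothetical):

static void foo_disable(void *data)
{
	foo_hw_disable(data);
}

static int foo_probe(struct device *dev, struct foo_hw *hw)
{
	foo_hw_enable(hw);

	/* If registration fails, the action runs immediately: no manual unwind. */
	return devm_add_action_or_reset(dev, foo_disable, hw);
}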
diff --git a/include/linux/device_cgroup.h b/include/linux/device_cgroup.h
index d02f32b7514e..0864773a57e8 100644
--- a/include/linux/device_cgroup.h
+++ b/include/linux/device_cgroup.h
@@ -18,15 +18,16 @@ static inline int devcgroup_inode_permission(struct inode *inode, int mask)
{
short type, access = 0;
+ if (likely(!S_ISBLK(inode->i_mode) && !S_ISCHR(inode->i_mode)))
+ return 0;
+
if (likely(!inode->i_rdev))
return 0;
if (S_ISBLK(inode->i_mode))
type = DEVCG_DEV_BLOCK;
- else if (S_ISCHR(inode->i_mode))
+ else /* S_ISCHR by the test above */
type = DEVCG_DEV_CHAR;
- else
- return 0;
if (mask & MAY_WRITE)
access |= DEVCG_ACC_WRITE;
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index 36216d28d8bd..544f8f8c3f44 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -35,15 +35,6 @@ struct dma_buf_attachment;
*/
struct dma_buf_ops {
/**
- * @cache_sgt_mapping:
- *
- * If true the framework will cache the first mapping made for each
- * attachment. This avoids creating mappings for attachments multiple
- * times.
- */
- bool cache_sgt_mapping;
-
- /**
* @attach:
*
* This is called from dma_buf_attach() to make sure that a given
@@ -493,8 +484,6 @@ struct dma_buf_attach_ops {
* @dmabuf: buffer for this attachment.
* @dev: device attached to the buffer.
* @node: list of dma_buf_attachment, protected by dma_resv lock of the dmabuf.
- * @sgt: cached mapping.
- * @dir: direction of cached mapping.
* @peer2peer: true if the importer can handle peer resources without pages.
* @priv: exporter specific attachment data.
* @importer_ops: importer operations for this attachment, if provided
@@ -514,8 +503,6 @@ struct dma_buf_attachment {
struct dma_buf *dmabuf;
struct device *dev;
struct list_head node;
- struct sg_table *sgt;
- enum dma_data_direction dir;
bool peer2peer;
const struct dma_buf_attach_ops *importer_ops;
void *importer_priv;
@@ -583,20 +570,6 @@ static inline bool dma_buf_is_dynamic(struct dma_buf *dmabuf)
return !!dmabuf->ops->pin;
}
-/**
- * dma_buf_attachment_is_dynamic - check if a DMA-buf attachment uses dynamic
- * mappings
- * @attach: the DMA-buf attachment to check
- *
- * Returns true if a DMA-buf importer wants to call the map/unmap functions with
- * the dma_resv lock held.
- */
-static inline bool
-dma_buf_attachment_is_dynamic(struct dma_buf_attachment *attach)
-{
- return !!attach->importer_ops;
-}
-
struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
struct device *dev);
struct dma_buf_attachment *
diff --git a/include/linux/dma-fence-unwrap.h b/include/linux/dma-fence-unwrap.h
index 66b1e56fbb81..62df222fe0f1 100644
--- a/include/linux/dma-fence-unwrap.h
+++ b/include/linux/dma-fence-unwrap.h
@@ -52,6 +52,8 @@ struct dma_fence *__dma_fence_unwrap_merge(unsigned int num_fences,
struct dma_fence **fences,
struct dma_fence_unwrap *cursors);
+int dma_fence_dedup_array(struct dma_fence **array, int num_fences);
+
/**
* dma_fence_unwrap_merge - unwrap and merge fences
*
diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h
index e7ad819962e3..b12776883d14 100644
--- a/include/linux/dma-fence.h
+++ b/include/linux/dma-fence.h
@@ -169,8 +169,8 @@ struct dma_fence_ops {
* implementation know that there is another driver waiting on the
* signal (ie. hw->sw case).
*
- * This function can be called from atomic context, but not
- * from irq context, so normal spinlocks can be used.
+ * This is called with IRQs disabled, so any spinlock taken in this
+ * callback must also disable IRQs everywhere else it is taken.
*
* A return value of false indicates the fence already passed,
* or some failure occurred that made it impossible to enable
@@ -239,27 +239,6 @@ struct dma_fence_ops {
void (*release)(struct dma_fence *fence);
/**
- * @fence_value_str:
- *
- * Callback to fill in free-form debug info specific to this fence, like
- * the sequence number.
- *
- * This callback is optional.
- */
- void (*fence_value_str)(struct dma_fence *fence, char *str, int size);
-
- /**
- * @timeline_value_str:
- *
- * Fills in the current value of the timeline as a string, like the
- * sequence number. Note that the specific fence passed to this function
- * should not matter, drivers should only use it to look up the
- * corresponding timeline structures.
- */
- void (*timeline_value_str)(struct dma_fence *fence,
- char *str, int size);
-
- /**
* @set_deadline:
*
* Callback to allow a fence waiter to inform the fence signaler of
diff --git a/include/linux/dma-map-ops.h b/include/linux/dma-map-ops.h
index e172522cd936..f48e5fb88bd5 100644
--- a/include/linux/dma-map-ops.h
+++ b/include/linux/dma-map-ops.h
@@ -434,58 +434,4 @@ static inline void debug_dma_dump_mappings(struct device *dev)
#endif /* CONFIG_DMA_API_DEBUG */
extern const struct dma_map_ops dma_dummy_ops;
-
-enum pci_p2pdma_map_type {
- /*
- * PCI_P2PDMA_MAP_UNKNOWN: Used internally for indicating the mapping
- * type hasn't been calculated yet. Functions that return this enum
- * never return this value.
- */
- PCI_P2PDMA_MAP_UNKNOWN = 0,
-
- /*
- * PCI_P2PDMA_MAP_NOT_SUPPORTED: Indicates the transaction will
- * traverse the host bridge and the host bridge is not in the
- * allowlist. DMA Mapping routines should return an error when
- * this is returned.
- */
- PCI_P2PDMA_MAP_NOT_SUPPORTED,
-
- /*
- * PCI_P2PDMA_BUS_ADDR: Indicates that two devices can talk to
- * each other directly through a PCI switch and the transaction will
- * not traverse the host bridge. Such a mapping should program
- * the DMA engine with PCI bus addresses.
- */
- PCI_P2PDMA_MAP_BUS_ADDR,
-
- /*
- * PCI_P2PDMA_MAP_THRU_HOST_BRIDGE: Indicates two devices can talk
- * to each other, but the transaction traverses a host bridge on the
- * allowlist. In this case, a normal mapping either with CPU physical
- * addresses (in the case of dma-direct) or IOVA addresses (in the
- * case of IOMMUs) should be used to program the DMA engine.
- */
- PCI_P2PDMA_MAP_THRU_HOST_BRIDGE,
-};
-
-struct pci_p2pdma_map_state {
- struct dev_pagemap *pgmap;
- int map;
- u64 bus_off;
-};
-
-#ifdef CONFIG_PCI_P2PDMA
-enum pci_p2pdma_map_type
-pci_p2pdma_map_segment(struct pci_p2pdma_map_state *state, struct device *dev,
- struct scatterlist *sg);
-#else /* CONFIG_PCI_P2PDMA */
-static inline enum pci_p2pdma_map_type
-pci_p2pdma_map_segment(struct pci_p2pdma_map_state *state, struct device *dev,
- struct scatterlist *sg)
-{
- return PCI_P2PDMA_MAP_NOT_SUPPORTED;
-}
-#endif /* CONFIG_PCI_P2PDMA */
-
#endif /* _LINUX_DMA_MAP_OPS_H */
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 85ab710ec0e7..55c03e5fe8cb 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -72,6 +72,22 @@
#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
+struct dma_iova_state {
+ dma_addr_t addr;
+ u64 __size;
+};
+
+/*
+ * Use the high bit to mark if we used swiotlb for one or more ranges.
+ */
+#define DMA_IOVA_USE_SWIOTLB (1ULL << 63)
+
+static inline size_t dma_iova_size(struct dma_iova_state *state)
+{
+	/* Casting is needed for 32-bit systems */
+ return (size_t)(state->__size & ~DMA_IOVA_USE_SWIOTLB);
+}
+
#ifdef CONFIG_DMA_API_DEBUG
void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
void debug_dma_map_single(struct device *dev, const void *addr,
@@ -277,6 +293,70 @@ static inline int dma_mmap_noncontiguous(struct device *dev,
}
#endif /* CONFIG_HAS_DMA */
+#ifdef CONFIG_IOMMU_DMA
+/**
+ * dma_use_iova - check if the IOVA API is used for this state
+ * @state: IOVA state
+ *
+ * Return %true if the DMA transfer uses the dma_iova_*() calls or %false if
+ * they can't be used.
+ */
+static inline bool dma_use_iova(struct dma_iova_state *state)
+{
+ return state->__size != 0;
+}
+
+bool dma_iova_try_alloc(struct device *dev, struct dma_iova_state *state,
+ phys_addr_t phys, size_t size);
+void dma_iova_free(struct device *dev, struct dma_iova_state *state);
+void dma_iova_destroy(struct device *dev, struct dma_iova_state *state,
+ size_t mapped_len, enum dma_data_direction dir,
+ unsigned long attrs);
+int dma_iova_sync(struct device *dev, struct dma_iova_state *state,
+ size_t offset, size_t size);
+int dma_iova_link(struct device *dev, struct dma_iova_state *state,
+ phys_addr_t phys, size_t offset, size_t size,
+ enum dma_data_direction dir, unsigned long attrs);
+void dma_iova_unlink(struct device *dev, struct dma_iova_state *state,
+ size_t offset, size_t size, enum dma_data_direction dir,
+ unsigned long attrs);
+#else /* CONFIG_IOMMU_DMA */
+static inline bool dma_use_iova(struct dma_iova_state *state)
+{
+ return false;
+}
+static inline bool dma_iova_try_alloc(struct device *dev,
+ struct dma_iova_state *state, phys_addr_t phys, size_t size)
+{
+ return false;
+}
+static inline void dma_iova_free(struct device *dev,
+ struct dma_iova_state *state)
+{
+}
+static inline void dma_iova_destroy(struct device *dev,
+ struct dma_iova_state *state, size_t mapped_len,
+ enum dma_data_direction dir, unsigned long attrs)
+{
+}
+static inline int dma_iova_sync(struct device *dev,
+ struct dma_iova_state *state, size_t offset, size_t size)
+{
+ return -EOPNOTSUPP;
+}
+static inline int dma_iova_link(struct device *dev,
+ struct dma_iova_state *state, phys_addr_t phys, size_t offset,
+ size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+ return -EOPNOTSUPP;
+}
+static inline void dma_iova_unlink(struct device *dev,
+ struct dma_iova_state *state, size_t offset, size_t size,
+ enum dma_data_direction dir, unsigned long attrs)
+{
+}
+#endif /* CONFIG_IOMMU_DMA */
+
#if defined(CONFIG_HAS_DMA) && defined(CONFIG_DMA_NEED_SYNC)
void __dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
enum dma_data_direction dir);
@@ -326,6 +406,7 @@ static inline bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
{
return dma_dev_need_sync(dev) ? __dma_need_sync(dev, dma_addr) : false;
}
+bool dma_need_unmap(struct device *dev);
#else /* !CONFIG_HAS_DMA || !CONFIG_DMA_NEED_SYNC */
static inline bool dma_dev_need_sync(const struct device *dev)
{
@@ -351,6 +432,10 @@ static inline bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
{
return false;
}
+static inline bool dma_need_unmap(struct device *dev)
+{
+ return false;
+}
#endif /* !CONFIG_HAS_DMA || !CONFIG_DMA_NEED_SYNC */
struct page *dma_alloc_pages(struct device *dev, size_t size,
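Taken together, the dma_iova_*() additions split IOMMU DMA mapping into allocate, link, and sync phases so multiple ranges can share one IOVA reservation and one IOTLB flush. A hedged sketch of the flow for a single contiguous range, using only the signatures above (the error handling and the fallback convention are illustrative, not taken from the patch):

#include <linux/dma-mapping.h>

static dma_addr_t my_map_range(struct device *dev,
			       struct dma_iova_state *state,
			       phys_addr_t phys, size_t size)
{
	/* Phase 1: reserve an IOVA range, if the device can use the API. */
	if (!dma_iova_try_alloc(dev, state, phys, size))
		return DMA_MAPPING_ERROR;	/* caller falls back to dma_map_*() */

	/* Phase 2: link the physical range into the reserved IOVA space. */
	if (dma_iova_link(dev, state, phys, 0, size, DMA_TO_DEVICE, 0)) {
		dma_iova_free(dev, state);
		return DMA_MAPPING_ERROR;
	}

	/* Phase 3: one IOTLB sync for everything linked so far. */
	if (dma_iova_sync(dev, state, 0, size)) {
		/* Unlinks the mapped length, then frees the IOVA range. */
		dma_iova_destroy(dev, state, size, DMA_TO_DEVICE, 0);
		return DMA_MAPPING_ERROR;
	}

	return state->addr;
}

Teardown mirrors this: a single dma_iova_destroy() with the mapped length both unlinks and releases the reservation.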
diff --git a/include/linux/dmapool.h b/include/linux/dmapool.h
index f632ecfb4238..06c4de602b2f 100644
--- a/include/linux/dmapool.h
+++ b/include/linux/dmapool.h
@@ -11,6 +11,7 @@
#ifndef LINUX_DMAPOOL_H
#define LINUX_DMAPOOL_H
+#include <linux/nodemask_types.h>
#include <linux/scatterlist.h>
#include <asm/io.h>
@@ -18,8 +19,8 @@ struct device;
#ifdef CONFIG_HAS_DMA
-struct dma_pool *dma_pool_create(const char *name, struct device *dev,
- size_t size, size_t align, size_t allocation);
+struct dma_pool *dma_pool_create_node(const char *name, struct device *dev,
+ size_t size, size_t align, size_t boundary, int node);
void dma_pool_destroy(struct dma_pool *pool);
@@ -35,9 +36,12 @@ struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
void dmam_pool_destroy(struct dma_pool *pool);
#else /* !CONFIG_HAS_DMA */
-static inline struct dma_pool *dma_pool_create(const char *name,
- struct device *dev, size_t size, size_t align, size_t allocation)
-{ return NULL; }
+static inline struct dma_pool *dma_pool_create_node(const char *name,
+ struct device *dev, size_t size, size_t align, size_t boundary,
+ int node)
+{
+ return NULL;
+}
static inline void dma_pool_destroy(struct dma_pool *pool) { }
static inline void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
dma_addr_t *handle) { return NULL; }
@@ -49,6 +53,13 @@ static inline struct dma_pool *dmam_pool_create(const char *name,
static inline void dmam_pool_destroy(struct dma_pool *pool) { }
#endif /* !CONFIG_HAS_DMA */
+static inline struct dma_pool *dma_pool_create(const char *name,
+ struct device *dev, size_t size, size_t align, size_t boundary)
+{
+ return dma_pool_create_node(name, dev, size, align, boundary,
+ NUMA_NO_NODE);
+}
+
static inline void *dma_pool_zalloc(struct dma_pool *pool, gfp_t mem_flags,
dma_addr_t *handle)
{
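Since dma_pool_create() is now a thin wrapper passing NUMA_NO_NODE, existing callers keep their behaviour while node-aware drivers can opt in. A minimal sketch, assuming a hypothetical driver with 64-byte descriptors:

#include <linux/device.h>
#include <linux/dmapool.h>

static struct dma_pool *my_create_desc_pool(struct device *dev)
{
	/* Allocate pool pages on the device's local node; passing
	 * NUMA_NO_NODE instead reproduces the old dma_pool_create(). */
	return dma_pool_create_node("my-descs", dev,
				    64 /* size */, 64 /* align */,
				    0 /* no boundary */, dev_to_node(dev));
}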
diff --git a/include/linux/energy_model.h b/include/linux/energy_model.h
index d8eabbf86a5b..7fa1eb3cc823 100644
--- a/include/linux/energy_model.h
+++ b/include/linux/energy_model.h
@@ -179,6 +179,7 @@ int em_dev_compute_costs(struct device *dev, struct em_perf_state *table,
int em_dev_update_chip_binning(struct device *dev);
int em_update_performance_limits(struct em_perf_domain *pd,
unsigned long freq_min_khz, unsigned long freq_max_khz);
+void em_adjust_cpu_capacity(unsigned int cpu);
void em_rebuild_sched_domains(void);
/**
@@ -403,6 +404,7 @@ int em_update_performance_limits(struct em_perf_domain *pd,
{
return -EINVAL;
}
+static inline void em_adjust_cpu_capacity(unsigned int cpu) {}
static inline void em_rebuild_sched_domains(void) {}
#endif
diff --git a/include/linux/entry-common.h b/include/linux/entry-common.h
index fc61d0205c97..f94f3fdf15fc 100644
--- a/include/linux/entry-common.h
+++ b/include/linux/entry-common.h
@@ -14,6 +14,7 @@
#include <linux/kmsan.h>
#include <asm/entry-common.h>
+#include <asm/syscall.h>
/*
* Define dummy _TIF work flags if not defined by the architecture or for
@@ -367,6 +368,15 @@ static __always_inline void exit_to_user_mode(void)
}
/**
+ * syscall_exit_work - Handle work before returning to user mode
+ * @regs: Pointer to current pt_regs
+ * @work: Current thread syscall work
+ *
+ * Do one-time syscall specific work.
+ */
+void syscall_exit_work(struct pt_regs *regs, unsigned long work);
+
+/**
* syscall_exit_to_user_mode_work - Handle work before returning to user mode
* @regs: Pointer to current's pt_regs
*
@@ -379,7 +389,30 @@ static __always_inline void exit_to_user_mode(void)
* make the final state transitions. Interrupts must stay disabled between
* return from this function and the invocation of exit_to_user_mode().
*/
-void syscall_exit_to_user_mode_work(struct pt_regs *regs);
+static __always_inline void syscall_exit_to_user_mode_work(struct pt_regs *regs)
+{
+ unsigned long work = READ_ONCE(current_thread_info()->syscall_work);
+ unsigned long nr = syscall_get_nr(current, regs);
+
+ CT_WARN_ON(ct_state() != CT_STATE_KERNEL);
+
+ if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
+ if (WARN(irqs_disabled(), "syscall %lu left IRQs disabled", nr))
+ local_irq_enable();
+ }
+
+ rseq_syscall(regs);
+
+ /*
+ * Do one-time syscall specific work. If these work items are
+ * enabled, we want to run them exactly once per syscall exit with
+ * interrupts enabled.
+ */
+ if (unlikely(work & SYSCALL_WORK_EXIT))
+ syscall_exit_work(regs, work);
+ local_irq_disable_exit_to_user();
+ exit_to_user_mode_prepare(regs);
+}
/**
* syscall_exit_to_user_mode - Handle work before returning to user mode
@@ -410,7 +443,13 @@ void syscall_exit_to_user_mode_work(struct pt_regs *regs);
* exit_to_user_mode(). This function is preferred unless there is a
* compelling architectural reason to use the separate functions.
*/
-void syscall_exit_to_user_mode(struct pt_regs *regs);
+static __always_inline void syscall_exit_to_user_mode(struct pt_regs *regs)
+{
+ instrumentation_begin();
+ syscall_exit_to_user_mode_work(regs);
+ instrumentation_end();
+ exit_to_user_mode();
+}
/**
* irqentry_enter_from_user_mode - Establish state before invoking the irq handler
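The inline body of syscall_exit_to_user_mode() above also spells out the contract for architectures using the split variant: work first, then the state transition, with IRQs staying disabled in between. A sketch of the equivalent open-coded exit path (hypothetical arch code mirroring the inline):

#include <linux/entry-common.h>

static __always_inline void arch_syscall_exit(struct pt_regs *regs)
{
	instrumentation_begin();
	/* Runs one-time SYSCALL_WORK_EXIT items with IRQs enabled, then
	 * disables IRQs and prepares the return to user mode. */
	syscall_exit_to_user_mode_work(regs);
	instrumentation_end();

	/* IRQs must remain disabled from the call above until here. */
	exit_to_user_mode();
}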
diff --git a/include/linux/file.h b/include/linux/file.h
index 302f11355b10..af1768d934a0 100644
--- a/include/linux/file.h
+++ b/include/linux/file.h
@@ -59,7 +59,7 @@ static inline struct fd CLONED_FD(struct file *f)
static inline void fdput(struct fd fd)
{
- if (fd.word & FDPUT_FPUT)
+ if (unlikely(fd.word & FDPUT_FPUT))
fput(fd_file(fd));
}
diff --git a/include/linux/find.h b/include/linux/find.h
index 68685714bc18..5a2c267ea7f9 100644
--- a/include/linux/find.h
+++ b/include/linux/find.h
@@ -29,6 +29,8 @@ unsigned long __find_nth_and_andnot_bit(const unsigned long *addr1, const unsign
unsigned long n);
extern unsigned long _find_first_and_bit(const unsigned long *addr1,
const unsigned long *addr2, unsigned long size);
+unsigned long _find_first_andnot_bit(const unsigned long *addr1, const unsigned long *addr2,
+ unsigned long size);
unsigned long _find_first_and_and_bit(const unsigned long *addr1, const unsigned long *addr2,
const unsigned long *addr3, unsigned long size);
extern unsigned long _find_first_zero_bit(const unsigned long *addr, unsigned long size);
@@ -348,6 +350,29 @@ unsigned long find_first_and_bit(const unsigned long *addr1,
#endif
/**
+ * find_first_andnot_bit - find the first bit set in 1st memory region and unset in 2nd
+ * @addr1: The first address to base the search on
+ * @addr2: The second address to base the search on
+ * @size: The bitmap size in bits
+ *
+ * Returns the bit number for the first set bit.
+ * If no bits are set, returns >= @size.
+ */
+static __always_inline
+unsigned long find_first_andnot_bit(const unsigned long *addr1,
+ const unsigned long *addr2,
+ unsigned long size)
+{
+ if (small_const_nbits(size)) {
+ unsigned long val = *addr1 & (~*addr2) & GENMASK(size - 1, 0);
+
+ return val ? __ffs(val) : size;
+ }
+
+ return _find_first_andnot_bit(addr1, addr2, size);
+}
+
+/**
* find_first_and_and_bit - find the first set bit in 3 memory regions
* @addr1: The first address to base the search on
* @addr2: The second address to base the search on
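A short usage sketch for the new helper (names hypothetical): find the first resource present in one mask but not yet claimed in another.

#include <linux/bitmap.h>
#include <linux/find.h>

static unsigned long pick_free_slot(void)
{
	DECLARE_BITMAP(avail, 8);
	DECLARE_BITMAP(used, 8);

	bitmap_fill(avail, 8);
	bitmap_zero(used, 8);
	__set_bit(0, used);

	/* Bits 0..7 available, bit 0 claimed: returns 1.
	 * A result >= 8 would mean no free slot. */
	return find_first_andnot_bit(avail, used, 8);
}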
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 016b0fe1536e..0db87f8e676c 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -408,6 +408,7 @@ struct kiocb {
void *private;
int ki_flags;
u16 ki_ioprio; /* See linux/ioprio.h */
+ u8 ki_write_stream;
union {
/*
* Only used for async buffered reads, where it denotes the
@@ -433,7 +434,6 @@ static inline bool is_sync_kiocb(struct kiocb *kiocb)
}
struct address_space_operations {
- int (*writepage)(struct page *page, struct writeback_control *wbc);
int (*read_folio)(struct file *, struct folio *);
/* Write back some dirty pages from this mapping. */
@@ -867,6 +867,11 @@ static inline void inode_lock(struct inode *inode)
down_write(&inode->i_rwsem);
}
+static inline __must_check int inode_lock_killable(struct inode *inode)
+{
+ return down_write_killable(&inode->i_rwsem);
+}
+
static inline void inode_unlock(struct inode *inode)
{
up_write(&inode->i_rwsem);
@@ -877,6 +882,11 @@ static inline void inode_lock_shared(struct inode *inode)
down_read(&inode->i_rwsem);
}
+static inline __must_check int inode_lock_shared_killable(struct inode *inode)
+{
+ return down_read_killable(&inode->i_rwsem);
+}
+
static inline void inode_unlock_shared(struct inode *inode)
{
up_read(&inode->i_rwsem);
@@ -1307,6 +1317,7 @@ struct sb_writers {
unsigned short frozen; /* Is sb frozen? */
int freeze_kcount; /* How many kernel freeze requests? */
int freeze_ucount; /* How many userspace freeze requests? */
+ const void *freeze_owner; /* Owner of the freeze */
struct percpu_rw_semaphore rw_sem[SB_FREEZE_LEVELS];
};
@@ -1780,7 +1791,7 @@ static inline void __sb_end_write(struct super_block *sb, int level)
static inline void __sb_start_write(struct super_block *sb, int level)
{
- percpu_down_read(sb->s_writers.rw_sem + level - 1);
+ percpu_down_read_freezable(sb->s_writers.rw_sem + level - 1, true);
}
static inline bool __sb_start_write_trylock(struct super_block *sb, int level)
@@ -2071,8 +2082,18 @@ typedef bool (*filldir_t)(struct dir_context *, const char *, int, loff_t, u64,
struct dir_context {
filldir_t actor;
loff_t pos;
+ /*
+	 * Filesystems MUST NOT MODIFY count, but may use it as a hint:
+ * 0 unknown
+ * > 0 space in buffer (assume at least one entry)
+ * INT_MAX unlimited
+ */
+ int count;
};
+/* If OR-ed with d_type, pending signals are not checked */
+#define FILLDIR_FLAG_NOINTR 0x1000
+
/*
* These flags let !MMU mmap() govern direct device mapping vs immediate
* copying more easily for MAP_PRIVATE, especially for ROM filesystems.
@@ -2186,7 +2207,7 @@ struct file_operations {
/* Supports asynchronous lock callbacks */
#define FOP_ASYNC_LOCK ((__force fop_flags_t)(1 << 6))
/* File system supports uncached read/write buffered IO */
-#define FOP_DONTCACHE ((__force fop_flags_t)(1 << 7))
+#define FOP_DONTCACHE 0 /* ((__force fop_flags_t)(1 << 7)) */
/* Wrap a directory iterator that needs exclusive inode access */
int wrap_directory_iterator(struct file *, struct dir_context *,
@@ -2269,6 +2290,7 @@ extern loff_t vfs_dedupe_file_range_one(struct file *src_file, loff_t src_pos,
* @FREEZE_HOLDER_KERNEL: kernel wants to freeze or thaw filesystem
* @FREEZE_HOLDER_USERSPACE: userspace wants to freeze or thaw filesystem
* @FREEZE_MAY_NEST: whether nesting freeze and thaw requests is allowed
+ * @FREEZE_EXCL: a freeze that can only be undone by the owner
*
* Indicate who the owner of the freeze or thaw request is and whether
* the freeze needs to be exclusive or can nest.
@@ -2282,6 +2304,7 @@ enum freeze_holder {
FREEZE_HOLDER_KERNEL = (1U << 0),
FREEZE_HOLDER_USERSPACE = (1U << 1),
FREEZE_MAY_NEST = (1U << 2),
+ FREEZE_EXCL = (1U << 3),
};
struct super_operations {
@@ -2295,9 +2318,9 @@ struct super_operations {
void (*evict_inode) (struct inode *);
void (*put_super) (struct super_block *);
int (*sync_fs)(struct super_block *sb, int wait);
- int (*freeze_super) (struct super_block *, enum freeze_holder who);
+ int (*freeze_super) (struct super_block *, enum freeze_holder who, const void *owner);
int (*freeze_fs) (struct super_block *);
- int (*thaw_super) (struct super_block *, enum freeze_holder who);
+ int (*thaw_super) (struct super_block *, enum freeze_holder who, const void *owner);
int (*unfreeze_fs) (struct super_block *);
int (*statfs) (struct dentry *, struct kstatfs *);
int (*remount_fs) (struct super_block *, int *, char *);
@@ -2344,6 +2367,7 @@ struct super_operations {
#define S_CASEFOLD (1 << 15) /* Casefolded file */
#define S_VERITY (1 << 16) /* Verity file (using fs/verity/) */
#define S_KERNEL_FILE (1 << 17) /* File is in use by the kernel (eg. fs/cachefiles) */
+#define S_ANON_INODE (1 << 19) /* Inode is an anonymous inode */
/*
* Note that nosuid etc flags are inode-specific: setting some file-system
@@ -2400,6 +2424,7 @@ static inline bool sb_rdonly(const struct super_block *sb) { return sb->s_flags
#define IS_WHITEOUT(inode) (S_ISCHR(inode->i_mode) && \
(inode)->i_rdev == WHITEOUT_DEV)
+#define IS_ANON_FILE(inode) ((inode)->i_flags & S_ANON_INODE)
static inline bool HAS_UNMAPPED_ID(struct mnt_idmap *idmap,
struct inode *inode)
@@ -2705,8 +2730,10 @@ extern int unregister_filesystem(struct file_system_type *);
extern int vfs_statfs(const struct path *, struct kstatfs *);
extern int user_statfs(const char __user *, struct kstatfs *);
extern int fd_statfs(int, struct kstatfs *);
-int freeze_super(struct super_block *super, enum freeze_holder who);
-int thaw_super(struct super_block *super, enum freeze_holder who);
+int freeze_super(struct super_block *super, enum freeze_holder who,
+ const void *freeze_owner);
+int thaw_super(struct super_block *super, enum freeze_holder who,
+ const void *freeze_owner);
extern __printf(2, 3)
int super_setup_bdi_name(struct super_block *sb, char *fmt, ...);
extern int super_setup_bdi(struct super_block *sb);
@@ -3475,7 +3502,8 @@ void generic_fillattr(struct mnt_idmap *, u32, struct inode *, struct kstat *);
void generic_fill_statx_attr(struct inode *inode, struct kstat *stat);
void generic_fill_statx_atomic_writes(struct kstat *stat,
unsigned int unit_min,
- unsigned int unit_max);
+ unsigned int unit_max,
+ unsigned int unit_max_opt);
extern int vfs_getattr_nosec(const struct path *, struct kstat *, u32, unsigned int);
extern int vfs_getattr(const struct path *, struct kstat *, u32, unsigned int);
void __inode_add_bytes(struct inode *inode, loff_t bytes);
@@ -3515,9 +3543,11 @@ extern void put_filesystem(struct file_system_type *fs);
extern struct file_system_type *get_fs_type(const char *name);
extern void drop_super(struct super_block *sb);
extern void drop_super_exclusive(struct super_block *sb);
-extern void iterate_supers(void (*)(struct super_block *, void *), void *);
+extern void iterate_supers(void (*f)(struct super_block *, void *), void *arg);
extern void iterate_supers_type(struct file_system_type *,
void (*)(struct super_block *, void *), void *);
+void filesystems_freeze(void);
+void filesystems_thaw(void);
extern int dcache_dir_open(struct inode *, struct file *);
extern int dcache_dir_close(struct inode *, struct file *);
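The const void *owner cookie threaded through freeze_super()/thaw_super() pairs with the new FREEZE_EXCL flag: an exclusive freeze can only be undone by a thaw presenting the same owner. A hedged sketch of an in-kernel user (function name hypothetical):

#include <linux/fs.h>

static int my_exclusive_freeze_work(struct super_block *sb, const void *owner)
{
	int ret;

	ret = freeze_super(sb, FREEZE_HOLDER_KERNEL | FREEZE_EXCL, owner);
	if (ret)
		return ret;

	/* ... operate on the frozen filesystem; no other holder,
	 * kernel or userspace, can thaw it underneath us ... */

	return thaw_super(sb, FREEZE_HOLDER_KERNEL | FREEZE_EXCL, owner);
}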
diff --git a/include/linux/fs_parser.h b/include/linux/fs_parser.h
index 53e566efd5fd..5a0e897cae80 100644
--- a/include/linux/fs_parser.h
+++ b/include/linux/fs_parser.h
@@ -87,14 +87,9 @@ extern int lookup_constant(const struct constant_table tbl[], const char *name,
extern const struct constant_table bool_names[];
#ifdef CONFIG_VALIDATE_FS_PARSER
-extern bool validate_constant_table(const struct constant_table *tbl, size_t tbl_size,
- int low, int high, int special);
extern bool fs_validate_description(const char *name,
const struct fs_parameter_spec *desc);
#else
-static inline bool validate_constant_table(const struct constant_table *tbl, size_t tbl_size,
- int low, int high, int special)
-{ return true; }
static inline bool fs_validate_description(const char *name,
const struct fs_parameter_spec *desc)
{ return true; }
@@ -125,8 +120,6 @@ static inline bool fs_validate_description(const char *name,
#define fsparam_u32(NAME, OPT) __fsparam(fs_param_is_u32, NAME, OPT, 0, NULL)
#define fsparam_u32oct(NAME, OPT) \
__fsparam(fs_param_is_u32, NAME, OPT, 0, (void *)8)
-#define fsparam_u32hex(NAME, OPT) \
- __fsparam(fs_param_is_u32_hex, NAME, OPT, 0, (void *)16)
#define fsparam_s32(NAME, OPT) __fsparam(fs_param_is_s32, NAME, OPT, 0, NULL)
#define fsparam_u64(NAME, OPT) __fsparam(fs_param_is_u64, NAME, OPT, 0, NULL)
#define fsparam_enum(NAME, OPT, array) __fsparam(fs_param_is_enum, NAME, OPT, 0, array)
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index fbabc3d848b3..95851a6fb942 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -569,8 +569,6 @@ static inline void arch_ftrace_set_direct_caller(struct ftrace_regs *fregs,
#ifdef CONFIG_STACK_TRACER
-extern int stack_tracer_enabled;
-
int stack_trace_sysctl(const struct ctl_table *table, int write, void *buffer,
size_t *lenp, loff_t *ppos);
@@ -1298,16 +1296,9 @@ static inline void unpause_graph_tracing(void) { }
#ifdef CONFIG_TRACING
enum ftrace_dump_mode;
-#define MAX_TRACER_SIZE 100
-extern char ftrace_dump_on_oops[];
extern int ftrace_dump_on_oops_enabled(void);
-extern int tracepoint_printk;
extern void disable_trace_on_warning(void);
-extern int __disable_trace_on_warning;
-
-int tracepoint_printk_sysctl(const struct ctl_table *table, int write,
- void *buffer, size_t *lenp, loff_t *ppos);
#else /* CONFIG_TRACING */
static inline void disable_trace_on_warning(void) { }
diff --git a/include/linux/futex.h b/include/linux/futex.h
index b70df27d7e85..005b040c4791 100644
--- a/include/linux/futex.h
+++ b/include/linux/futex.h
@@ -4,11 +4,11 @@
#include <linux/sched.h>
#include <linux/ktime.h>
+#include <linux/mm_types.h>
#include <uapi/linux/futex.h>
struct inode;
-struct mm_struct;
struct task_struct;
/*
@@ -34,6 +34,7 @@ union futex_key {
u64 i_seq;
unsigned long pgoff;
unsigned int offset;
+ /* unsigned int node; */
} shared;
struct {
union {
@@ -42,11 +43,13 @@ union futex_key {
};
unsigned long address;
unsigned int offset;
+ /* unsigned int node; */
} private;
struct {
u64 ptr;
unsigned long word;
unsigned int offset;
+ unsigned int node; /* NOT hashed! */
} both;
};
@@ -77,7 +80,25 @@ void futex_exec_release(struct task_struct *tsk);
long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
u32 __user *uaddr2, u32 val2, u32 val3);
-#else
+int futex_hash_prctl(unsigned long arg2, unsigned long arg3, unsigned long arg4);
+
+#ifdef CONFIG_FUTEX_PRIVATE_HASH
+int futex_hash_allocate_default(void);
+void futex_hash_free(struct mm_struct *mm);
+
+static inline void futex_mm_init(struct mm_struct *mm)
+{
+ RCU_INIT_POINTER(mm->futex_phash, NULL);
+ mutex_init(&mm->futex_hash_lock);
+}
+
+#else /* !CONFIG_FUTEX_PRIVATE_HASH */
+static inline int futex_hash_allocate_default(void) { return 0; }
+static inline void futex_hash_free(struct mm_struct *mm) { }
+static inline void futex_mm_init(struct mm_struct *mm) { }
+#endif /* CONFIG_FUTEX_PRIVATE_HASH */
+
+#else /* !CONFIG_FUTEX */
static inline void futex_init_task(struct task_struct *tsk) { }
static inline void futex_exit_recursive(struct task_struct *tsk) { }
static inline void futex_exit_release(struct task_struct *tsk) { }
@@ -88,6 +109,17 @@ static inline long do_futex(u32 __user *uaddr, int op, u32 val,
{
return -EINVAL;
}
+static inline int futex_hash_prctl(unsigned long arg2, unsigned long arg3, unsigned long arg4)
+{
+ return -EINVAL;
+}
+static inline int futex_hash_allocate_default(void)
+{
+ return 0;
+}
+static inline void futex_hash_free(struct mm_struct *mm) { }
+static inline void futex_mm_init(struct mm_struct *mm) { }
+
#endif
#endif
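The CONFIG_FUTEX_PRIVATE_HASH hooks are designed to be called from generic mm setup and teardown, with the stubs keeping !FUTEX and !FUTEX_PRIVATE_HASH builds unchanged. A sketch of the expected call sites, based only on the signatures above (placement hypothetical):

#include <linux/futex.h>
#include <linux/mm_types.h>

static void my_mm_setup(struct mm_struct *mm)
{
	/* Start with no private hash; one is created later, either
	 * lazily via futex_hash_allocate_default() or explicitly
	 * through the futex_hash_prctl() backend. */
	futex_mm_init(mm);
}

static void my_mm_teardown(struct mm_struct *mm)
{
	/* Release the private hash, if one was ever allocated. */
	futex_hash_free(mm);
}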
diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h
index 8adc8e9cb4a7..f0b1982da0cc 100644
--- a/include/linux/gpio/consumer.h
+++ b/include/linux/gpio/consumer.h
@@ -181,6 +181,8 @@ struct gpio_desc *devm_fwnode_gpiod_get_index(struct device *dev,
enum gpiod_flags flags,
const char *label);
+bool gpiod_is_equal(struct gpio_desc *desc, struct gpio_desc *other);
+
#else /* CONFIG_GPIOLIB */
#include <linux/bug.h>
@@ -548,6 +550,13 @@ struct gpio_desc *devm_fwnode_gpiod_get_index(struct device *dev,
return ERR_PTR(-ENOSYS);
}
+static inline bool
+gpiod_is_equal(struct gpio_desc *desc, struct gpio_desc *other)
+{
+ WARN_ON(desc || other);
+ return false;
+}
+
#endif /* CONFIG_GPIOLIB */
#if IS_ENABLED(CONFIG_GPIOLIB) && IS_ENABLED(CONFIG_HTE)
@@ -588,7 +597,7 @@ struct gpio_desc *devm_fwnode_gpiod_get(struct device *dev,
struct acpi_gpio_params {
unsigned int crs_entry_index;
- unsigned int line_index;
+ unsigned short line_index;
bool active_low;
};
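gpiod_is_equal() lets consumers compare descriptors without reaching into the opaque struct gpio_desc. A minimal sketch (hypothetical driver with two requested lines):

#include <linux/gpio/consumer.h>

static bool my_lines_alias(struct gpio_desc *reset, struct gpio_desc *enable)
{
	/* True if both descriptors name the same physical line, e.g.
	 * when firmware routes one GPIO to two logical functions. */
	return gpiod_is_equal(reset, enable);
}

Note the !GPIOLIB stub WARNs if either argument is non-NULL, catching callers that reached this path with real descriptors.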
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
index 4c0294a9104d..b53233051bee 100644
--- a/include/linux/gpio/driver.h
+++ b/include/linux/gpio/driver.h
@@ -287,8 +287,9 @@ struct gpio_irq_chip {
/**
* @first:
*
- * Required for static IRQ allocation. If set, irq_domain_add_simple()
- * will allocate and map all IRQs during initialization.
+ * Required for static IRQ allocation. If set,
+ * irq_domain_create_simple() will allocate and map all IRQs
+ * during initialization.
*/
unsigned int first;
diff --git a/include/linux/highmem-internal.h b/include/linux/highmem-internal.h
index dd100e849f5e..9a7683d79a4b 100644
--- a/include/linux/highmem-internal.h
+++ b/include/linux/highmem-internal.h
@@ -73,6 +73,14 @@ static inline void *kmap_local_page(struct page *page)
return __kmap_local_page_prot(page, kmap_prot);
}
+static inline void *kmap_local_page_try_from_panic(struct page *page)
+{
+ if (!PageHighMem(page))
+ return page_address(page);
+	/* If the page is in HighMem, it's not safe to kmap it. */
+ return NULL;
+}
+
static inline void *kmap_local_folio(struct folio *folio, size_t offset)
{
struct page *page = folio_page(folio, offset / PAGE_SIZE);
@@ -180,6 +188,11 @@ static inline void *kmap_local_page(struct page *page)
return page_address(page);
}
+static inline void *kmap_local_page_try_from_panic(struct page *page)
+{
+ return page_address(page);
+}
+
static inline void *kmap_local_folio(struct folio *folio, size_t offset)
{
return page_address(&folio->page) + offset;
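kmap_local_page_try_from_panic() serves callers, such as a panic screen, that must not enter the regular kmap machinery; with HIGHMEM it simply refuses pages that would need a fresh mapping. A sketch of the expected pattern (caller hypothetical):

#include <linux/highmem.h>

static void my_panic_draw(struct page *page)
{
	void *addr = kmap_local_page_try_from_panic(page);

	if (!addr)
		return;	/* highmem page: skip rather than kmap in panic */

	/* ... read pixels from addr ...
	 * Both branches return a page_address(), so there is no
	 * kunmap_local() to pair with. */
}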
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 5c6bea81a90e..c698f8415675 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -461,7 +461,7 @@ static inline void memcpy_from_folio(char *to, struct folio *folio,
const char *from = kmap_local_folio(folio, offset);
size_t chunk = len;
- if (folio_test_highmem(folio) &&
+ if (folio_test_partial_kmap(folio) &&
chunk > PAGE_SIZE - offset_in_page(offset))
chunk = PAGE_SIZE - offset_in_page(offset);
memcpy(to, from, chunk);
@@ -489,7 +489,7 @@ static inline void memcpy_to_folio(struct folio *folio, size_t offset,
char *to = kmap_local_folio(folio, offset);
size_t chunk = len;
- if (folio_test_highmem(folio) &&
+ if (folio_test_partial_kmap(folio) &&
chunk > PAGE_SIZE - offset_in_page(offset))
chunk = PAGE_SIZE - offset_in_page(offset);
memcpy(to, from, chunk);
@@ -522,7 +522,7 @@ static inline __must_check void *folio_zero_tail(struct folio *folio,
{
size_t len = folio_size(folio) - offset;
- if (folio_test_highmem(folio)) {
+ if (folio_test_partial_kmap(folio)) {
size_t max = PAGE_SIZE - offset_in_page(offset);
while (len > max) {
@@ -560,7 +560,7 @@ static inline void folio_fill_tail(struct folio *folio, size_t offset,
VM_BUG_ON(offset + len > folio_size(folio));
- if (folio_test_highmem(folio)) {
+ if (folio_test_partial_kmap(folio)) {
size_t max = PAGE_SIZE - offset_in_page(offset);
while (len > max) {
@@ -597,7 +597,7 @@ static inline size_t memcpy_from_file_folio(char *to, struct folio *folio,
size_t offset = offset_in_folio(folio, pos);
char *from = kmap_local_folio(folio, offset);
- if (folio_test_highmem(folio)) {
+ if (folio_test_partial_kmap(folio)) {
offset = offset_in_page(offset);
len = min_t(size_t, len, PAGE_SIZE - offset);
} else
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 8f3ac832ee7f..4861a7e304bb 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -275,6 +275,7 @@ long hugetlb_change_protection(struct vm_area_struct *vma,
bool is_hugetlb_entry_migration(pte_t pte);
bool is_hugetlb_entry_hwpoisoned(pte_t pte);
void hugetlb_unshare_all_pmds(struct vm_area_struct *vma);
+void fixup_hugetlb_reservations(struct vm_area_struct *vma);
#else /* !CONFIG_HUGETLB_PAGE */
@@ -468,6 +469,10 @@ static inline vm_fault_t hugetlb_fault(struct mm_struct *mm,
static inline void hugetlb_unshare_all_pmds(struct vm_area_struct *vma) { }
+static inline void fixup_hugetlb_reservations(struct vm_area_struct *vma)
+{
+}
+
#endif /* !CONFIG_HUGETLB_PAGE */
#ifndef pgd_write
diff --git a/include/linux/ima.h b/include/linux/ima.h
index 0bae61a15b60..8e29cb4e6a01 100644
--- a/include/linux/ima.h
+++ b/include/linux/ima.h
@@ -32,6 +32,9 @@ static inline void ima_appraise_parse_cmdline(void) {}
#ifdef CONFIG_IMA_KEXEC
extern void ima_add_kexec_buffer(struct kimage *image);
+extern void ima_kexec_post_load(struct kimage *image);
+#else
+static inline void ima_kexec_post_load(struct kimage *image) {}
#endif
#else
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index c782a74d2a30..51b6484c0493 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -140,7 +140,7 @@ extern irqreturn_t no_action(int cpl, void *dev_id);
/*
* If a (PCI) device interrupt is not connected we set dev->irq to
* IRQ_NOTCONNECTED. This causes request_irq() to fail with -ENOTCONN, so we
- * can distingiush that case from other error returns.
+ * can distinguish that case from other error returns.
*
* 0x80000000 is guaranteed to be outside the available range of interrupts
* and easy to distinguish from other possible incorrect values.
diff --git a/include/linux/io.h b/include/linux/io.h
index 6a6bc4d46d0a..0642c7ee41db 100644
--- a/include/linux/io.h
+++ b/include/linux/io.h
@@ -183,4 +183,25 @@ static inline void arch_io_free_memtype_wc(resource_size_t base,
int devm_arch_io_reserve_memtype_wc(struct device *dev, resource_size_t start,
resource_size_t size);
+#ifdef CONFIG_STRICT_DEVMEM
+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
+{
+ u64 from = ((u64)pfn) << PAGE_SHIFT;
+ u64 to = from + size;
+ u64 cursor = from;
+
+ while (cursor < to) {
+ if (!devmem_is_allowed(pfn))
+ return 0;
+ cursor += PAGE_SIZE;
+ pfn++;
+ }
+ return 1;
+}
+#else
+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
+{
+ return 1;
+}
+#endif
#endif /* _LINUX_IO_H */
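range_is_allowed() walks the range page by page, so a single forbidden pfn vetoes the whole request. A sketch of how a /dev/mem-style driver might consume it (caller hypothetical):

#include <linux/io.h>

static int my_devmem_access_check(unsigned long pfn, unsigned long size)
{
	/* With CONFIG_STRICT_DEVMEM every page must pass
	 * devmem_is_allowed(); otherwise the check is a no-op. */
	if (!range_is_allowed(pfn, size))
		return -EPERM;
	return 0;
}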
diff --git a/include/linux/io_uring/cmd.h b/include/linux/io_uring/cmd.h
index 0634a3de1782..53408124c1e5 100644
--- a/include/linux/io_uring/cmd.h
+++ b/include/linux/io_uring/cmd.h
@@ -140,6 +140,15 @@ static inline struct io_uring_cmd_data *io_uring_cmd_get_async_data(struct io_ur
return cmd_to_io_kiocb(cmd)->async_data;
}
+/*
+ * Return the uring_cmd's context reference as an opaque context handle for
+ * drivers to track per-context resources, such as registered kernel IO buffers.
+ */
+static inline void *io_uring_cmd_ctx_handle(struct io_uring_cmd *cmd)
+{
+ return cmd_to_io_kiocb(cmd)->ctx;
+}
+
int io_buffer_register_bvec(struct io_uring_cmd *cmd, struct request *rq,
void (*release)(void *), unsigned int index,
unsigned int issue_flags);
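io_uring_cmd_ctx_handle() intentionally exposes the ring context only as an opaque cookie. A sketch of per-context resource tracking built on it (the xarray table and names are hypothetical):

#include <linux/io_uring/cmd.h>
#include <linux/xarray.h>

static DEFINE_XARRAY(my_ctx_buffers);

static int my_track_buffer(struct io_uring_cmd *cmd, void *buf)
{
	void *handle = io_uring_cmd_ctx_handle(cmd);

	/* Key driver state by the handle; never dereference it. */
	return xa_err(xa_store(&my_ctx_buffers, (unsigned long)handle,
			       buf, GFP_KERNEL));
}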
diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
index b44d201520d8..2922635986f5 100644
--- a/include/linux/io_uring_types.h
+++ b/include/linux/io_uring_types.h
@@ -40,8 +40,6 @@ enum io_uring_cmd_flags {
IO_URING_F_TASK_DEAD = (1 << 13),
};
-struct io_zcrx_ifq;
-
struct io_wq_work_node {
struct io_wq_work_node *next;
};
@@ -343,7 +341,6 @@ struct io_ring_ctx {
unsigned cached_cq_tail;
unsigned cq_entries;
struct io_ev_fd __rcu *io_ev_fd;
- unsigned cq_extra;
void *cq_wait_arg;
size_t cq_wait_size;
@@ -394,7 +391,8 @@ struct io_ring_ctx {
struct wait_queue_head poll_wq;
struct io_restriction restrictions;
- struct io_zcrx_ifq *ifq;
+ /* Stores zcrx object pointers of type struct io_zcrx_ifq */
+ struct xarray zcrx_ctxs;
u32 pers_next;
struct xarray personalities;
@@ -418,6 +416,7 @@ struct io_ring_ctx {
struct callback_head poll_wq_task_work;
struct list_head defer_list;
+ unsigned nr_drained;
struct io_alloc_cache msg_cache;
spinlock_t msg_lock;
@@ -436,6 +435,7 @@ struct io_ring_ctx {
/* protected by ->completion_lock */
unsigned evfd_last_cq_tail;
+ unsigned nr_req_allocated;
/*
* Protection for resize vs mmap races - both the mmap and resize
@@ -448,8 +448,6 @@ struct io_ring_ctx {
struct io_mapped_region ring_region;
/* used for optimised request parameter and wait argument passing */
struct io_mapped_region param_region;
- /* just one zcrx per ring for now, will move to io_zcrx_ifq eventually */
- struct io_mapped_region zcrx_region;
};
/*
@@ -653,8 +651,7 @@ struct io_kiocb {
u8 iopoll_completed;
/*
* Can be either a fixed buffer index, or used with provided buffers.
- * For the latter, before issue it points to the buffer group ID,
- * and after selection it points to the buffer ID itself.
+ * For the latter, it points to the selected buffer ID.
*/
u16 buf_index;
@@ -713,7 +710,7 @@ struct io_kiocb {
const struct cred *creds;
struct io_wq_work work;
- struct {
+ struct io_big_cqe {
u64 extra1;
u64 extra2;
} big_cqe;
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 3a8d35d41fda..15cdadace993 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -872,6 +872,10 @@ extern struct iommu_domain *iommu_get_domain_for_dev(struct device *dev);
extern struct iommu_domain *iommu_get_dma_domain(struct device *dev);
extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot, gfp_t gfp);
+int iommu_map_nosync(struct iommu_domain *domain, unsigned long iova,
+ phys_addr_t paddr, size_t size, int prot, gfp_t gfp);
+int iommu_sync_map(struct iommu_domain *domain, unsigned long iova,
+ size_t size);
extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
size_t size);
extern size_t iommu_unmap_fast(struct iommu_domain *domain,
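Splitting iommu_map() into iommu_map_nosync() plus iommu_sync_map() lets callers batch several mappings under a single IOTLB flush. A hedged sketch (the two-range helper is hypothetical):

#include <linux/iommu.h>

static int my_map_pair(struct iommu_domain *dom, unsigned long iova,
		       phys_addr_t a, phys_addr_t b, size_t sz, gfp_t gfp)
{
	int ret;

	ret = iommu_map_nosync(dom, iova, a, sz, IOMMU_READ, gfp);
	if (ret)
		return ret;
	ret = iommu_map_nosync(dom, iova + sz, b, sz, IOMMU_READ, gfp);
	if (ret) {
		iommu_unmap(dom, iova, sz);
		return ret;
	}

	/* One flush covers both ranges instead of one per iommu_map(). */
	return iommu_sync_map(dom, iova, 2 * sz);
}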
diff --git a/include/linux/irq.h b/include/linux/irq.h
index dd5df1e2d032..1d6b606a81ef 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -597,7 +597,6 @@ enum {
struct irqaction;
extern int setup_percpu_irq(unsigned int irq, struct irqaction *new);
-extern void remove_percpu_irq(unsigned int irq, struct irqaction *act);
#ifdef CONFIG_DEPRECATED_IRQ_CPU_ONOFFLINE
extern void irq_cpu_online(void);
@@ -700,7 +699,7 @@ extern void note_interrupt(struct irq_desc *desc, irqreturn_t action_ret);
extern int noirqdebug_setup(char *str);
/* Checks whether the interrupt can be requested by request_irq(): */
-extern int can_request_irq(unsigned int irq, unsigned long irqflags);
+extern bool can_request_irq(unsigned int irq, unsigned long irqflags);
/* Dummy irq-chip implementations: */
extern struct irq_chip no_irq_chip;
@@ -1222,31 +1221,6 @@ static inline struct irq_chip_type *irq_data_get_chip_type(struct irq_data *d)
#define IRQ_MSK(n) (u32)((n) < 32 ? ((1 << (n)) - 1) : UINT_MAX)
-#ifdef CONFIG_SMP
-static inline void irq_gc_lock(struct irq_chip_generic *gc)
-{
- raw_spin_lock(&gc->lock);
-}
-
-static inline void irq_gc_unlock(struct irq_chip_generic *gc)
-{
- raw_spin_unlock(&gc->lock);
-}
-#else
-static inline void irq_gc_lock(struct irq_chip_generic *gc) { }
-static inline void irq_gc_unlock(struct irq_chip_generic *gc) { }
-#endif
-
-/*
- * The irqsave variants are for usage in non interrupt code. Do not use
- * them in irq_chip callbacks. Use irq_gc_lock() instead.
- */
-#define irq_gc_lock_irqsave(gc, flags) \
- raw_spin_lock_irqsave(&(gc)->lock, flags)
-
-#define irq_gc_unlock_irqrestore(gc, flags) \
- raw_spin_unlock_irqrestore(&(gc)->lock, flags)
-
static inline void irq_reg_writel(struct irq_chip_generic *gc,
u32 val, int reg_offset)
{
diff --git a/include/linux/irqchip/irq-msi-lib.h b/include/linux/irqchip/irq-msi-lib.h
new file mode 100644
index 000000000000..dd8d1d138544
--- /dev/null
+++ b/include/linux/irqchip/irq-msi-lib.h
@@ -0,0 +1,27 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (C) 2022 Linutronix GmbH
+// Copyright (C) 2022 Intel
+
+#ifndef _IRQCHIP_IRQ_MSI_LIB_H
+#define _IRQCHIP_IRQ_MSI_LIB_H
+
+#include <linux/bits.h>
+#include <linux/irqdomain.h>
+#include <linux/msi.h>
+
+#ifdef CONFIG_PCI_MSI
+#define MATCH_PCI_MSI BIT(DOMAIN_BUS_PCI_MSI)
+#else
+#define MATCH_PCI_MSI (0)
+#endif
+
+#define MATCH_PLATFORM_MSI BIT(DOMAIN_BUS_PLATFORM_MSI)
+
+int msi_lib_irq_domain_select(struct irq_domain *d, struct irq_fwspec *fwspec,
+ enum irq_domain_bus_token bus_token);
+
+bool msi_lib_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
+ struct irq_domain *real_parent,
+ struct msi_domain_info *info);
+
+#endif /* _IRQCHIP_IRQ_MSI_LIB_H */
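The new header exports the two glue functions an MSI parent irqchip needs; the field names in the sketch follow the existing struct msi_parent_ops and irq_domain_ops, but the driver itself is hypothetical:

#include <linux/irqchip/irq-msi-lib.h>
#include <linux/msi.h>

static const struct msi_parent_ops my_msi_parent_ops = {
	.supported_flags	= MSI_GENERIC_FLAGS_MASK,
	.bus_select_token	= DOMAIN_BUS_NEXUS,
	.bus_select_mask	= MATCH_PCI_MSI | MATCH_PLATFORM_MSI,
	.init_dev_msi_info	= msi_lib_init_dev_msi_info,
};

static const struct irq_domain_ops my_domain_ops = {
	/* Let the library match child MSI domains to this parent. */
	.select		= msi_lib_irq_domain_select,
	/* .alloc, .free, .translate as usual for the irqchip */
};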
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index bb7111105296..7387d183029b 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -1,30 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * irq_domain - IRQ translation domains
+ * irq_domain - IRQ Translation Domains
*
- * Translation infrastructure between hw and linux irq numbers. This is
- * helpful for interrupt controllers to implement mapping between hardware
- * irq numbers and the Linux irq number space.
- *
- * irq_domains also have hooks for translating device tree or other
- * firmware interrupt representations into a hardware irq number that
- * can be mapped back to a Linux irq number without any extra platform
- * support code.
- *
- * Interrupt controller "domain" data structure. This could be defined as a
- * irq domain controller. That is, it handles the mapping between hardware
- * and virtual interrupt numbers for a given interrupt domain. The domain
- * structure is generally created by the PIC code for a given PIC instance
- * (though a domain can cover more than one PIC if they have a flat number
- * model). It's the domain callbacks that are responsible for setting the
- * irq_chip on a given irq_desc after it's been mapped.
- *
- * The host code and data structures use a fwnode_handle pointer to
- * identify the domain. In some cases, and in order to preserve source
- * code compatibility, this fwnode pointer is "upgraded" to a DT
- * device_node. For those firmware infrastructures that do not provide
- * a unique identifier for an interrupt controller, the irq_domain
- * code offers a fwnode allocator.
+ * See Documentation/core-api/irq/irq-domain.rst for the details.
*/
#ifndef _LINUX_IRQDOMAIN_H
@@ -61,9 +39,9 @@ struct msi_parent_ops;
* pass a device-specific description of an interrupt.
*/
struct irq_fwspec {
- struct fwnode_handle *fwnode;
- int param_count;
- u32 param[IRQ_DOMAIN_IRQ_SPEC_PARAMS];
+ struct fwnode_handle *fwnode;
+ int param_count;
+ u32 param[IRQ_DOMAIN_IRQ_SPEC_PARAMS];
};
/* Conversion function from of_phandle_args fields to fwspec */
@@ -72,26 +50,26 @@ void of_phandle_args_to_fwspec(struct device_node *np, const u32 *args,
/**
* struct irq_domain_ops - Methods for irq_domain objects
- * @match: Match an interrupt controller device node to a domain, returns
- * 1 on a match
- * @select: Match an interrupt controller fw specification. It is more generic
- * than @match as it receives a complete struct irq_fwspec. Therefore,
- * @select is preferred if provided. Returns 1 on a match.
- * @map: Create or update a mapping between a virtual irq number and a hw
- * irq number. This is called only once for a given mapping.
- * @unmap: Dispose of such a mapping
- * @xlate: Given a device tree node and interrupt specifier, decode
- * the hardware irq number and linux irq type value.
- * @alloc: Allocate @nr_irqs interrupts starting from @virq.
- * @free: Free @nr_irqs interrupts starting from @virq.
- * @activate: Activate one interrupt in HW (@irqd). If @reserve is set, only
- * reserve the vector. If unset, assign the vector (called from
- * request_irq()).
- * @deactivate: Disarm one interrupt (@irqd).
- * @translate: Given @fwspec, decode the hardware irq number (@out_hwirq) and
- * linux irq type value (@out_type). This is a generalised @xlate
- * (over struct irq_fwspec) and is preferred if provided.
- * @debug_show: For domains to show specific data for an interrupt in debugfs.
+ * @match: Match an interrupt controller device node to a domain, returns
+ * 1 on a match
+ * @select: Match an interrupt controller fw specification. It is more generic
+ * than @match as it receives a complete struct irq_fwspec. Therefore,
+ * @select is preferred if provided. Returns 1 on a match.
+ * @map: Create or update a mapping between a virtual irq number and a hw
+ * irq number. This is called only once for a given mapping.
+ * @unmap: Dispose of such a mapping
+ * @xlate: Given a device tree node and interrupt specifier, decode
+ * the hardware irq number and linux irq type value.
+ * @alloc: Allocate @nr_irqs interrupts starting from @virq.
+ * @free: Free @nr_irqs interrupts starting from @virq.
+ * @activate: Activate one interrupt in HW (@irqd). If @reserve is set, only
+ * reserve the vector. If unset, assign the vector (called from
+ * request_irq()).
+ * @deactivate: Disarm one interrupt (@irqd).
+ * @translate: Given @fwspec, decode the hardware irq number (@out_hwirq) and
+ * linux irq type value (@out_type). This is a generalised @xlate
+ * (over struct irq_fwspec) and is preferred if provided.
+ * @debug_show: For domains to show specific data for an interrupt in debugfs.
*
* Functions below are provided by the driver and called whenever a new mapping
* is created or an old mapping is disposed. The driver can then proceed to
@@ -99,29 +77,29 @@ void of_phandle_args_to_fwspec(struct device_node *np, const u32 *args,
* to setup the irq_desc when returning from map().
*/
struct irq_domain_ops {
- int (*match)(struct irq_domain *d, struct device_node *node,
- enum irq_domain_bus_token bus_token);
- int (*select)(struct irq_domain *d, struct irq_fwspec *fwspec,
- enum irq_domain_bus_token bus_token);
- int (*map)(struct irq_domain *d, unsigned int virq, irq_hw_number_t hw);
- void (*unmap)(struct irq_domain *d, unsigned int virq);
- int (*xlate)(struct irq_domain *d, struct device_node *node,
- const u32 *intspec, unsigned int intsize,
- unsigned long *out_hwirq, unsigned int *out_type);
+ int (*match)(struct irq_domain *d, struct device_node *node,
+ enum irq_domain_bus_token bus_token);
+ int (*select)(struct irq_domain *d, struct irq_fwspec *fwspec,
+ enum irq_domain_bus_token bus_token);
+ int (*map)(struct irq_domain *d, unsigned int virq, irq_hw_number_t hw);
+ void (*unmap)(struct irq_domain *d, unsigned int virq);
+ int (*xlate)(struct irq_domain *d, struct device_node *node,
+ const u32 *intspec, unsigned int intsize,
+ unsigned long *out_hwirq, unsigned int *out_type);
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
/* extended V2 interfaces to support hierarchy irq_domains */
- int (*alloc)(struct irq_domain *d, unsigned int virq,
- unsigned int nr_irqs, void *arg);
- void (*free)(struct irq_domain *d, unsigned int virq,
- unsigned int nr_irqs);
- int (*activate)(struct irq_domain *d, struct irq_data *irqd, bool reserve);
- void (*deactivate)(struct irq_domain *d, struct irq_data *irq_data);
- int (*translate)(struct irq_domain *d, struct irq_fwspec *fwspec,
- unsigned long *out_hwirq, unsigned int *out_type);
+ int (*alloc)(struct irq_domain *d, unsigned int virq,
+ unsigned int nr_irqs, void *arg);
+ void (*free)(struct irq_domain *d, unsigned int virq,
+ unsigned int nr_irqs);
+ int (*activate)(struct irq_domain *d, struct irq_data *irqd, bool reserve);
+ void (*deactivate)(struct irq_domain *d, struct irq_data *irq_data);
+ int (*translate)(struct irq_domain *d, struct irq_fwspec *fwspec,
+ unsigned long *out_hwirq, unsigned int *out_type);
#endif
#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
- void (*debug_show)(struct seq_file *m, struct irq_domain *d,
- struct irq_data *irqd, int ind);
+ void (*debug_show)(struct seq_file *m, struct irq_domain *d,
+ struct irq_data *irqd, int ind);
#endif
};
@@ -231,6 +209,9 @@ enum {
/* Irq domain must destroy generic chips when removed */
IRQ_DOMAIN_FLAG_DESTROY_GC = (1 << 10),
+	/* Address and data pair is mutable when irq_set_affinity() is called */
+ IRQ_DOMAIN_FLAG_MSI_IMMUTABLE = (1 << 11),
+
/*
* Flags starting from IRQ_DOMAIN_FLAG_NONCORE are reserved
* for implementation specific purposes and ignored by the
@@ -244,8 +225,7 @@ static inline struct device_node *irq_domain_get_of_node(struct irq_domain *d)
return to_of_node(d->fwnode);
}
-static inline void irq_domain_set_pm_device(struct irq_domain *d,
- struct device *dev)
+static inline void irq_domain_set_pm_device(struct irq_domain *d, struct device *dev)
{
if (d)
d->pm_dev = dev;
@@ -261,14 +241,12 @@ enum {
IRQCHIP_FWNODE_NAMED_ID,
};
-static inline
-struct fwnode_handle *irq_domain_alloc_named_fwnode(const char *name)
+static inline struct fwnode_handle *irq_domain_alloc_named_fwnode(const char *name)
{
return __irq_domain_alloc_fwnode(IRQCHIP_FWNODE_NAMED, 0, name, NULL);
}
-static inline
-struct fwnode_handle *irq_domain_alloc_named_id_fwnode(const char *name, int id)
+static inline struct fwnode_handle *irq_domain_alloc_named_id_fwnode(const char *name, int id)
{
return __irq_domain_alloc_fwnode(IRQCHIP_FWNODE_NAMED_ID, id, name,
NULL);
@@ -281,6 +259,8 @@ static inline struct fwnode_handle *irq_domain_alloc_fwnode(phys_addr_t *pa)
void irq_domain_free_fwnode(struct fwnode_handle *fwnode);
+DEFINE_FREE(irq_domain_free_fwnode, struct fwnode_handle *, if (_T) irq_domain_free_fwnode(_T))
+
struct irq_domain_chip_generic_info;
/**
@@ -333,36 +313,19 @@ struct irq_domain *irq_domain_instantiate(const struct irq_domain_info *info);
struct irq_domain *devm_irq_domain_instantiate(struct device *dev,
const struct irq_domain_info *info);
-struct irq_domain *irq_domain_create_simple(struct fwnode_handle *fwnode,
- unsigned int size,
+struct irq_domain *irq_domain_create_simple(struct fwnode_handle *fwnode, unsigned int size,
unsigned int first_irq,
- const struct irq_domain_ops *ops,
- void *host_data);
-struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
- unsigned int size,
- unsigned int first_irq,
- irq_hw_number_t first_hwirq,
- const struct irq_domain_ops *ops,
- void *host_data);
-struct irq_domain *irq_domain_create_legacy(struct fwnode_handle *fwnode,
- unsigned int size,
- unsigned int first_irq,
- irq_hw_number_t first_hwirq,
- const struct irq_domain_ops *ops,
- void *host_data);
+ const struct irq_domain_ops *ops, void *host_data);
+struct irq_domain *irq_domain_create_legacy(struct fwnode_handle *fwnode, unsigned int size,
+ unsigned int first_irq, irq_hw_number_t first_hwirq,
+ const struct irq_domain_ops *ops, void *host_data);
struct irq_domain *irq_find_matching_fwspec(struct irq_fwspec *fwspec,
enum irq_domain_bus_token bus_token);
void irq_set_default_domain(struct irq_domain *domain);
struct irq_domain *irq_get_default_domain(void);
-int irq_domain_alloc_descs(int virq, unsigned int nr_irqs,
- irq_hw_number_t hwirq, int node,
+int irq_domain_alloc_descs(int virq, unsigned int nr_irqs, irq_hw_number_t hwirq, int node,
const struct irq_affinity_desc *affinity);
-static inline struct fwnode_handle *of_node_to_fwnode(struct device_node *node)
-{
- return node ? &node->fwnode : NULL;
-}
-
extern const struct fwnode_operations irqchip_fwnode_ops;
static inline bool is_fwnode_irqchip(const struct fwnode_handle *fwnode)
@@ -370,12 +333,10 @@ static inline bool is_fwnode_irqchip(const struct fwnode_handle *fwnode)
return fwnode && fwnode->ops == &irqchip_fwnode_ops;
}
-void irq_domain_update_bus_token(struct irq_domain *domain,
- enum irq_domain_bus_token bus_token);
+void irq_domain_update_bus_token(struct irq_domain *domain, enum irq_domain_bus_token bus_token);
-static inline
-struct irq_domain *irq_find_matching_fwnode(struct fwnode_handle *fwnode,
- enum irq_domain_bus_token bus_token)
+static inline struct irq_domain *irq_find_matching_fwnode(struct fwnode_handle *fwnode,
+ enum irq_domain_bus_token bus_token)
{
struct irq_fwspec fwspec = {
.fwnode = fwnode,
@@ -387,7 +348,7 @@ struct irq_domain *irq_find_matching_fwnode(struct fwnode_handle *fwnode,
static inline struct irq_domain *irq_find_matching_host(struct device_node *node,
enum irq_domain_bus_token bus_token)
{
- return irq_find_matching_fwnode(of_node_to_fwnode(node), bus_token);
+ return irq_find_matching_fwnode(of_fwnode_handle(node), bus_token);
}
static inline struct irq_domain *irq_find_host(struct device_node *node)
@@ -401,128 +362,92 @@ static inline struct irq_domain *irq_find_host(struct device_node *node)
return d;
}
-static inline struct irq_domain *irq_domain_add_simple(struct device_node *of_node,
- unsigned int size,
- unsigned int first_irq,
- const struct irq_domain_ops *ops,
- void *host_data)
-{
- return irq_domain_create_simple(of_node_to_fwnode(of_node), size, first_irq, ops, host_data);
-}
-
-/**
- * irq_domain_add_linear() - Allocate and register a linear revmap irq_domain.
- * @of_node: pointer to interrupt controller's device tree node.
- * @size: Number of interrupts in the domain.
- * @ops: map/unmap domain callbacks
- * @host_data: Controller private data pointer
- */
-static inline struct irq_domain *irq_domain_add_linear(struct device_node *of_node,
- unsigned int size,
- const struct irq_domain_ops *ops,
- void *host_data)
-{
- struct irq_domain_info info = {
- .fwnode = of_node_to_fwnode(of_node),
- .size = size,
- .hwirq_max = size,
- .ops = ops,
- .host_data = host_data,
- };
- struct irq_domain *d;
-
- d = irq_domain_instantiate(&info);
- return IS_ERR(d) ? NULL : d;
-}
-
#ifdef CONFIG_IRQ_DOMAIN_NOMAP
-static inline struct irq_domain *irq_domain_add_nomap(struct device_node *of_node,
- unsigned int max_irq,
- const struct irq_domain_ops *ops,
- void *host_data)
+static inline struct irq_domain *irq_domain_create_nomap(struct fwnode_handle *fwnode,
+ unsigned int max_irq,
+ const struct irq_domain_ops *ops,
+ void *host_data)
{
- struct irq_domain_info info = {
- .fwnode = of_node_to_fwnode(of_node),
+ const struct irq_domain_info info = {
+ .fwnode = fwnode,
.hwirq_max = max_irq,
.direct_max = max_irq,
.ops = ops,
.host_data = host_data,
};
- struct irq_domain *d;
+ struct irq_domain *d = irq_domain_instantiate(&info);
- d = irq_domain_instantiate(&info);
return IS_ERR(d) ? NULL : d;
}
unsigned int irq_create_direct_mapping(struct irq_domain *domain);
#endif
-static inline struct irq_domain *irq_domain_add_tree(struct device_node *of_node,
- const struct irq_domain_ops *ops,
- void *host_data)
-{
- struct irq_domain_info info = {
- .fwnode = of_node_to_fwnode(of_node),
- .hwirq_max = ~0U,
- .ops = ops,
- .host_data = host_data,
- };
- struct irq_domain *d;
-
- d = irq_domain_instantiate(&info);
- return IS_ERR(d) ? NULL : d;
-}
-
+/**
+ * irq_domain_create_linear - Allocate and register a linear revmap irq_domain.
+ * @fwnode: pointer to interrupt controller's FW node.
+ * @size: Number of interrupts in the domain.
+ * @ops: map/unmap domain callbacks
+ * @host_data: Controller private data pointer
+ *
+ * Returns: Newly created irq_domain
+ */
static inline struct irq_domain *irq_domain_create_linear(struct fwnode_handle *fwnode,
- unsigned int size,
- const struct irq_domain_ops *ops,
- void *host_data)
+ unsigned int size,
+ const struct irq_domain_ops *ops,
+ void *host_data)
{
- struct irq_domain_info info = {
+ const struct irq_domain_info info = {
.fwnode = fwnode,
.size = size,
.hwirq_max = size,
.ops = ops,
.host_data = host_data,
};
- struct irq_domain *d;
+ struct irq_domain *d = irq_domain_instantiate(&info);
- d = irq_domain_instantiate(&info);
return IS_ERR(d) ? NULL : d;
}
static inline struct irq_domain *irq_domain_create_tree(struct fwnode_handle *fwnode,
- const struct irq_domain_ops *ops,
- void *host_data)
+ const struct irq_domain_ops *ops,
+ void *host_data)
{
- struct irq_domain_info info = {
+ const struct irq_domain_info info = {
.fwnode = fwnode,
.hwirq_max = ~0,
.ops = ops,
.host_data = host_data,
};
- struct irq_domain *d;
+ struct irq_domain *d = irq_domain_instantiate(&info);
- d = irq_domain_instantiate(&info);
return IS_ERR(d) ? NULL : d;
}
void irq_domain_remove(struct irq_domain *domain);
-int irq_domain_associate(struct irq_domain *domain, unsigned int irq,
- irq_hw_number_t hwirq);
-void irq_domain_associate_many(struct irq_domain *domain,
- unsigned int irq_base,
+int irq_domain_associate(struct irq_domain *domain, unsigned int irq, irq_hw_number_t hwirq);
+void irq_domain_associate_many(struct irq_domain *domain, unsigned int irq_base,
irq_hw_number_t hwirq_base, int count);
-unsigned int irq_create_mapping_affinity(struct irq_domain *domain,
- irq_hw_number_t hwirq,
+unsigned int irq_create_mapping_affinity(struct irq_domain *domain, irq_hw_number_t hwirq,
const struct irq_affinity_desc *affinity);
unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec);
void irq_dispose_mapping(unsigned int virq);
-static inline unsigned int irq_create_mapping(struct irq_domain *domain,
- irq_hw_number_t hwirq)
+/**
+ * irq_create_mapping - Map a hardware interrupt into linux irq space
+ * @domain: domain owning this hardware interrupt or NULL for default domain
+ * @hwirq: hardware irq number in that domain space
+ *
+ * Only one mapping per hardware interrupt is permitted.
+ *
+ * If the sense/trigger is to be specified, set_irq_type() should be called
+ * on the number returned from that call.
+ *
+ * Returns: Linux irq number or 0 on error
+ */
+static inline unsigned int irq_create_mapping(struct irq_domain *domain, irq_hw_number_t hwirq)
{
return irq_create_mapping_affinity(domain, hwirq, NULL);
}
@@ -531,6 +456,13 @@ struct irq_desc *__irq_resolve_mapping(struct irq_domain *domain,
irq_hw_number_t hwirq,
unsigned int *irq);
+/**
+ * irq_resolve_mapping - Find a linux irq from a hw irq number.
+ * @domain: domain owning this hardware interrupt
+ * @hwirq: hardware irq number in that domain space
+ *
+ * Returns: Interrupt descriptor
+ */
static inline struct irq_desc *irq_resolve_mapping(struct irq_domain *domain,
irq_hw_number_t hwirq)
{
@@ -539,8 +471,10 @@ static inline struct irq_desc *irq_resolve_mapping(struct irq_domain *domain,
/**
* irq_find_mapping() - Find a linux irq from a hw irq number.
- * @domain: domain owning this hardware interrupt
- * @hwirq: hardware irq number in that domain space
+ * @domain: domain owning this hardware interrupt
+ * @hwirq: hardware irq number in that domain space
+ *
+ * Returns: Linux irq number or 0 if not found
*/
static inline unsigned int irq_find_mapping(struct irq_domain *domain,
irq_hw_number_t hwirq)
@@ -553,107 +487,115 @@ static inline unsigned int irq_find_mapping(struct irq_domain *domain,
return 0;
}
-static inline unsigned int irq_linear_revmap(struct irq_domain *domain,
- irq_hw_number_t hwirq)
-{
- return irq_find_mapping(domain, hwirq);
-}
-
extern const struct irq_domain_ops irq_domain_simple_ops;
/* stock xlate functions */
int irq_domain_xlate_onecell(struct irq_domain *d, struct device_node *ctrlr,
- const u32 *intspec, unsigned int intsize,
- irq_hw_number_t *out_hwirq, unsigned int *out_type);
+ const u32 *intspec, unsigned int intsize,
+ irq_hw_number_t *out_hwirq, unsigned int *out_type);
int irq_domain_xlate_twocell(struct irq_domain *d, struct device_node *ctrlr,
- const u32 *intspec, unsigned int intsize,
- irq_hw_number_t *out_hwirq, unsigned int *out_type);
+ const u32 *intspec, unsigned int intsize,
+ irq_hw_number_t *out_hwirq, unsigned int *out_type);
int irq_domain_xlate_onetwocell(struct irq_domain *d, struct device_node *ctrlr,
- const u32 *intspec, unsigned int intsize,
- irq_hw_number_t *out_hwirq, unsigned int *out_type);
-
-int irq_domain_translate_twocell(struct irq_domain *d,
- struct irq_fwspec *fwspec,
- unsigned long *out_hwirq,
- unsigned int *out_type);
-
-int irq_domain_translate_onecell(struct irq_domain *d,
- struct irq_fwspec *fwspec,
- unsigned long *out_hwirq,
- unsigned int *out_type);
+ const u32 *intspec, unsigned int intsize,
+ irq_hw_number_t *out_hwirq, unsigned int *out_type);
+int irq_domain_xlate_twothreecell(struct irq_domain *d, struct device_node *ctrlr,
+ const u32 *intspec, unsigned int intsize,
+ irq_hw_number_t *out_hwirq, unsigned int *out_type);
+
+int irq_domain_translate_onecell(struct irq_domain *d, struct irq_fwspec *fwspec,
+ unsigned long *out_hwirq, unsigned int *out_type);
+int irq_domain_translate_twocell(struct irq_domain *d, struct irq_fwspec *fwspec,
+ unsigned long *out_hwirq, unsigned int *out_type);
+int irq_domain_translate_twothreecell(struct irq_domain *d, struct irq_fwspec *fwspec,
+ unsigned long *out_hwirq, unsigned int *out_type);
/* IPI functions */
int irq_reserve_ipi(struct irq_domain *domain, const struct cpumask *dest);
int irq_destroy_ipi(unsigned int irq, const struct cpumask *dest);
/* V2 interfaces to support hierarchy IRQ domains. */
-struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain,
- unsigned int virq);
-void irq_domain_set_info(struct irq_domain *domain, unsigned int virq,
- irq_hw_number_t hwirq,
- const struct irq_chip *chip,
- void *chip_data, irq_flow_handler_t handler,
+struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain, unsigned int virq);
+void irq_domain_set_info(struct irq_domain *domain, unsigned int virq, irq_hw_number_t hwirq,
+ const struct irq_chip *chip, void *chip_data, irq_flow_handler_t handler,
void *handler_data, const char *handler_name);
void irq_domain_reset_irq_data(struct irq_data *irq_data);
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
-struct irq_domain *irq_domain_create_hierarchy(struct irq_domain *parent,
- unsigned int flags,
- unsigned int size,
- struct fwnode_handle *fwnode,
- const struct irq_domain_ops *ops,
- void *host_data);
-
-static inline struct irq_domain *irq_domain_add_hierarchy(struct irq_domain *parent,
- unsigned int flags,
- unsigned int size,
- struct device_node *node,
- const struct irq_domain_ops *ops,
- void *host_data)
-{
- return irq_domain_create_hierarchy(parent, flags, size,
- of_node_to_fwnode(node),
- ops, host_data);
-}
-
-int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base,
- unsigned int nr_irqs, int node, void *arg,
- bool realloc,
+/**
+ * irq_domain_create_hierarchy - Add an irqdomain into the hierarchy
+ * @parent: Parent irq domain to associate with the new domain
+ * @flags: Irq domain flags associated with the domain
+ * @size: Size of the domain. See below.
+ * @fwnode: Optional fwnode of the interrupt controller
+ * @ops: Pointer to the interrupt domain callbacks
+ * @host_data: Controller private data pointer
+ *
+ * If @size is 0, a tree domain is created; otherwise a linear domain.
+ *
+ * If successful the parent is associated to the new domain and the
+ * domain flags are set.
+ *
+ * Returns: A pointer to IRQ domain, or %NULL on failure.
+ */
+static inline struct irq_domain *irq_domain_create_hierarchy(struct irq_domain *parent,
+ unsigned int flags, unsigned int size,
+ struct fwnode_handle *fwnode,
+ const struct irq_domain_ops *ops,
+ void *host_data)
+{
+ const struct irq_domain_info info = {
+ .fwnode = fwnode,
+ .size = size,
+ .hwirq_max = size ? : ~0U,
+ .ops = ops,
+ .host_data = host_data,
+ .domain_flags = flags,
+ .parent = parent,
+ };
+ struct irq_domain *d = irq_domain_instantiate(&info);
+
+ return IS_ERR(d) ? NULL : d;
+}
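For readers new to the hierarchy API, a minimal caller sketch follows; the parent domain, the ops table, and the size of 32 are hypothetical stand-ins, and my_child_alloc is an assumed driver callback, not part of this patch.

/* Sketch: create a 32-hwirq linear child domain under an existing parent. */
static const struct irq_domain_ops my_child_ops = {
	.alloc	= my_child_alloc,		/* hypothetical callback */
	.free	= irq_domain_free_irqs_common,
};

static int my_create_child(struct irq_domain *parent,
			   struct fwnode_handle *fwnode, void *priv)
{
	struct irq_domain *d;

	d = irq_domain_create_hierarchy(parent, 0, 32, fwnode,
					&my_child_ops, priv);
	if (!d)
		return -ENOMEM;	/* the wrapper folds all errors into NULL */
	return 0;
}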
+
+int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base, unsigned int nr_irqs,
+ int node, void *arg, bool realloc,
const struct irq_affinity_desc *affinity);
void irq_domain_free_irqs(unsigned int virq, unsigned int nr_irqs);
int irq_domain_activate_irq(struct irq_data *irq_data, bool early);
void irq_domain_deactivate_irq(struct irq_data *irq_data);
-static inline int irq_domain_alloc_irqs(struct irq_domain *domain,
- unsigned int nr_irqs, int node, void *arg)
+/**
+ * irq_domain_alloc_irqs - Allocate IRQs from domain
+ * @domain: domain to allocate from
+ * @nr_irqs: number of IRQs to allocate
+ * @node: NUMA node id for memory allocation
+ * @arg: domain specific argument
+ *
+ * See __irq_domain_alloc_irqs()'s documentation.
+ */
+static inline int irq_domain_alloc_irqs(struct irq_domain *domain, unsigned int nr_irqs,
+ int node, void *arg)
{
- return __irq_domain_alloc_irqs(domain, -1, nr_irqs, node, arg, false,
- NULL);
+ return __irq_domain_alloc_irqs(domain, -1, nr_irqs, node, arg, false, NULL);
}
-int irq_domain_set_hwirq_and_chip(struct irq_domain *domain,
- unsigned int virq,
- irq_hw_number_t hwirq,
- const struct irq_chip *chip,
+int irq_domain_set_hwirq_and_chip(struct irq_domain *domain, unsigned int virq,
+ irq_hw_number_t hwirq, const struct irq_chip *chip,
void *chip_data);
-void irq_domain_free_irqs_common(struct irq_domain *domain,
- unsigned int virq,
+void irq_domain_free_irqs_common(struct irq_domain *domain, unsigned int virq,
unsigned int nr_irqs);
-void irq_domain_free_irqs_top(struct irq_domain *domain,
- unsigned int virq, unsigned int nr_irqs);
+void irq_domain_free_irqs_top(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs);
int irq_domain_push_irq(struct irq_domain *domain, int virq, void *arg);
int irq_domain_pop_irq(struct irq_domain *domain, int virq);
-int irq_domain_alloc_irqs_parent(struct irq_domain *domain,
- unsigned int irq_base,
+int irq_domain_alloc_irqs_parent(struct irq_domain *domain, unsigned int irq_base,
unsigned int nr_irqs, void *arg);
-void irq_domain_free_irqs_parent(struct irq_domain *domain,
- unsigned int irq_base,
+void irq_domain_free_irqs_parent(struct irq_domain *domain, unsigned int irq_base,
unsigned int nr_irqs);
-int irq_domain_disconnect_hierarchy(struct irq_domain *domain,
- unsigned int virq);
+int irq_domain_disconnect_hierarchy(struct irq_domain *domain, unsigned int virq);
static inline bool irq_domain_is_hierarchy(struct irq_domain *domain)
{
@@ -662,8 +604,7 @@ static inline bool irq_domain_is_hierarchy(struct irq_domain *domain)
static inline bool irq_domain_is_ipi(struct irq_domain *domain)
{
- return domain->flags &
- (IRQ_DOMAIN_FLAG_IPI_PER_CPU | IRQ_DOMAIN_FLAG_IPI_SINGLE);
+ return domain->flags & (IRQ_DOMAIN_FLAG_IPI_PER_CPU | IRQ_DOMAIN_FLAG_IPI_SINGLE);
}
static inline bool irq_domain_is_ipi_per_cpu(struct irq_domain *domain)
@@ -691,15 +632,18 @@ static inline bool irq_domain_is_msi_device(struct irq_domain *domain)
return domain->flags & IRQ_DOMAIN_FLAG_MSI_DEVICE;
}
+static inline bool irq_domain_is_msi_immutable(struct irq_domain *domain)
+{
+ return domain->flags & IRQ_DOMAIN_FLAG_MSI_IMMUTABLE;
+}
#else /* CONFIG_IRQ_DOMAIN_HIERARCHY */
-static inline int irq_domain_alloc_irqs(struct irq_domain *domain,
- unsigned int nr_irqs, int node, void *arg)
+static inline int irq_domain_alloc_irqs(struct irq_domain *domain, unsigned int nr_irqs,
+ int node, void *arg)
{
return -1;
}
-static inline void irq_domain_free_irqs(unsigned int virq,
- unsigned int nr_irqs) { }
+static inline void irq_domain_free_irqs(unsigned int virq, unsigned int nr_irqs) { }
static inline bool irq_domain_is_hierarchy(struct irq_domain *domain)
{
@@ -739,8 +683,7 @@ static inline bool irq_domain_is_msi_device(struct irq_domain *domain)
#endif /* CONFIG_IRQ_DOMAIN_HIERARCHY */
#ifdef CONFIG_GENERIC_MSI_IRQ
-int msi_device_domain_alloc_wired(struct irq_domain *domain, unsigned int hwirq,
- unsigned int type);
+int msi_device_domain_alloc_wired(struct irq_domain *domain, unsigned int hwirq, unsigned int type);
void msi_device_domain_free_wired(struct irq_domain *domain, unsigned int virq);
#else
static inline int msi_device_domain_alloc_wired(struct irq_domain *domain, unsigned int hwirq,
@@ -755,10 +698,50 @@ static inline void msi_device_domain_free_wired(struct irq_domain *domain, unsig
}
#endif
+/* Deprecated functions. Will be removed in the merge window */
+static inline struct fwnode_handle *of_node_to_fwnode(struct device_node *node)
+{
+ return node ? &node->fwnode : NULL;
+}
+
+static inline struct irq_domain *irq_domain_add_tree(struct device_node *of_node,
+ const struct irq_domain_ops *ops,
+ void *host_data)
+{
+ struct irq_domain_info info = {
+ .fwnode = of_fwnode_handle(of_node),
+ .hwirq_max = ~0U,
+ .ops = ops,
+ .host_data = host_data,
+ };
+ struct irq_domain *d;
+
+ d = irq_domain_instantiate(&info);
+ return IS_ERR(d) ? NULL : d;
+}
+
+static inline struct irq_domain *irq_domain_add_linear(struct device_node *of_node,
+ unsigned int size,
+ const struct irq_domain_ops *ops,
+ void *host_data)
+{
+ struct irq_domain_info info = {
+ .fwnode = of_fwnode_handle(of_node),
+ .size = size,
+ .hwirq_max = size,
+ .ops = ops,
+ .host_data = host_data,
+ };
+ struct irq_domain *d;
+
+ d = irq_domain_instantiate(&info);
+ return IS_ERR(d) ? NULL : d;
+}
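Since irq_domain_add_linear() is now explicitly deprecated, a sketch of the preferred replacement may help migrating callers; my_ops, np, and priv are placeholders for whatever the driver already has.

/* Sketch: what irq_domain_add_linear(np, 32, &my_ops, priv) becomes. */
struct irq_domain_info info = {
	.fwnode		= of_fwnode_handle(np),
	.size		= 32,
	.hwirq_max	= 32,
	.ops		= &my_ops,	/* hypothetical ops table */
	.host_data	= priv,
};
struct irq_domain *d = irq_domain_instantiate(&info);

if (IS_ERR(d))
	return PTR_ERR(d);	/* unlike the wrapper, errors stay distinct */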
+
#else /* CONFIG_IRQ_DOMAIN */
static inline void irq_dispose_mapping(unsigned int virq) { }
-static inline struct irq_domain *irq_find_matching_fwnode(
- struct fwnode_handle *fwnode, enum irq_domain_bus_token bus_token)
+static inline struct irq_domain *irq_find_matching_fwnode(struct fwnode_handle *fwnode,
+ enum irq_domain_bus_token bus_token)
{
return NULL;
}
diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
index 0ea8c9887429..91b20788273d 100644
--- a/include/linux/jiffies.h
+++ b/include/linux/jiffies.h
@@ -59,7 +59,7 @@
/* LATCH is used in the interval timer and ftape setup. */
#define LATCH ((CLOCK_TICK_RATE + HZ/2) / HZ) /* For divider */
-extern int register_refined_jiffies(long clock_tick_rate);
+extern void register_refined_jiffies(long clock_tick_rate);
/* TICK_USEC is the time between ticks in usec assuming SHIFTED_HZ */
#define TICK_USEC ((USEC_PER_SEC + HZ/2) / HZ)
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index c8971861521a..53ef1b6c8712 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -369,6 +369,9 @@ struct kimage {
phys_addr_t ima_buffer_addr;
size_t ima_buffer_size;
+
+ unsigned long ima_segment_index;
+ bool is_ima_segment_index_set;
#endif
/* Core ELF header buffer */
@@ -474,13 +477,19 @@ extern bool kexec_file_dbg_print;
#define kexec_dprintk(fmt, arg...) \
do { if (kexec_file_dbg_print) pr_info(fmt, ##arg); } while (0)
+extern void *kimage_map_segment(struct kimage *image, unsigned long addr, unsigned long size);
+extern void kimage_unmap_segment(void *buffer);
#else /* !CONFIG_KEXEC_CORE */
struct pt_regs;
struct task_struct;
+struct kimage;
static inline void __crash_kexec(struct pt_regs *regs) { }
static inline void crash_kexec(struct pt_regs *regs) { }
static inline int kexec_should_crash(struct task_struct *p) { return 0; }
static inline int kexec_crash_loaded(void) { return 0; }
+static inline void *kimage_map_segment(struct kimage *image, unsigned long addr, unsigned long size)
+{ return NULL; }
+static inline void kimage_unmap_segment(void *buffer) { }
#define kexec_in_progress false
#endif /* CONFIG_KEXEC_CORE */
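The new mapping helpers pair in the obvious open/close fashion. A hedged sketch, assuming the caller already holds a valid kimage and wants to inspect one loaded segment:

/* Sketch: temporarily map a loaded kexec segment for inspection. */
static int inspect_segment(struct kimage *image, int i)
{
	void *buf;

	buf = kimage_map_segment(image, image->segment[i].mem,
				 image->segment[i].memsz);
	if (!buf)
		return -ENOMEM;

	/* ... read or measure the segment contents here ... */

	kimage_unmap_segment(buf);
	return 0;
}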
diff --git a/include/linux/livepatch_sched.h b/include/linux/livepatch_sched.h
index 013794fb5da0..065c185f2763 100644
--- a/include/linux/livepatch_sched.h
+++ b/include/linux/livepatch_sched.h
@@ -3,27 +3,23 @@
#define _LINUX_LIVEPATCH_SCHED_H_
#include <linux/jump_label.h>
-#include <linux/static_call_types.h>
+#include <linux/sched.h>
#ifdef CONFIG_LIVEPATCH
void __klp_sched_try_switch(void);
-#if !defined(CONFIG_PREEMPT_DYNAMIC) || !defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
-
DECLARE_STATIC_KEY_FALSE(klp_sched_try_switch_key);
-static __always_inline void klp_sched_try_switch(void)
+static __always_inline void klp_sched_try_switch(struct task_struct *curr)
{
- if (static_branch_unlikely(&klp_sched_try_switch_key))
+ if (static_branch_unlikely(&klp_sched_try_switch_key) &&
+ READ_ONCE(curr->__state) & TASK_FREEZABLE)
__klp_sched_try_switch();
}
-#endif /* !CONFIG_PREEMPT_DYNAMIC || !CONFIG_HAVE_PREEMPT_DYNAMIC_CALL */
-
#else /* !CONFIG_LIVEPATCH */
-static inline void klp_sched_try_switch(void) {}
-static inline void __klp_sched_try_switch(void) {}
+static inline void klp_sched_try_switch(struct task_struct *curr) {}
#endif /* CONFIG_LIVEPATCH */
#endif /* _LINUX_LIVEPATCH_SCHED_H_ */
diff --git a/include/linux/mfd/max77759.h b/include/linux/mfd/max77759.h
new file mode 100644
index 000000000000..c6face34e385
--- /dev/null
+++ b/include/linux/mfd/max77759.h
@@ -0,0 +1,165 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2020 Google Inc.
+ * Copyright 2025 Linaro Ltd.
+ *
+ * Maxim MAX77759 core driver
+ */
+
+#ifndef __LINUX_MFD_MAX77759_H
+#define __LINUX_MFD_MAX77759_H
+
+#include <linux/completion.h>
+#include <linux/mutex.h>
+#include <linux/regmap.h>
+
+#define MAX77759_PMIC_REG_PMIC_ID 0x00
+#define MAX77759_PMIC_REG_PMIC_REVISION 0x01
+#define MAX77759_PMIC_REG_OTP_REVISION 0x02
+#define MAX77759_PMIC_REG_INTSRC 0x22
+#define MAX77759_PMIC_REG_INTSRCMASK 0x23
+#define MAX77759_PMIC_REG_INTSRC_MAXQ BIT(3)
+#define MAX77759_PMIC_REG_INTSRC_TOPSYS BIT(1)
+#define MAX77759_PMIC_REG_INTSRC_CHGR BIT(0)
+#define MAX77759_PMIC_REG_TOPSYS_INT 0x24
+#define MAX77759_PMIC_REG_TOPSYS_INT_MASK 0x26
+#define MAX77759_PMIC_REG_TOPSYS_INT_TSHDN BIT(6)
+#define MAX77759_PMIC_REG_TOPSYS_INT_SYSOVLO BIT(5)
+#define MAX77759_PMIC_REG_TOPSYS_INT_SYSUVLO BIT(4)
+#define MAX77759_PMIC_REG_TOPSYS_INT_FSHIP BIT(0)
+#define MAX77759_PMIC_REG_I2C_CNFG 0x40
+#define MAX77759_PMIC_REG_SWRESET 0x50
+#define MAX77759_PMIC_REG_CONTROL_FG 0x51
+
+#define MAX77759_MAXQ_REG_UIC_INT1 0x64
+#define MAX77759_MAXQ_REG_UIC_INT1_APCMDRESI BIT(7)
+#define MAX77759_MAXQ_REG_UIC_INT1_SYSMSGI BIT(6)
+#define MAX77759_MAXQ_REG_UIC_INT1_GPIO6I BIT(1)
+#define MAX77759_MAXQ_REG_UIC_INT1_GPIO5I BIT(0)
+#define MAX77759_MAXQ_REG_UIC_INT1_GPIOxI(offs, en) (((en) & 1) << (offs))
+#define MAX77759_MAXQ_REG_UIC_INT1_GPIOxI_MASK(offs) \
+ MAX77759_MAXQ_REG_UIC_INT1_GPIOxI(offs, ~0)
+#define MAX77759_MAXQ_REG_UIC_INT2 0x65
+#define MAX77759_MAXQ_REG_UIC_INT3 0x66
+#define MAX77759_MAXQ_REG_UIC_INT4 0x67
+#define MAX77759_MAXQ_REG_UIC_UIC_STATUS1 0x68
+#define MAX77759_MAXQ_REG_UIC_UIC_STATUS2 0x69
+#define MAX77759_MAXQ_REG_UIC_UIC_STATUS3 0x6a
+#define MAX77759_MAXQ_REG_UIC_UIC_STATUS4 0x6b
+#define MAX77759_MAXQ_REG_UIC_UIC_STATUS5 0x6c
+#define MAX77759_MAXQ_REG_UIC_UIC_STATUS6 0x6d
+#define MAX77759_MAXQ_REG_UIC_UIC_STATUS7 0x6e
+#define MAX77759_MAXQ_REG_UIC_UIC_STATUS8 0x6f
+#define MAX77759_MAXQ_REG_UIC_INT1_M 0x70
+#define MAX77759_MAXQ_REG_UIC_INT2_M 0x71
+#define MAX77759_MAXQ_REG_UIC_INT3_M 0x72
+#define MAX77759_MAXQ_REG_UIC_INT4_M 0x73
+#define MAX77759_MAXQ_REG_AP_DATAOUT0 0x81
+#define MAX77759_MAXQ_REG_AP_DATAOUT32 0xa1
+#define MAX77759_MAXQ_REG_AP_DATAIN0 0xb1
+#define MAX77759_MAXQ_REG_UIC_SWRST 0xe0
+
+#define MAX77759_CHGR_REG_CHG_INT 0xb0
+#define MAX77759_CHGR_REG_CHG_INT2 0xb1
+#define MAX77759_CHGR_REG_CHG_INT_MASK 0xb2
+#define MAX77759_CHGR_REG_CHG_INT2_MASK 0xb3
+#define MAX77759_CHGR_REG_CHG_INT_OK 0xb4
+#define MAX77759_CHGR_REG_CHG_DETAILS_00 0xb5
+#define MAX77759_CHGR_REG_CHG_DETAILS_01 0xb6
+#define MAX77759_CHGR_REG_CHG_DETAILS_02 0xb7
+#define MAX77759_CHGR_REG_CHG_DETAILS_03 0xb8
+#define MAX77759_CHGR_REG_CHG_CNFG_00 0xb9
+#define MAX77759_CHGR_REG_CHG_CNFG_01 0xba
+#define MAX77759_CHGR_REG_CHG_CNFG_02 0xbb
+#define MAX77759_CHGR_REG_CHG_CNFG_03 0xbc
+#define MAX77759_CHGR_REG_CHG_CNFG_04 0xbd
+#define MAX77759_CHGR_REG_CHG_CNFG_05 0xbe
+#define MAX77759_CHGR_REG_CHG_CNFG_06 0xbf
+#define MAX77759_CHGR_REG_CHG_CNFG_07 0xc0
+#define MAX77759_CHGR_REG_CHG_CNFG_08 0xc1
+#define MAX77759_CHGR_REG_CHG_CNFG_09 0xc2
+#define MAX77759_CHGR_REG_CHG_CNFG_10 0xc3
+#define MAX77759_CHGR_REG_CHG_CNFG_11 0xc4
+#define MAX77759_CHGR_REG_CHG_CNFG_12 0xc5
+#define MAX77759_CHGR_REG_CHG_CNFG_13 0xc6
+#define MAX77759_CHGR_REG_CHG_CNFG_14 0xc7
+#define MAX77759_CHGR_REG_CHG_CNFG_15 0xc8
+#define MAX77759_CHGR_REG_CHG_CNFG_16 0xc9
+#define MAX77759_CHGR_REG_CHG_CNFG_17 0xca
+#define MAX77759_CHGR_REG_CHG_CNFG_18 0xcb
+#define MAX77759_CHGR_REG_CHG_CNFG_19 0xcc
+
+/* MaxQ opcodes for max77759_maxq_command() */
+#define MAX77759_MAXQ_OPCODE_MAXLENGTH (MAX77759_MAXQ_REG_AP_DATAOUT32 - \
+ MAX77759_MAXQ_REG_AP_DATAOUT0 + \
+ 1)
+
+#define MAX77759_MAXQ_OPCODE_GPIO_TRIGGER_READ 0x21
+#define MAX77759_MAXQ_OPCODE_GPIO_TRIGGER_WRITE 0x22
+#define MAX77759_MAXQ_OPCODE_GPIO_CONTROL_READ 0x23
+#define MAX77759_MAXQ_OPCODE_GPIO_CONTROL_WRITE 0x24
+#define MAX77759_MAXQ_OPCODE_USER_SPACE_READ 0x81
+#define MAX77759_MAXQ_OPCODE_USER_SPACE_WRITE 0x82
+
+/**
+ * struct max77759 - core max77759 internal data structure
+ *
+ * @regmap_top: Regmap for accessing TOP registers
+ * @maxq_lock: Lock for serializing access to MaxQ
+ * @regmap_maxq: Regmap for accessing MaxQ registers
+ * @cmd_done: Used to signal completion of a MaxQ command
+ * @regmap_charger: Regmap for accessing charger registers
+ *
+ * The MAX77759 comprises several sub-blocks, namely TOP, MaxQ, Charger,
+ * Fuel Gauge, and TCPCI.
+ */
+struct max77759 {
+ struct regmap *regmap_top;
+
+ /* This protects MaxQ commands - only one can be active */
+ struct mutex maxq_lock;
+ struct regmap *regmap_maxq;
+ struct completion cmd_done;
+
+ struct regmap *regmap_charger;
+};
+
+/**
+ * struct max77759_maxq_command - structure containing the MaxQ command to
+ * send
+ *
+ * @length: The number of bytes to send.
+ * @cmd: The data to send.
+ */
+struct max77759_maxq_command {
+ u8 length;
+ u8 cmd[] __counted_by(length);
+};
+
+/**
+ * struct max77759_maxq_response - structure containing the MaxQ response
+ *
+ * @length: The number of bytes to receive.
+ * @rsp: The data received. Must have space for at least @length bytes.
+ */
+struct max77759_maxq_response {
+ u8 length;
+ u8 rsp[] __counted_by(length);
+};
+
+/**
+ * max77759_maxq_command() - issue a MaxQ command and wait for the response
+ * and associated data
+ *
+ * @max77759: The core max77759 device handle.
+ * @cmd: The command to be sent.
+ * @rsp: Any response data associated with the command will be copied here;
+ * can be %NULL if the command has no response (other than ACK).
+ *
+ * Return: 0 on success, a negative error number otherwise.
+ */
+int max77759_maxq_command(struct max77759 *max77759,
+ const struct max77759_maxq_command *cmd,
+ struct max77759_maxq_response *rsp);
+
+#endif /* __LINUX_MFD_MAX77759_H */
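A hedged usage sketch for the MaxQ API above; the opcode choice, the one-byte command, and the two-byte response length are illustrative, not taken from the datasheet. It uses DEFINE_RAW_FLEX() from <linux/overflow.h> to build the flexible-array command and response on the stack.

/* Sketch: issue a one-byte MaxQ opcode and read back a two-byte response. */
static int my_read_gpio_ctl(struct max77759 *max77759, u8 *out)
{
	DEFINE_RAW_FLEX(struct max77759_maxq_command, cmd, cmd, 1);
	DEFINE_RAW_FLEX(struct max77759_maxq_response, rsp, rsp, 2);
	int ret;

	cmd->length = 1;
	cmd->cmd[0] = MAX77759_MAXQ_OPCODE_GPIO_CONTROL_READ;
	rsp->length = 2;

	ret = max77759_maxq_command(max77759, cmd, rsp);
	if (ret)
		return ret;

	memcpy(out, rsp->rsp, 2);
	return 0;
}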
diff --git a/include/linux/mm.h b/include/linux/mm.h
index bf55206935c4..fdda6b16263b 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -385,7 +385,7 @@ extern unsigned int kobjsize(const void *objp);
#endif
#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR
-# define VM_UFFD_MINOR_BIT 38
+# define VM_UFFD_MINOR_BIT 41
# define VM_UFFD_MINOR BIT(VM_UFFD_MINOR_BIT) /* UFFD minor faults */
#else /* !CONFIG_HAVE_ARCH_USERFAULTFD_MINOR */
# define VM_UFFD_MINOR VM_NONE
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 56d07edd01f9..32ba5126e221 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -31,6 +31,7 @@
#define INIT_PASID 0
struct address_space;
+struct futex_private_hash;
struct mem_cgroup;
/*
@@ -1031,7 +1032,11 @@ struct mm_struct {
*/
seqcount_t mm_lock_seq;
#endif
-
+#ifdef CONFIG_FUTEX_PRIVATE_HASH
+ struct mutex futex_hash_lock;
+ struct futex_private_hash __rcu *futex_phash;
+ struct futex_private_hash *futex_phash_new;
+#endif
unsigned long hiwater_rss; /* High-watermark of RSS usage */
unsigned long hiwater_vm; /* High-water virtual memory usage */
diff --git a/include/linux/mman.h b/include/linux/mman.h
index bce214fece16..f4c6346a8fcd 100644
--- a/include/linux/mman.h
+++ b/include/linux/mman.h
@@ -155,7 +155,9 @@ calc_vm_flag_bits(struct file *file, unsigned long flags)
return _calc_vm_trans(flags, MAP_GROWSDOWN, VM_GROWSDOWN ) |
_calc_vm_trans(flags, MAP_LOCKED, VM_LOCKED ) |
_calc_vm_trans(flags, MAP_SYNC, VM_SYNC ) |
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
_calc_vm_trans(flags, MAP_STACK, VM_NOHUGEPAGE) |
+#endif
arch_calc_vm_flag_bits(file, flags);
}
diff --git a/include/linux/mmap_lock.h b/include/linux/mmap_lock.h
index 4706c6769902..e0eddfd306ef 100644
--- a/include/linux/mmap_lock.h
+++ b/include/linux/mmap_lock.h
@@ -7,6 +7,7 @@
#include <linux/rwsem.h>
#include <linux/tracepoint-defs.h>
#include <linux/types.h>
+#include <linux/cleanup.h>
#define MMAP_LOCK_INITIALIZER(name) \
.mmap_lock = __RWSEM_INITIALIZER((name).mmap_lock),
@@ -211,6 +212,9 @@ static inline void mmap_read_unlock(struct mm_struct *mm)
up_read(&mm->mmap_lock);
}
+DEFINE_GUARD(mmap_read_lock, struct mm_struct *,
+ mmap_read_lock(_T), mmap_read_unlock(_T))
+
static inline void mmap_read_unlock_non_owner(struct mm_struct *mm)
{
__mmap_lock_trace_released(mm, false);
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index 526fce581657..ddcdf23d731c 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -329,6 +329,7 @@ struct mmc_card {
#define MMC_QUIRK_BROKEN_SD_CACHE (1<<15) /* Disable broken SD cache support */
#define MMC_QUIRK_BROKEN_CACHE_FLUSH (1<<16) /* Don't flush cache until the write has occurred */
#define MMC_QUIRK_BROKEN_SD_POWEROFF_NOTIFY (1<<17) /* Disable broken SD poweroff notify support */
+#define MMC_QUIRK_NO_UHS_DDR50_TUNING (1<<18) /* Disable DDR50 tuning */
bool written_flag; /* Indicates eMMC has been written since power on */
bool reenable_cmdq; /* Re-enable Command Queue */
diff --git a/include/linux/mmc/slot-gpio.h b/include/linux/mmc/slot-gpio.h
index 1ed7b0d1e4f9..23ac5696fa38 100644
--- a/include/linux/mmc/slot-gpio.h
+++ b/include/linux/mmc/slot-gpio.h
@@ -24,7 +24,7 @@ int mmc_gpiod_request_ro(struct mmc_host *host, const char *con_id,
int mmc_gpiod_set_cd_config(struct mmc_host *host, unsigned long config);
int mmc_gpio_set_cd_wake(struct mmc_host *host, bool on);
void mmc_gpiod_request_cd_irq(struct mmc_host *host);
-bool mmc_can_gpio_cd(struct mmc_host *host);
-bool mmc_can_gpio_ro(struct mmc_host *host);
+bool mmc_host_can_gpio_cd(struct mmc_host *host);
+bool mmc_host_can_gpio_ro(struct mmc_host *host);
#endif
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 6ccec1bf2896..b1c459f7a485 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -148,7 +148,6 @@ enum zone_stat_item {
NR_ZONE_WRITE_PENDING, /* Count of dirty, writeback and unstable pages */
NR_MLOCK, /* mlock()ed pages found and moved off LRU */
/* Second 128 byte cacheline */
- NR_BOUNCE,
#if IS_ENABLED(CONFIG_ZSMALLOC)
NR_ZSPAGES, /* allocated in zsmalloc */
#endif
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index bd7e60c0b72f..ebcee9328168 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -601,7 +601,7 @@ struct dmi_system_id {
#define DMI_MATCH(a, b) { .slot = a, .substr = b }
#define DMI_EXACT_MATCH(a, b) { .slot = a, .substr = b, .exact_match = 1 }
-#define PLATFORM_NAME_SIZE 20
+#define PLATFORM_NAME_SIZE 24
#define PLATFORM_MODULE_PREFIX "platform:"
struct platform_device_id {
diff --git a/include/linux/mount.h b/include/linux/mount.h
index dcc17ce8a959..6904ad33ee7a 100644
--- a/include/linux/mount.h
+++ b/include/linux/mount.h
@@ -22,48 +22,51 @@ struct fs_context;
struct file;
struct path;
-#define MNT_NOSUID 0x01
-#define MNT_NODEV 0x02
-#define MNT_NOEXEC 0x04
-#define MNT_NOATIME 0x08
-#define MNT_NODIRATIME 0x10
-#define MNT_RELATIME 0x20
-#define MNT_READONLY 0x40 /* does the user want this to be r/o? */
-#define MNT_NOSYMFOLLOW 0x80
-
-#define MNT_SHRINKABLE 0x100
-#define MNT_WRITE_HOLD 0x200
-
-#define MNT_SHARED 0x1000 /* if the vfsmount is a shared mount */
-#define MNT_UNBINDABLE 0x2000 /* if the vfsmount is a unbindable mount */
-/*
- * MNT_SHARED_MASK is the set of flags that should be cleared when a
- * mount becomes shared. Currently, this is only the flag that says a
- * mount cannot be bind mounted, since this is how we create a mount
- * that shares events with another mount. If you add a new MNT_*
- * flag, consider how it interacts with shared mounts.
- */
-#define MNT_SHARED_MASK (MNT_UNBINDABLE)
-#define MNT_USER_SETTABLE_MASK (MNT_NOSUID | MNT_NODEV | MNT_NOEXEC \
- | MNT_NOATIME | MNT_NODIRATIME | MNT_RELATIME \
- | MNT_READONLY | MNT_NOSYMFOLLOW)
-#define MNT_ATIME_MASK (MNT_NOATIME | MNT_NODIRATIME | MNT_RELATIME )
-
-#define MNT_INTERNAL_FLAGS (MNT_SHARED | MNT_WRITE_HOLD | MNT_INTERNAL | \
- MNT_DOOMED | MNT_SYNC_UMOUNT | MNT_MARKED)
-
-#define MNT_INTERNAL 0x4000
-
-#define MNT_LOCK_ATIME 0x040000
-#define MNT_LOCK_NOEXEC 0x080000
-#define MNT_LOCK_NOSUID 0x100000
-#define MNT_LOCK_NODEV 0x200000
-#define MNT_LOCK_READONLY 0x400000
-#define MNT_LOCKED 0x800000
-#define MNT_DOOMED 0x1000000
-#define MNT_SYNC_UMOUNT 0x2000000
-#define MNT_MARKED 0x4000000
-#define MNT_UMOUNT 0x8000000
+enum mount_flags {
+ MNT_NOSUID = 0x01,
+ MNT_NODEV = 0x02,
+ MNT_NOEXEC = 0x04,
+ MNT_NOATIME = 0x08,
+ MNT_NODIRATIME = 0x10,
+ MNT_RELATIME = 0x20,
+ MNT_READONLY = 0x40, /* does the user want this to be r/o? */
+ MNT_NOSYMFOLLOW = 0x80,
+
+ MNT_SHRINKABLE = 0x100,
+ MNT_WRITE_HOLD = 0x200,
+
+ MNT_SHARED = 0x1000, /* if the vfsmount is a shared mount */
+ MNT_UNBINDABLE = 0x2000, /* if the vfsmount is an unbindable mount */
+
+ MNT_INTERNAL = 0x4000,
+
+ MNT_LOCK_ATIME = 0x040000,
+ MNT_LOCK_NOEXEC = 0x080000,
+ MNT_LOCK_NOSUID = 0x100000,
+ MNT_LOCK_NODEV = 0x200000,
+ MNT_LOCK_READONLY = 0x400000,
+ MNT_LOCKED = 0x800000,
+ MNT_DOOMED = 0x1000000,
+ MNT_SYNC_UMOUNT = 0x2000000,
+ MNT_MARKED = 0x4000000,
+ MNT_UMOUNT = 0x8000000,
+
+ /*
+ * MNT_SHARED_MASK is the set of flags that should be cleared when a
+ * mount becomes shared. Currently, this is only the flag that says a
+ * mount cannot be bind mounted, since this is how we create a mount
+ * that shares events with another mount. If you add a new MNT_*
+ * flag, consider how it interacts with shared mounts.
+ */
+ MNT_SHARED_MASK = MNT_UNBINDABLE,
+ MNT_USER_SETTABLE_MASK = MNT_NOSUID | MNT_NODEV | MNT_NOEXEC
+ | MNT_NOATIME | MNT_NODIRATIME | MNT_RELATIME
+ | MNT_READONLY | MNT_NOSYMFOLLOW,
+ MNT_ATIME_MASK = MNT_NOATIME | MNT_NODIRATIME | MNT_RELATIME,
+
+ MNT_INTERNAL_FLAGS = MNT_SHARED | MNT_WRITE_HOLD | MNT_INTERNAL |
+ MNT_DOOMED | MNT_SYNC_UMOUNT | MNT_MARKED,
+};
struct vfsmount {
struct dentry *mnt_root; /* root of the mounted tree */
diff --git a/include/linux/msi.h b/include/linux/msi.h
index 86e42742fd0f..6863540f4b71 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -229,8 +229,11 @@ struct msi_dev_domain {
int msi_setup_device_data(struct device *dev);
-void msi_lock_descs(struct device *dev);
-void msi_unlock_descs(struct device *dev);
+void __msi_lock_descs(struct device *dev);
+void __msi_unlock_descs(struct device *dev);
+
+DEFINE_LOCK_GUARD_1(msi_descs_lock, struct device, __msi_lock_descs(_T->lock),
+ __msi_unlock_descs(_T->lock));
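With the lock guard in place, callers no longer pair msi_lock_descs()/msi_unlock_descs() by hand. A sketch, where the descriptor walk is purely illustrative:

/* Sketch: count MSI descriptors under the descs lock guard. */
static unsigned int my_count_descs(struct device *dev)
{
	struct msi_desc *desc;
	unsigned int n = 0;

	guard(msi_descs_lock)(dev);	/* __msi_unlock_descs() on scope exit */
	msi_for_each_desc(desc, dev, MSI_DESC_ALL)
		n++;
	return n;
}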
struct msi_desc *msi_domain_first_desc(struct device *dev, unsigned int domid,
enum msi_desc_filter filter);
@@ -420,6 +423,7 @@ struct msi_domain_info;
* @msi_init: Domain specific init function for MSI interrupts
 * @msi_free: Domain specific function to free MSI interrupts
* @msi_prepare: Prepare the allocation of the interrupts in the domain
+ * @msi_teardown: Reverse the effects of @msi_prepare
* @prepare_desc: Optional function to prepare the allocated MSI descriptor
* in the domain
* @set_desc: Set the msi descriptor for an interrupt
@@ -435,8 +439,9 @@ struct msi_domain_info;
* @get_hwirq, @msi_init and @msi_free are callbacks used by the underlying
* irqdomain.
*
- * @msi_check, @msi_prepare, @prepare_desc and @set_desc are callbacks used by the
- * msi_domain_alloc/free_irqs*() variants.
+ * @msi_check, @msi_prepare, @msi_teardown, @prepare_desc and
+ * @set_desc are callbacks used by the msi_domain_alloc/free_irqs*()
+ * variants.
*
* @domain_alloc_irqs, @domain_free_irqs can be used to override the
* default allocation/free functions (__msi_domain_alloc/free_irqs). This
@@ -458,6 +463,8 @@ struct msi_domain_ops {
int (*msi_prepare)(struct irq_domain *domain,
struct device *dev, int nvec,
msi_alloc_info_t *arg);
+ void (*msi_teardown)(struct irq_domain *domain,
+ msi_alloc_info_t *arg);
void (*prepare_desc)(struct irq_domain *domain, msi_alloc_info_t *arg,
struct msi_desc *desc);
void (*set_desc)(msi_alloc_info_t *arg,
@@ -486,6 +493,7 @@ struct msi_domain_ops {
* @handler: Optional: associated interrupt flow handler
* @handler_data: Optional: associated interrupt flow handler data
* @handler_name: Optional: associated interrupt flow handler name
+ * @alloc_data: Optional: associated interrupt allocation data
* @data: Optional: domain specific data
*/
struct msi_domain_info {
@@ -498,6 +506,7 @@ struct msi_domain_info {
irq_flow_handler_t handler;
void *handler_data;
const char *handler_name;
+ msi_alloc_info_t *alloc_data;
void *data;
};
@@ -507,12 +516,14 @@ struct msi_domain_info {
* @chip: Interrupt chip for this domain
* @ops: MSI domain ops
* @info: MSI domain info data
+ * @alloc_info: MSI domain allocation data (architecture specific)
*/
struct msi_domain_template {
char name[48];
struct irq_chip chip;
struct msi_domain_ops ops;
struct msi_domain_info info;
+ msi_alloc_info_t alloc_info;
};
/*
@@ -625,6 +636,10 @@ struct irq_domain *msi_create_irq_domain(struct fwnode_handle *fwnode,
struct msi_domain_info *info,
struct irq_domain *parent);
+struct irq_domain_info;
+struct irq_domain *msi_create_parent_irq_domain(struct irq_domain_info *info,
+ const struct msi_parent_ops *msi_parent_ops);
+
bool msi_create_device_irq_domain(struct device *dev, unsigned int domid,
const struct msi_domain_template *template,
unsigned int hwsize, void *domain_data,
diff --git a/include/linux/namei.h b/include/linux/namei.h
index bbaf55fb3101..5d085428e471 100644
--- a/include/linux/namei.h
+++ b/include/linux/namei.h
@@ -70,17 +70,16 @@ int vfs_path_parent_lookup(struct filename *filename, unsigned int flags,
int vfs_path_lookup(struct dentry *, struct vfsmount *, const char *,
unsigned int, struct path *);
-extern struct dentry *try_lookup_one_len(const char *, struct dentry *, int);
-extern struct dentry *lookup_one_len(const char *, struct dentry *, int);
-extern struct dentry *lookup_one_len_unlocked(const char *, struct dentry *, int);
-extern struct dentry *lookup_positive_unlocked(const char *, struct dentry *, int);
-struct dentry *lookup_one(struct mnt_idmap *, const char *, struct dentry *, int);
+extern struct dentry *try_lookup_noperm(struct qstr *, struct dentry *);
+extern struct dentry *lookup_noperm(struct qstr *, struct dentry *);
+extern struct dentry *lookup_noperm_unlocked(struct qstr *, struct dentry *);
+extern struct dentry *lookup_noperm_positive_unlocked(struct qstr *, struct dentry *);
+struct dentry *lookup_one(struct mnt_idmap *, struct qstr *, struct dentry *);
struct dentry *lookup_one_unlocked(struct mnt_idmap *idmap,
- const char *name, struct dentry *base,
- int len);
+ struct qstr *name, struct dentry *base);
struct dentry *lookup_one_positive_unlocked(struct mnt_idmap *idmap,
- const char *name,
- struct dentry *base, int len);
+ struct qstr *name,
+ struct dentry *base);
extern int follow_down_one(struct path *);
extern int follow_down(struct path *path, unsigned int flags);
diff --git a/include/linux/net.h b/include/linux/net.h
index 0ff950eecc6b..f60fff91e1df 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -70,6 +70,7 @@ enum sock_type {
SOCK_DCCP = 6,
SOCK_PACKET = 10,
};
+#endif /* ARCH_HAS_SOCKET_TYPES */
#define SOCK_MAX (SOCK_PACKET + 1)
/* Mask which covers at least up to SOCK_MAX-1. The
@@ -81,8 +82,7 @@ enum sock_type {
#ifndef SOCK_NONBLOCK
#define SOCK_NONBLOCK O_NONBLOCK
#endif
-
-#endif /* ARCH_HAS_SOCKET_TYPES */
+#define SOCK_COREDUMP O_NOCTTY
/**
* enum sock_shutdown_cmd - Shutdown types
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index 2479ed10f53e..51308f65b72f 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -303,6 +303,7 @@ enum nvme_ctrl_attr {
NVME_CTRL_ATTR_TBKAS = (1 << 6),
NVME_CTRL_ATTR_ELBAS = (1 << 15),
NVME_CTRL_ATTR_RHII = (1 << 18),
+ NVME_CTRL_ATTR_FDPS = (1 << 19),
};
struct nvme_id_ctrl {
@@ -689,6 +690,44 @@ struct nvme_rotational_media_log {
__u8 rsvd24[488];
};
+struct nvme_fdp_config {
+ __u8 flags;
+#define FDPCFG_FDPE (1U << 0)
+ __u8 fdpcidx;
+ __le16 reserved;
+};
+
+struct nvme_fdp_ruh_desc {
+ __u8 ruht;
+ __u8 reserved[3];
+};
+
+struct nvme_fdp_config_desc {
+ __le16 dsze;
+ __u8 fdpa;
+ __u8 vss;
+ __le32 nrg;
+ __le16 nruh;
+ __le16 maxpids;
+ __le32 nns;
+ __le64 runs;
+ __le32 erutl;
+ __u8 rsvd28[36];
+ struct nvme_fdp_ruh_desc ruhs[];
+};
+
+struct nvme_fdp_config_log {
+ __le16 numfdpc;
+ __u8 ver;
+ __u8 rsvd3;
+ __le32 sze;
+ __u8 rsvd8[8];
+ /*
+ * This is followed by a variable number of nvme_fdp_config_desc
+ * structures, but sparse doesn't like nested variable sized arrays.
+ */
+};
+
struct nvme_smart_log {
__u8 critical_warning;
__u8 temperature[2];
@@ -915,6 +954,7 @@ enum nvme_opcode {
nvme_cmd_resv_register = 0x0d,
nvme_cmd_resv_report = 0x0e,
nvme_cmd_resv_acquire = 0x11,
+ nvme_cmd_io_mgmt_recv = 0x12,
nvme_cmd_resv_release = 0x15,
nvme_cmd_zone_mgmt_send = 0x79,
nvme_cmd_zone_mgmt_recv = 0x7a,
@@ -936,6 +976,7 @@ enum nvme_opcode {
nvme_opcode_name(nvme_cmd_resv_register), \
nvme_opcode_name(nvme_cmd_resv_report), \
nvme_opcode_name(nvme_cmd_resv_acquire), \
+ nvme_opcode_name(nvme_cmd_io_mgmt_recv), \
nvme_opcode_name(nvme_cmd_resv_release), \
nvme_opcode_name(nvme_cmd_zone_mgmt_send), \
nvme_opcode_name(nvme_cmd_zone_mgmt_recv), \
@@ -1087,6 +1128,7 @@ enum {
NVME_RW_PRINFO_PRCHK_GUARD = 1 << 12,
NVME_RW_PRINFO_PRACT = 1 << 13,
NVME_RW_DTYPE_STREAMS = 1 << 4,
+ NVME_RW_DTYPE_DPLCMT = 2 << 4,
NVME_WZ_DEAC = 1 << 9,
};
@@ -1174,6 +1216,38 @@ struct nvme_zone_mgmt_recv_cmd {
__le32 cdw14[2];
};
+struct nvme_io_mgmt_recv_cmd {
+ __u8 opcode;
+ __u8 flags;
+ __u16 command_id;
+ __le32 nsid;
+ __le64 rsvd2[2];
+ union nvme_data_ptr dptr;
+ __u8 mo;
+ __u8 rsvd11;
+ __u16 mos;
+ __le32 numd;
+ __le32 cdw12[4];
+};
+
+enum {
+ NVME_IO_MGMT_RECV_MO_RUHS = 1,
+};
+
+struct nvme_fdp_ruh_status_desc {
+ __le16 pid;
+ __le16 ruhid;
+ __le32 earutr;
+ __le64 ruamw;
+ __u8 reserved[16];
+};
+
+struct nvme_fdp_ruh_status {
+ __u8 rsvd0[14];
+ __le16 nruhsd;
+ struct nvme_fdp_ruh_status_desc ruhsd[];
+};
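How these pieces fit together: the host fetches reclaim unit handle status with an I/O Management Receive command. A hedged sketch of building that command, with nsid, buf_len, and the submission path assumed; numd is the zero-based dword count, as elsewhere in NVMe.

/* Sketch: request FDP reclaim unit handle status for @nsid. */
struct nvme_command c = { };

c.imr.opcode = nvme_cmd_io_mgmt_recv;
c.imr.nsid   = cpu_to_le32(nsid);
c.imr.mo     = NVME_IO_MGMT_RECV_MO_RUHS;
c.imr.numd   = cpu_to_le32((buf_len >> 2) - 1);	/* 0's based dwords */
/* ... submit via the driver's usual synchronous command path ... */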
+
enum {
NVME_ZRA_ZONE_REPORT = 0,
NVME_ZRASF_ZONE_REPORT_ALL = 0,
@@ -1309,6 +1383,7 @@ enum {
NVME_FEAT_PLM_WINDOW = 0x14,
NVME_FEAT_HOST_BEHAVIOR = 0x16,
NVME_FEAT_SANITIZE = 0x17,
+ NVME_FEAT_FDP = 0x1d,
NVME_FEAT_SW_PROGRESS = 0x80,
NVME_FEAT_HOST_ID = 0x81,
NVME_FEAT_RESV_MASK = 0x82,
@@ -1329,6 +1404,7 @@ enum {
NVME_LOG_ANA = 0x0c,
NVME_LOG_FEATURES = 0x12,
NVME_LOG_RMI = 0x16,
+ NVME_LOG_FDP_CONFIGS = 0x20,
NVME_LOG_DISC = 0x70,
NVME_LOG_RESERVATION = 0x80,
NVME_FWACT_REPL = (0 << 3),
@@ -1923,6 +1999,7 @@ struct nvme_command {
struct nvmf_auth_receive_command auth_receive;
struct nvme_dbbuf dbbuf;
struct nvme_directive_cmd directive;
+ struct nvme_io_mgmt_recv_cmd imr;
};
};
diff --git a/include/linux/overflow.h b/include/linux/overflow.h
index 0c7e3dcfe867..7b7be27ca113 100644
--- a/include/linux/overflow.h
+++ b/include/linux/overflow.h
@@ -396,7 +396,7 @@ static inline size_t __must_check size_sub(size_t minuend, size_t subtrahend)
* @name: Name for a variable to define.
* @member: Name of the array member.
* @count: Number of elements in the array; must be compile-time const.
- * @initializer: initializer expression (could be empty for no init).
+ * @initializer: Initializer expression (e.g., pass `= { }` at minimum).
*/
#define _DEFINE_FLEX(type, name, member, count, initializer...) \
_Static_assert(__builtin_constant_p(count), \
@@ -404,7 +404,7 @@ static inline size_t __must_check size_sub(size_t minuend, size_t subtrahend)
union { \
u8 bytes[struct_size_t(type, member, count)]; \
type obj; \
- } name##_u initializer; \
+ } name##_u = { .obj initializer }; \
type *name = (type *)&name##_u
/**
@@ -419,6 +419,9 @@ static inline size_t __must_check size_sub(size_t minuend, size_t subtrahend)
* Define a zeroed, on-stack, instance of @type structure with a trailing
* flexible array member.
* Use __struct_size(@name) to get compile-time size of it afterwards.
+ * Use __member_size(@name->member) to get compile-time size of @name members.
+ * Use STACK_FLEX_ARRAY_SIZE(@name, @member) to get compile-time number of
+ * elements in array @member.
*/
#define DEFINE_RAW_FLEX(type, name, member, count) \
_DEFINE_FLEX(type, name, member, count, = {})
@@ -436,8 +439,22 @@ static inline size_t __must_check size_sub(size_t minuend, size_t subtrahend)
* Define a zeroed, on-stack, instance of @TYPE structure with a trailing
* flexible array member.
* Use __struct_size(@NAME) to get compile-time size of it afterwards.
+ * Use __member_size(@NAME->MEMBER) to get compile-time size of @NAME members.
+ * Use STACK_FLEX_ARRAY_SIZE(@NAME, @MEMBER) to get compile-time number of
+ * elements in array @MEMBER.
*/
#define DEFINE_FLEX(TYPE, NAME, MEMBER, COUNTER, COUNT) \
- _DEFINE_FLEX(TYPE, NAME, MEMBER, COUNT, = { .obj.COUNTER = COUNT, })
+ _DEFINE_FLEX(TYPE, NAME, MEMBER, COUNT, = { .COUNTER = COUNT, })
+
+/**
+ * STACK_FLEX_ARRAY_SIZE() - helper macro for the DEFINE_FLEX() family.
+ * Returns the number of elements in @array.
+ *
+ * @name: Name for a variable defined in DEFINE_RAW_FLEX()/DEFINE_FLEX().
+ * @array: Name of the array member.
+ */
+#define STACK_FLEX_ARRAY_SIZE(name, array) \
+ (__member_size((name)->array) / sizeof(*(name)->array) + \
+ __must_be_array((name)->array))
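A sketch tying the pieces together; struct demo is hypothetical. DEFINE_FLEX() zero-initializes the on-stack instance and sets the counter, and the new helper then bounds the iteration at compile time:

/* Sketch: on-stack flex-array instance, iterated via the new helper. */
struct demo {
	u8  count;
	u32 vals[] __counted_by(count);
};

DEFINE_FLEX(struct demo, d, vals, count, 4);	/* d->count == 4, zeroed */

for (int i = 0; i < STACK_FLEX_ARRAY_SIZE(d, vals); i++)
	d->vals[i] = i;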
#endif /* __LINUX_OVERFLOW_H */
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index e6a21b62dcce..3b814ce08331 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -615,6 +615,13 @@ FOLIO_FLAG(dropbehind, FOLIO_HEAD_PAGE)
PAGEFLAG_FALSE(HighMem, highmem)
#endif
+/* Does kmap_local_folio() only allow access to one page of the folio? */
+#ifdef CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP
+#define folio_test_partial_kmap(f) true
+#else
+#define folio_test_partial_kmap(f) folio_test_highmem(f)
+#endif
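Its intended use is clamping per-mapping copy sizes: when only one page of the folio is mapped at a time, a copy must stop at the page boundary. A hedged sketch of such a loop, with dst, offset, and len assumed from the caller:

/* Sketch: copy out of a folio one kmap window at a time when needed. */
while (len) {
	size_t chunk = len;
	const char *src;

	if (folio_test_partial_kmap(folio) &&
	    chunk > PAGE_SIZE - offset_in_page(offset))
		chunk = PAGE_SIZE - offset_in_page(offset);

	src = kmap_local_folio(folio, offset);
	memcpy(dst, src, chunk);
	kunmap_local(src);

	dst += chunk;
	offset += chunk;
	len -= chunk;
}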
+
#ifdef CONFIG_SWAP
static __always_inline bool folio_test_swapcache(const struct folio *folio)
{
diff --git a/include/linux/panic.h b/include/linux/panic.h
index 2494d51707ef..4adc65766935 100644
--- a/include/linux/panic.h
+++ b/include/linux/panic.h
@@ -20,8 +20,6 @@ extern bool panic_triggering_all_cpu_backtrace;
extern int panic_timeout;
extern unsigned long panic_print;
extern int panic_on_oops;
-extern int panic_on_unrecovered_nmi;
-extern int panic_on_io_nmi;
extern int panic_on_warn;
extern unsigned long panic_on_taint;
diff --git a/include/linux/part_stat.h b/include/linux/part_stat.h
index c5e9cac0575e..eeeff2a04529 100644
--- a/include/linux/part_stat.h
+++ b/include/linux/part_stat.h
@@ -79,4 +79,6 @@ static inline void part_stat_set_all(struct block_device *part, int value)
#define part_stat_local_read_cpu(part, field, cpu) \
local_read(&(part_stat_get_cpu(part, field, cpu)))
+unsigned int bdev_count_inflight(struct block_device *part);
+
#endif /* _LINUX_PART_STAT_H */
diff --git a/include/linux/pci-p2pdma.h b/include/linux/pci-p2pdma.h
index 2c07aa6b7665..075c20b161d9 100644
--- a/include/linux/pci-p2pdma.h
+++ b/include/linux/pci-p2pdma.h
@@ -104,4 +104,89 @@ static inline struct pci_dev *pci_p2pmem_find(struct device *client)
return pci_p2pmem_find_many(&client, 1);
}
+enum pci_p2pdma_map_type {
+ /*
+ * PCI_P2PDMA_MAP_UNKNOWN: Used internally as an initial state before
+ * the mapping type has been calculated. Exported routines for the API
+ * will never return this value.
+ */
+ PCI_P2PDMA_MAP_UNKNOWN = 0,
+
+ /*
+ * Not a PCI P2PDMA transfer.
+ */
+ PCI_P2PDMA_MAP_NONE,
+
+ /*
+ * PCI_P2PDMA_MAP_NOT_SUPPORTED: Indicates the transaction will
+ * traverse the host bridge and the host bridge is not in the
+ * allowlist. DMA mapping routines should return an error when
+ * this is returned.
+ */
+ PCI_P2PDMA_MAP_NOT_SUPPORTED,
+
+ /*
+ * PCI_P2PDMA_MAP_BUS_ADDR: Indicates that two devices can talk to
+ * each other directly through a PCI switch and the transaction will
+ * not traverse the host bridge. Such a mapping should program
+ * the DMA engine with PCI bus addresses.
+ */
+ PCI_P2PDMA_MAP_BUS_ADDR,
+
+ /*
+ * PCI_P2PDMA_MAP_THRU_HOST_BRIDGE: Indicates two devices can talk
+ * to each other, but the transaction traverses a host bridge on the
+ * allowlist. In this case, a normal mapping either with CPU physical
+ * addresses (in the case of dma-direct) or IOVA addresses (in the
+ * case of IOMMUs) should be used to program the DMA engine.
+ */
+ PCI_P2PDMA_MAP_THRU_HOST_BRIDGE,
+};
+
+struct pci_p2pdma_map_state {
+ struct dev_pagemap *pgmap;
+ enum pci_p2pdma_map_type map;
+ u64 bus_off;
+};
+
+/* helper for pci_p2pdma_state(), do not use directly */
+void __pci_p2pdma_update_state(struct pci_p2pdma_map_state *state,
+ struct device *dev, struct page *page);
+
+/**
+ * pci_p2pdma_state - check the P2P transfer state of a page
+ * @state: P2P state structure
+ * @dev: device to transfer to/from
+ * @page: page to map
+ *
+ * Check if @page is a PCI P2PDMA page, and if so, of what kind. Returns the
+ * map type, and updates @state with all information needed for a P2P transfer.
+ */
+static inline enum pci_p2pdma_map_type
+pci_p2pdma_state(struct pci_p2pdma_map_state *state, struct device *dev,
+ struct page *page)
+{
+ if (IS_ENABLED(CONFIG_PCI_P2PDMA) && is_pci_p2pdma_page(page)) {
+ if (state->pgmap != page_pgmap(page))
+ __pci_p2pdma_update_state(state, dev, page);
+ return state->map;
+ }
+ return PCI_P2PDMA_MAP_NONE;
+}
+
+/**
+ * pci_p2pdma_bus_addr_map - Translate a physical address to a bus address
+ * for a PCI_P2PDMA_MAP_BUS_ADDR transfer.
+ * @state: P2P state structure
+ * @paddr: physical address to map
+ *
+ * Map a physically contiguous PCI_P2PDMA_MAP_BUS_ADDR transfer.
+ */
+static inline dma_addr_t
+pci_p2pdma_bus_addr_map(struct pci_p2pdma_map_state *state, phys_addr_t paddr)
+{
+ WARN_ON_ONCE(state->map != PCI_P2PDMA_MAP_BUS_ADDR);
+ return paddr + state->bus_off;
+}
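Putting the two helpers together, a per-page mapping loop looks roughly like this; error handling is trimmed and the dma_map_page() fallback stands in for whatever path (dma-direct or IOMMU) the caller already uses:

/* Sketch: per-page P2P dispatch while mapping a transfer. */
struct pci_p2pdma_map_state state = {};
dma_addr_t addr;

switch (pci_p2pdma_state(&state, dev, page)) {
case PCI_P2PDMA_MAP_BUS_ADDR:
	addr = pci_p2pdma_bus_addr_map(&state, page_to_phys(page));
	break;
case PCI_P2PDMA_MAP_NONE:
case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
	addr = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
	break;
default:
	return -EREMOTEIO;	/* PCI_P2PDMA_MAP_NOT_SUPPORTED */
}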
+
#endif /* _LINUX_PCI_P2P_H */
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 51e2bd6405cd..b231cbc67a35 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -1671,7 +1671,7 @@ void pci_disable_msi(struct pci_dev *dev);
int pci_msix_vec_count(struct pci_dev *dev);
void pci_disable_msix(struct pci_dev *dev);
void pci_restore_msi_state(struct pci_dev *dev);
-int pci_msi_enabled(void);
+bool pci_msi_enabled(void);
int pci_enable_msi(struct pci_dev *dev);
int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries,
int minvec, int maxvec);
@@ -1704,7 +1704,7 @@ static inline void pci_disable_msi(struct pci_dev *dev) { }
static inline int pci_msix_vec_count(struct pci_dev *dev) { return -ENOSYS; }
static inline void pci_disable_msix(struct pci_dev *dev) { }
static inline void pci_restore_msi_state(struct pci_dev *dev) { }
-static inline int pci_msi_enabled(void) { return 0; }
+static inline bool pci_msi_enabled(void) { return false; }
static inline int pci_enable_msi(struct pci_dev *dev)
{ return -ENOSYS; }
static inline int pci_enable_msix_range(struct pci_dev *dev,
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 2e28182c3af0..e2d71b6fdd84 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -3049,6 +3049,7 @@
#define PCI_DEVICE_ID_INTEL_HDA_DG1 0x490d
#define PCI_DEVICE_ID_INTEL_HDA_EHL_0 0x4b55
#define PCI_DEVICE_ID_INTEL_HDA_EHL_3 0x4b58
+#define PCI_DEVICE_ID_INTEL_HDA_WCL 0x4d28
#define PCI_DEVICE_ID_INTEL_HDA_JSL_N 0x4dc8
#define PCI_DEVICE_ID_INTEL_HDA_DG2_0 0x4f90
#define PCI_DEVICE_ID_INTEL_HDA_DG2_1 0x4f91
@@ -3070,6 +3071,7 @@
#define PCI_DEVICE_ID_INTEL_5100_21 0x65f5
#define PCI_DEVICE_ID_INTEL_5100_22 0x65f6
#define PCI_DEVICE_ID_INTEL_IOAT_SCNB 0x65ff
+#define PCI_DEVICE_ID_INTEL_HDA_FCL 0x67a8
#define PCI_DEVICE_ID_INTEL_82371SB_0 0x7000
#define PCI_DEVICE_ID_INTEL_82371SB_1 0x7010
#define PCI_DEVICE_ID_INTEL_82371SB_2 0x7020
diff --git a/include/linux/percpu-rwsem.h b/include/linux/percpu-rwsem.h
index af7d75ede619..288f5235649a 100644
--- a/include/linux/percpu-rwsem.h
+++ b/include/linux/percpu-rwsem.h
@@ -43,9 +43,10 @@ is_static struct percpu_rw_semaphore name = { \
#define DEFINE_STATIC_PERCPU_RWSEM(name) \
__DEFINE_PERCPU_RWSEM(name, static)
-extern bool __percpu_down_read(struct percpu_rw_semaphore *, bool);
+extern bool __percpu_down_read(struct percpu_rw_semaphore *, bool, bool);
-static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
+static inline void percpu_down_read_internal(struct percpu_rw_semaphore *sem,
+ bool freezable)
{
might_sleep();
@@ -63,7 +64,7 @@ static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
if (likely(rcu_sync_is_idle(&sem->rss)))
this_cpu_inc(*sem->read_count);
else
- __percpu_down_read(sem, false); /* Unconditional memory barrier */
+ __percpu_down_read(sem, false, freezable); /* Unconditional memory barrier */
/*
* The preempt_enable() prevents the compiler from
* bleeding the critical section out.
@@ -71,6 +72,17 @@ static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
preempt_enable();
}
+static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
+{
+ percpu_down_read_internal(sem, false);
+}
+
+static inline void percpu_down_read_freezable(struct percpu_rw_semaphore *sem,
+ bool freeze)
+{
+ percpu_down_read_internal(sem, freeze);
+}
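The freezable read-side variant exists so that a reader that may sleep a long time under the semaphore does not block the freezer (the livepatch change earlier in this patch is in the same vein). A thin sketch, with my_rwsem assumed:

/* Sketch: a reader that may block across suspend allows freezing. */
percpu_down_read_freezable(&my_rwsem, true);
/* ... read-side critical section, may sleep freezably ... */
percpu_up_read(&my_rwsem);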
+
static inline bool percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
{
bool ret = true;
@@ -82,7 +94,7 @@ static inline bool percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
if (likely(rcu_sync_is_idle(&sem->rss)))
this_cpu_inc(*sem->read_count);
else
- ret = __percpu_down_read(sem, true); /* Unconditional memory barrier */
+ ret = __percpu_down_read(sem, true, false); /* Unconditional memory barrier */
preempt_enable();
/*
* The barrier() from preempt_enable() prevents the compiler from
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 52b5ea663b9f..85bf8dd9f087 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -15,11 +15,7 @@
/* enough to cover all DEFINE_PER_CPUs in modules */
#ifdef CONFIG_MODULES
-#ifdef CONFIG_MEM_ALLOC_PROFILING
-#define PERCPU_MODULE_RESERVE (8 << 13)
-#else
#define PERCPU_MODULE_RESERVE (8 << 10)
-#endif
#else
#define PERCPU_MODULE_RESERVE 0
#endif
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 0069ba6866a4..52dc7cfab0e0 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -26,18 +26,9 @@
# include <asm/local64.h>
#endif
-#define PERF_GUEST_ACTIVE 0x01
-#define PERF_GUEST_USER 0x02
-
-struct perf_guest_info_callbacks {
- unsigned int (*state)(void);
- unsigned long (*get_ip)(void);
- unsigned int (*handle_intel_pt_intr)(void);
-};
-
#ifdef CONFIG_HAVE_HW_BREAKPOINT
-#include <linux/rhashtable-types.h>
-#include <asm/hw_breakpoint.h>
+# include <linux/rhashtable-types.h>
+# include <asm/hw_breakpoint.h>
#endif
#include <linux/list.h>
@@ -62,19 +53,20 @@ struct perf_guest_info_callbacks {
#include <linux/security.h>
#include <linux/static_call.h>
#include <linux/lockdep.h>
+
#include <asm/local.h>
struct perf_callchain_entry {
- __u64 nr;
- __u64 ip[]; /* /proc/sys/kernel/perf_event_max_stack */
+ u64 nr;
+ u64 ip[]; /* /proc/sys/kernel/perf_event_max_stack */
};
struct perf_callchain_entry_ctx {
- struct perf_callchain_entry *entry;
- u32 max_stack;
- u32 nr;
- short contexts;
- bool contexts_maxed;
+ struct perf_callchain_entry *entry;
+ u32 max_stack;
+ u32 nr;
+ short contexts;
+ bool contexts_maxed;
};
typedef unsigned long (*perf_copy_f)(void *dst, const void *src,
@@ -121,8 +113,8 @@ static __always_inline bool perf_raw_frag_last(const struct perf_raw_frag *frag)
* already stored in age order, the hw_idx should be 0.
*/
struct perf_branch_stack {
- __u64 nr;
- __u64 hw_idx;
+ u64 nr;
+ u64 hw_idx;
struct perf_branch_entry entries[];
};
@@ -132,10 +124,10 @@ struct task_struct;
* extra PMU register associated with an event
*/
struct hw_perf_event_extra {
- u64 config; /* register value */
- unsigned int reg; /* register address or index */
- int alloc; /* extra register already allocated */
- int idx; /* index in shared_regs->regs[] */
+ u64 config; /* register value */
+ unsigned int reg; /* register address or index */
+ int alloc; /* extra register already allocated */
+ int idx; /* index in shared_regs->regs[] */
};
/**
@@ -144,8 +136,8 @@ struct hw_perf_event_extra {
* PERF_EVENT_FLAG_ARCH bits are reserved for architecture-specific
* usage.
*/
-#define PERF_EVENT_FLAG_ARCH 0x000fffff
-#define PERF_EVENT_FLAG_USER_READ_CNT 0x80000000
+#define PERF_EVENT_FLAG_ARCH 0x0fffffff
+#define PERF_EVENT_FLAG_USER_READ_CNT 0x80000000
static_assert((PERF_EVENT_FLAG_USER_READ_CNT & PERF_EVENT_FLAG_ARCH) == 0);
@@ -157,7 +149,9 @@ struct hw_perf_event {
union {
struct { /* hardware */
u64 config;
+ u64 config1;
u64 last_tag;
+ u64 dyn_constraint;
unsigned long config_base;
unsigned long event_base;
int event_base_rdpmc;
@@ -225,9 +219,14 @@ struct hw_perf_event {
/*
* hw_perf_event::state flags; used to track the PERF_EF_* state.
*/
-#define PERF_HES_STOPPED 0x01 /* the counter is stopped */
-#define PERF_HES_UPTODATE 0x02 /* event->count up-to-date */
-#define PERF_HES_ARCH 0x04
+
+/* the counter is stopped */
+#define PERF_HES_STOPPED 0x01
+
+/* event->count up-to-date */
+#define PERF_HES_UPTODATE 0x02
+
+#define PERF_HES_ARCH 0x04
int state;
@@ -276,7 +275,7 @@ struct hw_perf_event {
*/
u64 freq_time_stamp;
u64 freq_count_stamp;
-#endif
+#endif /* CONFIG_PERF_EVENTS */
};
struct perf_event;
@@ -285,28 +284,33 @@ struct perf_event_pmu_context;
/*
* Common implementation detail of pmu::{start,commit,cancel}_txn
*/
-#define PERF_PMU_TXN_ADD 0x1 /* txn to add/schedule event on PMU */
-#define PERF_PMU_TXN_READ 0x2 /* txn to read event group from PMU */
+
+/* txn to add/schedule event on PMU */
+#define PERF_PMU_TXN_ADD 0x1
+
+/* txn to read event group from PMU */
+#define PERF_PMU_TXN_READ 0x2
/**
* pmu::capabilities flags
*/
-#define PERF_PMU_CAP_NO_INTERRUPT 0x0001
-#define PERF_PMU_CAP_NO_NMI 0x0002
-#define PERF_PMU_CAP_AUX_NO_SG 0x0004
-#define PERF_PMU_CAP_EXTENDED_REGS 0x0008
-#define PERF_PMU_CAP_EXCLUSIVE 0x0010
-#define PERF_PMU_CAP_ITRACE 0x0020
-#define PERF_PMU_CAP_NO_EXCLUDE 0x0040
-#define PERF_PMU_CAP_AUX_OUTPUT 0x0080
-#define PERF_PMU_CAP_EXTENDED_HW_TYPE 0x0100
-#define PERF_PMU_CAP_AUX_PAUSE 0x0200
+#define PERF_PMU_CAP_NO_INTERRUPT 0x0001
+#define PERF_PMU_CAP_NO_NMI 0x0002
+#define PERF_PMU_CAP_AUX_NO_SG 0x0004
+#define PERF_PMU_CAP_EXTENDED_REGS 0x0008
+#define PERF_PMU_CAP_EXCLUSIVE 0x0010
+#define PERF_PMU_CAP_ITRACE 0x0020
+#define PERF_PMU_CAP_NO_EXCLUDE 0x0040
+#define PERF_PMU_CAP_AUX_OUTPUT 0x0080
+#define PERF_PMU_CAP_EXTENDED_HW_TYPE 0x0100
+#define PERF_PMU_CAP_AUX_PAUSE 0x0200
+#define PERF_PMU_CAP_AUX_PREFER_LARGE 0x0400
/**
* pmu::scope
*/
enum perf_pmu_scope {
- PERF_PMU_SCOPE_NONE = 0,
+ PERF_PMU_SCOPE_NONE = 0,
PERF_PMU_SCOPE_CORE,
PERF_PMU_SCOPE_DIE,
PERF_PMU_SCOPE_CLUSTER,
@@ -325,6 +329,9 @@ struct perf_output_handle;
struct pmu {
struct list_head entry;
+ spinlock_t events_lock;
+ struct list_head events;
+
struct module *module;
struct device *dev;
struct device *parent;
@@ -387,11 +394,21 @@ struct pmu {
* Flags for ->add()/->del()/ ->start()/->stop(). There are
* matching hw_perf_event::state flags.
*/
-#define PERF_EF_START 0x01 /* start the counter when adding */
-#define PERF_EF_RELOAD 0x02 /* reload the counter when starting */
-#define PERF_EF_UPDATE 0x04 /* update the counter when stopping */
-#define PERF_EF_PAUSE 0x08 /* AUX area event, pause tracing */
-#define PERF_EF_RESUME 0x10 /* AUX area event, resume tracing */
+
+/* start the counter when adding */
+#define PERF_EF_START 0x01
+
+/* reload the counter when starting */
+#define PERF_EF_RELOAD 0x02
+
+/* update the counter when stopping */
+#define PERF_EF_UPDATE 0x04
+
+/* AUX area event, pause tracing */
+#define PERF_EF_PAUSE 0x08
+
+/* AUX area event, resume tracing */
+#define PERF_EF_RESUME 0x10
/*
* Adds/Removes a counter to/from the PMU, can be done inside a
@@ -590,10 +607,10 @@ enum perf_addr_filter_action_t {
* This is a hardware-agnostic filter configuration as specified by the user.
*/
struct perf_addr_filter {
- struct list_head entry;
- struct path path;
- unsigned long offset;
- unsigned long size;
+ struct list_head entry;
+ struct path path;
+ unsigned long offset;
+ unsigned long size;
enum perf_addr_filter_action_t action;
};
@@ -608,23 +625,24 @@ struct perf_addr_filter {
* bundled together; see perf_event_addr_filters().
*/
struct perf_addr_filters_head {
- struct list_head list;
- raw_spinlock_t lock;
- unsigned int nr_file_filters;
+ struct list_head list;
+ raw_spinlock_t lock;
+ unsigned int nr_file_filters;
};
struct perf_addr_filter_range {
- unsigned long start;
- unsigned long size;
+ unsigned long start;
+ unsigned long size;
};
/**
* enum perf_event_state - the states of an event:
*/
enum perf_event_state {
- PERF_EVENT_STATE_DEAD = -4,
- PERF_EVENT_STATE_EXIT = -3,
- PERF_EVENT_STATE_ERROR = -2,
+ PERF_EVENT_STATE_DEAD = -5,
+ PERF_EVENT_STATE_REVOKED = -4, /* pmu gone, must not touch */
+ PERF_EVENT_STATE_EXIT = -3, /* task died, still inherit */
+ PERF_EVENT_STATE_ERROR = -2, /* scheduling error, can enable */
PERF_EVENT_STATE_OFF = -1,
PERF_EVENT_STATE_INACTIVE = 0,
PERF_EVENT_STATE_ACTIVE = 1,
@@ -662,24 +680,24 @@ struct swevent_hlist {
struct rcu_head rcu_head;
};
-#define PERF_ATTACH_CONTEXT 0x0001
-#define PERF_ATTACH_GROUP 0x0002
-#define PERF_ATTACH_TASK 0x0004
-#define PERF_ATTACH_TASK_DATA 0x0008
-#define PERF_ATTACH_GLOBAL_DATA 0x0010
-#define PERF_ATTACH_SCHED_CB 0x0020
-#define PERF_ATTACH_CHILD 0x0040
-#define PERF_ATTACH_EXCLUSIVE 0x0080
-#define PERF_ATTACH_CALLCHAIN 0x0100
-#define PERF_ATTACH_ITRACE 0x0200
+#define PERF_ATTACH_CONTEXT 0x0001
+#define PERF_ATTACH_GROUP 0x0002
+#define PERF_ATTACH_TASK 0x0004
+#define PERF_ATTACH_TASK_DATA 0x0008
+#define PERF_ATTACH_GLOBAL_DATA 0x0010
+#define PERF_ATTACH_SCHED_CB 0x0020
+#define PERF_ATTACH_CHILD 0x0040
+#define PERF_ATTACH_EXCLUSIVE 0x0080
+#define PERF_ATTACH_CALLCHAIN 0x0100
+#define PERF_ATTACH_ITRACE 0x0200
struct bpf_prog;
struct perf_cgroup;
struct perf_buffer;
struct pmu_event_list {
- raw_spinlock_t lock;
- struct list_head list;
+ raw_spinlock_t lock;
+ struct list_head list;
};
/*
@@ -689,12 +707,12 @@ struct pmu_event_list {
* disabled is sufficient since it will hold-off the IPIs.
*/
#ifdef CONFIG_PROVE_LOCKING
-#define lockdep_assert_event_ctx(event) \
+# define lockdep_assert_event_ctx(event) \
WARN_ON_ONCE(__lockdep_enabled && \
(this_cpu_read(hardirqs_enabled) && \
lockdep_is_held(&(event)->ctx->mutex) != LOCK_STATE_HELD))
#else
-#define lockdep_assert_event_ctx(event)
+# define lockdep_assert_event_ctx(event)
#endif
#define for_each_sibling_event(sibling, event) \
@@ -852,9 +870,9 @@ struct perf_event {
#ifdef CONFIG_EVENT_TRACING
struct trace_event_call *tp_event;
struct event_filter *filter;
-#ifdef CONFIG_FUNCTION_TRACER
+# ifdef CONFIG_FUNCTION_TRACER
struct ftrace_ops ftrace_ops;
-#endif
+# endif
#endif
#ifdef CONFIG_CGROUP_PERF
@@ -865,6 +883,7 @@ struct perf_event {
void *security;
#endif
struct list_head sb_list;
+ struct list_head pmu_list;
/*
 * Certain events get forwarded to another pmu internally by over-
@@ -872,7 +891,7 @@ struct perf_event {
* of it. event->orig_type contains original 'type' requested by
* user.
*/
- __u32 orig_type;
+ u32 orig_type;
#endif /* CONFIG_PERF_EVENTS */
};
@@ -937,8 +956,8 @@ static inline bool perf_pmu_ctx_is_active(struct perf_event_pmu_context *epc)
}
struct perf_event_groups {
- struct rb_root tree;
- u64 index;
+ struct rb_root tree;
+ u64 index;
};
@@ -1155,7 +1174,7 @@ extern void perf_aux_output_flag(struct perf_output_handle *handle, u64 flags);
extern void perf_event_itrace_started(struct perf_event *event);
extern int perf_pmu_register(struct pmu *pmu, const char *name, int type);
-extern void perf_pmu_unregister(struct pmu *pmu);
+extern int perf_pmu_unregister(struct pmu *pmu);
extern void __perf_event_task_sched_in(struct task_struct *prev,
struct task_struct *task);
@@ -1181,16 +1200,18 @@ extern void perf_pmu_resched(struct pmu *pmu);
extern int perf_event_refresh(struct perf_event *event, int refresh);
extern void perf_event_update_userpage(struct perf_event *event);
extern int perf_event_release_kernel(struct perf_event *event);
+
extern struct perf_event *
perf_event_create_kernel_counter(struct perf_event_attr *attr,
- int cpu,
- struct task_struct *task,
- perf_overflow_handler_t callback,
- void *context);
+ int cpu,
+ struct task_struct *task,
+ perf_overflow_handler_t callback,
+ void *context);
+
extern void perf_pmu_migrate_context(struct pmu *pmu,
- int src_cpu, int dst_cpu);
-int perf_event_read_local(struct perf_event *event, u64 *value,
- u64 *enabled, u64 *running);
+ int src_cpu, int dst_cpu);
+extern int perf_event_read_local(struct perf_event *event, u64 *value,
+ u64 *enabled, u64 *running);
extern u64 perf_event_read_value(struct perf_event *event,
u64 *enabled, u64 *running);
@@ -1407,14 +1428,14 @@ static inline u32 perf_sample_data_size(struct perf_sample_data *data,
*/
static inline void perf_clear_branch_entry_bitfields(struct perf_branch_entry *br)
{
- br->mispred = 0;
- br->predicted = 0;
- br->in_tx = 0;
- br->abort = 0;
- br->cycles = 0;
- br->type = 0;
- br->spec = PERF_BR_SPEC_NA;
- br->reserved = 0;
+ br->mispred = 0;
+ br->predicted = 0;
+ br->in_tx = 0;
+ br->abort = 0;
+ br->cycles = 0;
+ br->type = 0;
+ br->spec = PERF_BR_SPEC_NA;
+ br->reserved = 0;
}
extern void perf_output_sample(struct perf_output_handle *handle,
@@ -1603,7 +1624,17 @@ extern void perf_event_bpf_event(struct bpf_prog *prog,
enum perf_bpf_event_type type,
u16 flags);
+#define PERF_GUEST_ACTIVE 0x01
+#define PERF_GUEST_USER 0x02
+
+struct perf_guest_info_callbacks {
+ unsigned int (*state)(void);
+ unsigned long (*get_ip)(void);
+ unsigned int (*handle_intel_pt_intr)(void);
+};
+
#ifdef CONFIG_GUEST_PERF_EVENTS
+
extern struct perf_guest_info_callbacks __rcu *perf_guest_cbs;
DECLARE_STATIC_CALL(__perf_guest_state, *perf_guest_cbs->state);
@@ -1614,21 +1645,27 @@ static inline unsigned int perf_guest_state(void)
{
return static_call(__perf_guest_state)();
}
+
static inline unsigned long perf_guest_get_ip(void)
{
return static_call(__perf_guest_get_ip)();
}
+
static inline unsigned int perf_guest_handle_intel_pt_intr(void)
{
return static_call(__perf_guest_handle_intel_pt_intr)();
}
+
extern void perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs);
extern void perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs);
-#else
+
+#else /* !CONFIG_GUEST_PERF_EVENTS: */
+
static inline unsigned int perf_guest_state(void) { return 0; }
static inline unsigned long perf_guest_get_ip(void) { return 0; }
static inline unsigned int perf_guest_handle_intel_pt_intr(void) { return 0; }
-#endif /* CONFIG_GUEST_PERF_EVENTS */
+
+#endif /* !CONFIG_GUEST_PERF_EVENTS */
extern void perf_event_exec(void);
extern void perf_event_comm(struct task_struct *tsk, bool exec);
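
For illustration, a hedged sketch of a hypervisor wiring up the guest callbacks declared above. The my_vcpu_running(), my_vcpu_in_user() and my_guest_pc() helpers are hypothetical stand-ins, and .handle_intel_pt_intr is left unset here on the assumption that it is optional:

	/* hypothetical hypervisor-side helpers, not part of this patch */
	static unsigned int my_guest_state(void)
	{
		unsigned int state = 0;

		if (my_vcpu_running())
			state |= PERF_GUEST_ACTIVE;
		if (my_vcpu_in_user())
			state |= PERF_GUEST_USER;

		return state;
	}

	static unsigned long my_guest_get_ip(void)
	{
		return my_guest_pc();	/* guest program counter */
	}

	static struct perf_guest_info_callbacks my_guest_cbs = {
		.state	= my_guest_state,
		.get_ip	= my_guest_get_ip,
		/* .handle_intel_pt_intr intentionally left unset */
	};

	/* at hypervisor init: */
	perf_register_guest_info_callbacks(&my_guest_cbs);
	/* and on teardown: */
	perf_unregister_guest_info_callbacks(&my_guest_cbs);
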
@@ -1658,6 +1695,7 @@ static inline int perf_callchain_store_context(struct perf_callchain_entry_ctx *
{
if (ctx->contexts < sysctl_perf_event_max_contexts_per_stack) {
struct perf_callchain_entry *entry = ctx->entry;
+
entry->ip[entry->nr++] = ip;
++ctx->contexts;
return 0;
@@ -1671,6 +1709,7 @@ static inline int perf_callchain_store(struct perf_callchain_entry_ctx *ctx, u64
{
if (ctx->nr < ctx->max_stack && !ctx->contexts_maxed) {
struct perf_callchain_entry *entry = ctx->entry;
+
entry->ip[entry->nr++] = ip;
++ctx->nr;
return 0;
@@ -1697,7 +1736,7 @@ static inline int perf_is_paranoid(void)
return sysctl_perf_event_paranoid > -1;
}
-int perf_allow_kernel(void);
+extern int perf_allow_kernel(void);
static inline int perf_allow_cpu(void)
{
@@ -1760,7 +1799,7 @@ static inline bool needs_branch_stack(struct perf_event *event)
static inline bool has_aux(struct perf_event *event)
{
- return event->pmu->setup_aux;
+ return event->pmu && event->pmu->setup_aux;
}
static inline bool has_aux_action(struct perf_event *event)
@@ -1819,7 +1858,7 @@ extern int perf_output_begin_backward(struct perf_output_handle *handle,
extern void perf_output_end(struct perf_output_handle *handle);
extern unsigned int perf_output_copy(struct perf_output_handle *handle,
- const void *buf, unsigned int len);
+ const void *buf, unsigned int len);
extern unsigned int perf_output_skip(struct perf_output_handle *handle,
unsigned int len);
extern long perf_output_copy_aux(struct perf_output_handle *aux_handle,
@@ -1836,7 +1875,9 @@ extern void perf_event_task_tick(void);
extern int perf_event_account_interrupt(struct perf_event *event);
extern int perf_event_period(struct perf_event *event, u64 value);
extern u64 perf_event_pause(struct perf_event *event, bool reset);
+
#else /* !CONFIG_PERF_EVENTS: */
+
static inline void *
perf_aux_output_begin(struct perf_output_handle *handle,
struct perf_event *event) { return NULL; }
@@ -1914,19 +1955,14 @@ static inline void perf_event_disable(struct perf_event *event) { }
static inline int __perf_event_disable(void *info) { return -1; }
static inline void perf_event_task_tick(void) { }
static inline int perf_event_release_kernel(struct perf_event *event) { return 0; }
-static inline int perf_event_period(struct perf_event *event, u64 value)
-{
- return -EINVAL;
-}
-static inline u64 perf_event_pause(struct perf_event *event, bool reset)
-{
- return 0;
-}
-static inline int perf_exclude_event(struct perf_event *event, struct pt_regs *regs)
-{
- return 0;
-}
-#endif
+static inline int
+perf_event_period(struct perf_event *event, u64 value) { return -EINVAL; }
+static inline u64
+perf_event_pause(struct perf_event *event, bool reset) { return 0; }
+static inline int
+perf_exclude_event(struct perf_event *event, struct pt_regs *regs) { return 0; }
+
+#endif /* !CONFIG_PERF_EVENTS */
#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL)
extern void perf_restore_debug_store(void);
@@ -1934,31 +1970,31 @@ extern void perf_restore_debug_store(void);
static inline void perf_restore_debug_store(void) { }
#endif
-#define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x))
+#define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x))
struct perf_pmu_events_attr {
- struct device_attribute attr;
- u64 id;
- const char *event_str;
+ struct device_attribute attr;
+ u64 id;
+ const char *event_str;
};
struct perf_pmu_events_ht_attr {
- struct device_attribute attr;
- u64 id;
- const char *event_str_ht;
- const char *event_str_noht;
+ struct device_attribute attr;
+ u64 id;
+ const char *event_str_ht;
+ const char *event_str_noht;
};
struct perf_pmu_events_hybrid_attr {
- struct device_attribute attr;
- u64 id;
- const char *event_str;
- u64 pmu_type;
+ struct device_attribute attr;
+ u64 id;
+ const char *event_str;
+ u64 pmu_type;
};
struct perf_pmu_format_hybrid_attr {
- struct device_attribute attr;
- u64 pmu_type;
+ struct device_attribute attr;
+ u64 pmu_type;
};
ssize_t perf_event_sysfs_show(struct device *dev, struct device_attribute *attr,
@@ -2000,11 +2036,11 @@ static struct device_attribute format_attr_##_name = __ATTR_RO(_name)
/* Performance counter hotplug functions */
#ifdef CONFIG_PERF_EVENTS
-int perf_event_init_cpu(unsigned int cpu);
-int perf_event_exit_cpu(unsigned int cpu);
+extern int perf_event_init_cpu(unsigned int cpu);
+extern int perf_event_exit_cpu(unsigned int cpu);
#else
-#define perf_event_init_cpu NULL
-#define perf_event_exit_cpu NULL
+# define perf_event_init_cpu NULL
+# define perf_event_exit_cpu NULL
#endif
extern void arch_perf_update_userpage(struct perf_event *event,
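
As a usage note for the prototype re-indented above, a minimal sketch of creating a CPU-bound kernel counter. The attr values and the overflow handler body are illustrative only; the call returns a valid event or an ERR_PTR():

	static void my_overflow_handler(struct perf_event *event,
					struct perf_sample_data *data,
					struct pt_regs *regs)
	{
		/* invoked on counter overflow; keep this short */
	}

	static struct perf_event *my_create_cycles_counter(int cpu)
	{
		struct perf_event_attr attr = {
			.type		= PERF_TYPE_HARDWARE,
			.config		= PERF_COUNT_HW_CPU_CYCLES,
			.size		= sizeof(attr),
			.sample_period	= 1000000,	/* illustrative */
		};

		/* counts on @cpu; task == NULL means not bound to a task */
		return perf_event_create_kernel_counter(&attr, cpu, NULL,
							my_overflow_handler, NULL);
	}
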
diff --git a/include/linux/pid.h b/include/linux/pid.h
index 311ecebd7d56..453ae6d8a68d 100644
--- a/include/linux/pid.h
+++ b/include/linux/pid.h
@@ -77,7 +77,7 @@ struct file;
struct pid *pidfd_pid(const struct file *file);
struct pid *pidfd_get_pid(unsigned int fd, unsigned int *flags);
struct task_struct *pidfd_get_task(int pidfd, unsigned int *flags);
-int pidfd_prepare(struct pid *pid, unsigned int flags, struct file **ret);
+int pidfd_prepare(struct pid *pid, unsigned int flags, struct file **ret_file);
void do_notify_pidfd(struct task_struct *task);
static inline struct pid *get_pid(struct pid *pid)
diff --git a/include/linux/pidfs.h b/include/linux/pidfs.h
index 05e6f8f4a026..77e7db194914 100644
--- a/include/linux/pidfs.h
+++ b/include/linux/pidfs.h
@@ -2,11 +2,19 @@
#ifndef _LINUX_PID_FS_H
#define _LINUX_PID_FS_H
+struct coredump_params;
+
struct file *pidfs_alloc_file(struct pid *pid, unsigned int flags);
void __init pidfs_init(void);
void pidfs_add_pid(struct pid *pid);
void pidfs_remove_pid(struct pid *pid);
void pidfs_exit(struct task_struct *tsk);
+#ifdef CONFIG_COREDUMP
+void pidfs_coredump(const struct coredump_params *cprm);
+#endif
extern const struct dentry_operations pidfs_dentry_operations;
+int pidfs_register_pid(struct pid *pid);
+void pidfs_get_pid(struct pid *pid);
+void pidfs_put_pid(struct pid *pid);
#endif /* _LINUX_PID_FS_H */
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index d56a78af4af1..0b18160901a2 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -142,6 +142,8 @@ struct genpd_governor_data {
bool max_off_time_changed;
ktime_t next_wakeup;
ktime_t next_hrtimer;
+ ktime_t last_enter;
+ bool reflect_residency;
bool cached_power_down_ok;
bool cached_power_down_state_idx;
};
@@ -153,6 +155,8 @@ struct genpd_power_state {
s64 residency_ns;
u64 usage;
u64 rejected;
+ u64 above;
+ u64 below;
struct fwnode_handle *fwnode;
u64 idle_time;
void *data;
@@ -285,6 +289,8 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
int pm_genpd_init(struct generic_pm_domain *genpd,
struct dev_power_governor *gov, bool is_off);
int pm_genpd_remove(struct generic_pm_domain *genpd);
+void pm_genpd_inc_rejected(struct generic_pm_domain *genpd,
+ unsigned int state_idx);
struct device *dev_to_genpd_dev(struct device *dev);
int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state);
int dev_pm_genpd_add_notifier(struct device *dev, struct notifier_block *nb);
@@ -336,6 +342,10 @@ static inline int pm_genpd_remove(struct generic_pm_domain *genpd)
return -EOPNOTSUPP;
}
+static inline void pm_genpd_inc_rejected(struct generic_pm_domain *genpd,
+ unsigned int state_idx)
+{ }
+
static inline struct device *dev_to_genpd_dev(struct device *dev)
{
return ERR_PTR(-EOPNOTSUPP);
diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h
index c247317aae38..cf477beae4bb 100644
--- a/include/linux/pm_opp.h
+++ b/include/linux/pm_opp.h
@@ -11,6 +11,7 @@
#ifndef __LINUX_OPP_H__
#define __LINUX_OPP_H__
+#include <linux/cleanup.h>
#include <linux/energy_model.h>
#include <linux/err.h>
#include <linux/notifier.h>
@@ -100,7 +101,7 @@ struct dev_pm_opp_data {
#if defined(CONFIG_PM_OPP)
struct opp_table *dev_pm_opp_get_opp_table(struct device *dev);
-void dev_pm_opp_get_opp_table_ref(struct opp_table *opp_table);
+struct opp_table *dev_pm_opp_get_opp_table_ref(struct opp_table *opp_table);
void dev_pm_opp_put_opp_table(struct opp_table *opp_table);
unsigned long dev_pm_opp_get_bw(struct dev_pm_opp *opp, bool peak, int index);
@@ -161,7 +162,7 @@ struct dev_pm_opp *dev_pm_opp_find_bw_ceil(struct device *dev,
struct dev_pm_opp *dev_pm_opp_find_bw_floor(struct device *dev,
unsigned int *bw, int index);
-void dev_pm_opp_get(struct dev_pm_opp *opp);
+struct dev_pm_opp *dev_pm_opp_get(struct dev_pm_opp *opp);
void dev_pm_opp_put(struct dev_pm_opp *opp);
int dev_pm_opp_add_dynamic(struct device *dev, struct dev_pm_opp_data *opp);
@@ -196,6 +197,7 @@ int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask)
void dev_pm_opp_remove_table(struct device *dev);
void dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask);
int dev_pm_opp_sync_regulators(struct device *dev);
+
#else
static inline struct opp_table *dev_pm_opp_get_opp_table(struct device *dev)
{
@@ -207,7 +209,10 @@ static inline struct opp_table *dev_pm_opp_get_opp_table_indexed(struct device *
return ERR_PTR(-EOPNOTSUPP);
}
-static inline void dev_pm_opp_get_opp_table_ref(struct opp_table *opp_table) {}
+static inline struct opp_table *dev_pm_opp_get_opp_table_ref(struct opp_table *opp_table)
+{
+ return opp_table;
+}
static inline void dev_pm_opp_put_opp_table(struct opp_table *opp_table) {}
@@ -345,7 +350,10 @@ static inline struct dev_pm_opp *dev_pm_opp_find_bw_floor(struct device *dev,
return ERR_PTR(-EOPNOTSUPP);
}
-static inline void dev_pm_opp_get(struct dev_pm_opp *opp) {}
+static inline struct dev_pm_opp *dev_pm_opp_get(struct dev_pm_opp *opp)
+{
+ return opp;
+}
static inline void dev_pm_opp_put(struct dev_pm_opp *opp) {}
@@ -573,6 +581,12 @@ static inline int dev_pm_opp_of_find_icc_paths(struct device *dev, struct opp_ta
}
#endif
+/* Scope based cleanup macro for OPP reference counting */
+DEFINE_FREE(put_opp, struct dev_pm_opp *, if (!IS_ERR_OR_NULL(_T)) dev_pm_opp_put(_T))
+
+/* Scope based cleanup macro for OPP table reference counting */
+DEFINE_FREE(put_opp_table, struct opp_table *, if (!IS_ERR_OR_NULL(_T)) dev_pm_opp_put_opp_table(_T))
+
/* OPP Configuration helpers */
static inline int dev_pm_opp_add(struct device *dev, unsigned long freq,
@@ -704,4 +718,14 @@ static inline unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
return dev_pm_opp_get_freq_indexed(opp, 0);
}
+static inline int dev_pm_opp_set_level(struct device *dev, unsigned int level)
+{
+ struct dev_pm_opp *opp __free(put_opp) = dev_pm_opp_find_level_exact(dev, level);
+
+ if (IS_ERR(opp))
+ return PTR_ERR(opp);
+
+ return dev_pm_opp_set_opp(dev, opp);
+}
+
#endif /* __LINUX_OPP_H__ */
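
To show what the two DEFINE_FREE() macros above buy, a minimal sketch with a hypothetical caller; the reference taken by dev_pm_opp_get_opp_table() is dropped automatically on every return path:

	static int example_read_opp_count(struct device *dev)
	{
		struct opp_table *opp_table __free(put_opp_table) =
			dev_pm_opp_get_opp_table(dev);

		if (IS_ERR(opp_table))
			return PTR_ERR(opp_table);

		/* no explicit dev_pm_opp_put_opp_table() needed */
		return dev_pm_opp_get_opp_count(dev);
	}
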
diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
index 7fb5a459847e..756b842dcd30 100644
--- a/include/linux/pm_runtime.h
+++ b/include/linux/pm_runtime.h
@@ -96,7 +96,9 @@ extern void pm_runtime_new_link(struct device *dev);
extern void pm_runtime_drop_link(struct device_link *link);
extern void pm_runtime_release_supplier(struct device_link *link);
+int devm_pm_runtime_set_active_enabled(struct device *dev);
extern int devm_pm_runtime_enable(struct device *dev);
+int devm_pm_runtime_get_noresume(struct device *dev);
/**
* pm_suspend_ignore_children - Set runtime PM behavior regarding children.
@@ -294,7 +296,9 @@ static inline bool pm_runtime_blocked(struct device *dev) { return true; }
static inline void pm_runtime_allow(struct device *dev) {}
static inline void pm_runtime_forbid(struct device *dev) {}
+static inline int devm_pm_runtime_set_active_enabled(struct device *dev) { return 0; }
static inline int devm_pm_runtime_enable(struct device *dev) { return 0; }
+static inline int devm_pm_runtime_get_noresume(struct device *dev) { return 0; }
static inline void pm_suspend_ignore_children(struct device *dev, bool enable) {}
static inline void pm_runtime_get_noresume(struct device *dev) {}
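
A short hedged sketch of the two new devres helpers from a probe path; my_probe() and the ordering shown are illustrative:

	static int my_probe(struct platform_device *pdev)
	{
		int ret;

		/* set the RPM state to active and enable runtime PM; both
		 * are undone automatically when the driver unbinds */
		ret = devm_pm_runtime_set_active_enabled(&pdev->dev);
		if (ret)
			return ret;

		/* hold a usage count for the bound lifetime of the device */
		return devm_pm_runtime_get_noresume(&pdev->dev);
	}
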
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index 888824592953..c4cb854971f5 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -288,6 +288,7 @@ struct power_supply_desc {
struct power_supply_ext {
const char *const name;
u8 charge_behaviours;
+ u32 charge_types;
const enum power_supply_property *properties;
size_t num_properties;
diff --git a/include/linux/psp-sev.h b/include/linux/psp-sev.h
index f3cad182d4ef..0b3a36bdaa90 100644
--- a/include/linux/psp-sev.h
+++ b/include/linux/psp-sev.h
@@ -954,6 +954,7 @@ int sev_do_cmd(int cmd, void *data, int *psp_ret);
void *psp_copy_user_blob(u64 uaddr, u32 len);
void *snp_alloc_firmware_page(gfp_t mask);
void snp_free_firmware_page(void *addr);
+void sev_platform_shutdown(void);
#else /* !CONFIG_CRYPTO_DEV_SP_PSP */
@@ -988,6 +989,8 @@ static inline void *snp_alloc_firmware_page(gfp_t mask)
static inline void snp_free_firmware_page(void *addr) { }
+static inline void sev_platform_shutdown(void) { }
+
#endif /* CONFIG_CRYPTO_DEV_SP_PSP */
#endif /* __PSP_SEV_H__ */
diff --git a/include/linux/pwm.h b/include/linux/pwm.h
index 9ece4e5d3815..63a17d2b4ec8 100644
--- a/include/linux/pwm.h
+++ b/include/linux/pwm.h
@@ -218,6 +218,8 @@ static inline void pwm_init_state(const struct pwm_device *pwm,
*
* pwm_get_state(pwm, &state);
* duty = pwm_get_relative_duty_cycle(&state, 100);
+ *
+ * Returns: rounded relative duty cycle multiplied by @scale
*/
static inline unsigned int
pwm_get_relative_duty_cycle(const struct pwm_state *state, unsigned int scale)
@@ -244,8 +246,8 @@ pwm_get_relative_duty_cycle(const struct pwm_state *state, unsigned int scale)
* pwm_set_relative_duty_cycle(&state, 50, 100);
* pwm_apply_might_sleep(pwm, &state);
*
- * This functions returns -EINVAL if @duty_cycle and/or @scale are
- * inconsistent (@scale == 0 or @duty_cycle > @scale).
+ * Returns: 0 on success or ``-EINVAL`` if @duty_cycle and/or @scale are
+ * inconsistent (@scale == 0 or @duty_cycle > @scale)
*/
static inline int
pwm_set_relative_duty_cycle(struct pwm_state *state, unsigned int duty_cycle,
@@ -351,7 +353,7 @@ struct pwm_chip {
* pwmchip_supports_waveform() - checks if the given chip supports waveform callbacks
* @chip: The pwm_chip to test
*
- * Returns true iff the pwm chip support the waveform functions like
+ * Returns: true iff the pwm chip supports the waveform functions like
* pwm_set_waveform_might_sleep() and pwm_round_waveform_might_sleep()
*/
static inline bool pwmchip_supports_waveform(struct pwm_chip *chip)
@@ -369,7 +371,7 @@ static inline struct device *pwmchip_parent(const struct pwm_chip *chip)
return chip->dev.parent;
}
-static inline void *pwmchip_get_drvdata(struct pwm_chip *chip)
+static inline void *pwmchip_get_drvdata(const struct pwm_chip *chip)
{
return dev_get_drvdata(&chip->dev);
}
diff --git a/include/linux/ratelimit.h b/include/linux/ratelimit.h
index b17e0cd0a30c..7aaad158ee37 100644
--- a/include/linux/ratelimit.h
+++ b/include/linux/ratelimit.h
@@ -22,16 +22,43 @@ static inline void ratelimit_default_init(struct ratelimit_state *rs)
DEFAULT_RATELIMIT_BURST);
}
+static inline void ratelimit_state_inc_miss(struct ratelimit_state *rs)
+{
+ atomic_inc(&rs->missed);
+}
+
+static inline int ratelimit_state_get_miss(struct ratelimit_state *rs)
+{
+ return atomic_read(&rs->missed);
+}
+
+static inline int ratelimit_state_reset_miss(struct ratelimit_state *rs)
+{
+ return atomic_xchg_relaxed(&rs->missed, 0);
+}
+
+static inline void ratelimit_state_reset_interval(struct ratelimit_state *rs, int interval_init)
+{
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&rs->lock, flags);
+ rs->interval = interval_init;
+ rs->flags &= ~RATELIMIT_INITIALIZED;
+ atomic_set(&rs->rs_n_left, rs->burst);
+ ratelimit_state_reset_miss(rs);
+ raw_spin_unlock_irqrestore(&rs->lock, flags);
+}
+
static inline void ratelimit_state_exit(struct ratelimit_state *rs)
{
+ int m;
+
if (!(rs->flags & RATELIMIT_MSG_ON_RELEASE))
return;
- if (rs->missed) {
- pr_warn("%s: %d output lines suppressed due to ratelimiting\n",
- current->comm, rs->missed);
- rs->missed = 0;
- }
+ m = ratelimit_state_reset_miss(rs);
+ if (m)
+ pr_warn("%s: %d output lines suppressed due to ratelimiting\n", current->comm, m);
}
static inline void
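
A small sketch against the new atomic miss accounting; my_report_suppressed() is a hypothetical caller modelled on ratelimit_state_exit() above:

	static void my_report_suppressed(struct ratelimit_state *rs)
	{
		/* atomically fetch and clear the suppressed-message count */
		int m = ratelimit_state_reset_miss(rs);

		if (m)
			pr_warn("%d messages were suppressed\n", m);
	}
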
diff --git a/include/linux/ratelimit_types.h b/include/linux/ratelimit_types.h
index 765232ce0b5e..b19c4354540a 100644
--- a/include/linux/ratelimit_types.h
+++ b/include/linux/ratelimit_types.h
@@ -11,14 +11,15 @@
/* issue num suppressed message on exit */
#define RATELIMIT_MSG_ON_RELEASE BIT(0)
+#define RATELIMIT_INITIALIZED BIT(1)
struct ratelimit_state {
raw_spinlock_t lock; /* protect the state */
int interval;
int burst;
- int printed;
- int missed;
+ atomic_t rs_n_left;
+ atomic_t missed;
unsigned int flags;
unsigned long begin;
};
diff --git a/include/linux/rcuref.h b/include/linux/rcuref.h
index 6322d8c1c6b4..2fb2af6d9824 100644
--- a/include/linux/rcuref.h
+++ b/include/linux/rcuref.h
@@ -30,7 +30,11 @@ static inline void rcuref_init(rcuref_t *ref, unsigned int cnt)
* rcuref_read - Read the number of held reference counts of a rcuref
* @ref: Pointer to the reference count
*
- * Return: The number of held references (0 ... N)
+ * Return: The number of held references (0 ... N). The value 0 does not
+ * indicate that it is safe to schedule the object, protected by this reference
+ * counter, for deconstruction.
+ * If you want to know if the reference counter has been marked DEAD (as
 * signaled by rcuref_put()) please use rcuref_is_dead().
*/
static inline unsigned int rcuref_read(rcuref_t *ref)
{
@@ -40,6 +44,22 @@ static inline unsigned int rcuref_read(rcuref_t *ref)
return c >= RCUREF_RELEASED ? 0 : c + 1;
}
+/**
+ * rcuref_is_dead - Check if the rcuref has been already marked dead
+ * @ref: Pointer to the reference count
+ *
+ * Return: True if the object has been marked DEAD. This signals that a previous
+ * invocation of rcuref_put() returned true on this reference counter meaning
+ * the protected object can safely be scheduled for deconstruction.
+ * Otherwise, returns false.
+ */
+static inline bool rcuref_is_dead(rcuref_t *ref)
+{
+ unsigned int c = atomic_read(&ref->refcnt);
+
+ return (c >= RCUREF_RELEASED) && (c < RCUREF_NOREF);
+}
+
extern __must_check bool rcuref_get_slowpath(rcuref_t *ref);
/**
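
For context, a minimal sketch of the put/teardown pattern that rcuref_is_dead() documents; struct my_obj and its fields are hypothetical:

	struct my_obj {
		rcuref_t	ref;
		struct rcu_head	rcu;
	};

	static void my_obj_put(struct my_obj *o)
	{
		/* rcuref_put() returns true only on the final reference drop */
		if (rcuref_put(&o->ref)) {
			/* the counter is now marked DEAD ... */
			WARN_ON_ONCE(!rcuref_is_dead(&o->ref));
			/* ... so destruction may be scheduled safely */
			kfree_rcu(o, rcu);
		}
	}
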
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index d17c5ea3d55d..02b83f5499b8 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -1641,6 +1641,8 @@ struct regmap_irq_chip_data;
* @ack_invert: Inverted ack register: cleared bits for ack.
* @clear_ack: Use this to set 1 and 0 or vice-versa to clear interrupts.
* @status_invert: Inverted status register: cleared bits are active interrupts.
+ * @status_is_level: Status register is actual signal level: XOR status
+ * register with previous value to get active interrupts.
* @wake_invert: Inverted wake register: cleared bits are wake enabled.
* @type_in_mask: Use the mask registers for controlling irq type. Use this if
* the hardware provides separate bits for rising/falling edge
@@ -1704,6 +1706,7 @@ struct regmap_irq_chip {
unsigned int ack_invert:1;
unsigned int clear_ack:1;
unsigned int status_invert:1;
+ unsigned int status_is_level:1;
unsigned int wake_invert:1;
unsigned int type_in_mask:1;
unsigned int clear_on_unmask:1;
diff --git a/include/linux/regulator/max8952.h b/include/linux/regulator/max8952.h
index 8712c091abf0..61dcd8e00a2f 100644
--- a/include/linux/regulator/max8952.h
+++ b/include/linux/regulator/max8952.h
@@ -2,7 +2,7 @@
/*
* max8952.h - Voltage regulation for the Maxim 8952
*
- * Copyright (C) 2010 Samsung Electrnoics
+ * Copyright (C) 2010 Samsung Electronics
* MyungJoo Ham <myungjoo.ham@samsung.com>
*/
diff --git a/include/linux/regulator/pca9450.h b/include/linux/regulator/pca9450.h
index b427b5873de1..85b4fecc10d8 100644
--- a/include/linux/regulator/pca9450.h
+++ b/include/linux/regulator/pca9450.h
@@ -35,6 +35,8 @@ enum {
PCA9450_DVS_LEVEL_MAX,
};
+#define PCA9450_RESTART_HANDLER_PRIORITY 130
+
#define PCA9450_BUCK1_VOLTAGE_NUM 0x80
#define PCA9450_BUCK2_VOLTAGE_NUM 0x80
#define PCA9450_BUCK3_VOLTAGE_NUM 0x80
@@ -235,4 +237,7 @@ enum {
#define I2C_LT_ON_RUN 0x02
#define I2C_LT_FORCE_ENABLE 0x03
+/* PCA9450_REG_SW_RST command */
+#define SW_RST_COMMAND 0x14
+
#endif /* __LINUX_REG_PCA9450_H__ */
diff --git a/include/linux/resctrl.h b/include/linux/resctrl.h
index 880351ca3dfc..9ba771f2ddea 100644
--- a/include/linux/resctrl.h
+++ b/include/linux/resctrl.h
@@ -8,6 +8,10 @@
#include <linux/pid.h>
#include <linux/resctrl_types.h>
+#ifdef CONFIG_ARCH_HAS_CPU_RESCTRL
+#include <asm/resctrl.h>
+#endif
+
/* CLOSID, RMID value used by the default control group */
#define RESCTRL_RESERVED_CLOSID 0
#define RESCTRL_RESERVED_RMID 0
@@ -44,6 +48,16 @@ int proc_resctrl_show(struct seq_file *m,
for_each_rdt_resource((r)) \
if ((r)->mon_capable)
+enum resctrl_res_level {
+ RDT_RESOURCE_L3,
+ RDT_RESOURCE_L2,
+ RDT_RESOURCE_MBA,
+ RDT_RESOURCE_SMBA,
+
+ /* Must be the last */
+ RDT_NUM_RESOURCES,
+};
+
/**
* enum resctrl_conf_type - The type of configuration.
* @CDP_NONE: No prioritisation, both code and data are controlled or monitored.
@@ -358,7 +372,7 @@ u32 resctrl_arch_get_num_closid(struct rdt_resource *r);
u32 resctrl_arch_system_num_rmid_idx(void);
int resctrl_arch_update_domains(struct rdt_resource *r, u32 closid);
-__init bool resctrl_arch_is_evt_configurable(enum resctrl_event_id evt);
+bool resctrl_arch_is_evt_configurable(enum resctrl_event_id evt);
/**
* resctrl_arch_mon_event_config_write() - Write the config for an event.
@@ -399,6 +413,9 @@ static inline u32 resctrl_get_config_index(u32 closid,
}
}
+bool resctrl_arch_get_cdp_enabled(enum resctrl_res_level l);
+int resctrl_arch_set_cdp_enabled(enum resctrl_res_level l, bool enable);
+
/*
* Update the ctrl_val and apply this config right now.
* Must be called on one of the domain's CPUs.
@@ -514,7 +531,20 @@ void resctrl_arch_reset_all_ctrls(struct rdt_resource *r);
extern unsigned int resctrl_rmid_realloc_threshold;
extern unsigned int resctrl_rmid_realloc_limit;
-int __init resctrl_init(void);
-void __exit resctrl_exit(void);
-
+int resctrl_init(void);
+void resctrl_exit(void);
+
+#ifdef CONFIG_RESCTRL_FS_PSEUDO_LOCK
+u64 resctrl_arch_get_prefetch_disable_bits(void);
+int resctrl_arch_pseudo_lock_fn(void *_plr);
+int resctrl_arch_measure_cycles_lat_fn(void *_plr);
+int resctrl_arch_measure_l2_residency(void *_plr);
+int resctrl_arch_measure_l3_residency(void *_plr);
+#else
+static inline u64 resctrl_arch_get_prefetch_disable_bits(void) { return 0; }
+static inline int resctrl_arch_pseudo_lock_fn(void *_plr) { return 0; }
+static inline int resctrl_arch_measure_cycles_lat_fn(void *_plr) { return 0; }
+static inline int resctrl_arch_measure_l2_residency(void *_plr) { return 0; }
+static inline int resctrl_arch_measure_l3_residency(void *_plr) { return 0; }
+#endif /* CONFIG_RESCTRL_FS_PSEUDO_LOCK */
#endif /* _RESCTRL_H */
diff --git a/include/linux/resctrl_types.h b/include/linux/resctrl_types.h
index f26450b3326b..a25fb9c4070d 100644
--- a/include/linux/resctrl_types.h
+++ b/include/linux/resctrl_types.h
@@ -7,6 +7,9 @@
#ifndef __LINUX_RESCTRL_TYPES_H
#define __LINUX_RESCTRL_TYPES_H
+#define MAX_MBA_BW 100u
+#define MBM_OVERFLOW_INTERVAL 1000
+
/* Reads to Local DRAM Memory */
#define READS_TO_LOCAL_MEM BIT(0)
@@ -31,16 +34,6 @@
/* Max event bits supported */
#define MAX_EVT_CONFIG_BITS GENMASK(6, 0)
-enum resctrl_res_level {
- RDT_RESOURCE_L3,
- RDT_RESOURCE_L2,
- RDT_RESOURCE_MBA,
- RDT_RESOURCE_SMBA,
-
- /* Must be the last */
- RDT_NUM_RESOURCES,
-};
-
/*
* Event IDs, the values match those used to program IA32_QM_EVTSEL before
* reading IA32_QM_CTR on RDT systems.
@@ -49,6 +42,9 @@ enum resctrl_event_id {
QOS_L3_OCCUP_EVENT_ID = 0x01,
QOS_L3_MBM_TOTAL_EVENT_ID = 0x02,
QOS_L3_MBM_LOCAL_EVENT_ID = 0x03,
+
+ /* Must be the last */
+ QOS_NUM_EVENTS,
};
#endif /* __LINUX_RESCTRL_TYPES_H */
diff --git a/include/linux/restart_block.h b/include/linux/restart_block.h
index 13f17676c5f4..7e50bbc94e47 100644
--- a/include/linux/restart_block.h
+++ b/include/linux/restart_block.h
@@ -26,7 +26,7 @@ struct restart_block {
unsigned long arch_data;
long (*fn)(struct restart_block *);
union {
- /* For futex_wait and futex_wait_requeue_pi */
+ /* For futex_wait() */
struct {
u32 __user *uaddr;
u32 val;
diff --git a/include/linux/sched.h b/include/linux/sched.h
index f96ac1982893..45e5953b8f32 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -44,7 +44,6 @@
#include <linux/seqlock_types.h>
#include <linux/kcsan.h>
#include <linux/rv.h>
-#include <linux/livepatch_sched.h>
#include <linux/uidgid_types.h>
#include <linux/tracepoint-defs.h>
#include <asm/kmap_size.h>
@@ -1646,22 +1645,15 @@ struct task_struct {
struct user_event_mm *user_event_mm;
#endif
- /*
- * New fields for task_struct should be added above here, so that
- * they are included in the randomized portion of task_struct.
- */
- randomized_struct_fields_end
-
/* CPU-specific state of this task: */
struct thread_struct thread;
/*
- * WARNING: on x86, 'thread_struct' contains a variable-sized
- * structure. It *MUST* be at the end of 'task_struct'.
- *
- * Do not put anything below here!
+ * New fields for task_struct should be added above here, so that
+ * they are included in the randomized portion of task_struct.
*/
-};
+ randomized_struct_fields_end
+} __attribute__ ((aligned (64)));
#define TASK_REPORT_IDLE (TASK_REPORT + 1)
#define TASK_REPORT_MAX (TASK_REPORT_IDLE << 1)
@@ -2089,9 +2081,6 @@ extern int __cond_resched(void);
#if defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
-void sched_dynamic_klp_enable(void);
-void sched_dynamic_klp_disable(void);
-
DECLARE_STATIC_CALL(cond_resched, __cond_resched);
static __always_inline int _cond_resched(void)
@@ -2112,7 +2101,6 @@ static __always_inline int _cond_resched(void)
static inline int _cond_resched(void)
{
- klp_sched_try_switch();
return __cond_resched();
}
@@ -2122,7 +2110,6 @@ static inline int _cond_resched(void)
static inline int _cond_resched(void)
{
- klp_sched_try_switch();
return 0;
}
diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h
index 7b4301b7235f..198bb5cc1774 100644
--- a/include/linux/sched/topology.h
+++ b/include/linux/sched/topology.h
@@ -195,6 +195,8 @@ struct sched_domain_topology_level {
};
extern void __init set_sched_topology(struct sched_domain_topology_level *tl);
+extern void sched_update_asym_prefer_cpu(int cpu, int old_prio, int new_prio);
+
# define SD_INIT_NAME(type) .name = #type
@@ -223,6 +225,10 @@ static inline bool cpus_share_resources(int this_cpu, int that_cpu)
return true;
}
+static inline void sched_update_asym_prefer_cpu(int cpu, int old_prio, int new_prio)
+{
+}
+
#endif /* !CONFIG_SMP */
#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
diff --git a/include/linux/screen_info.h b/include/linux/screen_info.h
index 6a4a3cec4638..923d68e07679 100644
--- a/include/linux/screen_info.h
+++ b/include/linux/screen_info.h
@@ -126,8 +126,17 @@ static inline unsigned int screen_info_video_type(const struct screen_info *si)
return VIDEO_TYPE_CGA;
}
+static inline u32 __screen_info_vesapm_info_base(const struct screen_info *si)
+{
+ if (si->vesapm_seg < 0xc000)
+ return 0;
+ return (si->vesapm_seg << 4) + si->vesapm_off;
+}
+
ssize_t screen_info_resources(const struct screen_info *si, struct resource *r, size_t num);
+u32 __screen_info_lfb_bits_per_pixel(const struct screen_info *si);
+
#if defined(CONFIG_PCI)
void screen_info_apply_fixups(void);
struct pci_dev *screen_info_pci_dev(const struct screen_info *si);
diff --git a/include/linux/security.h b/include/linux/security.h
index cc9b54d95d22..dba349629229 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -563,7 +563,6 @@ int security_setselfattr(unsigned int attr, struct lsm_ctx __user *ctx,
int security_getprocattr(struct task_struct *p, int lsmid, const char *name,
char **value);
int security_setprocattr(int lsmid, const char *name, void *value, size_t size);
-int security_netlink_send(struct sock *sk, struct sk_buff *skb);
int security_ismaclabel(const char *name);
int security_secid_to_secctx(u32 secid, struct lsm_context *cp);
int security_lsmprop_to_secctx(struct lsm_prop *prop, struct lsm_context *cp);
@@ -1527,11 +1526,6 @@ static inline int security_setprocattr(int lsmid, char *name, void *value,
return -EINVAL;
}
-static inline int security_netlink_send(struct sock *sk, struct sk_buff *skb)
-{
- return 0;
-}
-
static inline int security_ismaclabel(const char *name)
{
return 0;
@@ -1629,6 +1623,7 @@ static inline int security_watch_key(struct key *key)
#ifdef CONFIG_SECURITY_NETWORK
+int security_netlink_send(struct sock *sk, struct sk_buff *skb);
int security_unix_stream_connect(struct sock *sock, struct sock *other, struct sock *newsk);
int security_unix_may_send(struct socket *sock, struct socket *other);
int security_socket_create(int family, int type, int protocol, int kern);
@@ -1684,6 +1679,11 @@ int security_sctp_assoc_established(struct sctp_association *asoc,
int security_mptcp_add_subflow(struct sock *sk, struct sock *ssk);
#else /* CONFIG_SECURITY_NETWORK */
+static inline int security_netlink_send(struct sock *sk, struct sk_buff *skb)
+{
+ return 0;
+}
+
static inline int security_unix_stream_connect(struct sock *sock,
struct sock *other,
struct sock *newsk)
diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index 0b273a7b9f01..5f03a39a26f7 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -104,10 +104,11 @@ static inline bool shmem_mapping(struct address_space *mapping)
return false;
}
#endif /* CONFIG_SHMEM */
-extern void shmem_unlock_mapping(struct address_space *mapping);
-extern struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
+void shmem_unlock_mapping(struct address_space *mapping);
+struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
pgoff_t index, gfp_t gfp_mask);
-extern void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end);
+int shmem_writeout(struct folio *folio, struct writeback_control *wbc);
+void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end);
int shmem_unuse(unsigned int type);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
diff --git a/include/linux/spi/sh_msiof.h b/include/linux/spi/sh_msiof.h
index f950d280461b..9fbef3fd4056 100644
--- a/include/linux/spi/sh_msiof.h
+++ b/include/linux/spi/sh_msiof.h
@@ -2,6 +2,131 @@
#ifndef __SPI_SH_MSIOF_H__
#define __SPI_SH_MSIOF_H__
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+
+#define SITMDR1 0x00 /* Transmit Mode Register 1 */
+#define SITMDR2 0x04 /* Transmit Mode Register 2 */
+#define SITMDR3 0x08 /* Transmit Mode Register 3 */
+#define SIRMDR1 0x10 /* Receive Mode Register 1 */
+#define SIRMDR2 0x14 /* Receive Mode Register 2 */
+#define SIRMDR3 0x18 /* Receive Mode Register 3 */
+#define SITSCR 0x20 /* Transmit Clock Select Register */
+#define SIRSCR 0x22 /* Receive Clock Select Register (SH, A1, APE6) */
+#define SICTR 0x28 /* Control Register */
+#define SIFCTR 0x30 /* FIFO Control Register */
+#define SISTR 0x40 /* Status Register */
+#define SIIER 0x44 /* Interrupt Enable Register */
+#define SITDR1 0x48 /* Transmit Control Data Register 1 (SH, A1) */
+#define SITDR2 0x4c /* Transmit Control Data Register 2 (SH, A1) */
+#define SITFDR 0x50 /* Transmit FIFO Data Register */
+#define SIRDR1 0x58 /* Receive Control Data Register 1 (SH, A1) */
+#define SIRDR2 0x5c /* Receive Control Data Register 2 (SH, A1) */
+#define SIRFDR 0x60 /* Receive FIFO Data Register */
+
+/* SITMDR1 and SIRMDR1 */
+#define SIMDR1_TRMD BIT(31) /* Transfer Mode (1 = Master mode) */
+#define SIMDR1_SYNCMD GENMASK(29, 28) /* SYNC Mode */
+#define SIMDR1_SYNCMD_PULSE 0U /* Frame start sync pulse */
+#define SIMDR1_SYNCMD_SPI 2U /* Level mode/SPI */
+#define SIMDR1_SYNCMD_LR 3U /* L/R mode */
+#define SIMDR1_SYNCAC BIT(25) /* Sync Polarity (1 = Active-low) */
+#define SIMDR1_BITLSB BIT(24) /* MSB/LSB First (1 = LSB first) */
+#define SIMDR1_DTDL GENMASK(22, 20) /* Data Pin Bit Delay for MSIOF_SYNC */
+#define SIMDR1_SYNCDL GENMASK(18, 16) /* Frame Sync Signal Timing Delay */
+#define SIMDR1_FLD GENMASK(3, 2) /* Frame Sync Signal Interval (0-3) */
+#define SIMDR1_XXSTP BIT(0) /* Transmission/Reception Stop on FIFO */
+/* SITMDR1 */
+#define SITMDR1_PCON BIT(30) /* Transfer Signal Connection */
+#define SITMDR1_SYNCCH GENMASK(27, 26) /* Sync Signal Channel Select */
+ /* 0=MSIOF_SYNC, 1=MSIOF_SS1, 2=MSIOF_SS2 */
+
+/* SITMDR2 and SIRMDR2 */
+#define SIMDR2_GRP GENMASK(31, 30) /* Group Count */
+#define SIMDR2_BITLEN1 GENMASK(28, 24) /* Data Size (8-32 bits) */
+#define SIMDR2_WDLEN1 GENMASK(23, 16) /* Word Count (1-64/256 (SH, A1)) */
+#define SIMDR2_GRPMASK GENMASK(3, 0) /* Group Output Mask 1-4 (SH, A1) */
+
+/* SITMDR3 and SIRMDR3 */
+#define SIMDR3_BITLEN2 GENMASK(28, 24) /* Data Size (8-32 bits) */
+#define SIMDR3_WDLEN2 GENMASK(23, 16) /* Word Count (1-64/256 (SH, A1)) */
+
+/* SITSCR and SIRSCR */
+#define SISCR_BRPS GENMASK(12, 8) /* Prescaler Setting (1-32) */
+#define SISCR_BRDV GENMASK(2, 0) /* Baud Rate Generator's Division Ratio */
+
+/* SICTR */
+#define SICTR_TSCKIZ GENMASK(31, 30) /* Transmit Clock I/O Polarity Select */
+#define SICTR_TSCKIZ_SCK BIT(31) /* Disable SCK when TX disabled */
+#define SICTR_TSCKIZ_POL BIT(30) /* Transmit Clock Polarity */
+#define SICTR_RSCKIZ GENMASK(29, 28) /* Receive Clock Polarity Select */
+#define SICTR_RSCKIZ_SCK BIT(29) /* Must match CTR_TSCKIZ_SCK */
+#define SICTR_RSCKIZ_POL BIT(28) /* Receive Clock Polarity */
+#define SICTR_TEDG BIT(27) /* Transmit Timing (1 = falling edge) */
+#define SICTR_REDG BIT(26) /* Receive Timing (1 = falling edge) */
+#define SICTR_TXDIZ GENMASK(23, 22) /* Pin Output When TX is Disabled */
+#define SICTR_TXDIZ_LOW 0U /* 0 */
+#define SICTR_TXDIZ_HIGH 1U /* 1 */
+#define SICTR_TXDIZ_HIZ 2U /* High-impedance */
+#define SICTR_TSCKE BIT(15) /* Transmit Serial Clock Output Enable */
+#define SICTR_TFSE BIT(14) /* Transmit Frame Sync Signal Output Enable */
+#define SICTR_TXE BIT(9) /* Transmit Enable */
+#define SICTR_RXE BIT(8) /* Receive Enable */
+#define SICTR_TXRST BIT(1) /* Transmit Reset */
+#define SICTR_RXRST BIT(0) /* Receive Reset */
+
+/* SIFCTR */
+#define SIFCTR_TFWM GENMASK(31, 29) /* Transmit FIFO Watermark */
+#define SIFCTR_TFWM_64 0U /* Transfer Request when 64 empty stages */
+#define SIFCTR_TFWM_32 1U /* Transfer Request when 32 empty stages */
+#define SIFCTR_TFWM_24 2U /* Transfer Request when 24 empty stages */
+#define SIFCTR_TFWM_16 3U /* Transfer Request when 16 empty stages */
+#define SIFCTR_TFWM_12 4U /* Transfer Request when 12 empty stages */
+#define SIFCTR_TFWM_8 5U /* Transfer Request when 8 empty stages */
+#define SIFCTR_TFWM_4 6U /* Transfer Request when 4 empty stages */
+#define SIFCTR_TFWM_1 7U /* Transfer Request when 1 empty stage */
+#define SIFCTR_TFUA GENMASK(28, 20) /* Transmit FIFO Usable Area */
+#define SIFCTR_RFWM GENMASK(15, 13) /* Receive FIFO Watermark */
+#define SIFCTR_RFWM_1 0U /* Transfer Request when 1 valid stage */
+#define SIFCTR_RFWM_4 1U /* Transfer Request when 4 valid stages */
+#define SIFCTR_RFWM_8 2U /* Transfer Request when 8 valid stages */
+#define SIFCTR_RFWM_16 3U /* Transfer Request when 16 valid stages */
+#define SIFCTR_RFWM_32 4U /* Transfer Request when 32 valid stages */
+#define SIFCTR_RFWM_64 5U /* Transfer Request when 64 valid stages */
+#define SIFCTR_RFWM_128 6U /* Transfer Request when 128 valid stages */
+#define SIFCTR_RFWM_256 7U /* Transfer Request when 256 valid stages */
+#define SIFCTR_RFUA GENMASK(12, 4) /* Receive FIFO Usable Area (0x40 = full) */
+
+/* SISTR */
+#define SISTR_TFEMP BIT(29) /* Transmit FIFO Empty */
+#define SISTR_TDREQ BIT(28) /* Transmit Data Transfer Request */
+#define SISTR_TEOF BIT(23) /* Frame Transmission End */
+#define SISTR_TFSERR BIT(21) /* Transmit Frame Synchronization Error */
+#define SISTR_TFOVF BIT(20) /* Transmit FIFO Overflow */
+#define SISTR_TFUDF BIT(19) /* Transmit FIFO Underflow */
+#define SISTR_RFFUL BIT(13) /* Receive FIFO Full */
+#define SISTR_RDREQ BIT(12) /* Receive Data Transfer Request */
+#define SISTR_REOF BIT(7) /* Frame Reception End */
+#define SISTR_RFSERR BIT(5) /* Receive Frame Synchronization Error */
+#define SISTR_RFUDF BIT(4) /* Receive FIFO Underflow */
+#define SISTR_RFOVF BIT(3) /* Receive FIFO Overflow */
+
+/* SIIER */
+#define SIIER_TDMAE BIT(31) /* Transmit Data DMA Transfer Req. Enable */
+#define SIIER_TFEMPE BIT(29) /* Transmit FIFO Empty Enable */
+#define SIIER_TDREQE BIT(28) /* Transmit Data Transfer Request Enable */
+#define SIIER_TEOFE BIT(23) /* Frame Transmission End Enable */
+#define SIIER_TFSERRE BIT(21) /* Transmit Frame Sync Error Enable */
+#define SIIER_TFOVFE BIT(20) /* Transmit FIFO Overflow Enable */
+#define SIIER_TFUDFE BIT(19) /* Transmit FIFO Underflow Enable */
+#define SIIER_RDMAE BIT(15) /* Receive Data DMA Transfer Req. Enable */
+#define SIIER_RFFULE BIT(13) /* Receive FIFO Full Enable */
+#define SIIER_RDREQE BIT(12) /* Receive Data Transfer Request Enable */
+#define SIIER_REOFE BIT(7) /* Frame Reception End Enable */
+#define SIIER_RFSERRE BIT(5) /* Receive Frame Sync Error Enable */
+#define SIIER_RFUDFE BIT(4) /* Receive FIFO Underflow Enable */
+#define SIIER_RFOVFE BIT(3) /* Receive FIFO Overflow Enable */
+
enum {
MSIOF_SPI_HOST,
MSIOF_SPI_TARGET,
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index 0ba5e49bace4..4789f91dae94 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -136,13 +136,6 @@ extern void spi_transfer_cs_change_delay_exec(struct spi_message *msg,
* @max_speed_hz: Maximum clock rate to be used with this chip
* (on this board); may be changed by the device's driver.
* The spi_transfer.speed_hz can override this for each transfer.
- * @chip_select: Array of physical chipselect, spi->chipselect[i] gives
- * the corresponding physical CS for logical CS i.
- * @mode: The spi mode defines how data is clocked out and in.
- * This may be changed by the device's driver.
- * The "active low" default for chipselect mode can be overridden
- * (by specifying SPI_CS_HIGH) as can the "MSB first" default for
- * each word in a transfer (by specifying SPI_LSB_FIRST).
* @bits_per_word: Data transfers involve one or more words; word sizes
* like eight or 12 bits are common. In-memory wordsizes are
* powers of two bytes (e.g. 20 bit samples use 32 bits).
@@ -150,6 +143,11 @@ extern void spi_transfer_cs_change_delay_exec(struct spi_message *msg,
* default (0) indicating protocol words are eight bit bytes.
* The spi_transfer.bits_per_word can override this for each transfer.
* @rt: Make the pump thread real time priority.
+ * @mode: The spi mode defines how data is clocked out and in.
+ * This may be changed by the device's driver.
+ * The "active low" default for chipselect mode can be overridden
+ * (by specifying SPI_CS_HIGH) as can the "MSB first" default for
+ * each word in a transfer (by specifying SPI_LSB_FIRST).
* @irq: Negative, or the number passed to request_irq() to receive
* interrupts from this device.
* @controller_state: Controller's runtime state
@@ -162,8 +160,7 @@ extern void spi_transfer_cs_change_delay_exec(struct spi_message *msg,
* the device will bind to the named driver and only the named driver.
* Do not set directly, because core frees it; use driver_set_override() to
* set or clear it.
- * @cs_gpiod: Array of GPIO descriptors of the corresponding chipselect lines
- * (optional, NULL when not using a GPIO line)
+ * @pcpu_statistics: statistics for the spi_device
* @word_delay: delay to be inserted between consecutive
* words of a transfer
* @cs_setup: delay to be introduced by the controller after CS is asserted
@@ -171,8 +168,11 @@ extern void spi_transfer_cs_change_delay_exec(struct spi_message *msg,
* @cs_inactive: delay to be introduced by the controller after CS is
* deasserted. If @cs_change_delay is used from @spi_transfer, then the
* two delays will be added up.
- * @pcpu_statistics: statistics for the spi_device
+ * @chip_select: Array of physical chipselect, spi->chipselect[i] gives
+ * the corresponding physical CS for logical CS i.
* @cs_index_mask: Bit mask of the active chipselect(s) in the chipselect array
+ * @cs_gpiod: Array of GPIO descriptors of the corresponding chipselect lines
+ * (optional, NULL when not using a GPIO line)
*
* A @spi_device is used to interchange data between an SPI target device
* (usually a discrete chip) and CPU memory.
@@ -187,7 +187,6 @@ struct spi_device {
struct device dev;
struct spi_controller *controller;
u32 max_speed_hz;
- u8 chip_select[SPI_CS_CNT_MAX];
u8 bits_per_word;
bool rt;
#define SPI_NO_TX BIT(31) /* No transmit wire */
@@ -218,23 +217,29 @@ struct spi_device {
void *controller_data;
char modalias[SPI_NAME_SIZE];
const char *driver_override;
- struct gpio_desc *cs_gpiod[SPI_CS_CNT_MAX]; /* Chip select gpio desc */
+
+ /* The statistics */
+ struct spi_statistics __percpu *pcpu_statistics;
+
struct spi_delay word_delay; /* Inter-word delay */
+
/* CS delays */
struct spi_delay cs_setup;
struct spi_delay cs_hold;
struct spi_delay cs_inactive;
- /* The statistics */
- struct spi_statistics __percpu *pcpu_statistics;
+ u8 chip_select[SPI_CS_CNT_MAX];
- /* Bit mask of the chipselect(s) that the driver need to use from
- * the chipselect array.When the controller is capable to handle
+ /*
+ * Bit mask of the chipselect(s) that the driver needs to use from
+ * the chipselect array. When the controller is capable of handling
 * multiple chip selects and memories are connected in parallel,
 * more than one bit needs to be set in cs_index_mask.
*/
u32 cs_index_mask : SPI_CS_CNT_MAX;
+ struct gpio_desc *cs_gpiod[SPI_CS_CNT_MAX]; /* Chip select gpio desc */
+
/*
* Likely need more hooks for more protocol options affecting how
* the controller talks to each chip, like:
@@ -249,10 +254,7 @@ struct spi_device {
static_assert((SPI_MODE_KERNEL_MASK & SPI_MODE_USER_MASK) == 0,
"SPI_MODE_USER_MASK & SPI_MODE_KERNEL_MASK must not overlap");
-static inline struct spi_device *to_spi_device(const struct device *dev)
-{
- return dev ? container_of(dev, struct spi_device, dev) : NULL;
-}
+#define to_spi_device(__dev) container_of_const(__dev, struct spi_device, dev)
/* Most drivers won't need to care about device refcounting */
static inline struct spi_device *spi_dev_get(struct spi_device *spi)
@@ -503,6 +505,8 @@ extern struct spi_device *spi_new_ancillary_device(struct spi_device *spi, u8 ch
* found.
* @put_offload: release the offload instance acquired by @get_offload.
* @mem_caps: controller capabilities for the handling of memory operations.
+ * @dtr_caps: true if the controller has DTR (single/dual transfer rate) capability.
+ * A QSPI-based controller should fill this based on the controller's capability.
* @unprepare_message: undo any work done by prepare_message().
* @target_abort: abort the ongoing transfer request on an SPI target controller
* @cs_gpiods: Array of GPIO descriptors to use as chip select lines; one per CS
@@ -746,6 +750,9 @@ struct spi_controller {
const struct spi_controller_mem_ops *mem_ops;
const struct spi_controller_mem_caps *mem_caps;
+ /* An SPI or QSPI controller can set this to true if it supports SDR/DDR transfer rates */
+ bool dtr_caps;
+
struct spi_offload *(*get_offload)(struct spi_device *spi,
const struct spi_offload_config *config);
void (*put_offload)(struct spi_offload *offload);
@@ -998,6 +1005,7 @@ struct spi_res {
* processed the word, i.e. the "pre" timestamp should be taken before
* transmitting the "pre" word, and the "post" timestamp after receiving
* transmit confirmation from the controller for the "post" word.
+ * @dtr_mode: true if supports double transfer rate.
* @timestamped: true if the transfer has been timestamped
* @error: Error status logged by SPI controller driver.
*
@@ -1049,6 +1057,9 @@ struct spi_res {
* two should both be set. User can set transfer mode with SPI_NBITS_SINGLE(1x)
* SPI_NBITS_DUAL(2x) and SPI_NBITS_QUAD(4x) to support these three transfer.
*
+ * The user may also set dtr_mode to true to use double transfer rate mode if
+ * desired. If not, single transfer rate mode is assumed by default.
+ *
* The code that submits an spi_message (and its spi_transfers)
* to the lower layers is responsible for managing its memory.
* Zero-initialize every field you don't set up explicitly, to
@@ -1083,6 +1094,7 @@ struct spi_transfer {
unsigned tx_nbits:4;
unsigned rx_nbits:4;
unsigned timestamped:1;
+ bool dtr_mode;
#define SPI_NBITS_SINGLE 0x01 /* 1-bit transfer */
#define SPI_NBITS_DUAL 0x02 /* 2-bit transfer */
#define SPI_NBITS_QUAD 0x04 /* 4-bit transfer */
@@ -1326,6 +1338,32 @@ static inline bool spi_is_bpw_supported(struct spi_device *spi, u32 bpw)
}
/**
+ * spi_bpw_to_bytes - Convert bits per word to bytes
+ * @bpw: Bits per word
+ *
+ * This function converts the given @bpw to bytes. The result is always
+ * power-of-two, e.g.,
+ *
+ * =============== =================
+ * Input (in bits) Output (in bytes)
+ * =============== =================
+ * 5 1
+ * 9 2
+ * 21 4
+ * 37 8
+ * =============== =================
+ *
+ * It will return 0 for a 0 input.
+ *
+ * Returns:
+ * Bytes for the given @bpw.
+ */
+static inline u32 spi_bpw_to_bytes(u32 bpw)
+{
+ return roundup_pow_of_two(BITS_TO_BYTES(bpw));
+}
+
+/**
* spi_controller_xfer_timeout - Compute a suitable timeout value
* @ctlr: SPI device
* @xfer: Transfer descriptor
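
A one-function sketch of the new spi_bpw_to_bytes() helper; my_xfer_buf_len() and its parameters are illustrative:

	static size_t my_xfer_buf_len(const struct spi_transfer *xfer,
				      size_t n_words)
	{
		/* in-memory word sizes are power-of-two bytes: 5 -> 1, 21 -> 4 */
		return n_words * spi_bpw_to_bytes(xfer->bits_per_word);
	}
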
diff --git a/include/linux/stat.h b/include/linux/stat.h
index be7496a6a0dd..e3d00e7bb26d 100644
--- a/include/linux/stat.h
+++ b/include/linux/stat.h
@@ -57,6 +57,7 @@ struct kstat {
u32 dio_read_offset_align;
u32 atomic_write_unit_min;
u32 atomic_write_unit_max;
+ u32 atomic_write_unit_max_opt;
u32 atomic_write_segments_max;
};
diff --git a/include/linux/string_helpers.h b/include/linux/string_helpers.h
index e93fbb5b0c01..3fb88a1e9898 100644
--- a/include/linux/string_helpers.h
+++ b/include/linux/string_helpers.h
@@ -31,6 +31,7 @@ enum string_size_units {
int string_get_size(u64 size, u64 blk_size, const enum string_size_units units,
char *buf, int len);
+int parse_int_array(const char *buf, size_t count, int **array);
int parse_int_array_user(const char __user *from, size_t count, int **array);
#define UNESCAPE_SPACE BIT(0)
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index da6ebca3ff77..b1c76c8f2c82 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -298,6 +298,11 @@ static inline void s2idle_set_ops(const struct platform_s2idle_ops *ops) {}
static inline void s2idle_wake(void) {}
#endif /* !CONFIG_SUSPEND */
+static inline bool pm_suspend_in_progress(void)
+{
+ return pm_suspend_target_state != PM_SUSPEND_ON;
+}
+
/* struct pbe is used for creating lists of pages that should be restored
* atomically during the resume from disk, because the page frames they have
* occupied before the suspend are in use.
@@ -470,6 +475,8 @@ extern void pm_print_active_wakeup_sources(void);
extern unsigned int lock_system_sleep(void);
extern void unlock_system_sleep(unsigned int);
+extern bool pm_sleep_transition_in_progress(void);
+
#else /* !CONFIG_PM_SLEEP */
static inline int register_pm_notifier(struct notifier_block *nb)
@@ -498,6 +505,8 @@ static inline void pm_system_irq_wakeup(unsigned int irq_number) {}
static inline unsigned int lock_system_sleep(void) { return 0; }
static inline void unlock_system_sleep(unsigned int flags) {}
+static inline bool pm_sleep_transition_in_progress(void) { return false; }
+
#endif /* !CONFIG_PM_SLEEP */
#ifdef CONFIG_PM_SLEEP_DEBUG
diff --git a/include/linux/tick.h b/include/linux/tick.h
index b8ddc8e631a3..ac76ae9fa36d 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -195,12 +195,6 @@ static inline bool tick_nohz_full_enabled(void)
__ret; \
})
-static inline void tick_nohz_full_add_cpus_to(struct cpumask *mask)
-{
- if (tick_nohz_full_enabled())
- cpumask_or(mask, mask, tick_nohz_full_mask);
-}
-
extern void tick_nohz_dep_set(enum tick_dep_bits bit);
extern void tick_nohz_dep_clear(enum tick_dep_bits bit);
extern void tick_nohz_dep_set_cpu(int cpu, enum tick_dep_bits bit);
@@ -281,7 +275,6 @@ extern void __init tick_nohz_full_setup(cpumask_var_t cpumask);
#else
static inline bool tick_nohz_full_enabled(void) { return false; }
static inline bool tick_nohz_full_cpu(int cpu) { return false; }
-static inline void tick_nohz_full_add_cpus_to(struct cpumask *mask) { }
static inline void tick_nohz_dep_set_cpu(int cpu, enum tick_dep_bits bit) { }
static inline void tick_nohz_dep_clear_cpu(int cpu, enum tick_dep_bits bit) { }
diff --git a/include/linux/timer.h b/include/linux/timer.h
index 10596d7c3a34..f636f55c427d 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -67,44 +67,44 @@
/*
* LOCKDEP and DEBUG timer interfaces.
*/
-void init_timer_key(struct timer_list *timer,
+void timer_init_key(struct timer_list *timer,
void (*func)(struct timer_list *), unsigned int flags,
const char *name, struct lock_class_key *key);
#ifdef CONFIG_DEBUG_OBJECTS_TIMERS
-extern void init_timer_on_stack_key(struct timer_list *timer,
+extern void timer_init_key_on_stack(struct timer_list *timer,
void (*func)(struct timer_list *),
unsigned int flags, const char *name,
struct lock_class_key *key);
#else
-static inline void init_timer_on_stack_key(struct timer_list *timer,
+static inline void timer_init_key_on_stack(struct timer_list *timer,
void (*func)(struct timer_list *),
unsigned int flags,
const char *name,
struct lock_class_key *key)
{
- init_timer_key(timer, func, flags, name, key);
+ timer_init_key(timer, func, flags, name, key);
}
#endif
#ifdef CONFIG_LOCKDEP
-#define __init_timer(_timer, _fn, _flags) \
+#define __timer_init(_timer, _fn, _flags) \
do { \
static struct lock_class_key __key; \
- init_timer_key((_timer), (_fn), (_flags), #_timer, &__key);\
+ timer_init_key((_timer), (_fn), (_flags), #_timer, &__key);\
} while (0)
-#define __init_timer_on_stack(_timer, _fn, _flags) \
+#define __timer_init_on_stack(_timer, _fn, _flags) \
do { \
static struct lock_class_key __key; \
- init_timer_on_stack_key((_timer), (_fn), (_flags), \
+ timer_init_key_on_stack((_timer), (_fn), (_flags), \
#_timer, &__key); \
} while (0)
#else
-#define __init_timer(_timer, _fn, _flags) \
- init_timer_key((_timer), (_fn), (_flags), NULL, NULL)
-#define __init_timer_on_stack(_timer, _fn, _flags) \
- init_timer_on_stack_key((_timer), (_fn), (_flags), NULL, NULL)
+#define __timer_init(_timer, _fn, _flags) \
+ timer_init_key((_timer), (_fn), (_flags), NULL, NULL)
+#define __timer_init_on_stack(_timer, _fn, _flags) \
+ timer_init_key_on_stack((_timer), (_fn), (_flags), NULL, NULL)
#endif
/**
@@ -115,18 +115,18 @@ static inline void init_timer_on_stack_key(struct timer_list *timer,
*
* Regular timer initialization should use either DEFINE_TIMER() above,
* or timer_setup(). For timers on the stack, timer_setup_on_stack() must
- * be used and must be balanced with a call to destroy_timer_on_stack().
+ * be used and must be balanced with a call to timer_destroy_on_stack().
*/
#define timer_setup(timer, callback, flags) \
- __init_timer((timer), (callback), (flags))
+ __timer_init((timer), (callback), (flags))
#define timer_setup_on_stack(timer, callback, flags) \
- __init_timer_on_stack((timer), (callback), (flags))
+ __timer_init_on_stack((timer), (callback), (flags))
#ifdef CONFIG_DEBUG_OBJECTS_TIMERS
-extern void destroy_timer_on_stack(struct timer_list *timer);
+extern void timer_destroy_on_stack(struct timer_list *timer);
#else
-static inline void destroy_timer_on_stack(struct timer_list *timer) { }
+static inline void timer_destroy_on_stack(struct timer_list *timer) { }
#endif
#define from_timer(var, callback_timer, timer_fieldname) \
@@ -156,28 +156,26 @@ extern int timer_reduce(struct timer_list *timer, unsigned long expires);
* The jiffies value which is added to now, when there is no timer
* in the timer wheel:
*/
-#define NEXT_TIMER_MAX_DELTA ((1UL << 30) - 1)
+#define TIMER_NEXT_MAX_DELTA ((1UL << 30) - 1)
extern void add_timer(struct timer_list *timer);
extern void add_timer_local(struct timer_list *timer);
extern void add_timer_global(struct timer_list *timer);
-extern int try_to_del_timer_sync(struct timer_list *timer);
+extern int timer_delete_sync_try(struct timer_list *timer);
extern int timer_delete_sync(struct timer_list *timer);
extern int timer_delete(struct timer_list *timer);
extern int timer_shutdown_sync(struct timer_list *timer);
extern int timer_shutdown(struct timer_list *timer);
-extern void init_timers(void);
+extern void timers_init(void);
struct hrtimer;
extern enum hrtimer_restart it_real_fn(struct hrtimer *);
-unsigned long __round_jiffies(unsigned long j, int cpu);
unsigned long __round_jiffies_relative(unsigned long j, int cpu);
unsigned long round_jiffies(unsigned long j);
unsigned long round_jiffies_relative(unsigned long j);
-unsigned long __round_jiffies_up(unsigned long j, int cpu);
unsigned long __round_jiffies_up_relative(unsigned long j, int cpu);
unsigned long round_jiffies_up(unsigned long j);
unsigned long round_jiffies_up_relative(unsigned long j);
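
A brief sketch using the renamed interfaces; struct my_dev, my_poll_fn() and the surrounding locking context are hypothetical:

	static void my_start(struct my_dev *d)
	{
		timer_setup(&d->poll_timer, my_poll_fn, 0);
		mod_timer(&d->poll_timer, jiffies + HZ);
	}

	static void my_stop(struct my_dev *d)
	{
		/* timer_delete_sync_try() fails with a negative value when
		 * the callback is currently running; typically used where
		 * waiting is not possible (shown lock-free for brevity) */
		if (timer_delete_sync_try(&d->poll_timer) < 0)
			timer_delete_sync(&d->poll_timer);
	}
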
diff --git a/include/linux/topology.h b/include/linux/topology.h
index 24e715f0f6d2..cd6b4bdc9cfd 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -332,4 +332,13 @@ sched_numa_hop_mask(unsigned int node, unsigned int hops)
!IS_ERR_OR_NULL(mask); \
__hops++)
+DECLARE_PER_CPU(unsigned long, cpu_scale);
+
+static inline unsigned long topology_get_cpu_scale(int cpu)
+{
+ return per_cpu(cpu_scale, cpu);
+}
+
+void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity);
+
#endif /* _LINUX_TOPOLOGY_H */
diff --git a/include/linux/tpm_svsm.h b/include/linux/tpm_svsm.h
new file mode 100644
index 000000000000..38e341f9761a
--- /dev/null
+++ b/include/linux/tpm_svsm.h
@@ -0,0 +1,149 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2023 James.Bottomley@HansenPartnership.com
+ * Copyright (C) 2025 Red Hat, Inc. All Rights Reserved.
+ *
+ * Helpers for the SVSM_VTPM_CMD calls used by the vTPM protocol defined by the
+ * AMD SVSM spec [1].
+ *
+ * The vTPM protocol follows the Official TPM 2.0 Reference Implementation
+ * (originally by Microsoft, now part of the TCG) simulator protocol.
+ *
+ * [1] "Secure VM Service Module for SEV-SNP Guests"
+ * Publication # 58019 Revision: 1.00
+ */
+#ifndef _TPM_SVSM_H_
+#define _TPM_SVSM_H_
+
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+#define SVSM_VTPM_MAX_BUFFER 4096 /* max req/resp buffer size */
+
+/**
+ * struct svsm_vtpm_request - Generic request for single word command
+ * @cmd: The command to send
+ *
+ * Defined by AMD SVSM spec [1] in section "8.2 SVSM_VTPM_CMD Call" -
+ * Table 15: vTPM Common Request/Response Structure
+ * Byte      Size       In/Out    Description
+ * Offset    (Bytes)
+ * 0x000     4          In        Platform command
+ *                      Out       Platform command response size
+ */
+struct svsm_vtpm_request {
+ u32 cmd;
+};
+
+/**
+ * struct svsm_vtpm_response - Generic response
+ * @size: The response size (zero if nothing follows)
+ *
+ * Defined by AMD SVSM spec [1] in section "8.2 SVSM_VTPM_CMD Call" -
+ * Table 15: vTPM Common Request/Response Structure
+ * Byte      Size       In/Out    Description
+ * Offset    (Bytes)
+ * 0x000     4          In        Platform command
+ *                      Out       Platform command response size
+ *
+ * Note: most TCG Simulator commands simply return zero here with no indication
+ * of success or failure.
+ */
+struct svsm_vtpm_response {
+ u32 size;
+};
+
+/**
+ * struct svsm_vtpm_cmd_request - Structure for a TPM_SEND_COMMAND request
+ * @cmd: The command to send (must be TPM_SEND_COMMAND)
+ * @locality: The locality
+ * @buf_size: The size of the input buffer following
+ * @buf: A buffer of size buf_size
+ *
+ * Defined by AMD SVSM spec [1] in section "8.2 SVSM_VTPM_CMD Call" -
+ * Table 16: TPM_SEND_COMMAND Request Structure
+ * Byte      Size       Meaning
+ * Offset    (Bytes)
+ * 0x000     4          Platform command (8)
+ * 0x004     1          Locality (must-be-0)
+ * 0x005     4          TPM Command size (in bytes)
+ * 0x009     Variable   TPM Command
+ *
+ * Note: the TCG Simulator expects @buf_size to be equal to the size of the
+ * specific TPM command, otherwise a TPM_RC_COMMAND_SIZE error is returned.
+ */
+struct svsm_vtpm_cmd_request {
+ u32 cmd;
+ u8 locality;
+ u32 buf_size;
+ u8 buf[];
+} __packed;
+
+/**
+ * struct svsm_vtpm_cmd_response - Structure for a TPM_SEND_COMMAND response
+ * @buf_size: The size of the output buffer following
+ * @buf: A buffer of size buf_size
+ *
+ * Defined by AMD SVSM spec [1] in section "8.2 SVSM_VTPM_CMD Call" -
+ * Table 17: TPM_SEND_COMMAND Response Structure
+ * Byte      Size      Meaning
+ * Offset    (Bytes)
+ * 0x000     4          Response size (in bytes)
+ * 0x004     Variable   Response
+ */
+struct svsm_vtpm_cmd_response {
+ u32 buf_size;
+ u8 buf[];
+};
+
+/**
+ * svsm_vtpm_cmd_request_fill() - Fill a TPM_SEND_COMMAND request to be sent to SVSM
+ * @req: The struct svsm_vtpm_cmd_request to fill
+ * @locality: The locality
+ * @buf: The buffer from which to copy the payload of the command
+ * @len: The size of the buffer
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+static inline int
+svsm_vtpm_cmd_request_fill(struct svsm_vtpm_cmd_request *req, u8 locality,
+ const u8 *buf, size_t len)
+{
+ if (len > SVSM_VTPM_MAX_BUFFER - sizeof(*req))
+ return -EINVAL;
+
+ req->cmd = 8; /* TPM_SEND_COMMAND */
+ req->locality = locality;
+ req->buf_size = len;
+
+ memcpy(req->buf, buf, len);
+
+ return 0;
+}
+
+/**
+ * svsm_vtpm_cmd_response_parse() - Parse a TPM_SEND_COMMAND response received from SVSM
+ * @resp: The struct svsm_vtpm_cmd_response to parse
+ * @buf: The buffer into which to copy the response
+ * @len: The size of the buffer
+ *
+ * Return: buffer size filled with the response on success, negative error
+ * code on failure.
+ */
+static inline int
+svsm_vtpm_cmd_response_parse(const struct svsm_vtpm_cmd_response *resp, u8 *buf,
+ size_t len)
+{
+ if (len < resp->buf_size)
+ return -E2BIG;
+
+ if (resp->buf_size > SVSM_VTPM_MAX_BUFFER - sizeof(*resp))
+ return -EINVAL; // Invalid response from the platform TPM
+
+ memcpy(buf, resp->buf, resp->buf_size);
+
+ return resp->buf_size;
+}
+
+#endif /* _TPM_SVSM_H_ */
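[Editor's note] A hedged usage sketch of the two helpers above, assuming the caller owns a shared buffer of SVSM_VTPM_MAX_BUFFER bytes that holds the request and is overwritten by the response; svsm_issue_vtpm_cmd() is a placeholder for the platform-specific SVSM call, not a real API:

static int example_send_tpm_cmd(void *shared_buf, const u8 *cmd,
				size_t cmd_len, u8 *resp, size_t resp_len)
{
	struct svsm_vtpm_cmd_request *req = shared_buf;
	struct svsm_vtpm_cmd_response *vresp = shared_buf;
	int ret;

	/* Locality must be 0 per the TPM_SEND_COMMAND request layout */
	ret = svsm_vtpm_cmd_request_fill(req, 0, cmd, cmd_len);
	if (ret)
		return ret;

	ret = svsm_issue_vtpm_cmd(shared_buf);	/* hypothetical SVSM call */
	if (ret)
		return ret;

	/* Returns the number of response bytes copied, or a negative errno */
	return svsm_vtpm_cmd_response_parse(vresp, resp, resp_len);
}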
diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
index 939ceabcaf06..335c360d4f9b 100644
--- a/include/linux/vermagic.h
+++ b/include/linux/vermagic.h
@@ -33,7 +33,6 @@
#define MODULE_VERMAGIC_MODVERSIONS ""
#endif
#ifdef RANDSTRUCT
-#include <generated/randstruct_hash.h>
#define MODULE_RANDSTRUCT "RANDSTRUCT_" RANDSTRUCT_HASHED_SEED
#else
#define MODULE_RANDSTRUCT
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 5ca8d4dd149d..a40a905e5e1b 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -169,8 +169,13 @@ void *__vmalloc_node_noprof(unsigned long size, unsigned long align, gfp_t gfp_m
int node, const void *caller) __alloc_size(1);
#define __vmalloc_node(...) alloc_hooks(__vmalloc_node_noprof(__VA_ARGS__))
-void *vmalloc_huge_noprof(unsigned long size, gfp_t gfp_mask) __alloc_size(1);
-#define vmalloc_huge(...) alloc_hooks(vmalloc_huge_noprof(__VA_ARGS__))
+void *vmalloc_huge_node_noprof(unsigned long size, gfp_t gfp_mask, int node) __alloc_size(1);
+#define vmalloc_huge_node(...) alloc_hooks(vmalloc_huge_node_noprof(__VA_ARGS__))
+
+static inline void *vmalloc_huge(unsigned long size, gfp_t gfp_mask)
+{
+ return vmalloc_huge_node(size, gfp_mask, NUMA_NO_NODE);
+}
extern void *__vmalloc_array_noprof(size_t n, size_t size, gfp_t flags) __alloc_size(1, 2);
#define __vmalloc_array(...) alloc_hooks(__vmalloc_array_noprof(__VA_ARGS__))
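[Editor's note] With this change vmalloc_huge_node() takes a preferred NUMA node while vmalloc_huge() becomes a NUMA_NO_NODE wrapper. An illustrative fragment (size and fallback policy are assumptions, not mandated by the API):

	/* Place a large table near the local node, else take any node */
	void *table;

	table = vmalloc_huge_node(16 * 1024 * 1024, GFP_KERNEL,
				  numa_node_id());
	if (!table)
		table = vmalloc_huge(16 * 1024 * 1024, GFP_KERNEL);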
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index b0dc957c3e56..6e30f275da77 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -316,7 +316,7 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }
#define __INIT_DELAYED_WORK(_work, _func, _tflags) \
do { \
INIT_WORK(&(_work)->work, (_func)); \
- __init_timer(&(_work)->timer, \
+ __timer_init(&(_work)->timer, \
delayed_work_timer_fn, \
(_tflags) | TIMER_IRQSAFE); \
} while (0)
@@ -324,7 +324,7 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }
#define __INIT_DELAYED_WORK_ONSTACK(_work, _func, _tflags) \
do { \
INIT_WORK_ONSTACK(&(_work)->work, (_func)); \
- __init_timer_on_stack(&(_work)->timer, \
+ __timer_init_on_stack(&(_work)->timer, \
delayed_work_timer_fn, \
(_tflags) | TIMER_IRQSAFE); \
} while (0)
@@ -480,7 +480,7 @@ void workqueue_softirq_dead(unsigned int cpu);
* executing at most one work item for the workqueue.
*
* For unbound workqueues, @max_active limits the number of in-flight work items
- * for the whole system. e.g. @max_active of 16 indicates that that there can be
+ * for the whole system. e.g. @max_active of 16 indicates that there can be
* at most 16 work items executing for the workqueue in the whole system.
*
* As sharing the same active counter for an unbound workqueue across multiple
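[Editor's note] The @max_active semantics described in this hunk map directly onto the alloc_workqueue() call; a minimal sketch (the queue name is illustrative):

	/* At most 16 work items from this unbound workqueue run system-wide */
	struct workqueue_struct *wq;

	wq = alloc_workqueue("example_wq", WQ_UNBOUND, 16);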
diff --git a/include/media/v4l2-common.h b/include/media/v4l2-common.h
index fda903bb3674..0a43f56578bc 100644
--- a/include/media/v4l2-common.h
+++ b/include/media/v4l2-common.h
@@ -390,38 +390,72 @@ void v4l_bound_align_image(unsigned int *width, unsigned int wmin,
unsigned int salign);
/**
- * v4l2_find_nearest_size - Find the nearest size among a discrete
- * set of resolutions contained in an array of a driver specific struct.
+ * v4l2_find_nearest_size_conditional - Find the nearest size among a discrete
+ * set of resolutions contained in an array of a driver specific struct,
+ * with conditional exclusion of certain modes
*
* @array: a driver specific array of image sizes
* @array_size: the length of the driver specific array of image sizes
* @width_field: the name of the width field in the driver specific struct
* @height_field: the name of the height field in the driver specific struct
- * @width: desired width.
- * @height: desired height.
+ * @width: desired width
+ * @height: desired height
+ * @func: a mode is ignored if this callback returns false for it
+ * @context: context pointer passed to @func
*
* Finds the closest resolution to minimize the width and height differences
* between what is requested and the supported resolutions. The size of the width
* and height fields in the driver specific struct must equal that of u32, i.e. four
- * bytes.
+ * bytes. @func is called for each mode considered; a mode is ignored if @func
+ * returns false for it.
*
* Returns the best match or NULL if the length of the array is zero.
*/
-#define v4l2_find_nearest_size(array, array_size, width_field, height_field, \
- width, height) \
+#define v4l2_find_nearest_size_conditional(array, array_size, width_field, \
+ height_field, width, height, \
+ func, context) \
({ \
BUILD_BUG_ON(sizeof((array)->width_field) != sizeof(u32) || \
sizeof((array)->height_field) != sizeof(u32)); \
- (typeof(&(array)[0]))__v4l2_find_nearest_size( \
+ (typeof(&(array)[0]))__v4l2_find_nearest_size_conditional( \
(array), array_size, sizeof(*(array)), \
offsetof(typeof(*(array)), width_field), \
offsetof(typeof(*(array)), height_field), \
- width, height); \
+ width, height, func, context); \
})
const void *
-__v4l2_find_nearest_size(const void *array, size_t array_size,
- size_t entry_size, size_t width_offset,
- size_t height_offset, s32 width, s32 height);
+__v4l2_find_nearest_size_conditional(const void *array, size_t array_size,
+ size_t entry_size, size_t width_offset,
+ size_t height_offset, s32 width,
+ s32 height,
+ bool (*func)(const void *array,
+ size_t index,
+ const void *context),
+ const void *context);
+
+/**
+ * v4l2_find_nearest_size - Find the nearest size among a discrete set of
+ * resolutions contained in an array of a driver specific struct
+ *
+ * @array: a driver specific array of image sizes
+ * @array_size: the length of the driver specific array of image sizes
+ * @width_field: the name of the width field in the driver specific struct
+ * @height_field: the name of the height field in the driver specific struct
+ * @width: desired width
+ * @height: desired height
+ *
+ * Finds the closest resolution to minimize the width and height differences
+ * between what is requested and the supported resolutions. The size of the width
+ * and height fields in the driver specific struct must equal that of u32, i.e. four
+ * bytes.
+ *
+ * Returns the best match or NULL if the length of the array is zero.
+ */
+#define v4l2_find_nearest_size(array, array_size, width_field, \
+ height_field, width, height) \
+ v4l2_find_nearest_size_conditional(array, array_size, width_field, \
+ height_field, width, height, NULL, \
+ NULL)
/**
* v4l2_g_parm_cap - helper routine for vidioc_g_parm to fill this in by
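[Editor's note] A hedged sketch of the new conditional variant, with a hypothetical driver table; drv_frame_size, drv_mode_valid(), fmt and hs_enabled are all assumptions for illustration, and the callback signature matches __v4l2_find_nearest_size_conditional() above:

static const struct drv_frame_size {
	u32 width;
	u32 height;
	bool hs_only;
} drv_sizes[] = {
	{  640,  480, false },
	{ 1280,  720, false },
	{ 1920, 1080, true  },
};

static bool drv_mode_valid(const void *array, size_t index,
			   const void *context)
{
	const struct drv_frame_size *fs = array;
	const bool *hs_enabled = context;

	/* Skip high-speed-only modes unless the link is in high speed */
	return !fs[index].hs_only || *hs_enabled;
}

	/* ... in the format handler (fmt and hs_enabled are assumed locals): */
	const struct drv_frame_size *best =
		v4l2_find_nearest_size_conditional(drv_sizes,
						   ARRAY_SIZE(drv_sizes),
						   width, height,
						   fmt->width, fmt->height,
						   drv_mode_valid, &hs_enabled);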
diff --git a/include/media/vsp1.h b/include/media/vsp1.h
index 48f4a5023d81..4ea6352fd63f 100644
--- a/include/media/vsp1.h
+++ b/include/media/vsp1.h
@@ -52,6 +52,8 @@ int vsp1_du_setup_lif(struct device *dev, unsigned int pipe_index,
* @alpha: alpha value (0: fully transparent, 255: fully opaque)
* @zpos: Z position of the plane (from 0 to number of planes minus 1)
* @premult: true for premultiplied alpha
+ * @color_encoding: color encoding (valid for YUV formats only)
+ * @color_range: color range (valid for YUV formats only)
*/
struct vsp1_du_atomic_config {
u32 pixelformat;
@@ -62,6 +64,8 @@ struct vsp1_du_atomic_config {
unsigned int alpha;
unsigned int zpos;
bool premult;
+ enum v4l2_ycbcr_encoding color_encoding;
+ enum v4l2_quantization color_range;
};
/**
diff --git a/include/memory/renesas-rpc-if.h b/include/memory/renesas-rpc-if.h
index b8fa30fd6b50..53663c4e5ae3 100644
--- a/include/memory/renesas-rpc-if.h
+++ b/include/memory/renesas-rpc-if.h
@@ -61,12 +61,14 @@ enum rpcif_type {
RPCIF_RCAR_GEN3,
RPCIF_RCAR_GEN4,
RPCIF_RZ_G2L,
+ XSPI_RZ_G3E,
};
struct rpcif {
struct device *dev;
void __iomem *dirmap;
size_t size;
+ bool xspi;
};
int rpcif_sw_init(struct rpcif *rpc, struct device *dev);
@@ -75,5 +77,7 @@ void rpcif_prepare(struct device *dev, const struct rpcif_op *op, u64 *offs,
size_t *len);
int rpcif_manual_xfer(struct device *dev);
ssize_t rpcif_dirmap_read(struct device *dev, u64 offs, size_t len, void *buf);
+ssize_t xspi_dirmap_write(struct device *dev, u64 offs, size_t len,
+ const void *buf);
#endif // __RENESAS_RPC_IF_H
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index 26bc23419cfd..c53812b9026f 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -670,8 +670,6 @@ struct Scsi_Host {
/* The transport requires the LUN bits NOT to be stored in CDB[1] */
unsigned no_scsi2_lun_in_cdb:1;
- unsigned no_highmem:1;
-
/*
* Optional work queue to be utilized by the transport
*/
diff --git a/include/sound/core.h b/include/sound/core.h
index 1f3f5dccd736..64327e971122 100644
--- a/include/sound/core.h
+++ b/include/sound/core.h
@@ -326,7 +326,6 @@ void snd_device_disconnect(struct snd_card *card, void *device_data);
void snd_device_disconnect_all(struct snd_card *card);
void snd_device_free(struct snd_card *card, void *device_data);
void snd_device_free_all(struct snd_card *card);
-int snd_device_get_state(struct snd_card *card, void *device_data);
/* isadma.c */
diff --git a/include/sound/cs-amp-lib.h b/include/sound/cs-amp-lib.h
index f481148735e1..5459c221badf 100644
--- a/include/sound/cs-amp-lib.h
+++ b/include/sound/cs-amp-lib.h
@@ -23,7 +23,7 @@ struct cirrus_amp_cal_data {
struct cirrus_amp_efi_data {
u32 size;
u32 count;
- struct cirrus_amp_cal_data data[];
+ struct cirrus_amp_cal_data data[] __counted_by(count);
} __packed;
/**
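[Editor's note] The __counted_by(count) annotation added above ties the bounds of the flexible array to @count for FORTIFY/UBSAN checking, which relies on @count being set before data[] is touched. A hedged allocation sketch (amp_count is an assumed local):

	struct cirrus_amp_efi_data *efi_data;

	efi_data = kzalloc(struct_size(efi_data, data, amp_count), GFP_KERNEL);
	if (!efi_data)
		return -ENOMEM;
	/* Set the counter before any data[] access so bounds checks see it */
	efi_data->count = amp_count;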
diff --git a/include/sound/cs35l56.h b/include/sound/cs35l56.h
index 5d653a3491d0..e17c4cadd04d 100644
--- a/include/sound/cs35l56.h
+++ b/include/sound/cs35l56.h
@@ -71,6 +71,8 @@
#define CS35L56_DSP_VIRTUAL1_MBOX_6 0x0011034
#define CS35L56_DSP_VIRTUAL1_MBOX_7 0x0011038
#define CS35L56_DSP_VIRTUAL1_MBOX_8 0x001103C
+#define CS35L56_DIE_STS1 0x0017040
+#define CS35L56_DIE_STS2 0x0017044
#define CS35L56_DSP_RESTRICT_STS1 0x00190F0
#define CS35L56_DSP1_XMEM_PACKED_0 0x2000000
#define CS35L56_DSP1_XMEM_PACKED_6143 0x2005FFC
@@ -104,6 +106,15 @@
#define CS35L56_DSP1_PMEM_0 0x3800000
#define CS35L56_DSP1_PMEM_5114 0x3804FE8
+#define CS35L63_DSP1_FW_VER CS35L56_DSP1_FW_VER
+#define CS35L63_DSP1_HALO_STATE 0x280396C
+#define CS35L63_DSP1_PM_CUR_STATE 0x28042C8
+#define CS35L63_PROTECTION_STATUS 0x340009C
+#define CS35L63_TRANSDUCER_ACTUAL_PS 0x34000F4
+#define CS35L63_MAIN_RENDER_USER_MUTE 0x3400020
+#define CS35L63_MAIN_RENDER_USER_VOLUME 0x3400028
+#define CS35L63_MAIN_POSTURE_NUMBER 0x3400068
+
/* DEVID */
#define CS35L56_DEVID_MASK 0x00FFFFFF
@@ -267,6 +278,17 @@ struct cs35l56_spi_payload {
} __packed;
static_assert(sizeof(struct cs35l56_spi_payload) == 10);
+struct cs35l56_fw_reg {
+ unsigned int fw_ver;
+ unsigned int halo_state;
+ unsigned int pm_cur_stat;
+ unsigned int prot_sts;
+ unsigned int transducer_actual_ps;
+ unsigned int user_mute;
+ unsigned int user_volume;
+ unsigned int posture_number;
+};
+
struct cs35l56_base {
struct device *dev;
struct regmap *regmap;
@@ -283,6 +305,7 @@ struct cs35l56_base {
struct cirrus_amp_cal_data cal_data;
struct gpio_desc *reset_gpio;
struct cs35l56_spi_payload *spi_payload_buf;
+ const struct cs35l56_fw_reg *fw_reg;
};
static inline bool cs35l56_is_otp_register(unsigned int reg)
@@ -310,6 +333,11 @@ static inline bool cs35l56_is_spi(struct cs35l56_base *cs35l56)
extern const struct regmap_config cs35l56_regmap_i2c;
extern const struct regmap_config cs35l56_regmap_spi;
extern const struct regmap_config cs35l56_regmap_sdw;
+extern const struct regmap_config cs35l63_regmap_i2c;
+extern const struct regmap_config cs35l63_regmap_sdw;
+
+extern const struct cs35l56_fw_reg cs35l56_fw_reg;
+extern const struct cs35l56_fw_reg cs35l63_fw_reg;
extern const struct cirrus_amp_cal_controls cs35l56_calibration_controls;
@@ -332,6 +360,7 @@ void cs35l56_init_cs_dsp(struct cs35l56_base *cs35l56_base, struct cs_dsp *cs_ds
int cs35l56_get_calibration(struct cs35l56_base *cs35l56_base);
int cs35l56_read_prot_status(struct cs35l56_base *cs35l56_base,
bool *fw_missing, unsigned int *fw_version);
+void cs35l56_log_tuning(struct cs35l56_base *cs35l56_base, struct cs_dsp *cs_dsp);
int cs35l56_hw_init(struct cs35l56_base *cs35l56_base);
int cs35l56_get_speaker_id(struct cs35l56_base *cs35l56_base);
int cs35l56_get_bclk_freq_id(unsigned int freq);
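[Editor's note] The new struct cs35l56_fw_reg lets one driver address chip-specific firmware registers through a table. A sketch of how a CS35L63 table might be populated from the defines above (illustrative only; the real cs35l63_fw_reg is defined in the driver sources):

static const struct cs35l56_fw_reg example_cs35l63_fw_reg = {
	.fw_ver			= CS35L63_DSP1_FW_VER,
	.halo_state		= CS35L63_DSP1_HALO_STATE,
	.pm_cur_stat		= CS35L63_DSP1_PM_CUR_STATE,
	.prot_sts		= CS35L63_PROTECTION_STATUS,
	.transducer_actual_ps	= CS35L63_TRANSDUCER_ACTUAL_PS,
	.user_mute		= CS35L63_MAIN_RENDER_USER_MUTE,
	.user_volume		= CS35L63_MAIN_RENDER_USER_VOLUME,
	.posture_number		= CS35L63_MAIN_POSTURE_NUMBER,
};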
diff --git a/include/sound/cs42l52.h b/include/sound/cs42l52.h
deleted file mode 100644
index c20649666abe..000000000000
--- a/include/sound/cs42l52.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * linux/sound/cs42l52.h -- Platform data for CS42L52
- *
- * Copyright (c) 2012 Cirrus Logic Inc.
- */
-
-#ifndef __CS42L52_H
-#define __CS42L52_H
-
-struct cs42l52_platform_data {
-
- /* MICBIAS Level. Check datasheet Pg48 */
- unsigned int micbias_lvl;
-
- /* MICA mode selection Differential or Single-ended */
- bool mica_diff_cfg;
-
- /* MICB mode selection Differential or Single-ended */
- bool micb_diff_cfg;
-
- /* Charge Pump Freq. Check datasheet Pg73 */
- unsigned int chgfreq;
-
- /* Reset GPIO */
- unsigned int reset_gpio;
-};
-
-#endif /* __CS42L52_H */
diff --git a/include/sound/cs42l56.h b/include/sound/cs42l56.h
deleted file mode 100644
index 62e9f7a3b414..000000000000
--- a/include/sound/cs42l56.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * linux/sound/cs42l56.h -- Platform data for CS42L56
- *
- * Copyright (c) 2014 Cirrus Logic Inc.
- */
-
-#ifndef __CS42L56_H
-#define __CS42L56_H
-
-struct cs42l56_platform_data {
-
- /* GPIO for Reset */
- unsigned int gpio_nreset;
-
- /* MICBIAS Level. Check datasheet Pg48 */
- unsigned int micbias_lvl;
-
- /* Analog Input 1A Reference 0=Single 1=Pseudo-Differential */
- unsigned int ain1a_ref_cfg;
-
- /* Analog Input 2A Reference 0=Single 1=Pseudo-Differential */
- unsigned int ain2a_ref_cfg;
-
- /* Analog Input 1B Reference 0=Single 1=Pseudo-Differential */
- unsigned int ain1b_ref_cfg;
-
- /* Analog Input 2B Reference 0=Single 1=Pseudo-Differential */
- unsigned int ain2b_ref_cfg;
-
- /* Charge Pump Freq. Check datasheet Pg62 */
- unsigned int chgfreq;
-
- /* HighPass Filter Right Channel Corner Frequency */
- unsigned int hpfb_freq;
-
- /* HighPass Filter Left Channel Corner Frequency */
- unsigned int hpfa_freq;
-
- /* Adaptive Power Control for LO/HP */
- unsigned int adaptive_pwr;
-
-};
-
-#endif /* __CS42L56_H */
diff --git a/include/sound/cs42l73.h b/include/sound/cs42l73.h
deleted file mode 100644
index 5a93393b6124..000000000000
--- a/include/sound/cs42l73.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * linux/sound/cs42l73.h -- Platform data for CS42L73
- *
- * Copyright (c) 2012 Cirrus Logic Inc.
- */
-
-#ifndef __CS42L73_H
-#define __CS42L73_H
-
-struct cs42l73_platform_data {
- /* RST GPIO */
- unsigned int reset_gpio;
- unsigned int chgfreq;
- int jack_detection;
- unsigned int mclk_freq;
-};
-
-#endif /* __CS42L73_H */
diff --git a/include/sound/cs48l32.h b/include/sound/cs48l32.h
new file mode 100644
index 000000000000..27b3e7cf999a
--- /dev/null
+++ b/include/sound/cs48l32.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Register definitions for Cirrus Logic CS48L32
+ *
+ * Copyright (C) 2017-2018, 2020, 2022, 2025 Cirrus Logic, Inc. and
+ * Cirrus Logic International Semiconductor Ltd.
+ */
+
+#ifndef CS48L32_H
+#define CS48L32_H
+
+/* pll_id for snd_soc_component_set_pll() */
+#define CS48L32_FLL1_REFCLK 1
+
+/* source for snd_soc_component_set_pll() */
+#define CS48L32_FLL_SRC_NONE -1
+#define CS48L32_FLL_SRC_MCLK1 0
+#define CS48L32_FLL_SRC_PDMCLK 5
+#define CS48L32_FLL_SRC_ASP1_BCLK 8
+#define CS48L32_FLL_SRC_ASP2_BCLK 9
+#define CS48L32_FLL_SRC_ASP1_FSYNC 12
+#define CS48L32_FLL_SRC_ASP2_FSYNC 13
+
+/* clk_id for snd_soc_component_set_sysclk() and snd_soc_dai_set_sysclk() */
+#define CS48L32_CLK_SYSCLK_1 1
+#define CS48L32_CLK_SYSCLK_2 2
+#define CS48L32_CLK_SYSCLK_3 3
+#define CS48L32_CLK_SYSCLK_4 4
+#define CS48L32_CLK_DSPCLK 7
+#define CS48L32_CLK_PDM_FLLCLK 13
+
+/* source for snd_soc_component_set_sysclk() */
+#define CS48L32_CLK_SRC_MCLK1 0x0
+#define CS48L32_CLK_SRC_FLL1 0x4
+#define CS48L32_CLK_SRC_ASP1_BCLK 0x8
+#define CS48L32_CLK_SRC_ASP2_BCLK 0x9
+
+struct cs48l32 {
+ struct regmap *regmap;
+ struct device *dev;
+ struct gpio_desc *reset_gpio;
+ struct clk *mclk1;
+ struct regulator_bulk_data core_supplies[2];
+ struct regulator *vdd_d;
+ int irq;
+};
+#endif
diff --git a/include/sound/cs48l32_registers.h b/include/sound/cs48l32_registers.h
new file mode 100644
index 000000000000..f29410fdf76f
--- /dev/null
+++ b/include/sound/cs48l32_registers.h
@@ -0,0 +1,530 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Register definitions for Cirrus Logic CS48L32
+ *
+ * Copyright (C) 2017-2018, 2020, 2022, 2025 Cirrus Logic, Inc. and
+ * Cirrus Logic International Semiconductor Ltd.
+ */
+
+#ifndef CS48L32_REGISTERS_H
+#define CS48L32_REGISTERS_H
+
+/* Register Addresses. */
+#define CS48L32_DEVID 0x0
+#define CS48L32_REVID 0x4
+#define CS48L32_OTPID 0x10
+#define CS48L32_SFT_RESET 0x20
+#define CS48L32_CTRL_IF_DEBUG3 0xA8
+#define CS48L32_MCU_CTRL1 0x804
+#define CS48L32_GPIO1_CTRL1 0xc08
+#define CS48L32_GPIO3_CTRL1 0xc10
+#define CS48L32_GPIO7_CTRL1 0xc20
+#define CS48L32_GPIO16_CTRL1 0xc44
+#define CS48L32_OUTPUT_SYS_CLK 0x1020
+#define CS48L32_AUXPDM_CTRL 0x1044
+#define CS48L32_AUXPDM_CTRL2 0x105c
+#define CS48L32_CLOCK32K 0x1400
+#define CS48L32_SYSTEM_CLOCK1 0x1404
+#define CS48L32_SYSTEM_CLOCK2 0x1408
+#define CS48L32_SAMPLE_RATE1 0x1420
+#define CS48L32_SAMPLE_RATE2 0x1424
+#define CS48L32_SAMPLE_RATE3 0x1428
+#define CS48L32_SAMPLE_RATE4 0x142c
+#define CS48L32_DSP_CLOCK1 0x1510
+#define CS48L32_FLL1_CONTROL1 0x1c00
+#define CS48L32_FLL1_CONTROL5 0x1c10
+#define CS48L32_FLL1_CONTROL6 0x1c14
+#define CS48L32_FLL1_GPIO_CLOCK 0x1ca0
+#define CS48L32_CHARGE_PUMP1 0x2000
+#define CS48L32_LDO2_CTRL1 0x2408
+#define CS48L32_MICBIAS_CTRL1 0x2410
+#define CS48L32_MICBIAS_CTRL5 0x2418
+#define CS48L32_IRQ1_CTRL_AOD 0x2710
+#define CS48L32_AOD_PAD_CTRL 0x2718
+#define CS48L32_INPUT_CONTROL 0x4000
+#define CS48L32_INPUT_STATUS 0x4004
+#define CS48L32_INPUT_RATE_CONTROL 0x4008
+#define CS48L32_INPUT_CONTROL2 0x400c
+#define CS48L32_INPUT_CONTROL3 0x4014
+#define CS48L32_INPUT1_CONTROL1 0x4020
+#define CS48L32_IN1L_CONTROL1 0x4024
+#define CS48L32_IN1L_CONTROL2 0x4028
+#define CS48L32_IN1R_CONTROL1 0x4044
+#define CS48L32_IN1R_CONTROL2 0x4048
+#define CS48L32_INPUT2_CONTROL1 0x4060
+#define CS48L32_IN2L_CONTROL1 0x4064
+#define CS48L32_IN2L_CONTROL2 0x4068
+#define CS48L32_IN2R_CONTROL1 0x4084
+#define CS48L32_IN2R_CONTROL2 0x4088
+#define CS48L32_INPUT_HPF_CONTROL 0x4244
+#define CS48L32_INPUT_VOL_CONTROL 0x4248
+#define CS48L32_AUXPDM_CONTROL1 0x4300
+#define CS48L32_AUXPDM_CONTROL2 0x4304
+#define CS48L32_AUXPDM1_CONTROL1 0x4308
+#define CS48L32_AUXPDM2_CONTROL1 0x4310
+#define CS48L32_ADC1L_ANA_CONTROL1 0x4688
+#define CS48L32_ADC1R_ANA_CONTROL1 0x468c
+#define CS48L32_ASP1_ENABLES1 0x6000
+#define CS48L32_ASP1_CONTROL3 0x600C
+#define CS48L32_ASP1_DATA_CONTROL5 0x6040
+#define CS48L32_ASP2_ENABLES1 0x6080
+#define CS48L32_ASP2_CONTROL3 0x608C
+#define CS48L32_ASP2_DATA_CONTROL5 0x60c0
+#define CS48L32_ASP1TX1_INPUT1 0x8200
+#define CS48L32_ASP1TX2_INPUT1 0x8210
+#define CS48L32_ASP1TX3_INPUT1 0x8220
+#define CS48L32_ASP1TX4_INPUT1 0x8230
+#define CS48L32_ASP1TX5_INPUT1 0x8240
+#define CS48L32_ASP1TX6_INPUT1 0x8250
+#define CS48L32_ASP1TX7_INPUT1 0x8260
+#define CS48L32_ASP1TX8_INPUT1 0x8270
+#define CS48L32_ASP1TX8_INPUT4 0x827c
+#define CS48L32_ASP2TX1_INPUT1 0x8300
+#define CS48L32_ASP2TX2_INPUT1 0x8310
+#define CS48L32_ASP2TX3_INPUT1 0x8320
+#define CS48L32_ASP2TX4_INPUT1 0x8330
+#define CS48L32_ASP2TX4_INPUT4 0x833c
+#define CS48L32_ISRC1INT1_INPUT1 0x8980
+#define CS48L32_ISRC1INT2_INPUT1 0x8990
+#define CS48L32_ISRC1INT3_INPUT1 0x89a0
+#define CS48L32_ISRC1INT4_INPUT1 0x89b0
+#define CS48L32_ISRC1DEC1_INPUT1 0x89c0
+#define CS48L32_ISRC1DEC2_INPUT1 0x89d0
+#define CS48L32_ISRC1DEC3_INPUT1 0x89e0
+#define CS48L32_ISRC1DEC4_INPUT1 0x89f0
+#define CS48L32_ISRC2INT1_INPUT1 0x8a00
+#define CS48L32_ISRC2INT2_INPUT1 0x8a10
+#define CS48L32_ISRC2DEC1_INPUT1 0x8a40
+#define CS48L32_ISRC2DEC2_INPUT1 0x8a50
+#define CS48L32_ISRC3INT1_INPUT1 0x8a80
+#define CS48L32_ISRC3INT2_INPUT1 0x8a90
+#define CS48L32_ISRC3DEC1_INPUT1 0x8ac0
+#define CS48L32_ISRC3DEC2_INPUT1 0x8ad0
+#define CS48L32_EQ1_INPUT1 0x8b80
+#define CS48L32_EQ2_INPUT1 0x8b90
+#define CS48L32_EQ3_INPUT1 0x8ba0
+#define CS48L32_EQ4_INPUT1 0x8bb0
+#define CS48L32_EQ4_INPUT4 0x8bbc
+#define CS48L32_DRC1L_INPUT1 0x8c00
+#define CS48L32_DRC1R_INPUT1 0x8c10
+#define CS48L32_DRC1R_INPUT4 0x8c1c
+#define CS48L32_DRC2L_INPUT1 0x8c20
+#define CS48L32_DRC2R_INPUT1 0x8c30
+#define CS48L32_DRC2R_INPUT4 0x8c3c
+#define CS48L32_LHPF1_INPUT1 0x8c80
+#define CS48L32_LHPF1_INPUT4 0x8c8c
+#define CS48L32_LHPF2_INPUT1 0x8c90
+#define CS48L32_LHPF2_INPUT4 0x8c9c
+#define CS48L32_LHPF3_INPUT1 0x8ca0
+#define CS48L32_LHPF3_INPUT4 0x8cac
+#define CS48L32_LHPF4_INPUT1 0x8cb0
+#define CS48L32_LHPF4_INPUT4 0x8cbc
+#define CS48L32_DSP1RX1_INPUT1 0x9000
+#define CS48L32_DSP1RX2_INPUT1 0x9010
+#define CS48L32_DSP1RX3_INPUT1 0x9020
+#define CS48L32_DSP1RX4_INPUT1 0x9030
+#define CS48L32_DSP1RX5_INPUT1 0x9040
+#define CS48L32_DSP1RX6_INPUT1 0x9050
+#define CS48L32_DSP1RX7_INPUT1 0x9060
+#define CS48L32_DSP1RX8_INPUT1 0x9070
+#define CS48L32_DSP1RX8_INPUT4 0x907c
+#define CS48L32_ISRC1_CONTROL1 0xa400
+#define CS48L32_ISRC1_CONTROL2 0xa404
+#define CS48L32_ISRC2_CONTROL1 0xa510
+#define CS48L32_ISRC2_CONTROL2 0xa514
+#define CS48L32_ISRC3_CONTROL1 0xa620
+#define CS48L32_ISRC3_CONTROL2 0xa624
+#define CS48L32_FX_SAMPLE_RATE 0xa800
+#define CS48L32_EQ_CONTROL1 0xa808
+#define CS48L32_EQ_CONTROL2 0xa80c
+#define CS48L32_EQ1_GAIN1 0xa810
+#define CS48L32_EQ1_GAIN2 0xa814
+#define CS48L32_EQ1_BAND1_COEFF1 0xa818
+#define CS48L32_EQ1_BAND1_COEFF2 0xa81c
+#define CS48L32_EQ1_BAND1_PG 0xa820
+#define CS48L32_EQ1_BAND2_COEFF1 0xa824
+#define CS48L32_EQ1_BAND2_COEFF2 0xa828
+#define CS48L32_EQ1_BAND2_PG 0xa82c
+#define CS48L32_EQ1_BAND3_COEFF1 0xa830
+#define CS48L32_EQ1_BAND3_COEFF2 0xa834
+#define CS48L32_EQ1_BAND3_PG 0xa838
+#define CS48L32_EQ1_BAND4_COEFF1 0xa83c
+#define CS48L32_EQ1_BAND4_COEFF2 0xa840
+#define CS48L32_EQ1_BAND4_PG 0xa844
+#define CS48L32_EQ1_BAND5_COEFF1 0xa848
+#define CS48L32_EQ1_BAND5_PG 0xa850
+#define CS48L32_EQ2_GAIN1 0xa854
+#define CS48L32_EQ2_GAIN2 0xa858
+#define CS48L32_EQ2_BAND1_COEFF1 0xa85c
+#define CS48L32_EQ2_BAND1_COEFF2 0xa860
+#define CS48L32_EQ2_BAND1_PG 0xa864
+#define CS48L32_EQ2_BAND2_COEFF1 0xa868
+#define CS48L32_EQ2_BAND2_COEFF2 0xa86c
+#define CS48L32_EQ2_BAND2_PG 0xa870
+#define CS48L32_EQ2_BAND3_COEFF1 0xa874
+#define CS48L32_EQ2_BAND3_COEFF2 0xa878
+#define CS48L32_EQ2_BAND3_PG 0xa87c
+#define CS48L32_EQ2_BAND4_COEFF1 0xa880
+#define CS48L32_EQ2_BAND4_COEFF2 0xa884
+#define CS48L32_EQ2_BAND4_PG 0xa888
+#define CS48L32_EQ2_BAND5_COEFF1 0xa88c
+#define CS48L32_EQ2_BAND5_PG 0xa894
+#define CS48L32_EQ3_GAIN1 0xa898
+#define CS48L32_EQ3_GAIN2 0xa89c
+#define CS48L32_EQ3_BAND1_COEFF1 0xa8a0
+#define CS48L32_EQ3_BAND1_COEFF2 0xa8a4
+#define CS48L32_EQ3_BAND1_PG 0xa8a8
+#define CS48L32_EQ3_BAND2_COEFF1 0xa8ac
+#define CS48L32_EQ3_BAND2_COEFF2 0xa8b0
+#define CS48L32_EQ3_BAND2_PG 0xa8b4
+#define CS48L32_EQ3_BAND3_COEFF1 0xa8b8
+#define CS48L32_EQ3_BAND3_COEFF2 0xa8bc
+#define CS48L32_EQ3_BAND3_PG 0xa8c0
+#define CS48L32_EQ3_BAND4_COEFF1 0xa8c4
+#define CS48L32_EQ3_BAND4_COEFF2 0xa8c8
+#define CS48L32_EQ3_BAND4_PG 0xa8cc
+#define CS48L32_EQ3_BAND5_COEFF1 0xa8d0
+#define CS48L32_EQ3_BAND5_PG 0xa8d8
+#define CS48L32_EQ4_GAIN1 0xa8dc
+#define CS48L32_EQ4_GAIN2 0xa8e0
+#define CS48L32_EQ4_BAND1_COEFF1 0xa8e4
+#define CS48L32_EQ4_BAND1_COEFF2 0xa8e8
+#define CS48L32_EQ4_BAND1_PG 0xa8ec
+#define CS48L32_EQ4_BAND2_COEFF1 0xa8f0
+#define CS48L32_EQ4_BAND2_COEFF2 0xa8f4
+#define CS48L32_EQ4_BAND2_PG 0xa8f8
+#define CS48L32_EQ4_BAND3_COEFF1 0xa8fc
+#define CS48L32_EQ4_BAND3_COEFF2 0xa900
+#define CS48L32_EQ4_BAND3_PG 0xa904
+#define CS48L32_EQ4_BAND4_COEFF1 0xa908
+#define CS48L32_EQ4_BAND4_COEFF2 0xa90c
+#define CS48L32_EQ4_BAND4_PG 0xa910
+#define CS48L32_EQ4_BAND5_COEFF1 0xa914
+#define CS48L32_EQ4_BAND5_PG 0xa91c
+#define CS48L32_LHPF_CONTROL1 0xaa30
+#define CS48L32_LHPF_CONTROL2 0xaa34
+#define CS48L32_LHPF1_COEFF 0xaa38
+#define CS48L32_LHPF2_COEFF 0xaa3c
+#define CS48L32_LHPF3_COEFF 0xaa40
+#define CS48L32_LHPF4_COEFF 0xaa44
+#define CS48L32_DRC1_CONTROL1 0xab00
+#define CS48L32_DRC1_CONTROL4 0xab0c
+#define CS48L32_DRC2_CONTROL1 0xab14
+#define CS48L32_DRC2_CONTROL4 0xab20
+#define CS48L32_TONE_GENERATOR1 0xb000
+#define CS48L32_TONE_GENERATOR2 0xb004
+#define CS48L32_COMFORT_NOISE_GENERATOR 0xb400
+#define CS48L32_US_CONTROL 0xb800
+#define CS48L32_US1_CONTROL 0xb804
+#define CS48L32_US1_DET_CONTROL 0xb808
+#define CS48L32_US2_CONTROL 0xb814
+#define CS48L32_US2_DET_CONTROL 0xb818
+#define CS48L32_DSP1_XM_SRAM_IBUS_SETUP_0 0x1700c
+#define CS48L32_DSP1_XM_SRAM_IBUS_SETUP_1 0x17010
+#define CS48L32_DSP1_XM_SRAM_IBUS_SETUP_24 0x1706c
+#define CS48L32_DSP1_YM_SRAM_IBUS_SETUP_0 0x17070
+#define CS48L32_DSP1_YM_SRAM_IBUS_SETUP_1 0x17074
+#define CS48L32_DSP1_YM_SRAM_IBUS_SETUP_8 0x17090
+#define CS48L32_DSP1_PM_SRAM_IBUS_SETUP_0 0x17094
+#define CS48L32_DSP1_PM_SRAM_IBUS_SETUP_1 0x17098
+#define CS48L32_DSP1_PM_SRAM_IBUS_SETUP_7 0x170b0
+#define CS48L32_IRQ1_STATUS 0x18004
+#define CS48L32_IRQ1_EINT_1 0x18010
+#define CS48L32_IRQ1_EINT_2 0x18014
+#define CS48L32_IRQ1_EINT_7 0x18028
+#define CS48L32_IRQ1_EINT_9 0x18030
+#define CS48L32_IRQ1_EINT_11 0x18038
+#define CS48L32_IRQ1_STS_1 0x18090
+#define CS48L32_IRQ1_STS_6 0x180a4
+#define CS48L32_IRQ1_STS_11 0x180b8
+#define CS48L32_IRQ1_MASK_1 0x18110
+#define CS48L32_IRQ1_MASK_2 0x18114
+#define CS48L32_IRQ1_MASK_7 0x18128
+#define CS48L32_IRQ1_MASK_9 0x18130
+#define CS48L32_IRQ1_MASK_11 0x18138
+#define CS48L32_DSP1_XMEM_PACKED_0 0x2000000
+#define CS48L32_DSP1_XMEM_PACKED_LAST 0x208fff0
+#define CS48L32_DSP1_SYS_INFO_ID 0x25e0000
+#define CS48L32_DSP1_AHBM_WINDOW_DEBUG_1 0x25e2044
+#define CS48L32_DSP1_XMEM_UNPACKED24_0 0x2800000
+#define CS48L32_DSP1_XMEM_UNPACKED24_LAST 0x28bfff4
+#define CS48L32_DSP1_CLOCK_FREQ 0x2b80000
+#define CS48L32_DSP1_SAMPLE_RATE_TX8 0x2b802b8
+#define CS48L32_DSP1_SCRATCH1 0x2b805c0
+#define CS48L32_DSP1_SCRATCH4 0x2b805d8
+#define CS48L32_DSP1_CCM_CORE_CONTROL 0x2bc1000
+#define CS48L32_DSP1_STREAM_ARB_RESYNC_MSK1 0x2bc5a00
+#define CS48L32_DSP1_YMEM_PACKED_0 0x2c00000
+#define CS48L32_DSP1_YMEM_PACKED_LAST 0x2c2fff0
+#define CS48L32_DSP1_YMEM_UNPACKED24_0 0x3400000
+#define CS48L32_DSP1_YMEM_UNPACKED24_LAST 0x343fff4
+#define CS48L32_DSP1_PMEM_0 0x3800000
+#define CS48L32_DSP1_PMEM_LAST 0x3845fe8
+
+/* (0x0) DEVID */
+#define CS48L32_DEVID_MASK 0x00ffffff
+#define CS48L32_DEVID_SHIFT 0
+
+/* (0x4) REVID */
+#define CS48L32_AREVID_MASK 0x000000f0
+#define CS48L32_AREVID_SHIFT 4
+#define CS48L32_MTLREVID_MASK 0x0000000f
+#define CS48L32_MTLREVID_SHIFT 0
+
+/* (0x10) OTPID */
+#define CS48L32_OTPID_MASK 0x0000000f
+
+/* (0x0804) MCU_CTRL1 */
+#define CS48L32_MCU_STS_MASK 0x0000ff00
+#define CS48L32_MCU_STS_SHIFT 8
+
+/* (0xc08) GPIO1_CTRL1 */
+#define CS48L32_GPIOX_CTRL1_FN_MASK 0x000003ff
+
+/* (0x1020) OUTPUT_SYS_CLK */
+#define CS48L32_OPCLK_EN_SHIFT 15
+#define CS48L32_OPCLK_DIV_MASK 0x000000f8
+#define CS48L32_OPCLK_DIV_SHIFT 3
+#define CS48L32_OPCLK_SEL_MASK 0x00000007
+
+/* (0x105c) AUXPDM_CTRL2 */
+#define CS48L32_AUXPDMDAT2_SRC_SHIFT 4
+#define CS48L32_AUXPDMDAT1_SRC_SHIFT 0
+
+/* (0x1400) CLOCK32K */
+#define CS48L32_CLK_32K_EN_MASK 0x00000040
+#define CS48L32_CLK_32K_SRC_MASK 0x00000003
+
+/* (0x1404) SYSTEM_CLOCK1 */
+#define CS48L32_SYSCLK_FRAC_MASK 0x00008000
+#define CS48L32_SYSCLK_FREQ_MASK 0x00000700
+#define CS48L32_SYSCLK_FREQ_SHIFT 8
+#define CS48L32_SYSCLK_EN_SHIFT 6
+#define CS48L32_SYSCLK_SRC_MASK 0x0000001f
+#define CS48L32_SYSCLK_SRC_SHIFT 0
+
+/* (0x1408) SYSTEM_CLOCK2 */
+#define CS48L32_SYSCLK_FREQ_STS_MASK 0x00000700
+#define CS48L32_SYSCLK_FREQ_STS_SHIFT 8
+
+/* (0x1420) SAMPLE_RATE1 */
+#define CS48L32_SAMPLE_RATE_1_MASK 0x0000001f
+#define CS48L32_SAMPLE_RATE_1_SHIFT 0
+
+/* (0x1510) DSP_CLOCK1 */
+#define CS48L32_DSP_CLK_FREQ_MASK 0xffff0000
+#define CS48L32_DSP_CLK_FREQ_SHIFT 16
+
+/* (0x1c00) FLL_CONTROL1 */
+#define CS48L32_FLL_CTRL_UPD_MASK 0x00000004
+#define CS48L32_FLL_HOLD_MASK 0x00000002
+#define CS48L32_FLL_EN_MASK 0x00000001
+
+/* (0x1c04) FLL_CONTROL2 */
+#define CS48L32_FLL_LOCKDET_THR_MASK 0xf0000000
+#define CS48L32_FLL_LOCKDET_THR_SHIFT 28
+#define CS48L32_FLL_LOCKDET_MASK 0x08000000
+#define CS48L32_FLL_PHASEDET_MASK 0x00400000
+#define CS48L32_FLL_PHASEDET_SHIFT 22
+#define CS48L32_FLL_REFCLK_DIV_MASK 0x00030000
+#define CS48L32_FLL_REFCLK_DIV_SHIFT 16
+#define CS48L32_FLL_REFCLK_SRC_MASK 0x0000f000
+#define CS48L32_FLL_REFCLK_SRC_SHIFT 12
+#define CS48L32_FLL_N_MASK 0x000003ff
+#define CS48L32_FLL_N_SHIFT 0
+
+/* (0x1c08) FLL_CONTROL3 */
+#define CS48L32_FLL_LAMBDA_MASK 0xffff0000
+#define CS48L32_FLL_LAMBDA_SHIFT 16
+#define CS48L32_FLL_THETA_MASK 0x0000ffff
+#define CS48L32_FLL_THETA_SHIFT 0
+
+/* (0x1c0c) FLL_CONTROL4 */
+#define CS48L32_FLL_FD_GAIN_COARSE_SHIFT 16
+#define CS48L32_FLL_HP_MASK 0x00003000
+#define CS48L32_FLL_HP_SHIFT 12
+#define CS48L32_FLL_FB_DIV_MASK 0x000003ff
+#define CS48L32_FLL_FB_DIV_SHIFT 0
+
+/* (0x1c10) FLL_CONTROL5 */
+#define CS48L32_FLL_FRC_INTEG_UPD_MASK 0x00008000
+
+/* (0x2000) CHARGE_PUMP1 */
+#define CS48L32_CP2_BYPASS_SHIFT 1
+#define CS48L32_CP2_EN_SHIFT 0
+
+/* (0x2408) LDO2_CTRL1 */
+#define CS48L32_LDO2_VSEL_MASK 0x000007e0
+#define CS48L32_LDO2_VSEL_SHIFT 5
+
+/* (0x2410) MICBIAS_CTRL1 */
+#define CS48L32_MICB1_LVL_MASK 0x000001e0
+#define CS48L32_MICB1_LVL_SHIFT 5
+#define CS48L32_MICB1_EN_SHIFT 0
+
+/* (0x2418) MICBIAS_CTRL5 */
+#define CS48L32_MICB1C_EN_SHIFT 8
+#define CS48L32_MICB1B_EN_SHIFT 4
+#define CS48L32_MICB1A_EN_SHIFT 0
+
+/* (0x2710) IRQ1_CTRL_AOD */
+#define CS48L32_IRQ_POL_MASK 0x00000400
+
+/* (0x4000) INPUT_CONTROL */
+#define CS48L32_IN2L_EN_SHIFT 3
+#define CS48L32_IN2R_EN_SHIFT 2
+#define CS48L32_IN1L_EN_SHIFT 1
+#define CS48L32_IN1R_EN_SHIFT 0
+
+/* (0x400c) INPUT_CONTROL2 */
+#define CS48L32_PDM_FLLCLK_SRC_MASK 0x0000000f
+#define CS48L32_PDM_FLLCLK_SRC_SHIFT 0
+
+/* (0x4014) INPUT_CONTROL3 */
+#define CS48L32_IN_VU 0x20000000
+#define CS48L32_IN_VU_MASK 0x20000000
+#define CS48L32_IN_VU_SHIFT 29
+#define CS48L32_IN_VU_WIDTH 1
+
+/* (0x4020) INPUT1_CONTROL1 */
+#define CS48L32_IN1_OSR_SHIFT 16
+#define CS48L32_IN1_PDM_SUP_MASK 0x00000300
+#define CS48L32_IN1_PDM_SUP_SHIFT 8
+#define CS48L32_IN1_MODE_SHIFT 0
+
+/*
+ * (0x4024) IN1L_CONTROL1
+ * (0x4044) IN1R_CONTROL1
+ */
+#define CS48L32_INx_SRC_MASK 0x30000000
+#define CS48L32_INx_SRC_SHIFT 28
+#define CS48L32_INx_RATE_MASK 0x0000f800
+#define CS48L32_INx_RATE_SHIFT 11
+#define CS48L32_INx_HPF_SHIFT 2
+#define CS48L32_INx_LP_MODE_SHIFT 0
+
+/*
+ * (0x4028) IN1L_CONTROL2
+ * (0x4048) IN1R_CONTROL2
+ */
+#define CS48L32_INx_MUTE_MASK 0x10000000
+#define CS48L32_INx_VOL_SHIFT 16
+#define CS48L32_INx_PGA_VOL_SHIFT 1
+
+/* (0x4244) INPUT_HPF_CONTROL */
+#define CS48L32_IN_HPF_CUT_SHIFT 0
+
+/* (0x4248) INPUT_VOL_CONTROL */
+#define CS48L32_IN_VD_RAMP_SHIFT 4
+#define CS48L32_IN_VI_RAMP_SHIFT 0
+
+/* (0x4308) AUXPDM1_CONTROL1 */
+#define CS48L32_AUXPDM1_FREQ_SHIFT 16
+#define CS48L32_AUXPDM1_SRC_MASK 0x00000f00
+#define CS48L32_AUXPDM1_SRC_SHIFT 8
+
+/* (0x4688) ADC1L_ANA_CONTROL1 */
+/* (0x468c) ADC1R_ANA_CONTROL1 */
+#define CS48L32_ADC1x_INT_ENA_FRC_MASK 0x00000002
+
+/* (0x6004) ASPn_CONTROL1 */
+#define CS48L32_ASP_RATE_MASK 0x00001f00
+#define CS48L32_ASP_RATE_SHIFT 8
+#define CS48L32_ASP_BCLK_FREQ_MASK 0x0000003f
+
+/* (0x6008) ASPn_CONTROL2 */
+#define CS48L32_ASP_RX_WIDTH_MASK 0xff000000
+#define CS48L32_ASP_RX_WIDTH_SHIFT 24
+#define CS48L32_ASP_TX_WIDTH_MASK 0x00ff0000
+#define CS48L32_ASP_TX_WIDTH_SHIFT 16
+#define CS48L32_ASP_FMT_MASK 0x00000700
+#define CS48L32_ASP_FMT_SHIFT 8
+#define CS48L32_ASP_BCLK_INV_MASK 0x00000040
+#define CS48L32_ASP_BCLK_MSTR_MASK 0x00000010
+#define CS48L32_ASP_FSYNC_INV_MASK 0x00000004
+#define CS48L32_ASP_FSYNC_MSTR_MASK 0x00000001
+
+/* (0x6010) ASPn_CONTROL3 */
+#define CS48L32_ASP_DOUT_HIZ_MASK 0x00000003
+
+/* (0x6030) ASPn_DATA_CONTROL1 */
+#define CS48L32_ASP_TX_WL_MASK 0x0000003f
+
+/* (0x6040) ASPn_DATA_CONTROL5 */
+#define CS48L32_ASP_RX_WL_MASK 0x0000003f
+
+/* (0x82xx - 0x90xx) *_INPUT[1-4] */
+#define CS48L32_MIXER_VOL_MASK 0x00FE0000
+#define CS48L32_MIXER_VOL_SHIFT 17
+#define CS48L32_MIXER_VOL_WIDTH 7
+#define CS48L32_MIXER_SRC_MASK 0x000001ff
+#define CS48L32_MIXER_SRC_SHIFT 0
+#define CS48L32_MIXER_SRC_WIDTH 9
+
+/* (0xa400) ISRC1_CONTROL1 */
+#define CS48L32_ISRC1_FSL_MASK 0xf8000000
+#define CS48L32_ISRC1_FSL_SHIFT 27
+#define CS48L32_ISRC1_FSH_MASK 0x0000f800
+#define CS48L32_ISRC1_FSH_SHIFT 11
+
+/* (0xa404) ISRC1_CONTROL2 */
+#define CS48L32_ISRC1_INT4_EN_SHIFT 11
+#define CS48L32_ISRC1_INT3_EN_SHIFT 10
+#define CS48L32_ISRC1_INT2_EN_SHIFT 9
+#define CS48L32_ISRC1_INT1_EN_SHIFT 8
+#define CS48L32_ISRC1_DEC4_EN_SHIFT 3
+#define CS48L32_ISRC1_DEC3_EN_SHIFT 2
+#define CS48L32_ISRC1_DEC2_EN_SHIFT 1
+#define CS48L32_ISRC1_DEC1_EN_SHIFT 0
+
+/* (0xa800) FX_SAMPLE_RATE */
+#define CS48L32_FX_RATE_MASK 0x0000f800
+#define CS48L32_FX_RATE_SHIFT 11
+
+/* (0xab00) DRC1_CONTROL1 */
+#define CS48L32_DRC1L_EN_SHIFT 1
+#define CS48L32_DRC1R_EN_SHIFT 0
+
+/* (0xb400) Comfort_Noise_Generator */
+#define CS48L32_NOISE_GEN_RATE_MASK 0x0000f800
+#define CS48L32_NOISE_GEN_RATE_SHIFT 11
+#define CS48L32_NOISE_GEN_EN_SHIFT 5
+#define CS48L32_NOISE_GEN_GAIN_SHIFT 0
+
+/* (0xb800) US_CONTROL */
+#define CS48L32_US1_DET_EN_SHIFT 8
+
+/* (0xb804) US1_CONTROL */
+#define CS48L32_US1_RATE_MASK 0xf8000000
+#define CS48L32_US1_RATE_SHIFT 27
+#define CS48L32_US1_GAIN_SHIFT 12
+#define CS48L32_US1_SRC_MASK 0x00000f00
+#define CS48L32_US1_SRC_SHIFT 8
+#define CS48L32_US1_FREQ_MASK 0x00000070
+#define CS48L32_US1_FREQ_SHIFT 4
+
+/* (0xb808) US1_DET_CONTROL */
+#define CS48L32_US1_DET_DCY_SHIFT 28
+#define CS48L32_US1_DET_HOLD_SHIFT 24
+#define CS48L32_US1_DET_NUM_SHIFT 20
+#define CS48L32_US1_DET_THR_SHIFT 16
+#define CS48L32_US1_DET_LPF_CUT_SHIFT 5
+#define CS48L32_US1_DET_LPF_SHIFT 4
+
+/* (0x18004) IRQ1_STATUS */
+#define CS48L32_IRQ1_STS_MASK 0x00000001
+
+/* (0x18014) IRQ1_EINT_2 */
+#define CS48L32_BOOT_DONE_EINT1_MASK 0x00000008
+
+/* (0x18028) IRQ1_EINT_7 */
+#define CS48L32_DSP1_MPU_ERR_EINT1_MASK 0x00200000
+#define CS48L32_DSP1_WDT_EXPIRE_EINT1_MASK 0x00100000
+
+/* (0x18030) IRQ1_EINT_9 */
+#define CS48L32_DSP1_IRQ0_EINT1_MASK 0x00000001
+
+/* (0x180a4) IRQ1_STS_6 */
+#define CS48L32_FLL1_LOCK_STS1_MASK 0x00000001
+
+#endif
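[Editor's note] A short sketch of decoding the ID registers with the masks and shifts defined above, assuming a regmap has already been set up for the device:

	unsigned int val, devid, arev, mtlrev;
	int ret;

	ret = regmap_read(regmap, CS48L32_DEVID, &val);
	if (ret)
		return ret;
	devid = (val & CS48L32_DEVID_MASK) >> CS48L32_DEVID_SHIFT;

	ret = regmap_read(regmap, CS48L32_REVID, &val);
	if (ret)
		return ret;
	arev = (val & CS48L32_AREVID_MASK) >> CS48L32_AREVID_SHIFT;
	mtlrev = (val & CS48L32_MTLREVID_MASK) >> CS48L32_MTLREVID_SHIFT;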
diff --git a/include/sound/dmaengine_pcm.h b/include/sound/dmaengine_pcm.h
index f6baa9a01868..1ef13bcdc43f 100644
--- a/include/sound/dmaengine_pcm.h
+++ b/include/sound/dmaengine_pcm.h
@@ -38,8 +38,6 @@ int snd_dmaengine_pcm_open(struct snd_pcm_substream *substream,
int snd_dmaengine_pcm_close(struct snd_pcm_substream *substream);
int snd_dmaengine_pcm_sync_stop(struct snd_pcm_substream *substream);
-int snd_dmaengine_pcm_open_request_chan(struct snd_pcm_substream *substream,
- dma_filter_fn filter_fn, void *filter_data);
int snd_dmaengine_pcm_close_release_chan(struct snd_pcm_substream *substream);
struct dma_chan *snd_dmaengine_pcm_request_channel(dma_filter_fn filter_fn,
diff --git a/include/sound/gus.h b/include/sound/gus.h
index cd8da68cab92..1c8fb6c93e50 100644
--- a/include/sound/gus.h
+++ b/include/sound/gus.h
@@ -513,22 +513,6 @@ struct _SND_IW_LFO_PROGRAM {
unsigned short depth;
};
-#if 0
-extern irqreturn_t snd_gf1_lfo_effect_interrupt(struct snd_gus_card * gus, snd_gf1_voice_t * voice);
-#endif
-extern void snd_gf1_lfo_init(struct snd_gus_card * gus);
-extern void snd_gf1_lfo_done(struct snd_gus_card * gus);
-extern void snd_gf1_lfo_program(struct snd_gus_card * gus, int voice, int lfo_type, struct _SND_IW_LFO_PROGRAM *program);
-extern void snd_gf1_lfo_enable(struct snd_gus_card * gus, int voice, int lfo_type);
-extern void snd_gf1_lfo_disable(struct snd_gus_card * gus, int voice, int lfo_type);
-extern void snd_gf1_lfo_change_freq(struct snd_gus_card * gus, int voice, int lfo_type, int freq);
-extern void snd_gf1_lfo_change_depth(struct snd_gus_card * gus, int voice, int lfo_type, int depth);
-extern void snd_gf1_lfo_setup(struct snd_gus_card * gus, int voice, int lfo_type, int freq, int current_depth, int depth, int sweep, int shape);
-extern void snd_gf1_lfo_shutdown(struct snd_gus_card * gus, int voice, int lfo_type);
-#if 0
-extern void snd_gf1_lfo_command(struct snd_gus_card * gus, int voice, unsigned char *command);
-#endif
-
/* gus_mem.c */
void snd_gf1_mem_lock(struct snd_gf1_mem * alloc, int xup);
@@ -578,14 +562,8 @@ int snd_gf1_new_mixer(struct snd_gus_card * gus);
int snd_gf1_pcm_new(struct snd_gus_card *gus, int pcm_dev, int control_index);
-#ifdef CONFIG_SND_DEBUG
-extern void snd_gf1_print_voice_registers(struct snd_gus_card * gus);
-#endif
-
/* gus.c */
-int snd_gus_use_inc(struct snd_gus_card * gus);
-void snd_gus_use_dec(struct snd_gus_card * gus);
int snd_gus_create(struct snd_card *card,
unsigned long port,
int irq, int dma1, int dma2,
diff --git a/include/sound/hdaudio.h b/include/sound/hdaudio.h
index b098ceadbe74..25668eee65cf 100644
--- a/include/sound/hdaudio.h
+++ b/include/sound/hdaudio.h
@@ -223,7 +223,7 @@ struct hdac_driver {
struct device_driver driver;
int type;
const struct hda_device_id *id_table;
- int (*match)(struct hdac_device *dev, struct hdac_driver *drv);
+ int (*match)(struct hdac_device *dev, const struct hdac_driver *drv);
void (*unsol_event)(struct hdac_device *dev, unsigned int event);
/* fields used by ext bus APIs */
@@ -235,7 +235,7 @@ struct hdac_driver {
#define drv_to_hdac_driver(_drv) container_of(_drv, struct hdac_driver, driver)
const struct hda_device_id *
-hdac_get_device_id(struct hdac_device *hdev, struct hdac_driver *drv);
+hdac_get_device_id(struct hdac_device *hdev, const struct hdac_driver *drv);
/*
* Bus verb operators
@@ -598,8 +598,6 @@ void snd_hdac_stream_spbcap_enable(struct hdac_bus *chip,
bool enable, int index);
int snd_hdac_stream_set_spib(struct hdac_bus *bus,
struct hdac_stream *azx_dev, u32 value);
-int snd_hdac_stream_get_spbmaxfifo(struct hdac_bus *bus,
- struct hdac_stream *azx_dev);
void snd_hdac_stream_drsm_enable(struct hdac_bus *bus,
bool enable, int index);
int snd_hdac_stream_wait_drsm(struct hdac_stream *azx_dev);
diff --git a/include/sound/hdaudio_ext.h b/include/sound/hdaudio_ext.h
index 4c7a40e149a5..7de390022ac2 100644
--- a/include/sound/hdaudio_ext.h
+++ b/include/sound/hdaudio_ext.h
@@ -22,6 +22,7 @@ void snd_hdac_ext_bus_ppcap_enable(struct hdac_bus *chip, bool enable);
void snd_hdac_ext_bus_ppcap_int_enable(struct hdac_bus *chip, bool enable);
int snd_hdac_ext_bus_get_ml_capabilities(struct hdac_bus *bus);
+struct hdac_ext_link *snd_hdac_ext_bus_get_hlink_by_id(struct hdac_bus *bus, u32 id);
struct hdac_ext_link *snd_hdac_ext_bus_get_hlink_by_addr(struct hdac_bus *bus, int addr);
struct hdac_ext_link *snd_hdac_ext_bus_get_hlink_by_name(struct hdac_bus *bus,
const char *codec_name);
@@ -97,12 +98,17 @@ struct hdac_ext_link {
void __iomem *ml_addr; /* link output stream reg pointer */
u32 lcaps; /* link capablities */
u16 lsdiid; /* link sdi identifier */
+ u32 id;
+ u8 slcount;
int ref_count;
struct list_head list;
};
+#define hdac_ext_link_alt(link) ((link)->lcaps & AZX_ML_HDA_LCAP_ALT)
+#define hdac_ext_link_ofls(link) ((link)->lcaps & AZX_ML_HDA_LCAP_OFLS)
+
int snd_hdac_ext_bus_link_power_up(struct hdac_ext_link *hlink);
int snd_hdac_ext_bus_link_power_down(struct hdac_ext_link *hlink);
int snd_hdac_ext_bus_link_power_up_all(struct hdac_bus *bus);
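[Editor's note] An illustrative fragment combining the new by-id lookup with the lcaps test macros added above (bus and link_id are assumed locals):

	struct hdac_ext_link *hlink;

	hlink = snd_hdac_ext_bus_get_hlink_by_id(bus, link_id);
	if (hlink && hdac_ext_link_alt(hlink))
		snd_hdac_ext_bus_link_power_up(hlink);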
diff --git a/include/sound/jack.h b/include/sound/jack.h
index 1ed90e2109e9..36dc104c1145 100644
--- a/include/sound/jack.h
+++ b/include/sound/jack.h
@@ -79,7 +79,6 @@ int snd_jack_new(struct snd_card *card, const char *id, int type,
struct snd_jack **jack, bool initial_kctl, bool phantom_jack);
int snd_jack_add_new_kctl(struct snd_jack *jack, const char * name, int mask);
#ifdef CONFIG_SND_JACK_INPUT_DEV
-void snd_jack_set_parent(struct snd_jack *jack, struct device *parent);
int snd_jack_set_key(struct snd_jack *jack, enum snd_jack_types type,
int keytype);
#endif
@@ -104,11 +103,6 @@ static inline void snd_jack_report(struct snd_jack *jack, int status)
#endif
#if !defined(CONFIG_SND_JACK) || !defined(CONFIG_SND_JACK_INPUT_DEV)
-static inline void snd_jack_set_parent(struct snd_jack *jack,
- struct device *parent)
-{
-}
-
static inline int snd_jack_set_key(struct snd_jack *jack,
enum snd_jack_types type,
int keytype)
diff --git a/include/sound/pcm.h b/include/sound/pcm.h
index 8582d22f3818..58fd6e84f961 100644
--- a/include/sound/pcm.h
+++ b/include/sound/pcm.h
@@ -1251,8 +1251,6 @@ unsigned int snd_pcm_rate_to_rate_bit(unsigned int rate);
unsigned int snd_pcm_rate_bit_to_rate(unsigned int rate_bit);
unsigned int snd_pcm_rate_mask_intersect(unsigned int rates_a,
unsigned int rates_b);
-unsigned int snd_pcm_rate_range_to_bits(unsigned int rate_min,
- unsigned int rate_max);
/**
* snd_pcm_set_runtime_buffer - Set the PCM runtime buffer
diff --git a/include/sound/sdca_asoc.h b/include/sound/sdca_asoc.h
new file mode 100644
index 000000000000..9121531f0826
--- /dev/null
+++ b/include/sound/sdca_asoc.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * The MIPI SDCA specification is available for public downloads at
+ * https://www.mipi.org/mipi-sdca-v1-0-download
+ *
+ * Copyright (C) 2025 Cirrus Logic, Inc. and
+ * Cirrus Logic International Semiconductor Ltd.
+ */
+
+#ifndef __SDCA_ASOC_H__
+#define __SDCA_ASOC_H__
+
+struct device;
+struct sdca_function_data;
+struct snd_kcontrol_new;
+struct snd_soc_component_driver;
+struct snd_soc_dai_driver;
+struct snd_soc_dai_ops;
+struct snd_soc_dapm_route;
+struct snd_soc_dapm_widget;
+
+int sdca_asoc_count_component(struct device *dev, struct sdca_function_data *function,
+ int *num_widgets, int *num_routes, int *num_controls,
+ int *num_dais);
+
+int sdca_asoc_populate_dapm(struct device *dev, struct sdca_function_data *function,
+ struct snd_soc_dapm_widget *widgets,
+ struct snd_soc_dapm_route *routes);
+int sdca_asoc_populate_controls(struct device *dev,
+ struct sdca_function_data *function,
+ struct snd_kcontrol_new *kctl);
+int sdca_asoc_populate_dais(struct device *dev, struct sdca_function_data *function,
+ struct snd_soc_dai_driver *dais,
+ const struct snd_soc_dai_ops *ops);
+
+int sdca_asoc_populate_component(struct device *dev,
+ struct sdca_function_data *function,
+ struct snd_soc_component_driver *component_drv,
+ struct snd_soc_dai_driver **dai_drv, int *num_dai_drv,
+ const struct snd_soc_dai_ops *ops);
+
+#endif // __SDCA_ASOC_H__
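[Editor's note] A hedged sketch of the count-then-populate flow this header suggests: size the arrays with sdca_asoc_count_component(), allocate, then fill them in. Error handling is trimmed and the devm_kcalloc() sizing is an assumption based on the count outputs:

	struct snd_soc_dapm_widget *widgets;
	struct snd_soc_dapm_route *routes;
	int num_widgets, num_routes, num_controls, num_dais;
	int ret;

	ret = sdca_asoc_count_component(dev, function, &num_widgets,
					&num_routes, &num_controls, &num_dais);
	if (ret)
		return ret;

	widgets = devm_kcalloc(dev, num_widgets, sizeof(*widgets), GFP_KERNEL);
	routes = devm_kcalloc(dev, num_routes, sizeof(*routes), GFP_KERNEL);
	if (!widgets || !routes)
		return -ENOMEM;

	ret = sdca_asoc_populate_dapm(dev, function, widgets, routes);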
diff --git a/include/sound/sdca_function.h b/include/sound/sdca_function.h
index 253654568a41..eaedb54a8322 100644
--- a/include/sound/sdca_function.h
+++ b/include/sound/sdca_function.h
@@ -125,7 +125,7 @@ struct sdca_init_write {
* macros.
*
* Shorthand to specify a Control type statically, for example:
- * SDAC_CTL_TYPE_S(IT, MIC_BIAS).
+ * SDCA_CTL_TYPE_S(IT, MIC_BIAS).
*/
#define SDCA_CTL_TYPE_S(ent, sel) SDCA_CTL_TYPE(SDCA_ENTITY_TYPE_##ent, \
SDCA_CTL_##ent##_##sel)
@@ -169,6 +169,20 @@ enum sdca_ot_controls {
};
/**
+ * enum sdca_usage_range - Column definitions for Usage
+ */
+enum sdca_usage_range {
+ SDCA_USAGE_NUMBER = 0,
+ SDCA_USAGE_CBN = 1,
+ SDCA_USAGE_SAMPLE_RATE = 2,
+ SDCA_USAGE_SAMPLE_WIDTH = 3,
+ SDCA_USAGE_FULL_SCALE = 4,
+ SDCA_USAGE_NOISE_FLOOR = 5,
+ SDCA_USAGE_TAG = 6,
+ SDCA_USAGE_NCOLS = 7,
+};
+
+/**
* enum sdca_mu_controls - SDCA Controls for Mixer Unit
*
* Control Selectors for Mixer Unit from SDCA specification v1.0
@@ -207,6 +221,16 @@ enum sdca_fu_controls {
};
/**
+ * enum sdca_volume_range - Column definitions for Q7.8dB volumes/gains
+ */
+enum sdca_volume_range {
+ SDCA_VOLUME_LINEAR_MIN = 0,
+ SDCA_VOLUME_LINEAR_MAX = 1,
+ SDCA_VOLUME_LINEAR_STEP = 2,
+ SDCA_VOLUME_LINEAR_NCOLS = 3,
+};
+
+/**
* enum sdca_xu_controls - SDCA Controls for Extension Unit
*
* Control Selectors for Extension Unit from SDCA specification v1.0
@@ -237,6 +261,15 @@ enum sdca_cs_controls {
};
/**
+ * enum sdca_samplerateindex_range - Column definitions for SampleRateIndex
+ */
+enum sdca_samplerateindex_range {
+ SDCA_SAMPLERATEINDEX_INDEX = 0,
+ SDCA_SAMPLERATEINDEX_RATE = 1,
+ SDCA_SAMPLERATEINDEX_NCOLS = 2,
+};
+
+/**
* enum sdca_cx_controls - SDCA Controls for Clock Selector
*
* Control Selectors for Clock Selector from SDCA specification v1.0
@@ -258,6 +291,14 @@ enum sdca_pde_controls {
};
/**
+ * enum sdca_requested_ps_range - Column definitions for Requested PS
+ */
+enum sdca_requested_ps_range {
+ SDCA_REQUESTED_PS_STATE = 0,
+ SDCA_REQUESTED_PS_NCOLS = 1,
+};
+
+/**
* enum sdca_ge_controls - SDCA Controls for Group Unit
*
* Control Selectors for Group Unit from SDCA specification v1.0
@@ -269,6 +310,15 @@ enum sdca_ge_controls {
};
/**
+ * enum sdca_selected_mode_range - Column definitions for Selected Mode
+ */
+enum sdca_selected_mode_range {
+ SDCA_SELECTED_MODE_INDEX = 0,
+ SDCA_SELECTED_MODE_TERM_TYPE = 1,
+ SDCA_SELECTED_MODE_NCOLS = 2,
+};
+
+/**
* enum sdca_spe_controls - SDCA Controls for Security & Privacy Unit
*
* Control Selectors for Security & Privacy Unit from SDCA
@@ -773,6 +823,25 @@ enum sdca_terminal_type {
SDCA_TERM_TYPE_PRIVACY_INDICATORS = 0x747,
};
+#define SDCA_TERM_TYPE_LINEIN_STEREO_NAME "LineIn Stereo"
+#define SDCA_TERM_TYPE_LINEIN_FRONT_LR_NAME "LineIn Front-LR"
+#define SDCA_TERM_TYPE_LINEIN_CENTER_LFE_NAME "LineIn Center-LFE"
+#define SDCA_TERM_TYPE_LINEIN_SURROUND_LR_NAME "LineIn Surround-LR"
+#define SDCA_TERM_TYPE_LINEIN_REAR_LR_NAME "LineIn Rear-LR"
+#define SDCA_TERM_TYPE_LINEOUT_STEREO_NAME "LineOut Stereo"
+#define SDCA_TERM_TYPE_LINEOUT_FRONT_LR_NAME "LineOut Front-LR"
+#define SDCA_TERM_TYPE_LINEOUT_CENTER_LFE_NAME "LineOut Center-LFE"
+#define SDCA_TERM_TYPE_LINEOUT_SURROUND_LR_NAME "LineOut Surround-LR"
+#define SDCA_TERM_TYPE_LINEOUT_REAR_LR_NAME "LineOut Rear-LR"
+#define SDCA_TERM_TYPE_MIC_JACK_NAME "Microphone"
+#define SDCA_TERM_TYPE_STEREO_JACK_NAME "Speaker Stereo"
+#define SDCA_TERM_TYPE_FRONT_LR_JACK_NAME "Speaker Front-LR"
+#define SDCA_TERM_TYPE_CENTER_LFE_JACK_NAME "Speaker Center-LFE"
+#define SDCA_TERM_TYPE_SURROUND_LR_JACK_NAME "Speaker Surround-LR"
+#define SDCA_TERM_TYPE_REAR_LR_JACK_NAME "Speaker Rear-LR"
+#define SDCA_TERM_TYPE_HEADPHONE_JACK_NAME "Headphone"
+#define SDCA_TERM_TYPE_HEADSET_JACK_NAME "Headset"
+
/**
* enum sdca_connector_type - SDCA Connector Types
*
diff --git a/include/sound/snd_wavefront.h b/include/sound/snd_wavefront.h
index 27f7e8a477c2..30f508a56766 100644
--- a/include/sound/snd_wavefront.h
+++ b/include/sound/snd_wavefront.h
@@ -110,12 +110,8 @@ struct _snd_wavefront_card {
};
extern void snd_wavefront_internal_interrupt (snd_wavefront_card_t *card);
-extern int snd_wavefront_detect_irq (snd_wavefront_t *dev) ;
-extern int snd_wavefront_check_irq (snd_wavefront_t *dev, int irq);
-extern int snd_wavefront_restart (snd_wavefront_t *dev);
extern int snd_wavefront_start (snd_wavefront_t *dev);
extern int snd_wavefront_detect (snd_wavefront_card_t *card);
-extern int snd_wavefront_config_midi (snd_wavefront_t *dev) ;
extern int snd_wavefront_cmd (snd_wavefront_t *, int, unsigned char *,
unsigned char *);
diff --git a/include/sound/soc-acpi.h b/include/sound/soc-acpi.h
index 72e371a21767..b8af309c2683 100644
--- a/include/sound/soc-acpi.h
+++ b/include/sound/soc-acpi.h
@@ -10,6 +10,7 @@
#include <linux/acpi.h>
#include <linux/mod_devicetable.h>
#include <linux/soundwire/sdw.h>
+#include <sound/soc.h>
struct snd_soc_acpi_package_context {
char *name; /* package name */
@@ -193,6 +194,15 @@ struct snd_soc_acpi_link_adr {
* is not constant since this field may be updated at run-time
* @sof_tplg_filename: Sound Open Firmware topology file name, if enabled
* @tplg_quirk_mask: quirks to select different topology files dynamically
+ * @get_function_tplg_files: Optional callback; if specified then, instead of
+ * the single sof_tplg_filename, the callback returns the list of function
+ * topology files to be loaded.
+ * Return value: the number of files, or a negative errno. 0 means that the
+ * single topology file should be used and no function topology split can be
+ * used on the machine.
+ * @card: pointer to the card
+ * @mach: pointer to the machine driver
+ * @prefix: prefix of the topology file name; typically the path
+ * @tplg_files: pointer to the array of topology file names
*/
/* Descriptor for SST ASoC machine driver */
struct snd_soc_acpi_mach {
@@ -212,6 +222,9 @@ struct snd_soc_acpi_mach {
struct snd_soc_acpi_mach_params mach_params;
const char *sof_tplg_filename;
const u32 tplg_quirk_mask;
+ int (*get_function_tplg_files)(struct snd_soc_card *card,
+ const struct snd_soc_acpi_mach *mach,
+ const char *prefix, const char ***tplg_files);
};
#define SND_SOC_ACPI_MAX_CODECS 3
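[Editor's note] A hypothetical sketch of wiring the optional callback into a machine table entry; the ACPI ID, topology file names, and the fixed two-file list are placeholders, and a real callback would typically build paths from @prefix:

static int example_get_tplg_files(struct snd_soc_card *card,
				  const struct snd_soc_acpi_mach *mach,
				  const char *prefix,
				  const char ***tplg_files)
{
	static const char *files[2];

	files[0] = "sof-hda-generic-2ch.tplg";	/* placeholder names */
	files[1] = "sof-hda-dmic-2ch.tplg";
	*tplg_files = files;

	return 2;	/* file count; 0 would mean "use sof_tplg_filename" */
}

static struct snd_soc_acpi_mach example_mach = {
	.id = "EXAMPLE1",
	.sof_tplg_filename = "sof-hda-generic.tplg",
	.get_function_tplg_files = example_get_tplg_files,
};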
diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h
index af802ef536e7..400584474bc8 100644
--- a/include/sound/soc-dapm.h
+++ b/include/sound/soc-dapm.h
@@ -445,6 +445,10 @@ int snd_soc_dapm_get_pin_switch(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *uncontrol);
int snd_soc_dapm_put_pin_switch(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *uncontrol);
+int snd_soc_dapm_get_component_pin_switch(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *uncontrol);
+int snd_soc_dapm_put_component_pin_switch(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *uncontrol);
int snd_soc_dapm_new_controls(struct snd_soc_dapm_context *dapm,
const struct snd_soc_dapm_widget *widget, unsigned int num);
struct snd_soc_dapm_widget *snd_soc_dapm_new_control(struct snd_soc_dapm_context *dapm,
diff --git a/include/sound/soc.h b/include/sound/soc.h
index 952ed77b8c87..1fffef311c41 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -394,27 +394,20 @@ struct platform_device;
#define SOC_ENUM_SINGLE_VIRT_DECL(name, xtexts) \
const struct soc_enum name = SOC_ENUM_SINGLE_VIRT(ARRAY_SIZE(xtexts), xtexts)
-struct snd_jack;
struct snd_soc_card;
-struct snd_soc_pcm_stream;
-struct snd_soc_ops;
struct snd_soc_pcm_runtime;
struct snd_soc_dai;
struct snd_soc_dai_driver;
struct snd_soc_dai_link;
struct snd_soc_component;
struct snd_soc_component_driver;
-struct soc_enum;
struct snd_soc_jack;
-struct snd_soc_jack_zone;
struct snd_soc_jack_pin;
#include <sound/soc-dapm.h>
#include <sound/soc-dpcm.h>
#include <sound/soc-topology.h>
-struct snd_soc_jack_gpio;
-
enum snd_soc_pcm_subclass {
SND_SOC_PCM_CLASS_PCM = 0,
SND_SOC_PCM_CLASS_BE = 1,
@@ -423,6 +416,7 @@ enum snd_soc_pcm_subclass {
int snd_soc_register_card(struct snd_soc_card *card);
void snd_soc_unregister_card(struct snd_soc_card *card);
int devm_snd_soc_register_card(struct device *dev, struct snd_soc_card *card);
+int devm_snd_soc_register_deferrable_card(struct device *dev, struct snd_soc_card *card);
#ifdef CONFIG_PM_SLEEP
int snd_soc_suspend(struct device *dev);
int snd_soc_resume(struct device *dev);
@@ -450,7 +444,7 @@ int snd_soc_register_component(struct device *dev,
int devm_snd_soc_register_component(struct device *dev,
const struct snd_soc_component_driver *component_driver,
struct snd_soc_dai_driver *dai_drv, int num_dai);
-void snd_soc_unregister_component(struct device *dev);
+#define snd_soc_unregister_component(dev) snd_soc_unregister_component_by_driver(dev, NULL)
void snd_soc_unregister_component_by_driver(struct device *dev,
const struct snd_soc_component_driver *component_driver);
struct snd_soc_component *snd_soc_lookup_component_nolocked(struct device *dev,
@@ -468,8 +462,6 @@ static inline int snd_soc_new_compress(struct snd_soc_pcm_runtime *rtd)
}
#endif
-void snd_soc_disconnect_sync(struct device *dev);
-
struct snd_soc_pcm_runtime *snd_soc_get_pcm_runtime(struct snd_soc_card *card,
struct snd_soc_dai_link *dai_link);
@@ -935,7 +927,7 @@ snd_soc_link_to_platform(struct snd_soc_dai_link *link, int n) {
extern struct snd_soc_dai_link_component null_dailink_component[0];
extern struct snd_soc_dai_link_component snd_soc_dummy_dlc;
-
+int snd_soc_dlc_is_dummy(struct snd_soc_dai_link_component *dlc);
struct snd_soc_codec_conf {
/*
@@ -1087,6 +1079,7 @@ struct snd_soc_card {
unsigned int fully_routed:1;
unsigned int probed:1;
unsigned int component_chaining:1;
+ struct device *devres_dev;
void *drvdata;
};
diff --git a/include/sound/soc_sdw_utils.h b/include/sound/soc_sdw_utils.h
index d8bd5d37131a..b63021f5afaf 100644
--- a/include/sound/soc_sdw_utils.h
+++ b/include/sound/soc_sdw_utils.h
@@ -159,9 +159,8 @@ void asoc_sdw_init_dai_link(struct device *dev, struct snd_soc_dai_link *dai_lin
int asoc_sdw_init_simple_dai_link(struct device *dev, struct snd_soc_dai_link *dai_links,
int *be_id, char *name, int playback, int capture,
const char *cpu_dai_name, const char *platform_comp_name,
- int num_platforms, const char *codec_name,
- const char *codec_dai_name, int no_pcm,
- int (*init)(struct snd_soc_pcm_runtime *rtd),
+ const char *codec_name, const char *codec_dai_name,
+ int no_pcm, int (*init)(struct snd_soc_pcm_runtime *rtd),
const struct snd_soc_ops *ops);
int asoc_sdw_count_sdw_endpoints(struct snd_soc_card *card, int *num_devs, int *num_ends);
diff --git a/include/sound/sof.h b/include/sound/sof.h
index 64fd5504cb2b..eddea82c7b5a 100644
--- a/include/sound/sof.h
+++ b/include/sound/sof.h
@@ -106,6 +106,7 @@ struct snd_sof_pdata {
const char *fw_filename;
const char *tplg_filename_prefix;
const char *tplg_filename;
+ bool disable_function_topology;
/* loadable external libraries available under this directory */
const char *fw_lib_prefix;
diff --git a/include/sound/tas2781-comlib-i2c.h b/include/sound/tas2781-comlib-i2c.h
new file mode 100644
index 000000000000..a1afa5c444ba
--- /dev/null
+++ b/include/sound/tas2781-comlib-i2c.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+//
+// ALSA SoC Texas Instruments TAS2563/TAS2781 Audio Smart Amplifier
+//
+// Copyright (C) 2025 Texas Instruments Incorporated
+// https://www.ti.com
+//
+// The TAS2563/TAS2781 driver implements a flexible and configurable
+// algo coefficient setting for one, two, or even multiple
+// TAS2563/TAS2781 chips.
+//
+// Author: Shenghao Ding <shenghao-ding@ti.com>
+//
+
+#ifndef __TAS2781_COMLIB_I2C_H__
+#define __TAS2781_COMLIB_I2C_H__
+
+void tasdevice_reset(struct tasdevice_priv *tas_dev);
+int tascodec_init(struct tasdevice_priv *tas_priv, void *codec,
+ struct module *module,
+ void (*cont)(const struct firmware *fw, void *context));
+struct tasdevice_priv *tasdevice_kzalloc(struct i2c_client *i2c);
+int tasdevice_init(struct tasdevice_priv *tas_priv);
+int tasdev_chn_switch(struct tasdevice_priv *tas_priv,
+ unsigned short chn);
+int tasdevice_dev_update_bits(
+ struct tasdevice_priv *tasdevice, unsigned short chn,
+ unsigned int reg, unsigned int mask, unsigned int value);
+int tasdevice_amp_putvol(struct tasdevice_priv *tas_priv,
+ struct snd_ctl_elem_value *ucontrol, struct soc_mixer_control *mc);
+int tasdevice_amp_getvol(struct tasdevice_priv *tas_priv,
+ struct snd_ctl_elem_value *ucontrol, struct soc_mixer_control *mc);
+int tasdevice_digital_getvol(struct tasdevice_priv *tas_priv,
+ struct snd_ctl_elem_value *ucontrol, struct soc_mixer_control *mc);
+int tasdevice_digital_putvol(struct tasdevice_priv *tas_priv,
+ struct snd_ctl_elem_value *ucontrol, struct soc_mixer_control *mc);
+#endif /* __TAS2781_COMLIB_I2C_H__ */
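
As a quick illustration of the intended flow, here is a minimal, hedged sketch of a codec driver consuming this comlib at probe time. The probe function and firmware callback names are invented for illustration; only tascodec_init() and its signature come from the header above.

#include <linux/firmware.h>
#include <linux/module.h>
#include <sound/soc.h>
#include <sound/tas2781.h>
#include <sound/tas2781-comlib-i2c.h>

/* Hypothetical async firmware callback; 'context' is the tasdevice_priv. */
static void example_fw_ready(const struct firmware *fw, void *context)
{
	/* Parse and download the firmware to the amplifier(s) here. */
}

/* Hypothetical component probe wiring up the comlib. */
static int example_codec_probe(struct snd_soc_component *comp)
{
	struct tasdevice_priv *priv = snd_soc_component_get_drvdata(comp);

	/* Registers the firmware request; example_fw_ready() runs async. */
	return tascodec_init(priv, comp, THIS_MODULE, example_fw_ready);
}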
diff --git a/include/sound/tas2781.h b/include/sound/tas2781.h
index eff011444cc8..40cd3bd079b5 100644
--- a/include/sound/tas2781.h
+++ b/include/sound/tas2781.h
@@ -32,6 +32,8 @@
SNDRV_PCM_FMTBIT_S24_LE | \
SNDRV_PCM_FMTBIT_S32_LE)
+#define TASDEVICE_CRC8_POLYNOMIAL 0x4d
+
/* PAGE Control Register (available in page0 of each book) */
#define TASDEVICE_PAGE_SELECT 0x00
#define TASDEVICE_BOOKCTL_PAGE 0x00
@@ -47,7 +49,7 @@
#define TASDEVICE_REG_SWRESET TASDEVICE_REG(0x0, 0x0, 0x01)
#define TASDEVICE_REG_SWRESET_RESET BIT(0)
-/* I2C Checksum */
+/* Checksum */
#define TASDEVICE_CHECKSUM_REG TASDEVICE_REG(0x0, 0x0, 0x7e)
/* XM_340 */
@@ -103,11 +105,6 @@
#define TAS2781_RUNTIME_RE_REG_TF TASDEVICE_REG(0x64, 0x62, 0x48)
#define TAS2781_RUNTIME_RE_REG TASDEVICE_REG(0x64, 0x63, 0x44)
-#define TASDEVICE_CMD_SING_W 0x1
-#define TASDEVICE_CMD_BURST 0x2
-#define TASDEVICE_CMD_DELAY 0x3
-#define TASDEVICE_CMD_FIELD_W 0x4
-
enum audio_device {
TAS2563,
TAS2781,
@@ -119,11 +116,6 @@ enum dspbin_type {
TASDEV_BETA,
};
-enum device_catlog_id {
- LENOVO = 0,
- OTHERS
-};
-
struct bulk_reg_val {
int reg;
unsigned char val[4];
@@ -159,10 +151,33 @@ struct calidata {
unsigned int cali_dat_sz_per_dev;
};
+/*
+ * Enabling CONFIG_SND_SOC_TAS2781_ACOUST_I2C creates a bridge to the
+ * acoustic tuning tool, which can tune the chips' acoustic effects. Since
+ * this directly exposes the device registers, it carries some potential
+ * risk, so the option is deliberately invisible in Kconfig; anyone who
+ * wants to use the acoustic tool has to edit the source manually.
+ */
+#ifdef CONFIG_SND_SOC_TAS2781_ACOUST_I2C
+#define TASDEV_DATA_PAYLOAD_SIZE 128
+struct acoustic_data {
+ unsigned char len;
+ unsigned char id;
+ unsigned char addr;
+ unsigned char book;
+ unsigned char page;
+ unsigned char reg;
+ unsigned char data[TASDEV_DATA_PAYLOAD_SIZE];
+};
+#endif
+
struct tasdevice_priv {
struct tasdevice tasdevice[TASDEVICE_MAX_CHANNELS];
struct tasdevice_rca rcabin;
struct calidata cali_data;
+#ifdef CONFIG_SND_SOC_TAS2781_ACOUST_I2C
+ struct acoustic_data acou_data;
+#endif
struct tasdevice_fw *fmw;
struct gpio_desc *speaker_id;
struct gpio_desc *reset;
@@ -170,7 +185,6 @@ struct tasdevice_priv {
struct regmap *regmap;
struct device *dev;
- enum device_catlog_id catlog_id;
unsigned char cal_binaryname[TASDEVICE_MAX_CHANNELS][64];
unsigned char crc8_lkp_tbl[CRC8_TABLE_SIZE];
unsigned char coef_binaryname[64];
@@ -193,6 +207,7 @@ struct tasdevice_priv {
bool force_fwload_status;
bool playback_started;
bool isacpi;
+ bool isspi;
bool is_user_space_calidata;
unsigned int global_addr;
@@ -210,41 +225,27 @@ struct tasdevice_priv {
int (*tasdevice_load_block)(struct tasdevice_priv *tas_priv,
struct tasdev_blk *block);
- int (*save_calibration)(struct tasdevice_priv *tas_priv);
- void (*apply_calibration)(struct tasdevice_priv *tas_priv);
+ int (*change_chn_book)(struct tasdevice_priv *tas_priv,
+ unsigned short chn, int book);
+ int (*update_bits)(struct tasdevice_priv *tas_priv,
+ unsigned short chn, unsigned int reg, unsigned int mask,
+ unsigned int value);
+ int (*dev_read)(struct tasdevice_priv *tas_priv,
+ unsigned short chn, unsigned int reg, unsigned int *value);
+ int (*dev_bulk_read)(struct tasdevice_priv *tas_priv,
+ unsigned short chn, unsigned int reg, unsigned char *p_data,
+ unsigned int n_length);
};
-void tasdevice_reset(struct tasdevice_priv *tas_dev);
-int tascodec_init(struct tasdevice_priv *tas_priv, void *codec,
- struct module *module,
- void (*cont)(const struct firmware *fw, void *context));
-struct tasdevice_priv *tasdevice_kzalloc(struct i2c_client *i2c);
-int tasdevice_init(struct tasdevice_priv *tas_priv);
-void tasdevice_remove(struct tasdevice_priv *tas_priv);
-int tasdevice_save_calibration(struct tasdevice_priv *tas_priv);
-void tasdevice_apply_calibration(struct tasdevice_priv *tas_priv);
-int tasdev_chn_switch(struct tasdevice_priv *tas_priv,
- unsigned short chn);
int tasdevice_dev_read(struct tasdevice_priv *tas_priv,
unsigned short chn, unsigned int reg, unsigned int *value);
+int tasdevice_dev_bulk_read(struct tasdevice_priv *tas_priv,
+ unsigned short chn, unsigned int reg, unsigned char *p_data,
+ unsigned int n_length);
int tasdevice_dev_write(struct tasdevice_priv *tas_priv,
unsigned short chn, unsigned int reg, unsigned int value);
int tasdevice_dev_bulk_write(
struct tasdevice_priv *tas_priv, unsigned short chn,
unsigned int reg, unsigned char *p_data, unsigned int n_length);
-int tasdevice_dev_bulk_read(struct tasdevice_priv *tas_priv,
- unsigned short chn, unsigned int reg, unsigned char *p_data,
- unsigned int n_length);
-int tasdevice_dev_update_bits(
- struct tasdevice_priv *tasdevice, unsigned short chn,
- unsigned int reg, unsigned int mask, unsigned int value);
-int tasdevice_amp_putvol(struct tasdevice_priv *tas_priv,
- struct snd_ctl_elem_value *ucontrol, struct soc_mixer_control *mc);
-int tasdevice_amp_getvol(struct tasdevice_priv *tas_priv,
- struct snd_ctl_elem_value *ucontrol, struct soc_mixer_control *mc);
-int tasdevice_digital_putvol(struct tasdevice_priv *tas_priv,
- struct snd_ctl_elem_value *ucontrol, struct soc_mixer_control *mc);
-int tasdevice_digital_getvol(struct tasdevice_priv *tas_priv,
- struct snd_ctl_elem_value *ucontrol, struct soc_mixer_control *mc);
-
+void tasdevice_remove(struct tasdevice_priv *tas_priv);
#endif /* __TAS2781_H__ */
diff --git a/include/sound/tpa6130a2-plat.h b/include/sound/tpa6130a2-plat.h
deleted file mode 100644
index a60930e36e93..000000000000
--- a/include/sound/tpa6130a2-plat.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * TPA6130A2 driver platform header
- *
- * Copyright (C) Nokia Corporation
- *
- * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
- */
-
-#ifndef TPA6130A2_PLAT_H
-#define TPA6130A2_PLAT_H
-
-struct tpa6130a2_platform_data {
- int power_gpio;
-};
-
-#endif
diff --git a/include/trace/events/block.h b/include/trace/events/block.h
index bd0ea07338eb..14a924c0e303 100644
--- a/include/trace/events/block.h
+++ b/include/trace/events/block.h
@@ -11,7 +11,7 @@
#include <linux/tracepoint.h>
#include <uapi/linux/ioprio.h>
-#define RWBS_LEN 8
+#define RWBS_LEN 9
#define IOPRIO_CLASS_STRINGS \
{ IOPRIO_CLASS_NONE, "none" }, \
@@ -361,21 +361,6 @@ DECLARE_EVENT_CLASS(block_bio,
);
/**
- * block_bio_bounce - used bounce buffer when processing block operation
- * @bio: block operation
- *
- * A bounce buffer was used to handle the block operation @bio in @q.
- * This occurs when hardware limitations prevent a direct transfer of
- * data between the @bio data memory area and the IO device. Use of a
- * bounce buffer requires extra copying of data and decreases
- * performance.
- */
-DEFINE_EVENT(block_bio, block_bio_bounce,
- TP_PROTO(struct bio *bio),
- TP_ARGS(bio)
-);
-
-/**
* block_bio_backmerge - merging block operation to the end of an existing operation
* @bio: new block operation to merge
*
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
index 3efc00cc1bcd..bebc252db865 100644
--- a/include/trace/events/btrfs.h
+++ b/include/trace/events/btrfs.h
@@ -143,7 +143,6 @@ FLUSH_STATES
#define EXTENT_FLAGS \
{ EXTENT_DIRTY, "DIRTY"}, \
- { EXTENT_UPTODATE, "UPTODATE"}, \
{ EXTENT_LOCKED, "LOCKED"}, \
{ EXTENT_NEW, "NEW"}, \
{ EXTENT_DELALLOC, "DELALLOC"}, \
@@ -224,8 +223,7 @@ DECLARE_EVENT_CLASS(btrfs__inode,
__entry->generation = BTRFS_I(inode)->generation;
__entry->last_trans = BTRFS_I(inode)->last_trans;
__entry->logged_trans = BTRFS_I(inode)->logged_trans;
- __entry->root_objectid =
- BTRFS_I(inode)->root->root_key.objectid;
+ __entry->root_objectid = btrfs_root_id(BTRFS_I(inode)->root);
),
TP_printk_btrfs("root=%llu(%s) gen=%llu ino=%llu blocks=%llu "
@@ -297,7 +295,7 @@ TRACE_EVENT_CONDITION(btrfs_get_extent,
),
TP_fast_assign_btrfs(root->fs_info,
- __entry->root_objectid = root->root_key.objectid;
+ __entry->root_objectid = btrfs_root_id(root);
__entry->ino = btrfs_ino(inode);
__entry->start = map->start;
__entry->len = map->len;
@@ -376,7 +374,7 @@ DECLARE_EVENT_CLASS(btrfs__file_extent_item_regular,
),
TP_fast_assign_btrfs(bi->root->fs_info,
- __entry->root_obj = bi->root->root_key.objectid;
+ __entry->root_obj = btrfs_root_id(bi->root);
__entry->ino = btrfs_ino(bi);
__entry->isize = bi->vfs_inode.i_size;
__entry->disk_isize = bi->disk_i_size;
@@ -427,7 +425,7 @@ DECLARE_EVENT_CLASS(
TP_fast_assign_btrfs(
bi->root->fs_info,
- __entry->root_obj = bi->root->root_key.objectid;
+ __entry->root_obj = btrfs_root_id(bi->root);
__entry->ino = btrfs_ino(bi);
__entry->isize = bi->vfs_inode.i_size;
__entry->disk_isize = bi->disk_i_size;
@@ -527,7 +525,7 @@ DECLARE_EVENT_CLASS(btrfs__ordered_extent,
__entry->flags = ordered->flags;
__entry->compress_type = ordered->compress_type;
__entry->refs = refcount_read(&ordered->refs);
- __entry->root_objectid = inode->root->root_key.objectid;
+ __entry->root_objectid = btrfs_root_id(inode->root);
__entry->truncated_len = ordered->truncated_len;
),
@@ -664,7 +662,7 @@ TRACE_EVENT(btrfs_finish_ordered_extent,
__entry->start = start;
__entry->len = len;
__entry->uptodate = uptodate;
- __entry->root_objectid = inode->root->root_key.objectid;
+ __entry->root_objectid = btrfs_root_id(inode->root);
),
TP_printk_btrfs("root=%llu(%s) ino=%llu start=%llu len=%llu uptodate=%d",
@@ -705,8 +703,7 @@ DECLARE_EVENT_CLASS(btrfs__writepage,
__entry->for_reclaim = wbc->for_reclaim;
__entry->range_cyclic = wbc->range_cyclic;
__entry->writeback_index = inode->i_mapping->writeback_index;
- __entry->root_objectid =
- BTRFS_I(inode)->root->root_key.objectid;
+ __entry->root_objectid = btrfs_root_id(BTRFS_I(inode)->root);
),
TP_printk_btrfs("root=%llu(%s) ino=%llu page_index=%lu "
@@ -750,7 +747,7 @@ TRACE_EVENT(btrfs_writepage_end_io_hook,
__entry->start = start;
__entry->end = end;
__entry->uptodate = uptodate;
- __entry->root_objectid = inode->root->root_key.objectid;
+ __entry->root_objectid = btrfs_root_id(inode->root);
),
TP_printk_btrfs("root=%llu(%s) ino=%llu start=%llu end=%llu uptodate=%d",
@@ -780,8 +777,7 @@ TRACE_EVENT(btrfs_sync_file,
__entry->ino = btrfs_ino(BTRFS_I(inode));
__entry->parent = btrfs_ino(BTRFS_I(d_inode(dentry->d_parent)));
__entry->datasync = datasync;
- __entry->root_objectid =
- BTRFS_I(inode)->root->root_key.objectid;
+ __entry->root_objectid = btrfs_root_id(BTRFS_I(inode)->root);
),
TP_printk_btrfs("root=%llu(%s) ino=%llu parent=%llu datasync=%d",
@@ -1052,7 +1048,7 @@ DECLARE_EVENT_CLASS(btrfs__chunk,
__entry->sub_stripes = map->sub_stripes;
__entry->offset = offset;
__entry->size = size;
- __entry->root_objectid = fs_info->chunk_root->root_key.objectid;
+ __entry->root_objectid = btrfs_root_id(fs_info->chunk_root);
),
TP_printk_btrfs("root=%llu(%s) offset=%llu size=%llu "
@@ -1097,7 +1093,7 @@ TRACE_EVENT(btrfs_cow_block,
),
TP_fast_assign_btrfs(root->fs_info,
- __entry->root_objectid = root->root_key.objectid;
+ __entry->root_objectid = btrfs_root_id(root);
__entry->buf_start = buf->start;
__entry->refs = atomic_read(&buf->refs);
__entry->cow_start = cow->start;
@@ -1240,7 +1236,7 @@ DEFINE_EVENT(btrfs__reserved_extent, btrfs_reserved_extent_free,
TP_ARGS(fs_info, start, len)
);
-TRACE_EVENT(find_free_extent,
+TRACE_EVENT(btrfs_find_free_extent,
TP_PROTO(const struct btrfs_root *root,
const struct find_free_extent_ctl *ffe_ctl),
@@ -1255,7 +1251,7 @@ TRACE_EVENT(find_free_extent,
),
TP_fast_assign_btrfs(root->fs_info,
- __entry->root_objectid = root->root_key.objectid;
+ __entry->root_objectid = btrfs_root_id(root);
__entry->num_bytes = ffe_ctl->num_bytes;
__entry->empty_size = ffe_ctl->empty_size;
__entry->flags = ffe_ctl->flags;
@@ -1268,7 +1264,7 @@ TRACE_EVENT(find_free_extent,
BTRFS_GROUP_FLAGS))
);
-TRACE_EVENT(find_free_extent_search_loop,
+TRACE_EVENT(btrfs_find_free_extent_search_loop,
TP_PROTO(const struct btrfs_root *root,
const struct find_free_extent_ctl *ffe_ctl),
@@ -1284,7 +1280,7 @@ TRACE_EVENT(find_free_extent_search_loop,
),
TP_fast_assign_btrfs(root->fs_info,
- __entry->root_objectid = root->root_key.objectid;
+ __entry->root_objectid = btrfs_root_id(root);
__entry->num_bytes = ffe_ctl->num_bytes;
__entry->empty_size = ffe_ctl->empty_size;
__entry->flags = ffe_ctl->flags;
@@ -1298,7 +1294,7 @@ TRACE_EVENT(find_free_extent_search_loop,
__entry->loop)
);
-TRACE_EVENT(find_free_extent_have_block_group,
+TRACE_EVENT(btrfs_find_free_extent_have_block_group,
TP_PROTO(const struct btrfs_root *root,
const struct find_free_extent_ctl *ffe_ctl,
@@ -1318,7 +1314,7 @@ TRACE_EVENT(find_free_extent_have_block_group,
),
TP_fast_assign_btrfs(root->fs_info,
- __entry->root_objectid = root->root_key.objectid;
+ __entry->root_objectid = btrfs_root_id(root);
__entry->num_bytes = ffe_ctl->num_bytes;
__entry->empty_size = ffe_ctl->empty_size;
__entry->flags = ffe_ctl->flags;
@@ -1480,7 +1476,7 @@ TRACE_EVENT(btrfs_setup_cluster,
);
struct extent_state;
-TRACE_EVENT(alloc_extent_state,
+TRACE_EVENT(btrfs_alloc_extent_state,
TP_PROTO(const struct extent_state *state,
gfp_t mask, unsigned long IP),
@@ -1503,7 +1499,7 @@ TRACE_EVENT(alloc_extent_state,
show_gfp_flags(__entry->mask), __entry->ip)
);
-TRACE_EVENT(free_extent_state,
+TRACE_EVENT(btrfs_free_extent_state,
TP_PROTO(const struct extent_state *state, unsigned long IP),
@@ -1672,8 +1668,7 @@ DECLARE_EVENT_CLASS(btrfs__qgroup_rsv_data,
),
TP_fast_assign_btrfs(btrfs_sb(inode->i_sb),
- __entry->rootid =
- BTRFS_I(inode)->root->root_key.objectid;
+ __entry->rootid = btrfs_root_id(BTRFS_I(inode)->root);
__entry->ino = btrfs_ino(BTRFS_I(inode));
__entry->start = start;
__entry->len = len;
@@ -1744,7 +1739,7 @@ DEFINE_EVENT(btrfs_qgroup_extent, btrfs_qgroup_trace_extent,
TP_ARGS(fs_info, rec, bytenr)
);
-TRACE_EVENT(qgroup_num_dirty_extents,
+TRACE_EVENT(btrfs_qgroup_num_dirty_extents,
TP_PROTO(const struct btrfs_fs_info *fs_info, u64 transid,
u64 num_dirty_extents),
@@ -1798,7 +1793,7 @@ TRACE_EVENT(btrfs_qgroup_account_extent,
__entry->nr_new_roots)
);
-TRACE_EVENT(qgroup_update_counters,
+TRACE_EVENT(btrfs_qgroup_update_counters,
TP_PROTO(const struct btrfs_fs_info *fs_info,
const struct btrfs_qgroup *qgroup,
@@ -1827,7 +1822,7 @@ TRACE_EVENT(qgroup_update_counters,
__entry->cur_old_count, __entry->cur_new_count)
);
-TRACE_EVENT(qgroup_update_reserve,
+TRACE_EVENT(btrfs_qgroup_update_reserve,
TP_PROTO(const struct btrfs_fs_info *fs_info, const struct btrfs_qgroup *qgroup,
s64 diff, int type),
@@ -1853,7 +1848,7 @@ TRACE_EVENT(qgroup_update_reserve,
__entry->cur_reserved, __entry->diff)
);
-TRACE_EVENT(qgroup_meta_reserve,
+TRACE_EVENT(btrfs_qgroup_meta_reserve,
TP_PROTO(const struct btrfs_root *root, s64 diff, int type),
@@ -1866,7 +1861,7 @@ TRACE_EVENT(qgroup_meta_reserve,
),
TP_fast_assign_btrfs(root->fs_info,
- __entry->refroot = root->root_key.objectid;
+ __entry->refroot = btrfs_root_id(root);
__entry->diff = diff;
__entry->type = type;
),
@@ -1876,7 +1871,7 @@ TRACE_EVENT(qgroup_meta_reserve,
__print_symbolic(__entry->type, QGROUP_RSV_TYPES), __entry->diff)
);
-TRACE_EVENT(qgroup_meta_convert,
+TRACE_EVENT(btrfs_qgroup_meta_convert,
TP_PROTO(const struct btrfs_root *root, s64 diff),
@@ -1888,7 +1883,7 @@ TRACE_EVENT(qgroup_meta_convert,
),
TP_fast_assign_btrfs(root->fs_info,
- __entry->refroot = root->root_key.objectid;
+ __entry->refroot = btrfs_root_id(root);
__entry->diff = diff;
),
@@ -1899,7 +1894,7 @@ TRACE_EVENT(qgroup_meta_convert,
__entry->diff)
);
-TRACE_EVENT(qgroup_meta_free_all_pertrans,
+TRACE_EVENT(btrfs_qgroup_meta_free_all_pertrans,
TP_PROTO(struct btrfs_root *root),
@@ -1912,7 +1907,7 @@ TRACE_EVENT(qgroup_meta_free_all_pertrans,
),
TP_fast_assign_btrfs(root->fs_info,
- __entry->refroot = root->root_key.objectid;
+ __entry->refroot = btrfs_root_id(root);
spin_lock(&root->qgroup_meta_rsv_lock);
__entry->diff = -(s64)root->qgroup_meta_rsv_pertrans;
spin_unlock(&root->qgroup_meta_rsv_lock);
@@ -1994,7 +1989,7 @@ TRACE_EVENT(btrfs_inode_mod_outstanding_extents,
),
TP_fast_assign_btrfs(root->fs_info,
- __entry->root_objectid = root->root_key.objectid;
+ __entry->root_objectid = btrfs_root_id(root);
__entry->ino = ino;
__entry->mod = mod;
__entry->outstanding = outstanding;
@@ -2074,12 +2069,12 @@ TRACE_EVENT(btrfs_set_extent_bit,
__field( unsigned, set_bits)
),
- TP_fast_assign_btrfs(extent_io_tree_to_fs_info(tree),
- const struct btrfs_inode *inode = extent_io_tree_to_inode_const(tree);
+ TP_fast_assign_btrfs(btrfs_extent_io_tree_to_fs_info(tree),
+ const struct btrfs_inode *inode = btrfs_extent_io_tree_to_inode(tree);
__entry->owner = tree->owner;
__entry->ino = inode ? btrfs_ino(inode) : 0;
- __entry->rootid = inode ? inode->root->root_key.objectid : 0;
+ __entry->rootid = inode ? btrfs_root_id(inode->root) : 0;
__entry->start = start;
__entry->len = len;
__entry->set_bits = set_bits;
@@ -2107,12 +2102,12 @@ TRACE_EVENT(btrfs_clear_extent_bit,
__field( unsigned, clear_bits)
),
- TP_fast_assign_btrfs(extent_io_tree_to_fs_info(tree),
- const struct btrfs_inode *inode = extent_io_tree_to_inode_const(tree);
+ TP_fast_assign_btrfs(btrfs_extent_io_tree_to_fs_info(tree),
+ const struct btrfs_inode *inode = btrfs_extent_io_tree_to_inode(tree);
__entry->owner = tree->owner;
__entry->ino = inode ? btrfs_ino(inode) : 0;
- __entry->rootid = inode ? inode->root->root_key.objectid : 0;
+ __entry->rootid = inode ? btrfs_root_id(inode->root) : 0;
__entry->start = start;
__entry->len = len;
__entry->clear_bits = clear_bits;
@@ -2141,12 +2136,12 @@ TRACE_EVENT(btrfs_convert_extent_bit,
__field( unsigned, clear_bits)
),
- TP_fast_assign_btrfs(extent_io_tree_to_fs_info(tree),
- const struct btrfs_inode *inode = extent_io_tree_to_inode_const(tree);
+ TP_fast_assign_btrfs(btrfs_extent_io_tree_to_fs_info(tree),
+ const struct btrfs_inode *inode = btrfs_extent_io_tree_to_inode(tree);
__entry->owner = tree->owner;
__entry->ino = inode ? btrfs_ino(inode) : 0;
- __entry->rootid = inode ? inode->root->root_key.objectid : 0;
+ __entry->rootid = inode ? btrfs_root_id(inode->root) : 0;
__entry->start = start;
__entry->len = len;
__entry->set_bits = set_bits;
@@ -2341,11 +2336,7 @@ DEFINE_EVENT(btrfs_locking_events, name, \
DEFINE_BTRFS_LOCK_EVENT(btrfs_tree_unlock);
DEFINE_BTRFS_LOCK_EVENT(btrfs_tree_read_unlock);
-DEFINE_BTRFS_LOCK_EVENT(btrfs_tree_read_unlock_blocking);
-DEFINE_BTRFS_LOCK_EVENT(btrfs_set_lock_blocking_read);
-DEFINE_BTRFS_LOCK_EVENT(btrfs_set_lock_blocking_write);
DEFINE_BTRFS_LOCK_EVENT(btrfs_try_tree_read_lock);
-DEFINE_BTRFS_LOCK_EVENT(btrfs_tree_read_lock_atomic);
DECLARE_EVENT_CLASS(btrfs__space_info_update,
@@ -2621,7 +2612,7 @@ TRACE_EVENT(btrfs_extent_map_shrinker_remove_em,
TP_fast_assign_btrfs(inode->root->fs_info,
__entry->ino = btrfs_ino(inode);
- __entry->root_id = inode->root->root_key.objectid;
+ __entry->root_id = btrfs_root_id(inode->root);
__entry->start = em->start;
__entry->len = em->len;
__entry->flags = em->flags;
diff --git a/include/trace/events/cgroup.h b/include/trace/events/cgroup.h
index af2755bda6eb..7d332387be6c 100644
--- a/include/trace/events/cgroup.h
+++ b/include/trace/events/cgroup.h
@@ -231,7 +231,11 @@ DECLARE_EVENT_CLASS(cgroup_rstat,
__entry->cpu, __entry->contended)
);
-/* Related to global: cgroup_rstat_lock */
+/*
+ * Related to locks:
+ * global rstat_base_lock for base stats
+ * cgroup_subsys::rstat_ss_lock for subsystem stats
+ */
DEFINE_EVENT(cgroup_rstat, cgroup_rstat_lock_contended,
TP_PROTO(struct cgroup *cgrp, int cpu, bool contended),
@@ -253,7 +257,11 @@ DEFINE_EVENT(cgroup_rstat, cgroup_rstat_unlock,
TP_ARGS(cgrp, cpu, contended)
);
-/* Related to per CPU: cgroup_rstat_cpu_lock */
+/*
+ * Related to per CPU locks:
+ * global rstat_base_cpu_lock for base stats
+ * cgroup_subsys::rstat_ss_cpu_lock for subsystem stats
+ */
DEFINE_EVENT(cgroup_rstat, cgroup_rstat_cpu_lock_contended,
TP_PROTO(struct cgroup *cgrp, int cpu, bool contended),
diff --git a/include/trace/events/erofs.h b/include/trace/events/erofs.h
index c69c7b1e41d1..a5f4b9234f46 100644
--- a/include/trace/events/erofs.h
+++ b/include/trace/events/erofs.h
@@ -113,7 +113,7 @@ TRACE_EVENT(erofs_read_folio,
__entry->raw)
);
-TRACE_EVENT(erofs_readpages,
+TRACE_EVENT(erofs_readahead,
TP_PROTO(struct inode *inode, pgoff_t start, unsigned int nrpage,
bool raw),
diff --git a/include/trace/events/exceptions.h b/include/trace/events/exceptions.h
new file mode 100644
index 000000000000..a631f8de8917
--- /dev/null
+++ b/include/trace/events/exceptions.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM exceptions
+
+#if !defined(_TRACE_PAGE_FAULT_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_PAGE_FAULT_H
+
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(exceptions,
+
+ TP_PROTO(unsigned long address, struct pt_regs *regs,
+ unsigned long error_code),
+
+ TP_ARGS(address, regs, error_code),
+
+ TP_STRUCT__entry(
+ __field( unsigned long, address )
+ __field( unsigned long, ip )
+ __field( unsigned long, error_code )
+ ),
+
+ TP_fast_assign(
+ __entry->address = address;
+ __entry->ip = instruction_pointer(regs);
+ __entry->error_code = error_code;
+ ),
+
+ TP_printk("address=%ps ip=%ps error_code=0x%lx",
+ (void *)__entry->address, (void *)__entry->ip,
+ __entry->error_code) );
+
+DEFINE_EVENT(exceptions, page_fault_user,
+ TP_PROTO(unsigned long address, struct pt_regs *regs, unsigned long error_code),
+ TP_ARGS(address, regs, error_code));
+DEFINE_EVENT(exceptions, page_fault_kernel,
+ TP_PROTO(unsigned long address, struct pt_regs *regs, unsigned long error_code),
+ TP_ARGS(address, regs, error_code));
+
+#endif /* _TRACE_PAGE_FAULT_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
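
For context, a hedged sketch of how arch fault-handling code would emit one of these events; this is the standard tracepoint pattern (CREATE_TRACE_POINTS in exactly one compilation unit), with an illustrative handler name.

#include <linux/ptrace.h>

#define CREATE_TRACE_POINTS
#include <trace/events/exceptions.h>

static void example_handle_user_fault(struct pt_regs *regs,
				      unsigned long address,
				      unsigned long error_code)
{
	trace_page_fault_user(address, regs, error_code);
	/* ... actual fault handling follows ... */
}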
diff --git a/include/trace/events/io_uring.h b/include/trace/events/io_uring.h
index fb81c533b310..178ab6f611be 100644
--- a/include/trace/events/io_uring.h
+++ b/include/trace/events/io_uring.h
@@ -645,7 +645,7 @@ TRACE_EVENT(io_uring_short_write,
/*
* io_uring_local_work_run - ran ring local task work
*
- * @tctx: pointer to a io_uring_ctx
+ * @ctx: pointer to an io_ring_ctx
* @count: how many functions it ran
* @loops: how many loops it ran
*
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 8994e97d86c1..3bec9fb73a36 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -326,11 +326,37 @@ DEFINE_EVENT(sched_process_template, sched_process_free,
TP_ARGS(p));
/*
- * Tracepoint for a task exiting:
+ * Tracepoint for a task exiting.
+ * Note: it is a superset of sched_process_template and should be kept
+ * as compatible as possible. sched_process_exit has an extra
+ * `group_dead` argument, so sched_process_template can't be used,
+ * unfortunately, just like sched_migrate_task above.
*/
-DEFINE_EVENT(sched_process_template, sched_process_exit,
- TP_PROTO(struct task_struct *p),
- TP_ARGS(p));
+TRACE_EVENT(sched_process_exit,
+
+ TP_PROTO(struct task_struct *p, bool group_dead),
+
+ TP_ARGS(p, group_dead),
+
+ TP_STRUCT__entry(
+ __array( char, comm, TASK_COMM_LEN )
+ __field( pid_t, pid )
+ __field( int, prio )
+ __field( bool, group_dead )
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
+ __entry->pid = p->pid;
+ __entry->prio = p->prio; /* XXX SCHED_DEADLINE */
+ __entry->group_dead = group_dead;
+ ),
+
+ TP_printk("comm=%s pid=%d prio=%d group_dead=%s",
+ __entry->comm, __entry->pid, __entry->prio,
+ __entry->group_dead ? "true" : "false"
+ )
+);
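
For illustration only, the corresponding call site (in the exit path, not shown in this diff) would now pass the extra flag, e.g.:

#include <trace/events/sched.h>

/* Hypothetical wrapper; the real call site lives in the exit path. */
static inline void example_report_exit(struct task_struct *p, bool group_dead)
{
	trace_sched_process_exit(p, group_dead);
}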
/*
* Tracepoint for waiting on task to unschedule:
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index 25d5c6e90a99..45c4fa13499c 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -54,6 +54,9 @@ extern "C" {
#define DRM_AMDGPU_VM 0x13
#define DRM_AMDGPU_FENCE_TO_HANDLE 0x14
#define DRM_AMDGPU_SCHED 0x15
+#define DRM_AMDGPU_USERQ 0x16
+#define DRM_AMDGPU_USERQ_SIGNAL 0x17
+#define DRM_AMDGPU_USERQ_WAIT 0x18
#define DRM_IOCTL_AMDGPU_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_CREATE, union drm_amdgpu_gem_create)
#define DRM_IOCTL_AMDGPU_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_MMAP, union drm_amdgpu_gem_mmap)
@@ -71,6 +74,9 @@ extern "C" {
#define DRM_IOCTL_AMDGPU_VM DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_VM, union drm_amdgpu_vm)
#define DRM_IOCTL_AMDGPU_FENCE_TO_HANDLE DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_FENCE_TO_HANDLE, union drm_amdgpu_fence_to_handle)
#define DRM_IOCTL_AMDGPU_SCHED DRM_IOW(DRM_COMMAND_BASE + DRM_AMDGPU_SCHED, union drm_amdgpu_sched)
+#define DRM_IOCTL_AMDGPU_USERQ DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_USERQ, union drm_amdgpu_userq)
+#define DRM_IOCTL_AMDGPU_USERQ_SIGNAL DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_USERQ_SIGNAL, struct drm_amdgpu_userq_signal)
+#define DRM_IOCTL_AMDGPU_USERQ_WAIT DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_USERQ_WAIT, struct drm_amdgpu_userq_wait)
/**
* DOC: memory domains
@@ -319,6 +325,260 @@ union drm_amdgpu_ctx {
union drm_amdgpu_ctx_out out;
};
+/* user queue IOCTL operations */
+#define AMDGPU_USERQ_OP_CREATE 1
+#define AMDGPU_USERQ_OP_FREE 2
+
+/* queue priority levels */
+/* low < normal low < normal high < high */
+#define AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_MASK 0x3
+#define AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_SHIFT 0
+#define AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_NORMAL_LOW 0
+#define AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_LOW 1
+#define AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_NORMAL_HIGH 2
+#define AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_HIGH 3 /* admin only */
+/* for queues that need access to protected content */
+#define AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE (1 << 2)
+
+/*
+ * This structure is a container to pass input configuration
+ * info for all supported userqueue related operations.
+ * For operation AMDGPU_USERQ_OP_CREATE: the user is expected
+ * to set all fields, except the parameter 'queue_id'.
+ * For operation AMDGPU_USERQ_OP_FREE: the only input parameter expected
+ * to be set is 'queue_id'; everything else is ignored.
+ */
+struct drm_amdgpu_userq_in {
+ /** AMDGPU_USERQ_OP_* */
+ __u32 op;
+ /** Queue id passed for operation USERQ_OP_FREE */
+ __u32 queue_id;
+ /** the target GPU engine to execute workload (AMDGPU_HW_IP_*) */
+ __u32 ip_type;
+ /**
+ * @doorbell_handle: the handle of doorbell GEM object
+ * associated with this userqueue client.
+ */
+ __u32 doorbell_handle;
+ /**
+ * @doorbell_offset: 32-bit offset of the doorbell in the doorbell bo.
+ * Kernel will generate absolute doorbell offset using doorbell_handle
+ * and doorbell_offset in the doorbell bo.
+ */
+ __u32 doorbell_offset;
+ /**
+ * @flags: flags used for queue parameters
+ */
+ __u32 flags;
+ /**
+ * @queue_va: Virtual address of the GPU memory which holds the queue
+ * object. The queue holds the workload packets.
+ */
+ __u64 queue_va;
+ /**
+ * @queue_size: Size of the queue in bytes; this needs to be 256-byte
+ * aligned.
+ */
+ __u64 queue_size;
+ /**
+ * @rptr_va: Virtual address of the GPU memory which holds the ring RPTR.
+ * This object must be at least 8 bytes in size and aligned to an 8-byte offset.
+ */
+ __u64 rptr_va;
+ /**
+ * @wptr_va: Virtual address of the GPU memory which holds the ring WPTR.
+ * This object must be at least 8 bytes in size and aligned to an 8-byte offset.
+ *
+ * Queue, RPTR and WPTR can come from the same object, as long as the size
+ * and alignment related requirements are met.
+ */
+ __u64 wptr_va;
+ /**
+ * @mqd: MQD (memory queue descriptor) is a set of parameters which allow
+ * the GPU to uniquely define and identify a usermode queue.
+ *
+ * MQD data can be of different sizes for different GPU IPs/engines and
+ * their respective versions/revisions, so this points to a __u64 *
+ * which holds the IP-specific MQD of this usermode queue.
+ */
+ __u64 mqd;
+ /**
+ * @mqd_size: Size of the MQD data in bytes. It must match the MQD structure
+ * size of the respective engine/revision defined in the UAPI, e.g. for
+ * gfx11 workloads, mqd_size = sizeof(struct drm_amdgpu_userq_mqd_gfx11).
+ */
+ __u64 mqd_size;
+};
+
+/* The structure to carry output of userqueue ops */
+struct drm_amdgpu_userq_out {
+ /**
+ * For operation AMDGPU_USERQ_OP_CREATE: This field contains a unique
+ * queue ID to represent the newly created userqueue in the system, otherwise
+ * it should be ignored.
+ */
+ __u32 queue_id;
+ __u32 _pad;
+};
+
+union drm_amdgpu_userq {
+ struct drm_amdgpu_userq_in in;
+ struct drm_amdgpu_userq_out out;
+};
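
A minimal, hedged userspace sketch of the create path through this union, assuming libdrm's drmIoctl() and GPU VAs, the doorbell BO and the MQD prepared elsewhere; the queue size is an arbitrary illustrative value.

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include "amdgpu_drm.h"

static int example_userq_create(int fd, struct drm_amdgpu_userq_mqd_gfx11 *mqd,
				__u32 doorbell_handle, __u64 queue_va,
				__u64 rptr_va, __u64 wptr_va, __u32 *queue_id)
{
	union drm_amdgpu_userq args;
	int r;

	memset(&args, 0, sizeof(args));	/* priority defaults to normal low */
	args.in.op = AMDGPU_USERQ_OP_CREATE;
	args.in.ip_type = AMDGPU_HW_IP_GFX;
	args.in.doorbell_handle = doorbell_handle;
	args.in.queue_va = queue_va;	/* 256-byte aligned queue object */
	args.in.queue_size = 64 * 1024;	/* illustrative size */
	args.in.rptr_va = rptr_va;
	args.in.wptr_va = wptr_va;
	args.in.mqd = (__u64)(uintptr_t)mqd;
	args.in.mqd_size = sizeof(*mqd);

	r = drmIoctl(fd, DRM_IOCTL_AMDGPU_USERQ, &args);
	if (!r)
		*queue_id = args.out.queue_id;
	return r;
}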
+
+/* GFX V11 IP specific MQD parameters */
+struct drm_amdgpu_userq_mqd_gfx11 {
+ /**
+ * @shadow_va: Virtual address of the GPU memory to hold the shadow buffer.
+ * Use AMDGPU_INFO_IOCTL to find the exact size of the object.
+ */
+ __u64 shadow_va;
+ /**
+ * @csa_va: Virtual address of the GPU memory to hold the CSA buffer.
+ * Use AMDGPU_INFO_IOCTL to find the exact size of the object.
+ */
+ __u64 csa_va;
+};
+
+/* GFX V11 SDMA IP specific MQD parameters */
+struct drm_amdgpu_userq_mqd_sdma_gfx11 {
+ /**
+ * @csa_va: Virtual address of the GPU memory to hold the CSA buffer.
+ * This must come from a separate GPU object; use the AMDGPU_INFO IOCTL
+ * to get the size.
+ */
+ __u64 csa_va;
+};
+
+/* GFX V11 Compute IP specific MQD parameters */
+struct drm_amdgpu_userq_mqd_compute_gfx11 {
+ /**
+ * @eop_va: Virtual address of the GPU memory to hold the EOP buffer.
+ * This must come from a separate GPU object; use the AMDGPU_INFO IOCTL
+ * to get the size.
+ */
+ __u64 eop_va;
+};
+
+/* userq signal/wait ioctl */
+struct drm_amdgpu_userq_signal {
+ /**
+ * @queue_id: Queue handle used by the userq fence creation function
+ * to retrieve the WPTR.
+ */
+ __u32 queue_id;
+ __u32 pad;
+ /**
+ * @syncobj_handles: The list of syncobj handles submitted by the user queue
+ * job to be signaled.
+ */
+ __u64 syncobj_handles;
+ /**
+ * @num_syncobj_handles: A count that represents the number of syncobj handles in
+ * @syncobj_handles.
+ */
+ __u64 num_syncobj_handles;
+ /**
+ * @bo_read_handles: The list of BO handles that the submitted user queue job
+ * is using for read only. This will update BO fences in the kernel.
+ */
+ __u64 bo_read_handles;
+ /**
+ * @bo_write_handles: The list of BO handles that the submitted user queue job
+ * is using for write only. This will update BO fences in the kernel.
+ */
+ __u64 bo_write_handles;
+ /**
+ * @num_bo_read_handles: A count that represents the number of read BO handles in
+ * @bo_read_handles.
+ */
+ __u32 num_bo_read_handles;
+ /**
+ * @num_bo_write_handles: A count that represents the number of write BO handles in
+ * @bo_write_handles.
+ */
+ __u32 num_bo_write_handles;
+};
+
+struct drm_amdgpu_userq_fence_info {
+ /**
+ * @va: A gpu address allocated for each queue which stores the
+ * read pointer (RPTR) value.
+ */
+ __u64 va;
+ /**
+ * @value: A 64-bit value representing the write pointer (WPTR) of the
+ * queue commands, which is compared with the RPTR value to signal the
+ * fences.
+ */
+ __u64 value;
+};
+
+struct drm_amdgpu_userq_wait {
+ /**
+ * @waitq_id: Queue handle used by the userq wait IOCTL to retrieve the
+ * wait queue and maintain the fence driver references in it.
+ */
+ __u32 waitq_id;
+ __u32 pad;
+ /**
+ * @syncobj_handles: The list of syncobj handles submitted by the user queue
+ * job to get the va/value pairs.
+ */
+ __u64 syncobj_handles;
+ /**
+ * @syncobj_timeline_handles: The list of timeline syncobj handles submitted by
+ * the user queue job to get the va/value pairs at given @syncobj_timeline_points.
+ */
+ __u64 syncobj_timeline_handles;
+ /**
+ * @syncobj_timeline_points: The list of timeline syncobj points submitted by the
+ * user queue job for the corresponding @syncobj_timeline_handles.
+ */
+ __u64 syncobj_timeline_points;
+ /**
+ * @bo_read_handles: The list of read BO handles submitted by the user queue
+ * job to get the va/value pairs.
+ */
+ __u64 bo_read_handles;
+ /**
+ * @bo_write_handles: The list of write BO handles submitted by the user queue
+ * job to get the va/value pairs.
+ */
+ __u64 bo_write_handles;
+ /**
+ * @num_syncobj_timeline_handles: A count that represents the number of timeline
+ * syncobj handles in @syncobj_timeline_handles.
+ */
+ __u16 num_syncobj_timeline_handles;
+ /**
+ * @num_fences: This field can be used both as input and output. As input it defines
+ * the maximum number of fences that can be returned and as output it will specify
+ * how many fences were actually returned from the ioctl.
+ */
+ __u16 num_fences;
+ /**
+ * @num_syncobj_handles: A count that represents the number of syncobj handles in
+ * @syncobj_handles.
+ */
+ __u32 num_syncobj_handles;
+ /**
+ * @num_bo_read_handles: A count that represents the number of read BO handles in
+ * @bo_read_handles.
+ */
+ __u32 num_bo_read_handles;
+ /**
+ * @num_bo_write_handles: A count that represents the number of write BO handles in
+ * @bo_write_handles.
+ */
+ __u32 num_bo_write_handles;
+ /**
+ * @out_fences: The field is a return value from the ioctl containing the list of
+ * address/value pairs to wait for.
+ */
+ __u64 out_fences;
+};
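
The in/out @num_fences field suggests a query-then-fill pattern; the sketch below assumes (it is not guaranteed by this header alone) that a first pass with num_fences == 0 returns the required count.

#include <errno.h>
#include <stdint.h>
#include <stdlib.h>
#include <xf86drm.h>
#include "amdgpu_drm.h"

static int example_userq_wait(int fd, struct drm_amdgpu_userq_wait *w)
{
	struct drm_amdgpu_userq_fence_info *fences;
	int r;

	w->num_fences = 0;	/* first pass: query the fence count */
	r = drmIoctl(fd, DRM_IOCTL_AMDGPU_USERQ_WAIT, w);
	if (r)
		return r;

	fences = calloc(w->num_fences, sizeof(*fences));
	if (!fences)
		return -ENOMEM;

	w->out_fences = (__u64)(uintptr_t)fences;
	r = drmIoctl(fd, DRM_IOCTL_AMDGPU_USERQ_WAIT, w);	/* second pass */
	/* ... wait until *(__u64 *)va >= value for each returned pair ... */
	free(fences);
	return r;
}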
+
/* vm ioctl */
#define AMDGPU_VM_OP_RESERVE_VMID 1
#define AMDGPU_VM_OP_UNRESERVE_VMID 2
@@ -599,6 +859,19 @@ struct drm_amdgpu_gem_va {
__u64 offset_in_bo;
/** Specify mapping size. Must be correctly aligned. */
__u64 map_size;
+ /**
+ * vm_timeline_point is a sequence number used to add a new timeline point.
+ */
+ __u64 vm_timeline_point;
+ /**
+ * The VM page table update fence is installed in the given
+ * vm_timeline_syncobj_out at vm_timeline_point.
+ */
+ __u32 vm_timeline_syncobj_out;
+ /** the number of syncobj handles in @input_fence_syncobj_handles */
+ __u32 num_syncobj_handles;
+ /** Array of sync object handles to wait on for the given input fences */
+ __u64 input_fence_syncobj_handles;
};
#define AMDGPU_HW_IP_GFX 0
@@ -940,6 +1213,8 @@ struct drm_amdgpu_cs_chunk_cp_gfx_shadow {
#define AMDGPU_INFO_MAX_IBS 0x22
/* query last page fault info */
#define AMDGPU_INFO_GPUVM_FAULT 0x23
+/* query FW object size and alignment */
+#define AMDGPU_INFO_UQ_FW_AREAS 0x24
#define AMDGPU_INFO_MMR_SE_INDEX_SHIFT 0
#define AMDGPU_INFO_MMR_SE_INDEX_MASK 0xff
@@ -1093,6 +1368,7 @@ struct drm_amdgpu_info_vbios {
#define AMDGPU_VRAM_TYPE_DDR5 10
#define AMDGPU_VRAM_TYPE_LPDDR4 11
#define AMDGPU_VRAM_TYPE_LPDDR5 12
+#define AMDGPU_VRAM_TYPE_HBM3E 13
struct drm_amdgpu_info_device {
/** PCI Device ID */
@@ -1198,6 +1474,9 @@ struct drm_amdgpu_info_device {
__u32 csa_size;
/* context save area base virtual alignment for gfx11 */
__u32 csa_alignment;
+ /* Userq IP mask (1 << AMDGPU_HW_IP_*) */
+ __u32 userq_ip_mask;
+ __u32 pad;
};
struct drm_amdgpu_info_hw_ip {
@@ -1216,6 +1495,27 @@ struct drm_amdgpu_info_hw_ip {
__u32 ip_discovery_version;
};
+/* GFX metadata BO sizes and alignment info (in bytes) */
+struct drm_amdgpu_info_uq_fw_areas_gfx {
+ /* shadow area size */
+ __u32 shadow_size;
+ /* shadow area base virtual mem alignment */
+ __u32 shadow_alignment;
+ /* context save area size */
+ __u32 csa_size;
+ /* context save area base virtual mem alignment */
+ __u32 csa_alignment;
+};
+
+/* IP specific fw related information used in the
+ * subquery AMDGPU_INFO_UQ_FW_AREAS
+ */
+struct drm_amdgpu_info_uq_fw_areas {
+ union {
+ struct drm_amdgpu_info_uq_fw_areas_gfx gfx;
+ };
+};
+
struct drm_amdgpu_info_num_handles {
/** Max handles as supported by firmware for UVD */
__u32 uvd_max_handles;
@@ -1279,6 +1579,23 @@ struct drm_amdgpu_info_gpuvm_fault {
__u32 vmhub;
};
+struct drm_amdgpu_info_uq_metadata_gfx {
+ /* shadow area size for gfx11 */
+ __u32 shadow_size;
+ /* shadow area base virtual alignment for gfx11 */
+ __u32 shadow_alignment;
+ /* context save area size for gfx11 */
+ __u32 csa_size;
+ /* context save area base virtual alignment for gfx11 */
+ __u32 csa_alignment;
+};
+
+struct drm_amdgpu_info_uq_metadata {
+ union {
+ struct drm_amdgpu_info_uq_metadata_gfx gfx;
+ };
+};
+
/*
* Supported GPU families
*/
diff --git a/include/uapi/drm/asahi_drm.h b/include/uapi/drm/asahi_drm.h
new file mode 100644
index 000000000000..de67f1c603af
--- /dev/null
+++ b/include/uapi/drm/asahi_drm.h
@@ -0,0 +1,1194 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright (C) The Asahi Linux Contributors
+ * Copyright (C) 2018-2023 Collabora Ltd.
+ * Copyright (C) 2014-2018 Broadcom
+ */
+#ifndef _ASAHI_DRM_H_
+#define _ASAHI_DRM_H_
+
+#include "drm.h"
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/**
+ * DOC: Introduction to the Asahi UAPI
+ *
+ * This documentation describes the Asahi IOCTLs.
+ *
+ * Just a few generic rules about the data passed to the Asahi IOCTLs (cribbed
+ * from Panthor):
+ *
+ * - Structures must be aligned on 64-bit/8-byte. If the object is not
+ * naturally aligned, a padding field must be added.
+ * - Fields must be explicitly aligned to their natural type alignment with
+ * pad[0..N] fields.
+ * - All padding fields will be checked by the driver to make sure they are
+ * zeroed.
+ * - Flags can be added, but not removed/replaced.
+ * - New fields can be added to the main structures (the structures
+ * directly passed to the ioctl). Those fields can be added at the end of
+ * the structure, or replace existing padding fields. Any new field being
+ * added must preserve the behavior that existed before those fields were
+ * added when a value of zero is passed.
+ * - New fields can be added to indirect objects (objects pointed by the
+ * main structure), iff those objects are passed a size to reflect the
+ * size known by the userspace driver (see
+ * drm_asahi_cmd_header::size).
+ * - If the kernel driver is too old to know some fields, those will be
+ * ignored if zero, and otherwise rejected (and so will be zero on output).
+ * - If userspace is too old to know some fields, those will be zeroed
+ * (input) before the structure is parsed by the kernel driver.
+ * - Each new flag/field addition must come with a driver version update so
+ * the userspace driver doesn't have to guess which flags are supported.
+ * - Structures should not contain unions, as this would defeat the
+ * extensibility of such structures.
+ * - IOCTLs can't be removed or replaced. New IOCTL IDs should be placed
+ * at the end of the drm_asahi_ioctl_id enum.
+ */
+
+/**
+ * enum drm_asahi_ioctl_id - IOCTL IDs
+ *
+ * Place new ioctls at the end, don't re-order, don't replace or remove entries.
+ *
+ * These IDs are not meant to be used directly. Use the DRM_IOCTL_ASAHI_xxx
+ * definitions instead.
+ */
+enum drm_asahi_ioctl_id {
+ /** @DRM_ASAHI_GET_PARAMS: Query device properties. */
+ DRM_ASAHI_GET_PARAMS = 0,
+
+ /** @DRM_ASAHI_GET_TIME: Query device time. */
+ DRM_ASAHI_GET_TIME,
+
+ /** @DRM_ASAHI_VM_CREATE: Create a GPU VM address space. */
+ DRM_ASAHI_VM_CREATE,
+
+ /** @DRM_ASAHI_VM_DESTROY: Destroy a VM. */
+ DRM_ASAHI_VM_DESTROY,
+
+ /** @DRM_ASAHI_VM_BIND: Bind/unbind memory to a VM. */
+ DRM_ASAHI_VM_BIND,
+
+ /** @DRM_ASAHI_GEM_CREATE: Create a buffer object. */
+ DRM_ASAHI_GEM_CREATE,
+
+ /**
+ * @DRM_ASAHI_GEM_MMAP_OFFSET: Get offset to pass to mmap() to map a
+ * given GEM handle.
+ */
+ DRM_ASAHI_GEM_MMAP_OFFSET,
+
+ /** @DRM_ASAHI_GEM_BIND_OBJECT: Bind memory as a special object */
+ DRM_ASAHI_GEM_BIND_OBJECT,
+
+ /** @DRM_ASAHI_QUEUE_CREATE: Create a scheduling queue. */
+ DRM_ASAHI_QUEUE_CREATE,
+
+ /** @DRM_ASAHI_QUEUE_DESTROY: Destroy a scheduling queue. */
+ DRM_ASAHI_QUEUE_DESTROY,
+
+ /** @DRM_ASAHI_SUBMIT: Submit commands to a queue. */
+ DRM_ASAHI_SUBMIT,
+};
+
+#define DRM_ASAHI_MAX_CLUSTERS 64
+
+/**
+ * struct drm_asahi_params_global - Global parameters.
+ *
+ * This struct may be queried by drm_asahi_get_params.
+ */
+struct drm_asahi_params_global {
+ /** @features: Feature bits from drm_asahi_feature */
+ __u64 features;
+
+ /** @gpu_generation: GPU generation, e.g. 13 for G13G */
+ __u32 gpu_generation;
+
+ /** @gpu_variant: GPU variant as a character, e.g. 'C' for G13C */
+ __u32 gpu_variant;
+
+ /**
+ * @gpu_revision: GPU revision in BCD, e.g. 0x00 for 'A0' or
+ * 0x21 for 'C1'
+ */
+ __u32 gpu_revision;
+
+ /** @chip_id: Chip ID in BCD, e.g. 0x8103 for T8103 */
+ __u32 chip_id;
+
+ /** @num_dies: Number of dies in the SoC */
+ __u32 num_dies;
+
+ /** @num_clusters_total: Number of GPU clusters (across all dies) */
+ __u32 num_clusters_total;
+
+ /**
+ * @num_cores_per_cluster: Number of logical cores per cluster
+ * (including inactive/nonexistent)
+ */
+ __u32 num_cores_per_cluster;
+
+ /** @max_frequency_khz: Maximum GPU core clock frequency */
+ __u32 max_frequency_khz;
+
+ /** @core_masks: Bitmask of present/enabled cores per cluster */
+ __u64 core_masks[DRM_ASAHI_MAX_CLUSTERS];
+
+ /**
+ * @vm_start: VM range start VMA. Together with @vm_end, this defines
+ * the window of valid GPU VAs. Userspace is expected to subdivide VAs
+ * out of this window.
+ *
+ * This window contains all virtual addresses that userspace needs to
+ * know about. There may be kernel-internal GPU VAs outside this range,
+ * but that detail is not relevant here.
+ */
+ __u64 vm_start;
+
+ /** @vm_end: VM range end VMA */
+ __u64 vm_end;
+
+ /**
+ * @vm_kernel_min_size: Minimum kernel VMA window size.
+ *
+ * When creating a VM, userspace is required to carve out a section of
+ * virtual addresses (within the range given by @vm_start and
+ * @vm_end). The kernel will allocate various internal structures
+ * within the specified VA range.
+ *
+ * Allowing userspace to choose the VA range for the kernel, rather than
+ * the kernel reserving VAs and requiring userspace to cope, can assist
+ * in implementing SVM.
+ */
+ __u64 vm_kernel_min_size;
+
+ /**
+ * @max_commands_per_submission: Maximum number of supported commands
+ * per submission. This mirrors firmware limits. Userspace must split up
+ * larger command buffers, which may require inserting additional
+ * synchronization.
+ */
+ __u32 max_commands_per_submission;
+
+ /**
+ * @max_attachments: Maximum number of drm_asahi_attachment's per
+ * command
+ */
+ __u32 max_attachments;
+
+ /**
+ * @command_timestamp_frequency_hz: Timebase frequency for timestamps
+ * written during command execution, specified via drm_asahi_timestamp
+ * structures. As this rate is controlled by the firmware, it is a
+ * queryable parameter.
+ *
+ * Userspace must divide by this frequency to convert timestamps to
+ * seconds, rather than hardcoding a particular firmware's rate.
+ */
+ __u64 command_timestamp_frequency_hz;
+};
+
+/**
+ * enum drm_asahi_feature - Feature bits
+ *
+ * This covers only features that userspace cannot infer from the architecture
+ * version. Most features don't need to be here.
+ */
+enum drm_asahi_feature {
+ /**
+ * @DRM_ASAHI_FEATURE_SOFT_FAULTS: GPU has "soft fault" enabled. Shader
+ * loads of unmapped memory will return zero. Shader stores to unmapped
+ * memory will be silently discarded. Note that only shader load/store
+ * is affected. Other hardware units are not affected, notably including
+ * texture sampling.
+ *
+ * Soft fault is set when initializing the GPU and cannot be runtime
+ * toggled. Therefore, it is exposed as a feature bit and not a
+ * userspace-settable flag on the VM. When soft fault is enabled,
+ * userspace can speculate memory accesses more aggressively.
+ */
+ DRM_ASAHI_FEATURE_SOFT_FAULTS = (1UL) << 0,
+};
+
+/**
+ * struct drm_asahi_get_params - Arguments passed to DRM_IOCTL_ASAHI_GET_PARAMS
+ */
+struct drm_asahi_get_params {
+ /** @param_group: Parameter group to fetch (MBZ) */
+ __u32 param_group;
+
+ /** @pad: MBZ */
+ __u32 pad;
+
+ /** @pointer: User pointer to write parameter struct */
+ __u64 pointer;
+
+ /**
+ * @size: Size of the user buffer. In case of older userspace, this may
+ * be less than sizeof(struct drm_asahi_params_global). The kernel will
+ * not write past the length specified here, allowing extensibility.
+ */
+ __u64 size;
+};
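
A hedged sketch of the versioned-size handshake this struct enables; DRM_IOCTL_ASAHI_GET_PARAMS is assumed to be defined further down in this header (per the DOC comment above), and drmIoctl() comes from libdrm.

#include <stdint.h>
#include <xf86drm.h>
#include "asahi_drm.h"

static int example_get_params(int fd, struct drm_asahi_params_global *out)
{
	struct drm_asahi_get_params get = {
		.param_group = 0,		/* MBZ */
		.pointer = (__u64)(uintptr_t)out,
		.size = sizeof(*out),	/* kernel writes at most this much */
	};

	return drmIoctl(fd, DRM_IOCTL_ASAHI_GET_PARAMS, &get);
}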
+
+/**
+ * struct drm_asahi_vm_create - Arguments passed to DRM_IOCTL_ASAHI_VM_CREATE
+ */
+struct drm_asahi_vm_create {
+ /**
+ * @kernel_start: Start of the kernel-reserved address range. See
+ * drm_asahi_params_global::vm_kernel_min_size.
+ *
+ * Both @kernel_start and @kernel_end must be within the range of
+ * valid VAs given by drm_asahi_params_global::vm_start and
+ * drm_asahi_params_global::vm_end. The size of the kernel range
+ * (@kernel_end - @kernel_start) must be at least
+ * drm_asahi_params_global::vm_kernel_min_size.
+ *
+ * Userspace must not bind any memory on this VM into this reserved
+ * range, it is for kernel use only.
+ */
+ __u64 kernel_start;
+
+ /**
+ * @kernel_end: End of the kernel-reserved address range. See
+ * @kernel_start.
+ */
+ __u64 kernel_end;
+
+ /** @vm_id: Returned VM ID */
+ __u32 vm_id;
+
+ /** @pad: MBZ */
+ __u32 pad;
+};
+
+/**
+ * struct drm_asahi_vm_destroy - Arguments passed to DRM_IOCTL_ASAHI_VM_DESTROY
+ */
+struct drm_asahi_vm_destroy {
+ /** @vm_id: VM ID to be destroyed */
+ __u32 vm_id;
+
+ /** @pad: MBZ */
+ __u32 pad;
+};
+
+/**
+ * enum drm_asahi_gem_flags - Flags for GEM creation
+ */
+enum drm_asahi_gem_flags {
+ /**
+ * @DRM_ASAHI_GEM_WRITEBACK: BO should be CPU-mapped as writeback.
+ *
+ * Map as writeback instead of write-combine. This optimizes for CPU
+ * reads.
+ */
+ DRM_ASAHI_GEM_WRITEBACK = (1L << 0),
+
+ /**
+ * @DRM_ASAHI_GEM_VM_PRIVATE: BO is private to this GPU VM (no exports).
+ */
+ DRM_ASAHI_GEM_VM_PRIVATE = (1L << 1),
+};
+
+/**
+ * struct drm_asahi_gem_create - Arguments passed to DRM_IOCTL_ASAHI_GEM_CREATE
+ */
+struct drm_asahi_gem_create {
+ /** @size: Size of the BO */
+ __u64 size;
+
+ /** @flags: Combination of drm_asahi_gem_flags flags. */
+ __u32 flags;
+
+ /**
+ * @vm_id: VM ID to assign to the BO, if DRM_ASAHI_GEM_VM_PRIVATE is set
+ */
+ __u32 vm_id;
+
+ /** @handle: Returned GEM handle for the BO */
+ __u32 handle;
+
+ /** @pad: MBZ */
+ __u32 pad;
+};
+
+/**
+ * struct drm_asahi_gem_mmap_offset - Arguments passed to
+ * DRM_IOCTL_ASAHI_GEM_MMAP_OFFSET
+ */
+struct drm_asahi_gem_mmap_offset {
+ /** @handle: Handle for the object being mapped. */
+ __u32 handle;
+
+ /** @flags: Must be zero */
+ __u32 flags;
+
+ /** @offset: The fake offset to use for subsequent mmap call */
+ __u64 offset;
+};
+
+/**
+ * enum drm_asahi_bind_flags - Flags for GEM binding
+ */
+enum drm_asahi_bind_flags {
+ /**
+ * @DRM_ASAHI_BIND_UNBIND: Instead of binding a GEM object to the range,
+ * simply unbind the GPU VMA range.
+ */
+ DRM_ASAHI_BIND_UNBIND = (1L << 0),
+
+ /** @DRM_ASAHI_BIND_READ: Map BO with GPU read permission */
+ DRM_ASAHI_BIND_READ = (1L << 1),
+
+ /** @DRM_ASAHI_BIND_WRITE: Map BO with GPU write permission */
+ DRM_ASAHI_BIND_WRITE = (1L << 2),
+
+ /**
+ * @DRM_ASAHI_BIND_SINGLE_PAGE: Map a single page of the BO repeatedly
+ * across the VA range.
+ *
+ * This is useful to fill a VA range with scratch pages or zero pages.
+ * It is intended as a mechanism to accelerate sparse.
+ */
+ DRM_ASAHI_BIND_SINGLE_PAGE = (1L << 3),
+};
+
+/**
+ * struct drm_asahi_gem_bind_op - Description of a single GEM bind operation.
+ */
+struct drm_asahi_gem_bind_op {
+ /** @flags: Combination of drm_asahi_bind_flags flags. */
+ __u32 flags;
+
+ /** @handle: GEM object to bind (except for UNBIND) */
+ __u32 handle;
+
+ /**
+ * @offset: Offset into the object (except for UNBIND).
+ *
+ * For a regular bind, this is the beginning of the region of the GEM
+ * object to bind.
+ *
+ * For a single-page bind, this is the offset to the single page that
+ * will be repeatedly bound.
+ *
+ * Must be page-size aligned.
+ */
+ __u64 offset;
+
+ /**
+ * @range: Number of bytes to bind/unbind to @addr.
+ *
+ * Must be page-size aligned.
+ */
+ __u64 range;
+
+ /**
+ * @addr: Address to bind to.
+ *
+ * Must be page-size aligned.
+ */
+ __u64 addr;
+};
+
+/**
+ * struct drm_asahi_vm_bind - Arguments passed to
+ * DRM_IOCTL_ASAHI_VM_BIND
+ */
+struct drm_asahi_vm_bind {
+ /** @vm_id: The ID of the VM to bind to */
+ __u32 vm_id;
+
+ /** @num_binds: number of binds in this IOCTL. */
+ __u32 num_binds;
+
+ /**
+ * @stride: Stride in bytes between consecutive binds. This allows
+ * extensibility of drm_asahi_gem_bind_op.
+ */
+ __u32 stride;
+
+ /** @pad: MBZ */
+ __u32 pad;
+
+ /**
+ * @userptr: User pointer to an array of @num_binds structures of type
+ * @drm_asahi_gem_bind_op and size @stride bytes.
+ */
+ __u64 userptr;
+};
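
A hedged sketch of a two-op bind using the @stride contract: userspace reports the op struct size it was compiled against, so the layout can grow later. DRM_IOCTL_ASAHI_VM_BIND is assumed to be defined further down in this header.

#include <stdint.h>
#include <xf86drm.h>
#include "asahi_drm.h"

static int example_vm_bind(int fd, __u32 vm_id,
			   const struct drm_asahi_gem_bind_op ops[2])
{
	struct drm_asahi_vm_bind bind = {
		.vm_id = vm_id,
		.num_binds = 2,
		.stride = sizeof(ops[0]),	/* size known to this userspace */
		.userptr = (__u64)(uintptr_t)ops,
	};

	return drmIoctl(fd, DRM_IOCTL_ASAHI_VM_BIND, &bind);
}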
+
+/**
+ * enum drm_asahi_bind_object_op - Special object bind operation
+ */
+enum drm_asahi_bind_object_op {
+ /** @DRM_ASAHI_BIND_OBJECT_OP_BIND: Bind a BO as a special GPU object */
+ DRM_ASAHI_BIND_OBJECT_OP_BIND = 0,
+
+ /** @DRM_ASAHI_BIND_OBJECT_OP_UNBIND: Unbind a special GPU object */
+ DRM_ASAHI_BIND_OBJECT_OP_UNBIND = 1,
+};
+
+/**
+ * enum drm_asahi_bind_object_flags - Special object bind flags
+ */
+enum drm_asahi_bind_object_flags {
+ /**
+ * @DRM_ASAHI_BIND_OBJECT_USAGE_TIMESTAMPS: Map a BO as a timestamp
+ * buffer.
+ */
+ DRM_ASAHI_BIND_OBJECT_USAGE_TIMESTAMPS = (1L << 0),
+};
+
+/**
+ * struct drm_asahi_gem_bind_object - Arguments passed to
+ * DRM_IOCTL_ASAHI_GEM_BIND_OBJECT
+ */
+struct drm_asahi_gem_bind_object {
+ /** @op: Bind operation (enum drm_asahi_bind_object_op) */
+ __u32 op;
+
+ /** @flags: Combination of drm_asahi_bind_object_flags flags. */
+ __u32 flags;
+
+ /** @handle: GEM object to bind/unbind (BIND) */
+ __u32 handle;
+
+ /** @vm_id: The ID of the VM to operate on (MBZ currently) */
+ __u32 vm_id;
+
+ /** @offset: Offset into the object (BIND only) */
+ __u64 offset;
+
+ /** @range: Number of bytes to bind/unbind (BIND only) */
+ __u64 range;
+
+ /** @object_handle: Object handle (out for BIND, in for UNBIND) */
+ __u32 object_handle;
+
+ /** @pad: MBZ */
+ __u32 pad;
+};
+
+/**
+ * enum drm_asahi_cmd_type - Command type
+ */
+enum drm_asahi_cmd_type {
+ /**
+ * @DRM_ASAHI_CMD_RENDER: Render command, executing on the render
+ * subqueue. Combined vertex and fragment operation.
+ *
+ * Followed by a @drm_asahi_cmd_render payload.
+ */
+ DRM_ASAHI_CMD_RENDER = 0,
+
+ /**
+ * @DRM_ASAHI_CMD_COMPUTE: Compute command on the compute subqueue.
+ *
+ * Followed by a @drm_asahi_cmd_compute payload.
+ */
+ DRM_ASAHI_CMD_COMPUTE = 1,
+
+ /**
+ * @DRM_ASAHI_SET_VERTEX_ATTACHMENTS: Software command to set
+ * attachments for subsequent vertex shaders in the same submit.
+ *
+ * Followed by (possibly multiple) @drm_asahi_attachment payloads.
+ */
+ DRM_ASAHI_SET_VERTEX_ATTACHMENTS = 2,
+
+ /**
+ * @DRM_ASAHI_SET_FRAGMENT_ATTACHMENTS: Software command to set
+ * attachments for subsequent fragment shaders in the same submit.
+ *
+ * Followed by (possibly multiple) @drm_asahi_attachment payloads.
+ */
+ DRM_ASAHI_SET_FRAGMENT_ATTACHMENTS = 3,
+
+ /**
+ * @DRM_ASAHI_SET_COMPUTE_ATTACHMENTS: Software command to set
+ * attachments for subsequent compute shaders in the same submit.
+ *
+ * Followed by (possibly multiple) @drm_asahi_attachment payloads.
+ */
+ DRM_ASAHI_SET_COMPUTE_ATTACHMENTS = 4,
+};
+
+/**
+ * enum drm_asahi_priority - Scheduling queue priority.
+ *
+ * These priorities are forwarded to the firmware to influence firmware
+ * scheduling. The exact policy is ultimately decided by firmware, but
+ * these enums allow userspace to communicate the intentions.
+ */
+enum drm_asahi_priority {
+ /** @DRM_ASAHI_PRIORITY_LOW: Low priority queue. */
+ DRM_ASAHI_PRIORITY_LOW = 0,
+
+ /** @DRM_ASAHI_PRIORITY_MEDIUM: Medium priority queue. */
+ DRM_ASAHI_PRIORITY_MEDIUM = 1,
+
+ /**
+ * @DRM_ASAHI_PRIORITY_HIGH: High priority queue.
+ *
+ * Reserved for future extension.
+ */
+ DRM_ASAHI_PRIORITY_HIGH = 2,
+
+ /**
+ * @DRM_ASAHI_PRIORITY_REALTIME: Real-time priority queue.
+ *
+ * Reserved for future extension.
+ */
+ DRM_ASAHI_PRIORITY_REALTIME = 3,
+};
+
+/**
+ * struct drm_asahi_queue_create - Arguments passed to
+ * DRM_IOCTL_ASAHI_QUEUE_CREATE
+ */
+struct drm_asahi_queue_create {
+ /** @flags: MBZ */
+ __u32 flags;
+
+ /** @vm_id: The ID of the VM this queue is bound to */
+ __u32 vm_id;
+
+ /** @priority: One of drm_asahi_priority */
+ __u32 priority;
+
+ /** @queue_id: The returned queue ID */
+ __u32 queue_id;
+
+ /**
+ * @usc_exec_base: GPU base address for all USC binaries (shaders) on
+ * this queue. USC addresses are 32-bit relative to this 64-bit base.
+ *
+ * This sets the following registers on all queue commands:
+ *
+ * USC_EXEC_BASE_TA (vertex)
+ * USC_EXEC_BASE_ISP (fragment)
+ * USC_EXEC_BASE_CP (compute)
+ *
+ * While the hardware lets us configure these independently per command,
+ * we do not have a use case for this. Instead, we expect userspace to
+ * fix a 4GiB VA carveout for USC memory and pass its base address here.
+ */
+ __u64 usc_exec_base;
+};
+
+/**
+ * struct drm_asahi_queue_destroy - Arguments passed to
+ * DRM_IOCTL_ASAHI_QUEUE_DESTROY
+ */
+struct drm_asahi_queue_destroy {
+ /** @queue_id: The queue ID to be destroyed */
+ __u32 queue_id;
+
+ /** @pad: MBZ */
+ __u32 pad;
+};
+
+/**
+ * enum drm_asahi_sync_type - Sync item type
+ */
+enum drm_asahi_sync_type {
+ /** @DRM_ASAHI_SYNC_SYNCOBJ: Binary sync object */
+ DRM_ASAHI_SYNC_SYNCOBJ = 0,
+
+ /** @DRM_ASAHI_SYNC_TIMELINE_SYNCOBJ: Timeline sync object */
+ DRM_ASAHI_SYNC_TIMELINE_SYNCOBJ = 1,
+};
+
+/**
+ * struct drm_asahi_sync - Sync item
+ */
+struct drm_asahi_sync {
+ /** @sync_type: One of drm_asahi_sync_type */
+ __u32 sync_type;
+
+ /** @handle: The sync object handle */
+ __u32 handle;
+
+ /** @timeline_value: Timeline value for timeline sync objects */
+ __u64 timeline_value;
+};
+
+/**
+ * define DRM_ASAHI_BARRIER_NONE - Command index for no barrier
+ *
+ * This special value may be passed in to drm_asahi_command::vdm_barrier or
+ * drm_asahi_command::cdm_barrier to indicate that the respective subqueue
+ * should not wait on any previous work.
+ */
+#define DRM_ASAHI_BARRIER_NONE (0xFFFFu)
+
+/**
+ * struct drm_asahi_cmd_header - Top level command structure
+ *
+ * This struct is core to the command buffer definition and therefore is not
+ * extensible.
+ */
+struct drm_asahi_cmd_header {
+ /** @cmd_type: One of drm_asahi_cmd_type */
+ __u16 cmd_type;
+
+ /**
+ * @size: Size of this command, not including this header.
+ *
+ * For hardware commands, this enables extensibility of commands without
+ * requiring extra command types. Passing a command that is shorter
+ * than expected is explicitly allowed for backwards-compatibility.
+ * Truncated fields will be zeroed.
+ *
+ * For the synthetic attachment setting commands, this implicitly
+ * encodes the number of attachments. These commands take multiple
+ * fixed-size @drm_asahi_attachment structures as their payload, so size
+ * equals number of attachments * sizeof(struct drm_asahi_attachment).
+ */
+ __u16 size;
+
+ /**
+ * @vdm_barrier: VDM (render) command index to wait on.
+ *
+ * Barriers are indices relative to the beginning of a given submit. A
+ * barrier of 0 waits on commands submitted to the respective subqueue
+ * in previous submit ioctls. A barrier of N waits on N previous
+ * commands on the subqueue within the current submit ioctl. As a
+ * special case, passing @DRM_ASAHI_BARRIER_NONE avoids waiting on any
+ * commands in the subqueue.
+ *
+ * Examples:
+ *
+ * 0: This waits on all previous work.
+ *
+ * NONE: This does not wait for anything on this subqueue.
+ *
+ * 1: This waits on the first render command in the submit.
+ * This is valid only if there are multiple render commands in the
+ * same submit.
+ *
+ * Barriers are valid only for hardware commands. Synthetic software
+ * commands to set attachments must pass NONE here.
+ */
+ __u16 vdm_barrier;
+
+ /**
+ * @cdm_barrier: CDM (compute) command index to wait on.
+ *
+ * See @vdm_barrier, and replace VDM/render with CDM/compute.
+ */
+ __u16 cdm_barrier;
+};
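
To make the barrier semantics concrete, a small hedged sketch initializing a render command header that depends on all earlier render work but on no compute work:

static void example_init_render_header(struct drm_asahi_cmd_header *hdr,
				       __u16 payload_size)
{
	hdr->cmd_type = DRM_ASAHI_CMD_RENDER;
	hdr->size = payload_size;		/* payload only, header excluded */
	hdr->vdm_barrier = 0;			/* wait on all previous render work */
	hdr->cdm_barrier = DRM_ASAHI_BARRIER_NONE;	/* no compute dependency */
}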
+
+/**
+ * struct drm_asahi_submit - Arguments passed to DRM_IOCTL_ASAHI_SUBMIT
+ */
+struct drm_asahi_submit {
+ /**
+ * @syncs: An optional pointer to an array of drm_asahi_sync. The first
+ * @in_sync_count elements are in-syncs, then the remaining
+ * @out_sync_count elements are out-syncs. Using a single array with
+ * explicit partitioning simplifies handling.
+ */
+ __u64 syncs;
+
+ /**
+ * @cmdbuf: Pointer to the command buffer to submit.
+ *
+ * This is a flat command buffer. By design, it contains no CPU
+ * pointers, which makes it suitable for a virtgpu wire protocol without
+ * requiring any serializing/deserializing step.
+ *
+ * It consists of a series of commands. Each command begins with a
+ * fixed-size @drm_asahi_cmd_header header and is followed by a
+ * variable-length payload according to the type and size in the header.
+ *
+ * The combined count of "real" hardware commands must be nonzero and at
+ * most drm_asahi_params_global::max_commands_per_submission.
+ */
+ __u64 cmdbuf;
+
+ /** @flags: Flags for command submission (MBZ) */
+ __u32 flags;
+
+ /** @queue_id: The queue ID to be submitted to */
+ __u32 queue_id;
+
+ /**
+ * @in_sync_count: Number of sync objects to wait on before starting
+ * this job.
+ */
+ __u32 in_sync_count;
+
+ /**
+ * @out_sync_count: Number of sync objects to signal upon completion of
+ * this job.
+ */
+ __u32 out_sync_count;
+
+ /** @cmdbuf_size: Command buffer size in bytes */
+ __u32 cmdbuf_size;
+
+ /** @pad: MBZ */
+ __u32 pad;
+};
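+
+/*
+ * A minimal submission sketch: one in-sync, one out-sync, and a caller-built
+ * flat command buffer. The fd, queue_id, sync handles, cmds pointer and
+ * cmds_size are assumed to already exist:
+ *
+ *	struct drm_asahi_sync syncs[2] = {
+ *		{ .sync_type = DRM_ASAHI_SYNC_SYNCOBJ, .handle = in_handle },
+ *		{ .sync_type = DRM_ASAHI_SYNC_SYNCOBJ, .handle = out_handle },
+ *	};
+ *	struct drm_asahi_submit submit = {
+ *		.syncs = (__u64)(uintptr_t)syncs,
+ *		.cmdbuf = (__u64)(uintptr_t)cmds,
+ *		.cmdbuf_size = cmds_size,
+ *		.queue_id = queue_id,
+ *		.in_sync_count = 1,
+ *		.out_sync_count = 1,
+ *	};
+ *
+ *	drmIoctl(fd, DRM_IOCTL_ASAHI_SUBMIT, &submit);
+ */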
+
+/**
+ * struct drm_asahi_attachment - Describe an "attachment".
+ *
+ * Attachments are any memory written by shaders, notably including render
+ * target attachments written by the end-of-tile program. This is purely a hint
+ * about the accessed memory regions. It is optional to specify, which is
+ * fortunate as it cannot be specified precisely with bindless access anyway.
+ * Where possible, it's probably a good idea for userspace to include these
+ * hints, which are forwarded to the firmware.
+ *
+ * This struct is implicitly sized and therefore is not extensible.
+ */
+struct drm_asahi_attachment {
+ /** @pointer: Base address of the attachment */
+ __u64 pointer;
+
+ /** @size: Size of the attachment in bytes */
+ __u64 size;
+
+ /** @pad: MBZ */
+ __u32 pad;
+
+ /** @flags: MBZ */
+ __u32 flags;
+};
+
+enum drm_asahi_render_flags {
+ /**
+ * @DRM_ASAHI_RENDER_VERTEX_SCRATCH: A vertex stage shader uses scratch
+ * memory.
+ */
+ DRM_ASAHI_RENDER_VERTEX_SCRATCH = (1U << 0),
+
+ /**
+ * @DRM_ASAHI_RENDER_PROCESS_EMPTY_TILES: Process even empty tiles.
+ * This must be set when clearing render targets.
+ */
+ DRM_ASAHI_RENDER_PROCESS_EMPTY_TILES = (1U << 1),
+
+ /**
+ * @DRM_ASAHI_RENDER_NO_VERTEX_CLUSTERING: Run vertex stage on a single
+ * cluster (on multi-cluster GPUs)
+ *
+ * This harms performance but can workaround certain sync/coherency
+ * bugs, and therefore is useful for debugging.
+ */
+ DRM_ASAHI_RENDER_NO_VERTEX_CLUSTERING = (1U << 2),
+
+ /**
+ * @DRM_ASAHI_RENDER_DBIAS_IS_INT: Use integer depth bias formula.
+ *
+ * Graphics specifications contain two alternate formulas for depth
+ * bias, a float formula used with floating-point depth buffers and an
+	 * integer formula used with unorm depth buffers. This flag specifies
+ * that the integer formula should be used. If omitted, the float
+ * formula is used instead.
+ *
+ * This corresponds to bit 18 of the relevant hardware control register,
+ * so we match that here for efficiency.
+ */
+ DRM_ASAHI_RENDER_DBIAS_IS_INT = (1U << 18),
+};
+
+/**
+ * struct drm_asahi_zls_buffer - Describe a depth or stencil buffer.
+ *
+ * These fields correspond to hardware registers in the ZLS (Z Load/Store) unit.
+ * There are three hardware registers for each field respectively for loads,
+ * stores, and partial renders. In practice, it makes sense to set all three to
+ * the same values, except in rare cases not yet implemented in userspace, so
+ * we do not duplicate them here, for simplicity/efficiency.
+ *
+ * This struct is embedded in other structs and therefore is not extensible.
+ */
+struct drm_asahi_zls_buffer {
+ /** @base: Base address of the buffer */
+ __u64 base;
+
+ /**
+ * @comp_base: If the load buffer is compressed, address of the
+ * compression metadata section.
+ */
+ __u64 comp_base;
+
+ /**
+ * @stride: If layered rendering is enabled, the number of bytes
+ * between each layer of the buffer.
+ */
+ __u32 stride;
+
+ /**
+ * @comp_stride: If layered rendering is enabled, the number of bytes
+ * between each layer of the compression metadata.
+ */
+ __u32 comp_stride;
+};
+
+/**
+ * struct drm_asahi_timestamp - Describe a timestamp write.
+ *
+ * The firmware can optionally write the GPU timestamp at render pass
+ * granularities, but it needs to be mapped specially via
+ * DRM_IOCTL_ASAHI_GEM_BIND_OBJECT. This structure therefore describes where to
+ * write as a handle-offset pair, rather than a GPU address like normal.
+ *
+ * This struct is embedded in other structs and therefore is not extensible.
+ */
+struct drm_asahi_timestamp {
+ /**
+ * @handle: Handle of the timestamp buffer, or 0 to skip this
+ * timestamp. If nonzero, this must equal the value returned in
+ * drm_asahi_gem_bind_object::object_handle.
+ */
+ __u32 handle;
+
+ /** @offset: Offset to write into the timestamp buffer */
+ __u32 offset;
+};
+
+/**
+ * struct drm_asahi_timestamps - Describe timestamp writes.
+ *
+ * Each operation that can be timestamped can be timestamped at both its start
+ * and its end. Therefore, drm_asahi_timestamp structs always come in pairs, bundled
+ * together into drm_asahi_timestamps.
+ *
+ * This struct is embedded in other structs and therefore is not extensible.
+ */
+struct drm_asahi_timestamps {
+ /** @start: Timestamp recorded at the start of the operation */
+ struct drm_asahi_timestamp start;
+
+ /** @end: Timestamp recorded at the end of the operation */
+ struct drm_asahi_timestamp end;
+};
+
+/**
+ * struct drm_asahi_helper_program - Describe helper program configuration.
+ *
+ * The helper program is a compute-like kernel required for various hardware
+ * functionality. Its most important role is dynamically allocating
+ * scratch/stack memory for individual subgroups, by partitioning a static
+ * allocation shared for the whole device. It is supplied by userspace via
+ * drm_asahi_helper_program and internally dispatched by the hardware as needed.
+ *
+ * This struct is embedded in other structs and therefore is not extensible.
+ */
+struct drm_asahi_helper_program {
+ /**
+ * @binary: USC address to the helper program binary. This is a tagged
+ * pointer with configuration in the bottom bits.
+ */
+ __u32 binary;
+
+ /** @cfg: Additional configuration bits for the helper program. */
+ __u32 cfg;
+
+ /**
+ * @data: Data passed to the helper program. This value is not
+ * interpreted by the kernel, firmware, or hardware in any way. It is
+ * simply a sideband for userspace, set with the submit ioctl and read
+ * via special registers inside the helper program.
+ *
+ * In practice, userspace will pass a 64-bit GPU VA here pointing to the
+	 * actual arguments, which presumably don't fit in 64 bits.
+ */
+ __u64 data;
+};
+
+/**
+ * struct drm_asahi_bg_eot - Describe a background or end-of-tile program.
+ *
+ * The background and end-of-tile programs are dispatched by the hardware at the
+ * beginning and end of rendering. As the hardware "tilebuffer" is simply local
+ * memory, these programs are necessary to implement API-level render targets.
+ * The fragment-like background program is responsible for loading either the
+ * clear colour or the existing render target contents, while the compute-like
+ * end-of-tile program stores the tilebuffer contents to memory.
+ *
+ * This struct is embedded in other structs and therefore is not extensible.
+ */
+struct drm_asahi_bg_eot {
+ /**
+ * @usc: USC address of the hardware USC words binding resources
+ * (including images and uniforms) and the program itself. Note this is
+ * an additional layer of indirection compared to the helper program,
+ * avoiding the need for a sideband for data. This is a tagged pointer
+ * with additional configuration in the bottom bits.
+ */
+ __u32 usc;
+
+ /**
+ * @rsrc_spec: Resource specifier for the program. This is a packed
+ * hardware data structure describing the required number of registers,
+ * uniforms, bound textures, and bound samplers.
+ */
+ __u32 rsrc_spec;
+};
+
+/**
+ * struct drm_asahi_cmd_render - Command to submit 3D
+ *
+ * This command submits a single render pass. The hardware control stream may
+ * include many draws and subpasses, but within the command, the framebuffer
+ * dimensions and attachments are fixed.
+ *
+ * The hardware requires the firmware to set a large number of Control Registers,
+ * setting up state at render pass granularity, before each command rendering 3D.
+ * The firmware bundles this state into data structures. Unfortunately, we
+ * cannot expose any of that directly to userspace, because the
+ * kernel-firmware ABI is not stable. Although we can guarantee that the firmware
+ * updates in tandem with the kernel, we cannot break old userspace when
+ * upgrading the firmware and kernel. Therefore, we need to abstract the
+ * data structures well to avoid tying our hands with future firmware versions.
+ *
+ * The bulk of drm_asahi_cmd_render therefore consists of values of hardware
+ * control registers, marshalled via the firmware interface.
+ *
+ * The framebuffer/tilebuffer dimensions are also specified here. In addition to
+ * being passed to the firmware/hardware, the kernel requires these dimensions
+ * to calculate various essential tiling-related data structures. It is
+ * unfortunate that our submits are heavier than on vendors with saner
+ * hardware-software interfaces. The upshot is all of this information is
+ * readily available to userspace with all current APIs.
+ *
+ * It looks odd, but it's not overly burdensome, and it ensures we can remain
+ * compatible with old userspace.
+ */
+struct drm_asahi_cmd_render {
+ /** @flags: Combination of drm_asahi_render_flags flags. */
+ __u32 flags;
+
+ /**
+ * @isp_zls_pixels: ISP_ZLS_PIXELS register value. This contains the
+ * depth/stencil width/height, which may differ from the framebuffer
+ * width/height.
+ */
+ __u32 isp_zls_pixels;
+
+ /**
+ * @vdm_ctrl_stream_base: VDM_CTRL_STREAM_BASE register value. GPU
+ * address to the beginning of the VDM control stream.
+ */
+ __u64 vdm_ctrl_stream_base;
+
+ /** @vertex_helper: Helper program used for the vertex shader */
+ struct drm_asahi_helper_program vertex_helper;
+
+ /** @fragment_helper: Helper program used for the fragment shader */
+ struct drm_asahi_helper_program fragment_helper;
+
+ /**
+ * @isp_scissor_base: ISP_SCISSOR_BASE register value. GPU address of an
+ * array of scissor descriptors indexed in the render pass.
+ */
+ __u64 isp_scissor_base;
+
+ /**
+ * @isp_dbias_base: ISP_DBIAS_BASE register value. GPU address of an
+ * array of depth bias values indexed in the render pass.
+ */
+ __u64 isp_dbias_base;
+
+ /**
+ * @isp_oclqry_base: ISP_OCLQRY_BASE register value. GPU address of an
+ * array of occlusion query results written by the render pass.
+ */
+ __u64 isp_oclqry_base;
+
+ /** @depth: Depth buffer */
+ struct drm_asahi_zls_buffer depth;
+
+ /** @stencil: Stencil buffer */
+ struct drm_asahi_zls_buffer stencil;
+
+ /** @zls_ctrl: ZLS_CTRL register value */
+ __u64 zls_ctrl;
+
+ /** @ppp_multisamplectl: PPP_MULTISAMPLECTL register value */
+ __u64 ppp_multisamplectl;
+
+ /**
+ * @sampler_heap: Base address of the sampler heap. This heap is used
+ * for both vertex shaders and fragment shaders. The registers are
+ * per-stage, but there is no known use case for separate heaps.
+ */
+ __u64 sampler_heap;
+
+ /** @ppp_ctrl: PPP_CTRL register value */
+ __u32 ppp_ctrl;
+
+ /** @width_px: Framebuffer width in pixels */
+ __u16 width_px;
+
+ /** @height_px: Framebuffer height in pixels */
+ __u16 height_px;
+
+ /** @layers: Number of layers in the framebuffer */
+ __u16 layers;
+
+ /** @sampler_count: Number of samplers in the sampler heap. */
+ __u16 sampler_count;
+
+ /** @utile_width_px: Width of a logical tilebuffer tile in pixels */
+ __u8 utile_width_px;
+
+ /** @utile_height_px: Height of a logical tilebuffer tile in pixels */
+ __u8 utile_height_px;
+
+ /** @samples: # of samples in the framebuffer. Must be 1, 2, or 4. */
+ __u8 samples;
+
+ /** @sample_size_B: # of bytes in the tilebuffer required per sample. */
+ __u8 sample_size_B;
+
+ /**
+ * @isp_merge_upper_x: 32-bit float used in the hardware triangle
+ * merging. Calculate as: tan(60 deg) * width.
+ *
+ * Making these values UAPI avoids requiring floating-point calculations
+ * in the kernel in the hot path.
+ */
+ __u32 isp_merge_upper_x;
+
+ /**
+ * @isp_merge_upper_y: 32-bit float. Calculate as: tan(60 deg) * height.
+ * See @isp_merge_upper_x.
+ */
+ __u32 isp_merge_upper_y;
+
+ /** @bg: Background program run for each tile at the start */
+ struct drm_asahi_bg_eot bg;
+
+	/** @eot: End-of-tile program run for each tile at the end */
+ struct drm_asahi_bg_eot eot;
+
+ /**
+	 * @partial_bg: Background program run at the start of each tile when
+ * resuming the render pass during a partial render.
+ */
+ struct drm_asahi_bg_eot partial_bg;
+
+ /**
+	 * @partial_eot: End-of-tile program run at the end of each tile when
+ * pausing the render pass during a partial render.
+ */
+ struct drm_asahi_bg_eot partial_eot;
+
+ /**
+ * @isp_bgobjdepth: ISP_BGOBJDEPTH register value. This is the depth
+ * buffer clear value, encoded in the depth buffer's format: either a
+ * 32-bit float or a 16-bit unorm (with upper bits zeroed).
+ */
+ __u32 isp_bgobjdepth;
+
+ /**
+ * @isp_bgobjvals: ISP_BGOBJVALS register value. The bottom 8-bits
+ * contain the stencil buffer clear value.
+ */
+ __u32 isp_bgobjvals;
+
+ /** @ts_vtx: Timestamps for the vertex portion of the render */
+ struct drm_asahi_timestamps ts_vtx;
+
+ /** @ts_frag: Timestamps for the fragment portion of the render */
+ struct drm_asahi_timestamps ts_frag;
+};
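+
+/*
+ * For instance, userspace might fill the merge registers as follows (a sketch;
+ * fui() stands in for any float-to-bits helper):
+ *
+ *	cmd.isp_merge_upper_x = fui(tanf(60.0f * M_PI / 180.0f) * width_px);
+ *	cmd.isp_merge_upper_y = fui(tanf(60.0f * M_PI / 180.0f) * height_px);
+ */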
+
+/**
+ * struct drm_asahi_cmd_compute - Command to submit compute
+ *
+ * This command submits a control stream consisting of compute dispatches. There
+ * is essentially no limit on how many compute dispatches may be included in a
+ * single compute command, although timestamps are at command granularity.
+ */
+struct drm_asahi_cmd_compute {
+ /** @flags: MBZ */
+ __u32 flags;
+
+ /** @sampler_count: Number of samplers in the sampler heap. */
+ __u32 sampler_count;
+
+ /**
+ * @cdm_ctrl_stream_base: CDM_CTRL_STREAM_BASE register value. GPU
+ * address to the beginning of the CDM control stream.
+ */
+ __u64 cdm_ctrl_stream_base;
+
+ /**
+	 * @cdm_ctrl_stream_end: GPU address of the end of the hardware
+ * control stream. Note this only considers the first contiguous segment
+ * of the control stream, as the stream might jump elsewhere.
+ */
+ __u64 cdm_ctrl_stream_end;
+
+ /** @sampler_heap: Base address of the sampler heap. */
+ __u64 sampler_heap;
+
+ /** @helper: Helper program used for this compute command */
+ struct drm_asahi_helper_program helper;
+
+ /** @ts: Timestamps for the compute command */
+ struct drm_asahi_timestamps ts;
+};
+
+/**
+ * struct drm_asahi_get_time - Arguments passed to DRM_IOCTL_ASAHI_GET_TIME
+ */
+struct drm_asahi_get_time {
+ /** @flags: MBZ. */
+ __u64 flags;
+
+ /** @gpu_timestamp: On return, the GPU timestamp in nanoseconds. */
+ __u64 gpu_timestamp;
+};
+
+/**
+ * DRM_IOCTL_ASAHI() - Build an Asahi IOCTL number
+ * @__access: Access type. Must be R, W or WR.
+ * @__id: One of the DRM_ASAHI_xxx id.
+ * @__type: Suffix of the type being passed to the IOCTL.
+ *
+ * Don't use this macro directly, use the DRM_IOCTL_ASAHI_xxx
+ * values instead.
+ *
+ * Return: An IOCTL number to be passed to ioctl() from userspace.
+ */
+#define DRM_IOCTL_ASAHI(__access, __id, __type) \
+ DRM_IO ## __access(DRM_COMMAND_BASE + DRM_ASAHI_ ## __id, \
+ struct drm_asahi_ ## __type)
+
+/* Note: this is an enum so that it can be resolved by Rust bindgen. */
+enum {
+ DRM_IOCTL_ASAHI_GET_PARAMS = DRM_IOCTL_ASAHI(W, GET_PARAMS, get_params),
+ DRM_IOCTL_ASAHI_GET_TIME = DRM_IOCTL_ASAHI(WR, GET_TIME, get_time),
+ DRM_IOCTL_ASAHI_VM_CREATE = DRM_IOCTL_ASAHI(WR, VM_CREATE, vm_create),
+ DRM_IOCTL_ASAHI_VM_DESTROY = DRM_IOCTL_ASAHI(W, VM_DESTROY, vm_destroy),
+ DRM_IOCTL_ASAHI_VM_BIND = DRM_IOCTL_ASAHI(W, VM_BIND, vm_bind),
+ DRM_IOCTL_ASAHI_GEM_CREATE = DRM_IOCTL_ASAHI(WR, GEM_CREATE, gem_create),
+ DRM_IOCTL_ASAHI_GEM_MMAP_OFFSET = DRM_IOCTL_ASAHI(WR, GEM_MMAP_OFFSET, gem_mmap_offset),
+ DRM_IOCTL_ASAHI_GEM_BIND_OBJECT = DRM_IOCTL_ASAHI(WR, GEM_BIND_OBJECT, gem_bind_object),
+ DRM_IOCTL_ASAHI_QUEUE_CREATE = DRM_IOCTL_ASAHI(WR, QUEUE_CREATE, queue_create),
+ DRM_IOCTL_ASAHI_QUEUE_DESTROY = DRM_IOCTL_ASAHI(W, QUEUE_DESTROY, queue_destroy),
+ DRM_IOCTL_ASAHI_SUBMIT = DRM_IOCTL_ASAHI(W, SUBMIT, submit),
+};
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* _ASAHI_DRM_H_ */
diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h
index 7fba37b94401..e63a71d3c607 100644
--- a/include/uapi/drm/drm.h
+++ b/include/uapi/drm/drm.h
@@ -905,13 +905,17 @@ struct drm_syncobj_destroy {
};
#define DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE (1 << 0)
+#define DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_TIMELINE (1 << 1)
#define DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE (1 << 0)
+#define DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_TIMELINE (1 << 1)
struct drm_syncobj_handle {
__u32 handle;
__u32 flags;
__s32 fd;
__u32 pad;
+
+ __u64 point;
};
struct drm_syncobj_transfer {
diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h
index e41a3cec6a9e..81202a50dc9e 100644
--- a/include/uapi/drm/drm_fourcc.h
+++ b/include/uapi/drm/drm_fourcc.h
@@ -422,6 +422,7 @@ extern "C" {
#define DRM_FORMAT_MOD_VENDOR_ALLWINNER 0x09
#define DRM_FORMAT_MOD_VENDOR_AMLOGIC 0x0a
#define DRM_FORMAT_MOD_VENDOR_MTK 0x0b
+#define DRM_FORMAT_MOD_VENDOR_APPLE 0x0c
/* add more to the end as needed */
@@ -1495,6 +1496,50 @@ drm_fourcc_canonicalize_nvidia_format_mod(__u64 modifier)
#define DRM_FORMAT_MOD_MTK_16L_32S_TILE DRM_FORMAT_MOD_MTK(MTK_FMT_MOD_TILE_16L32S)
/*
+ * Apple GPU-tiled layouts.
+ *
+ * Apple GPUs support nonlinear tilings with optional lossless compression.
+ *
+ * GPU-tiled images are divided into 16KiB tiles:
+ *
+ * Bytes per pixel Tile size
+ * --------------- ---------
+ * 1 128x128
+ * 2 128x64
+ * 4 64x64
+ * 8 64x32
+ * 16 32x32
+ *
+ * Tiles are stored in raster order. Pixels within a tile are interleaved
+ * (Morton order).
+ *
+ * Compressed images pad the body to 128 bytes and are immediately followed by a
+ * metadata section. The metadata section rounds the image dimensions to
+ * powers-of-two and contains 8 bytes for each 16x16 compression subtile.
+ * Subtiles are interleaved (Morton order).
+ *
+ * All images are 128-byte aligned.
+ *
+ * These layouts fundamentally do not have meaningful strides. No matter how we
+ * specify strides for these layouts, userspace unaware of Apple image layouts
+ * will be unable to correctly use the specified stride for any purpose.
+ * Userspace aware of the image layouts does not use strides. The most "correct"
+ * convention would be setting the image stride to 0. Unfortunately, some
+ * software assumes the stride is at least (width * bytes per pixel). We
+ * therefore require that stride equals (width * bytes per pixel). Since the
+ * stride is arbitrary here, we pick the simplest convention.
+ *
+ * Although containing two sections, compressed image layouts are treated in
+ * software as a single plane. This is modelled after AFBC, a similar
+ * scheme. Attempting to separate the sections to be "explicit" in DRM would
+ * only generate more confusion, as software does not treat the image this way.
+ *
+ * For detailed information on the hardware image layouts, see
+ * https://docs.mesa3d.org/drivers/asahi.html#image-layouts
+ */
+#define DRM_FORMAT_MOD_APPLE_GPU_TILED fourcc_mod_code(APPLE, 1)
+#define DRM_FORMAT_MOD_APPLE_GPU_TILED_COMPRESSED fourcc_mod_code(APPLE, 2)
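+
+/*
+ * For example, a 1920x1080 DRM_FORMAT_ARGB8888 framebuffer (4 bytes per pixel,
+ * 64x64 tiles) advertising DRM_FORMAT_MOD_APPLE_GPU_TILED would set the
+ * nominal stride required by the convention above:
+ *
+ *	uint32_t stride = 1920 * 4;
+ */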
+
+/*
* AMD modifiers
*
* Memory layout:
diff --git a/include/uapi/drm/nova_drm.h b/include/uapi/drm/nova_drm.h
new file mode 100644
index 000000000000..3ca90ed9d2bb
--- /dev/null
+++ b/include/uapi/drm/nova_drm.h
@@ -0,0 +1,101 @@
+/* SPDX-License-Identifier: MIT */
+
+#ifndef __NOVA_DRM_H__
+#define __NOVA_DRM_H__
+
+#include "drm.h"
+
+/* DISCLAIMER: Do not use, this is not a stable uAPI.
+ *
+ * This uAPI serves only testing purposes as long as this driver is still in
+ * development. It is required to implement and test infrastructure which is
+ * upstreamed in the context of this driver. See also [1].
+ *
+ * [1] https://lore.kernel.org/dri-devel/Zfsj0_tb-0-tNrJy@cassiopeiae/T/#u
+ */
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/*
+ * NOVA_GETPARAM_VRAM_BAR_SIZE
+ *
+ * Query the VRAM BAR size in bytes.
+ */
+#define NOVA_GETPARAM_VRAM_BAR_SIZE 0x1
+
+/**
+ * struct drm_nova_getparam - query GPU and driver metadata
+ */
+struct drm_nova_getparam {
+ /**
+ * @param: The identifier of the parameter to query.
+ */
+ __u64 param;
+
+ /**
+ * @value: The value for the specified parameter.
+ */
+ __u64 value;
+};
+
+/**
+ * struct drm_nova_gem_create - create a new DRM GEM object
+ */
+struct drm_nova_gem_create {
+ /**
+ * @handle: The handle of the new DRM GEM object.
+ */
+ __u32 handle;
+
+ /**
+ * @pad: 32 bit padding, should be 0.
+ */
+ __u32 pad;
+
+ /**
+ * @size: The size of the new DRM GEM object.
+ */
+ __u64 size;
+};
+
+/**
+ * struct drm_nova_gem_info - query DRM GEM object metadata
+ */
+struct drm_nova_gem_info {
+ /**
+ * @handle: The handle of the DRM GEM object to query.
+ */
+ __u32 handle;
+
+ /**
+ * @pad: 32 bit padding, should be 0.
+ */
+ __u32 pad;
+
+ /**
+	 * @size: The size of the DRM GEM object.
+ */
+ __u64 size;
+};
+
+#define DRM_NOVA_GETPARAM 0x00
+#define DRM_NOVA_GEM_CREATE 0x01
+#define DRM_NOVA_GEM_INFO 0x02
+
+/* Note: this is an enum so that it can be resolved by Rust bindgen. */
+enum {
+ DRM_IOCTL_NOVA_GETPARAM = DRM_IOWR(DRM_COMMAND_BASE + DRM_NOVA_GETPARAM,
+ struct drm_nova_getparam),
+ DRM_IOCTL_NOVA_GEM_CREATE = DRM_IOWR(DRM_COMMAND_BASE + DRM_NOVA_GEM_CREATE,
+ struct drm_nova_gem_create),
+ DRM_IOCTL_NOVA_GEM_INFO = DRM_IOWR(DRM_COMMAND_BASE + DRM_NOVA_GEM_INFO,
+ struct drm_nova_gem_info),
+};
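+
+/*
+ * A minimal sketch of exercising this testing-only uAPI (the DRM fd is assumed
+ * to be open):
+ *
+ *	struct drm_nova_getparam gp = { .param = NOVA_GETPARAM_VRAM_BAR_SIZE };
+ *	struct drm_nova_gem_create gc = { .size = 0x1000 };
+ *
+ *	drmIoctl(fd, DRM_IOCTL_NOVA_GETPARAM, &gp);
+ *	drmIoctl(fd, DRM_IOCTL_NOVA_GEM_CREATE, &gc);
+ */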
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* __NOVA_DRM_H__ */
diff --git a/include/uapi/drm/panthor_drm.h b/include/uapi/drm/panthor_drm.h
index 97e2c4510e69..ad9a70afea6c 100644
--- a/include/uapi/drm/panthor_drm.h
+++ b/include/uapi/drm/panthor_drm.h
@@ -127,6 +127,9 @@ enum drm_panthor_ioctl_id {
/** @DRM_PANTHOR_TILER_HEAP_DESTROY: Destroy a tiler heap. */
DRM_PANTHOR_TILER_HEAP_DESTROY,
+
+ /** @DRM_PANTHOR_BO_SET_LABEL: Label a BO. */
+ DRM_PANTHOR_BO_SET_LABEL,
};
/**
@@ -978,6 +981,24 @@ struct drm_panthor_tiler_heap_destroy {
};
/**
+ * struct drm_panthor_bo_set_label - Arguments passed to DRM_IOCTL_PANTHOR_BO_SET_LABEL
+ */
+struct drm_panthor_bo_set_label {
+ /** @handle: Handle of the buffer object to label. */
+ __u32 handle;
+
+ /** @pad: MBZ. */
+ __u32 pad;
+
+ /**
+ * @label: User pointer to a NUL-terminated string
+ *
+ * Length cannot be greater than 4096
+ */
+ __u64 label;
+};
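+
+/*
+ * For example, labelling a buffer object (a sketch; bo_handle is assumed to be
+ * a valid GEM handle):
+ *
+ *	static const char name[] = "tiler-heap";
+ *	struct drm_panthor_bo_set_label args = {
+ *		.handle = bo_handle,
+ *		.label = (__u64)(uintptr_t)name,
+ *	};
+ *
+ *	drmIoctl(fd, DRM_IOCTL_PANTHOR_BO_SET_LABEL, &args);
+ */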
+
+/**
* DRM_IOCTL_PANTHOR() - Build a Panthor IOCTL number
* @__access: Access type. Must be R, W or RW.
* @__id: One of the DRM_PANTHOR_xxx id.
@@ -1019,6 +1040,8 @@ enum {
DRM_IOCTL_PANTHOR(WR, TILER_HEAP_CREATE, tiler_heap_create),
DRM_IOCTL_PANTHOR_TILER_HEAP_DESTROY =
DRM_IOCTL_PANTHOR(WR, TILER_HEAP_DESTROY, tiler_heap_destroy),
+ DRM_IOCTL_PANTHOR_BO_SET_LABEL =
+ DRM_IOCTL_PANTHOR(WR, BO_SET_LABEL, bo_set_label),
};
#if defined(__cplusplus)
diff --git a/include/uapi/drm/virtgpu_drm.h b/include/uapi/drm/virtgpu_drm.h
index c2ce71987e9b..9debb320c34b 100644
--- a/include/uapi/drm/virtgpu_drm.h
+++ b/include/uapi/drm/virtgpu_drm.h
@@ -163,6 +163,12 @@ struct drm_virtgpu_3d_wait {
__u32 flags;
};
+#define VIRTGPU_DRM_CAPSET_VIRGL 1
+#define VIRTGPU_DRM_CAPSET_VIRGL2 2
+#define VIRTGPU_DRM_CAPSET_GFXSTREAM_VULKAN 3
+#define VIRTGPU_DRM_CAPSET_VENUS 4
+#define VIRTGPU_DRM_CAPSET_CROSS_DOMAIN 5
+#define VIRTGPU_DRM_CAPSET_DRM 6
struct drm_virtgpu_get_caps {
__u32 cap_set_id;
__u32 cap_set_ver;
diff --git a/include/uapi/drm/xe_drm.h b/include/uapi/drm/xe_drm.h
index 616916985e3f..9c08738c3b91 100644
--- a/include/uapi/drm/xe_drm.h
+++ b/include/uapi/drm/xe_drm.h
@@ -917,7 +917,11 @@ struct drm_xe_gem_mmap_offset {
* struct drm_xe_vm_create - Input of &DRM_IOCTL_XE_VM_CREATE
*
* The @flags can be:
- * - %DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE
+ * - %DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE - Map the whole virtual address
+ * space of the VM to scratch page. A vm_bind would overwrite the scratch
+ * page mapping. This flag is mutually exclusive with the
+ * %DRM_XE_VM_CREATE_FLAG_FAULT_MODE flag, with the exception of the xe2
+ * and xe3 platforms.
* - %DRM_XE_VM_CREATE_FLAG_LR_MODE - An LR, or Long Running VM accepts
* exec submissions to its exec_queues that don't have an upper time
* limit on the job execution time. But exec submissions to these
diff --git a/include/uapi/linux/blktrace_api.h b/include/uapi/linux/blktrace_api.h
index 690621b610e5..1bfb635e309b 100644
--- a/include/uapi/linux/blktrace_api.h
+++ b/include/uapi/linux/blktrace_api.h
@@ -49,7 +49,7 @@ enum blktrace_act {
__BLK_TA_UNPLUG_TIMER, /* queue was unplugged by timer */
__BLK_TA_INSERT, /* insert request */
__BLK_TA_SPLIT, /* bio was split */
- __BLK_TA_BOUNCE, /* bio was bounced */
+ __BLK_TA_BOUNCE, /* unused, was: bio was bounced */
__BLK_TA_REMAP, /* bio was remapped */
__BLK_TA_ABORT, /* request aborted */
__BLK_TA_DRV_DATA, /* driver-specific binary data */
diff --git a/include/uapi/linux/cec-funcs.h b/include/uapi/linux/cec-funcs.h
index d58fa1cdcb08..189ecf0e13cd 100644
--- a/include/uapi/linux/cec-funcs.h
+++ b/include/uapi/linux/cec-funcs.h
@@ -14,7 +14,7 @@
static inline void cec_msg_active_source(struct cec_msg *msg, __u16 phys_addr)
{
msg->len = 4;
- msg->msg[0] |= 0xf; /* broadcast */
+ msg->msg[0] |= CEC_LOG_ADDR_BROADCAST;
msg->msg[1] = CEC_MSG_ACTIVE_SOURCE;
msg->msg[2] = phys_addr >> 8;
msg->msg[3] = phys_addr & 0xff;
@@ -59,7 +59,7 @@ static inline void cec_msg_request_active_source(struct cec_msg *msg,
int reply)
{
msg->len = 2;
- msg->msg[0] |= 0xf; /* broadcast */
+ msg->msg[0] |= CEC_LOG_ADDR_BROADCAST;
msg->msg[1] = CEC_MSG_REQUEST_ACTIVE_SOURCE;
msg->reply = reply ? CEC_MSG_ACTIVE_SOURCE : 0;
}
@@ -68,7 +68,7 @@ static inline void cec_msg_routing_information(struct cec_msg *msg,
__u16 phys_addr)
{
msg->len = 4;
- msg->msg[0] |= 0xf; /* broadcast */
+ msg->msg[0] |= CEC_LOG_ADDR_BROADCAST;
msg->msg[1] = CEC_MSG_ROUTING_INFORMATION;
msg->msg[2] = phys_addr >> 8;
msg->msg[3] = phys_addr & 0xff;
@@ -86,7 +86,7 @@ static inline void cec_msg_routing_change(struct cec_msg *msg,
__u16 new_phys_addr)
{
msg->len = 6;
- msg->msg[0] |= 0xf; /* broadcast */
+ msg->msg[0] |= CEC_LOG_ADDR_BROADCAST;
msg->msg[1] = CEC_MSG_ROUTING_CHANGE;
msg->msg[2] = orig_phys_addr >> 8;
msg->msg[3] = orig_phys_addr & 0xff;
@@ -106,7 +106,7 @@ static inline void cec_ops_routing_change(const struct cec_msg *msg,
static inline void cec_msg_set_stream_path(struct cec_msg *msg, __u16 phys_addr)
{
msg->len = 4;
- msg->msg[0] |= 0xf; /* broadcast */
+ msg->msg[0] |= CEC_LOG_ADDR_BROADCAST;
msg->msg[1] = CEC_MSG_SET_STREAM_PATH;
msg->msg[2] = phys_addr >> 8;
msg->msg[3] = phys_addr & 0xff;
@@ -791,7 +791,7 @@ static inline void cec_msg_report_physical_addr(struct cec_msg *msg,
__u16 phys_addr, __u8 prim_devtype)
{
msg->len = 5;
- msg->msg[0] |= 0xf; /* broadcast */
+ msg->msg[0] |= CEC_LOG_ADDR_BROADCAST;
msg->msg[1] = CEC_MSG_REPORT_PHYSICAL_ADDR;
msg->msg[2] = phys_addr >> 8;
msg->msg[3] = phys_addr & 0xff;
@@ -817,7 +817,7 @@ static inline void cec_msg_set_menu_language(struct cec_msg *msg,
const char *language)
{
msg->len = 5;
- msg->msg[0] |= 0xf; /* broadcast */
+ msg->msg[0] |= CEC_LOG_ADDR_BROADCAST;
msg->msg[1] = CEC_MSG_SET_MENU_LANGUAGE;
memcpy(msg->msg + 2, language, 3);
}
@@ -850,7 +850,7 @@ static inline void cec_msg_report_features(struct cec_msg *msg,
__u8 rc_profile, __u8 dev_features)
{
msg->len = 6;
- msg->msg[0] |= 0xf; /* broadcast */
+ msg->msg[0] |= CEC_LOG_ADDR_BROADCAST;
msg->msg[1] = CEC_MSG_REPORT_FEATURES;
msg->msg[2] = cec_version;
msg->msg[3] = all_device_types;
@@ -1092,7 +1092,7 @@ static inline void cec_msg_tuner_step_increment(struct cec_msg *msg)
static inline void cec_msg_device_vendor_id(struct cec_msg *msg, __u32 vendor_id)
{
msg->len = 5;
- msg->msg[0] |= 0xf; /* broadcast */
+ msg->msg[0] |= CEC_LOG_ADDR_BROADCAST;
msg->msg[1] = CEC_MSG_DEVICE_VENDOR_ID;
msg->msg[2] = vendor_id >> 16;
msg->msg[3] = (vendor_id >> 8) & 0xff;
@@ -1655,7 +1655,7 @@ static inline void cec_msg_report_current_latency(struct cec_msg *msg,
__u8 audio_out_delay)
{
msg->len = 6;
- msg->msg[0] |= 0xf; /* broadcast */
+ msg->msg[0] |= CEC_LOG_ADDR_BROADCAST;
msg->msg[1] = CEC_MSG_REPORT_CURRENT_LATENCY;
msg->msg[2] = phys_addr >> 8;
msg->msg[3] = phys_addr & 0xff;
@@ -1687,7 +1687,7 @@ static inline void cec_msg_request_current_latency(struct cec_msg *msg,
__u16 phys_addr)
{
msg->len = 4;
- msg->msg[0] |= 0xf; /* broadcast */
+ msg->msg[0] |= CEC_LOG_ADDR_BROADCAST;
msg->msg[1] = CEC_MSG_REQUEST_CURRENT_LATENCY;
msg->msg[2] = phys_addr >> 8;
msg->msg[3] = phys_addr & 0xff;
@@ -1707,7 +1707,7 @@ static inline void cec_msg_cdc_hec_inquire_state(struct cec_msg *msg,
__u16 phys_addr2)
{
msg->len = 9;
- msg->msg[0] |= 0xf; /* broadcast */
+ msg->msg[0] |= CEC_LOG_ADDR_BROADCAST;
msg->msg[1] = CEC_MSG_CDC_MESSAGE;
/* msg[2] and msg[3] (phys_addr) are filled in by the CEC framework */
msg->msg[4] = CEC_MSG_CDC_HEC_INQUIRE_STATE;
@@ -1737,7 +1737,7 @@ static inline void cec_msg_cdc_hec_report_state(struct cec_msg *msg,
__u16 hec_field)
{
msg->len = has_field ? 10 : 8;
- msg->msg[0] |= 0xf; /* broadcast */
+ msg->msg[0] |= CEC_LOG_ADDR_BROADCAST;
msg->msg[1] = CEC_MSG_CDC_MESSAGE;
/* msg[2] and msg[3] (phys_addr) are filled in by the CEC framework */
msg->msg[4] = CEC_MSG_CDC_HEC_REPORT_STATE;
@@ -1782,7 +1782,7 @@ static inline void cec_msg_cdc_hec_set_state(struct cec_msg *msg,
__u16 phys_addr5)
{
msg->len = 10;
- msg->msg[0] |= 0xf; /* broadcast */
+ msg->msg[0] |= CEC_LOG_ADDR_BROADCAST;
msg->msg[1] = CEC_MSG_CDC_MESSAGE;
/* msg[2] and msg[3] (phys_addr) are filled in by the CEC framework */
msg->msg[4] = CEC_MSG_CDC_HEC_INQUIRE_STATE;
@@ -1832,7 +1832,7 @@ static inline void cec_msg_cdc_hec_set_state_adjacent(struct cec_msg *msg,
__u8 hec_set_state)
{
msg->len = 8;
- msg->msg[0] |= 0xf; /* broadcast */
+ msg->msg[0] |= CEC_LOG_ADDR_BROADCAST;
msg->msg[1] = CEC_MSG_CDC_MESSAGE;
/* msg[2] and msg[3] (phys_addr) are filled in by the CEC framework */
msg->msg[4] = CEC_MSG_CDC_HEC_SET_STATE_ADJACENT;
@@ -1857,7 +1857,7 @@ static inline void cec_msg_cdc_hec_request_deactivation(struct cec_msg *msg,
__u16 phys_addr3)
{
msg->len = 11;
- msg->msg[0] |= 0xf; /* broadcast */
+ msg->msg[0] |= CEC_LOG_ADDR_BROADCAST;
msg->msg[1] = CEC_MSG_CDC_MESSAGE;
/* msg[2] and msg[3] (phys_addr) are filled in by the CEC framework */
msg->msg[4] = CEC_MSG_CDC_HEC_REQUEST_DEACTIVATION;
@@ -1884,7 +1884,7 @@ static inline void cec_ops_cdc_hec_request_deactivation(const struct cec_msg *ms
static inline void cec_msg_cdc_hec_notify_alive(struct cec_msg *msg)
{
msg->len = 5;
- msg->msg[0] |= 0xf; /* broadcast */
+ msg->msg[0] |= CEC_LOG_ADDR_BROADCAST;
msg->msg[1] = CEC_MSG_CDC_MESSAGE;
/* msg[2] and msg[3] (phys_addr) are filled in by the CEC framework */
msg->msg[4] = CEC_MSG_CDC_HEC_NOTIFY_ALIVE;
@@ -1899,7 +1899,7 @@ static inline void cec_ops_cdc_hec_notify_alive(const struct cec_msg *msg,
static inline void cec_msg_cdc_hec_discover(struct cec_msg *msg)
{
msg->len = 5;
- msg->msg[0] |= 0xf; /* broadcast */
+ msg->msg[0] |= CEC_LOG_ADDR_BROADCAST;
msg->msg[1] = CEC_MSG_CDC_MESSAGE;
/* msg[2] and msg[3] (phys_addr) are filled in by the CEC framework */
msg->msg[4] = CEC_MSG_CDC_HEC_DISCOVER;
@@ -1916,7 +1916,7 @@ static inline void cec_msg_cdc_hpd_set_state(struct cec_msg *msg,
__u8 hpd_state)
{
msg->len = 6;
- msg->msg[0] |= 0xf; /* broadcast */
+ msg->msg[0] |= CEC_LOG_ADDR_BROADCAST;
msg->msg[1] = CEC_MSG_CDC_MESSAGE;
/* msg[2] and msg[3] (phys_addr) are filled in by the CEC framework */
msg->msg[4] = CEC_MSG_CDC_HPD_SET_STATE;
@@ -1938,7 +1938,7 @@ static inline void cec_msg_cdc_hpd_report_state(struct cec_msg *msg,
__u8 hpd_error)
{
msg->len = 6;
- msg->msg[0] |= 0xf; /* broadcast */
+ msg->msg[0] |= CEC_LOG_ADDR_BROADCAST;
msg->msg[1] = CEC_MSG_CDC_MESSAGE;
/* msg[2] and msg[3] (phys_addr) are filled in by the CEC framework */
msg->msg[4] = CEC_MSG_CDC_HPD_REPORT_STATE;
diff --git a/include/uapi/linux/fscrypt.h b/include/uapi/linux/fscrypt.h
index 7a8f4c290187..3aff99f2696a 100644
--- a/include/uapi/linux/fscrypt.h
+++ b/include/uapi/linux/fscrypt.h
@@ -119,7 +119,7 @@ struct fscrypt_key_specifier {
*/
struct fscrypt_provisioning_key_payload {
__u32 type;
- __u32 __reserved;
+ __u32 flags;
__u8 raw[];
};
@@ -128,7 +128,9 @@ struct fscrypt_add_key_arg {
struct fscrypt_key_specifier key_spec;
__u32 raw_size;
__u32 key_id;
- __u32 __reserved[8];
+#define FSCRYPT_ADD_KEY_FLAG_HW_WRAPPED 0x00000001
+ __u32 flags;
+ __u32 __reserved[7];
__u8 raw[];
};
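+
+/*
+ * A sketch of adding a hardware-wrapped key (illustrative only; the wrapped
+ * key blob format is vendor-specific, and blob/blob_len are assumed to exist):
+ *
+ *	struct fscrypt_add_key_arg *arg = calloc(1, sizeof(*arg) + blob_len);
+ *
+ *	arg->key_spec.type = FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER;
+ *	arg->raw_size = blob_len;
+ *	arg->flags = FSCRYPT_ADD_KEY_FLAG_HW_WRAPPED;
+ *	memcpy(arg->raw, blob, blob_len);
+ *	ioctl(fs_fd, FS_IOC_ADD_ENCRYPTION_KEY, arg);
+ */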
diff --git a/include/uapi/linux/futex.h b/include/uapi/linux/futex.h
index d2ee625ea189..7e2744ec8933 100644
--- a/include/uapi/linux/futex.h
+++ b/include/uapi/linux/futex.h
@@ -63,7 +63,7 @@
#define FUTEX2_SIZE_U32 0x02
#define FUTEX2_SIZE_U64 0x03
#define FUTEX2_NUMA 0x04
- /* 0x08 */
+#define FUTEX2_MPOL 0x08
/* 0x10 */
/* 0x20 */
/* 0x40 */
@@ -75,6 +75,13 @@
#define FUTEX_32 FUTEX2_SIZE_U32 /* historical accident :-( */
/*
+ * When FUTEX2_NUMA doubles the futex word, the second word is a node value.
+ * The special value -1 indicates "no node". It is numerically the same as
+ * NUMA_NO_NODE, except that NUMA_NO_NODE is not ABI and FUTEX_NO_NODE is.
+ */
+#define FUTEX_NO_NODE (-1)
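+
+/*
+ * With FUTEX2_NUMA the futex is thus a pair of u32 words, e.g. (a sketch):
+ *
+ *	struct { __u32 val; __u32 node; } futex = { 0, FUTEX_NO_NODE };
+ *
+ * where FUTEX_NO_NODE lets the kernel pick the node.
+ */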
+
+/*
* Max numbers of elements in a futex_waitv array
*/
#define FUTEX_WAITV_MAX 128
diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
index 8f1fc12bac46..cfd17e382082 100644
--- a/include/uapi/linux/io_uring.h
+++ b/include/uapi/linux/io_uring.h
@@ -73,6 +73,7 @@ struct io_uring_sqe {
__u32 futex_flags;
__u32 install_fd_flags;
__u32 nop_flags;
+ __u32 pipe_flags;
};
__u64 user_data; /* data to be passed back at completion time */
/* pack this to avoid bogus arm OABI complaints */
@@ -93,6 +94,10 @@ struct io_uring_sqe {
__u16 addr_len;
__u16 __pad3[1];
};
+ struct {
+ __u8 write_stream;
+ __u8 __pad4[3];
+ };
};
union {
struct {
@@ -283,6 +288,7 @@ enum io_uring_op {
IORING_OP_EPOLL_WAIT,
IORING_OP_READV_FIXED,
IORING_OP_WRITEV_FIXED,
+ IORING_OP_PIPE,
/* this goes last, obviously */
IORING_OP_LAST,
@@ -988,12 +994,16 @@ struct io_uring_zcrx_offsets {
__u64 __resv[2];
};
+enum io_uring_zcrx_area_flags {
+ IORING_ZCRX_AREA_DMABUF = 1,
+};
+
struct io_uring_zcrx_area_reg {
__u64 addr;
__u64 len;
__u64 rq_area_token;
__u32 flags;
- __u32 __resv1;
+ __u32 dmabuf_fd;
__u64 __resv2[2];
};
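+
+/*
+ * A sketch of describing a dma-buf backed area (illustrative only; semantics
+ * beyond flags/dmabuf_fd follow the zcrx registration documentation):
+ *
+ *	struct io_uring_zcrx_area_reg area = {
+ *		.len = dmabuf_size,
+ *		.flags = IORING_ZCRX_AREA_DMABUF,
+ *		.dmabuf_fd = dmabuf_fd,
+ *	};
+ */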
diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h
index 1e59344c5673..04c7d283dc7d 100644
--- a/include/uapi/linux/kfd_ioctl.h
+++ b/include/uapi/linux/kfd_ioctl.h
@@ -536,6 +536,8 @@ enum kfd_smi_event {
KFD_SMI_EVENT_QUEUE_EVICTION = 9,
KFD_SMI_EVENT_QUEUE_RESTORE = 10,
KFD_SMI_EVENT_UNMAP_FROM_GPU = 11,
+ KFD_SMI_EVENT_PROCESS_START = 12,
+ KFD_SMI_EVENT_PROCESS_END = 13,
/*
* max event number, as a flag bit to get events from all processes,
@@ -651,6 +653,9 @@ struct kfd_ioctl_smi_events_args {
"%lld -%d @%lx(%lx) %x %d\n", (ns), (pid), (addr), (size),\
(node), (unmap_trigger)
+#define KFD_EVENT_FMT_PROCESS(pid, task_name)\
+ "%x %s\n", (pid), (task_name)
+
/**************************************************************************************************
* CRIU IOCTLs (Checkpoint Restore In Userspace)
*
diff --git a/include/uapi/linux/media/amlogic/c3-isp-config.h b/include/uapi/linux/media/amlogic/c3-isp-config.h
new file mode 100644
index 000000000000..ed085ea62a57
--- /dev/null
+++ b/include/uapi/linux/media/amlogic/c3-isp-config.h
@@ -0,0 +1,564 @@
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
+/*
+ * Copyright (C) 2024 Amlogic, Inc. All rights reserved
+ */
+
+#ifndef _UAPI_C3_ISP_CONFIG_H_
+#define _UAPI_C3_ISP_CONFIG_H_
+
+#include <linux/types.h>
+
+/*
+ * Frames are split into zones of almost equal width and height - a zone is a
+ * rectangular tile of a frame. The metering blocks within the ISP collect
+ * aggregated statistics per zone.
+ */
+#define C3_ISP_AE_MAX_ZONES (17 * 15)
+#define C3_ISP_AF_MAX_ZONES (17 * 15)
+#define C3_ISP_AWB_MAX_ZONES (32 * 24)
+
+/* The maximum number of points on the diagonal of the frame for statistics */
+#define C3_ISP_AE_MAX_PT_NUM 18
+#define C3_ISP_AF_MAX_PT_NUM 18
+#define C3_ISP_AWB_MAX_PT_NUM 33
+
+/**
+ * struct c3_isp_awb_zone_stats - AWB statistics of a zone
+ *
+ * AWB zone stats are aligned to 8 bytes
+ *
+ * @rg: the ratio of R / G in a zone
+ * @bg: the ratio of B / G in a zone
+ * @pixel_sum: the total number of pixels used in a zone
+ */
+struct c3_isp_awb_zone_stats {
+ __u16 rg;
+ __u16 bg;
+ __u32 pixel_sum;
+};
+
+/**
+ * struct c3_isp_awb_stats - Auto white balance statistics information.
+ *
+ * AWB statistical information of all zones.
+ *
+ * @stats: array of auto white balance statistics
+ */
+struct c3_isp_awb_stats {
+ struct c3_isp_awb_zone_stats stats[C3_ISP_AWB_MAX_ZONES];
+} __attribute__((aligned(16)));
+
+/**
+ * struct c3_isp_ae_zone_stats - AE statistics of a zone
+ *
+ * AE zone stats are aligned to 8 bytes.
+ * This is a 5-bin histogram whose total sum is normalized to 0xffff,
+ * so hist2 = 0xffff - (hist0 + hist1 + hist3 + hist4).
+ *
+ * @hist0: the global normalized pixel count for bin 0
+ * @hist1: the global normalized pixel count for bin 1
+ * @hist3: the global normalized pixel count for bin 3
+ * @hist4: the global normalized pixel count for bin 4
+ */
+struct c3_isp_ae_zone_stats {
+ __u16 hist0;
+ __u16 hist1;
+ __u16 hist3;
+ __u16 hist4;
+};
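+
+/*
+ * The missing bin can be recovered from the normalization, e.g. given
+ * struct c3_isp_ae_zone_stats *s:
+ *
+ *	__u16 hist2 = 0xffff - (s->hist0 + s->hist1 + s->hist3 + s->hist4);
+ */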
+
+/**
+ * struct c3_isp_ae_stats - Exposure statistics information
+ *
+ * AE statistical information consists of the statistics of all blocks and a 1024-bin
+ * histogram.
+ *
+ * @stats: array of auto exposure block statistics
+ * @reserved: undefined buffer space
+ * @hist: a 1024-bin histogram for the entire image
+ */
+struct c3_isp_ae_stats {
+ struct c3_isp_ae_zone_stats stats[C3_ISP_AE_MAX_ZONES];
+ __u32 reserved[2];
+ __u32 hist[1024];
+} __attribute__((aligned(16)));
+
+/**
+ * struct c3_isp_af_zone_stats - AF statistics of a zone
+ *
+ * AF zone stats are aligned to 8 bytes.
+ * The zonal accumulated contrast metrics are stored in floating point format
+ * with 16 bits mantissa and 5 or 6 bits exponent. Apart from contrast metrics
+ * we accumulate squared image and quartic image data over the zone.
+ *
+ * @i2_mat: the mantissa of zonal squared image pixel sum
+ * @i4_mat: the mantissa of zonal quartic image pixel sum
+ * @e4_mat: the mantissa of zonal multi-directional quartic edge sum
+ * @e4_exp: the exponent of zonal multi-directional quartic edge sum
+ * @i2_exp: the exponent of zonal squared image pixel sum
+ * @i4_exp: the exponent of zonal quartic image pixel sum
+ */
+struct c3_isp_af_zone_stats {
+ __u16 i2_mat;
+ __u16 i4_mat;
+ __u16 e4_mat;
+ __u16 e4_exp : 5;
+ __u16 i2_exp : 5;
+ __u16 i4_exp : 6;
+};
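+
+/*
+ * A sketch of expanding one of the zonal sums, assuming the plain
+ * value = mantissa * 2^exponent encoding (the exact scaling is hardware
+ * defined), given struct c3_isp_af_zone_stats *s:
+ *
+ *	__u64 e4 = (__u64)s->e4_mat << s->e4_exp;
+ */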
+
+/**
+ * struct c3_isp_af_stats - Auto Focus statistics information
+ *
+ * AF statistical information of each zone
+ *
+ * @stats: array of auto focus block statistics
+ * @reserved: undefined buffer space
+ */
+struct c3_isp_af_stats {
+ struct c3_isp_af_zone_stats stats[C3_ISP_AF_MAX_ZONES];
+ __u32 reserved[2];
+} __attribute__((aligned(16)));
+
+/**
+ * struct c3_isp_stats_info - V4L2_META_FMT_C3ISP_STATS
+ *
+ * Contains ISP statistics
+ *
+ * @awb: auto white balance stats
+ * @ae: auto exposure stats
+ * @af: auto focus stats
+ */
+struct c3_isp_stats_info {
+ struct c3_isp_awb_stats awb;
+ struct c3_isp_ae_stats ae;
+ struct c3_isp_af_stats af;
+};
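+
+/*
+ * A minimal sketch of consuming a captured stats buffer, averaging the AWB
+ * ratios over the active zones (buf points to the dequeued metadata buffer
+ * and zones matches the programmed zone count):
+ *
+ *	const struct c3_isp_stats_info *st = buf;
+ *	__u64 rg = 0, bg = 0;
+ *
+ *	for (unsigned int i = 0; i < zones; i++) {
+ *		rg += st->awb.stats[i].rg;
+ *		bg += st->awb.stats[i].bg;
+ *	}
+ *	rg /= zones;
+ *	bg /= zones;
+ */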
+
+/**
+ * enum c3_isp_params_buffer_version - C3 ISP parameters block versioning
+ *
+ * @C3_ISP_PARAMS_BUFFER_V0: First version of C3 ISP parameters block
+ */
+enum c3_isp_params_buffer_version {
+ C3_ISP_PARAMS_BUFFER_V0,
+};
+
+/**
+ * enum c3_isp_params_block_type - Enumeration of C3 ISP parameter blocks
+ *
+ * Each block configures a specific processing block of the C3 ISP.
+ * The block type allows the driver to correctly interpret the parameters block
+ * data.
+ *
+ * @C3_ISP_PARAMS_BLOCK_AWB_GAINS: White balance gains
+ * @C3_ISP_PARAMS_BLOCK_AWB_CONFIG: AWB statistic format configuration for all
+ * blocks that control how stats are generated
+ * @C3_ISP_PARAMS_BLOCK_AE_CONFIG: AE statistic format configuration for all
+ * blocks that control how stats are generated
+ * @C3_ISP_PARAMS_BLOCK_AF_CONFIG: AF statistic format configuration for all
+ * blocks that control how stats are generated
+ * @C3_ISP_PARAMS_BLOCK_PST_GAMMA: post gamma parameters
+ * @C3_ISP_PARAMS_BLOCK_CCM: Color correction matrix parameters
+ * @C3_ISP_PARAMS_BLOCK_CSC: Color space conversion parameters
+ * @C3_ISP_PARAMS_BLOCK_BLC: Black level correction parameters
+ * @C3_ISP_PARAMS_BLOCK_SENTINEL: First non-valid block index
+ */
+enum c3_isp_params_block_type {
+ C3_ISP_PARAMS_BLOCK_AWB_GAINS,
+ C3_ISP_PARAMS_BLOCK_AWB_CONFIG,
+ C3_ISP_PARAMS_BLOCK_AE_CONFIG,
+ C3_ISP_PARAMS_BLOCK_AF_CONFIG,
+ C3_ISP_PARAMS_BLOCK_PST_GAMMA,
+ C3_ISP_PARAMS_BLOCK_CCM,
+ C3_ISP_PARAMS_BLOCK_CSC,
+ C3_ISP_PARAMS_BLOCK_BLC,
+ C3_ISP_PARAMS_BLOCK_SENTINEL
+};
+
+#define C3_ISP_PARAMS_BLOCK_FL_DISABLE (1U << 0)
+#define C3_ISP_PARAMS_BLOCK_FL_ENABLE (1U << 1)
+
+/**
+ * struct c3_isp_params_block_header - C3 ISP parameter block header
+ *
+ * This structure represents the common part of all the ISP configuration
+ * blocks. Each parameters block shall embed an instance of this structure type
+ * as its first member, followed by the block-specific configuration data. The
+ * driver inspects this common header to discern the block type and size, and
+ * handles the block content properly by casting it to the correct block-specific
+ * type.
+ *
+ * The @type field is one of the values enumerated by
+ * :c:type:`c3_isp_params_block_type` and specifies how the data should be
+ * interpreted by the driver. The @size field specifies the size of the
+ * parameters block and is used by the driver for validation purposes. The
+ * @flags field is a bitmask of per-block C3_ISP_PARAMS_BLOCK_FL_* flags.
+ *
+ * When userspace wants to disable an ISP block, the
+ * C3_ISP_PARAMS_BLOCK_FL_DISABLE bit should be set in the @flags field. In
+ * this case userspace may optionally omit the remainder of the configuration
+ * block, which will be ignored by the driver.
+ *
+ * When a new configuration of an ISP block needs to be applied, userspace
+ * shall fully populate the ISP block and omit setting the
+ * C3_ISP_PARAMS_BLOCK_FL_DISABLE bit in the @flags field.
+ *
+ * Userspace is responsible for correctly populating the parameters block header
+ * fields (@type, @flags and @size) and the block-specific parameters.
+ *
+ * For example:
+ *
+ * .. code-block:: c
+ *
+ * void populate_pst_gamma(struct c3_isp_params_block_header *block) {
+ * struct c3_isp_params_pst_gamma *gamma =
+ * (struct c3_isp_params_pst_gamma *)block;
+ *
+ * gamma->header.type = C3_ISP_PARAMS_BLOCK_PST_GAMMA;
+ * gamma->header.flags = C3_ISP_PARAMS_BLOCK_FL_ENABLE;
+ * gamma->header.size = sizeof(*gamma);
+ *
+ * for (unsigned int i = 0; i < 129; i++)
+ * gamma->pst_gamma_lut[i] = i;
+ * }
+ *
+ * @type: The parameters block type from :c:type:`c3_isp_params_block_type`
+ * @flags: A bitmask of block flags
+ * @size: Size (in bytes) of the parameters block, including this header
+ */
+struct c3_isp_params_block_header {
+ __u16 type;
+ __u16 flags;
+ __u32 size;
+};
+
+/**
+ * struct c3_isp_params_awb_gains - Gains for auto-white balance
+ *
+ * This struct allows users to configure the gains for white balance.
+ * There are four gain settings corresponding to each colour channel in
+ * the bayer domain. All of the gains are stored in Q4.8 format.
+ *
+ * header.type should be set to C3_ISP_PARAMS_BLOCK_AWB_GAINS
+ * from :c:type:`c3_isp_params_block_type`
+ *
+ * @header: The C3 ISP parameters block header
+ * @gr_gain: Multiplier for Gr channel (Q4.8 format)
+ * @r_gain: Multiplier for R channel (Q4.8 format)
+ * @b_gain: Multiplier for B channel (Q4.8 format)
+ * @gb_gain: Multiplier for Gb channel (Q4.8 format)
+ */
+struct c3_isp_params_awb_gains {
+ struct c3_isp_params_block_header header;
+ __u16 gr_gain;
+ __u16 r_gain;
+ __u16 b_gain;
+ __u16 gb_gain;
+} __attribute__((aligned(8)));
+
+/**
+ * enum c3_isp_params_awb_tap_points - Tap points for the AWB statistics
+ * @C3_ISP_AWB_STATS_TAP_OFE: immediately after the optical frontend block
+ * @C3_ISP_AWB_STATS_TAP_GE: immediately after the green equal block
+ * @C3_ISP_AWB_STATS_TAP_BEFORE_WB: immediately before the white balance block
+ * @C3_ISP_AWB_STATS_TAP_AFTER_WB: immediately after the white balance block
+ */
+enum c3_isp_params_awb_tap_points {
+ C3_ISP_AWB_STATS_TAP_OFE = 0,
+ C3_ISP_AWB_STATS_TAP_GE,
+ C3_ISP_AWB_STATS_TAP_BEFORE_WB,
+ C3_ISP_AWB_STATS_TAP_AFTER_WB,
+};
+
+/**
+ * struct c3_isp_params_awb_config - Stats settings for auto-white balance
+ *
+ * This struct allows the configuration of the statistics generated for auto
+ * white balance.
+ *
+ * header.type should be set to C3_ISP_PARAMS_BLOCK_AWB_CONFIG
+ * from :c:type:`c3_isp_params_block_type`
+ *
+ * @header: the C3 ISP parameters block header
+ * @tap_point: the tap point from enum c3_isp_params_awb_tap_points
+ * @satur_vald: AWB statistic over saturation control
+ * value: 0: disable, 1: enable
+ * @horiz_zones_num: active number of horizontal zones [0..32]
+ * @vert_zones_num: active number of vertical zones [0..24]
+ * @rg_min: minimum R/G ratio (Q4.8 format)
+ * @rg_max: maximum R/G ratio (Q4.8 format)
+ * @bg_min: minimum B/G ratio (Q4.8 format)
+ * @bg_max: maximum B/G ratio (Q4.8 format)
+ * @rg_low: R/G ratio trim low (Q4.8 format)
+ * @rg_high: R/G ratio trim high (Q4.8 format)
+ * @bg_low: B/G ratio trim low (Q4.8 format)
+ * @bg_high: B/G ratio trim high (Q4.8 format)
+ * @zone_weight: array of weights for AWB statistics zones [0..15]
+ * @horiz_coord: the horizontal coordinate of points on the diagonal [0..2888]
+ * @vert_coord: the vertical coordinate of points on the diagonal [0..2240]
+ */
+struct c3_isp_params_awb_config {
+ struct c3_isp_params_block_header header;
+ __u8 tap_point;
+ __u8 satur_vald;
+ __u8 horiz_zones_num;
+ __u8 vert_zones_num;
+ __u16 rg_min;
+ __u16 rg_max;
+ __u16 bg_min;
+ __u16 bg_max;
+ __u16 rg_low;
+ __u16 rg_high;
+ __u16 bg_low;
+ __u16 bg_high;
+ __u8 zone_weight[C3_ISP_AWB_MAX_ZONES];
+ __u16 horiz_coord[C3_ISP_AWB_MAX_PT_NUM];
+ __u16 vert_coord[C3_ISP_AWB_MAX_PT_NUM];
+} __attribute__((aligned(8)));
+
+/**
+ * enum c3_isp_params_ae_tap_points - Tap points for the AE statistics
+ * @C3_ISP_AE_STATS_TAP_GE: immediately after the green equal block
+ * @C3_ISP_AE_STATS_TAP_MLS: immediately after the mesh lens shading block
+ */
+enum c3_isp_params_ae_tap_points {
+ C3_ISP_AE_STATS_TAP_GE = 0,
+ C3_ISP_AE_STATS_TAP_MLS,
+};
+
+/**
+ * struct c3_isp_params_ae_config - Stats settings for auto-exposure
+ *
+ * This struct allows the configuration of the statistics generated for
+ * auto exposure.
+ *
+ * header.type should be set to C3_ISP_PARAMS_BLOCK_AE_CONFIG
+ * from :c:type:`c3_isp_params_block_type`
+ *
+ * @header: the C3 ISP parameters block header
+ * @horiz_zones_num: active number of horizontal zones [0..17]
+ * @vert_zones_num: active number of vertical zones [0..15]
+ * @tap_point: the tap point from enum c3_isp_params_ae_tap_points
+ * @zone_weight: array of weights for AE statistics zones [0..15]
+ * @horiz_coord: the horizontal coordinate of points on the diagonal [0..2888]
+ * @vert_coord: the vertical coordinate of points on the diagonal [0..2240]
+ * @reserved: applications must zero this array
+ */
+struct c3_isp_params_ae_config {
+ struct c3_isp_params_block_header header;
+ __u8 tap_point;
+ __u8 horiz_zones_num;
+ __u8 vert_zones_num;
+ __u8 zone_weight[C3_ISP_AE_MAX_ZONES];
+ __u16 horiz_coord[C3_ISP_AE_MAX_PT_NUM];
+ __u16 vert_coord[C3_ISP_AE_MAX_PT_NUM];
+ __u16 reserved[3];
+} __attribute__((aligned(8)));
+
+/**
+ * enum c3_isp_params_af_tap_points - Tap points for the AF statistics
+ * @C3_ISP_AF_STATS_TAP_SNR: immediately after the spatial noise reduce block
+ * @C3_ISP_AF_STATS_TAP_DMS: immediately after the demosaic block
+ */
+enum c3_isp_params_af_tap_points {
+ C3_ISP_AF_STATS_TAP_SNR = 0,
+ C3_ISP_AF_STATS_TAP_DMS,
+};
+
+/**
+ * struct c3_isp_params_af_config - Stats settings for auto-focus
+ *
+ * This struct allows the configuration of the statistics generated for
+ * auto focus.
+ *
+ * header.type should be set to C3_ISP_PARAMS_BLOCK_AF_CONFIG
+ * from :c:type:`c3_isp_params_block_type`
+ *
+ * @header: the C3 ISP parameters block header
+ * @tap_point: the tap point from enum c3_isp_params_af_tap_points
+ * @horiz_zones_num: active number of horizontal zones [0..17]
+ * @vert_zones_num: active number of vertical zones [0..15]
+ * @reserved: applications must zero this array
+ * @horiz_coord: the horizontal coordinate of points on the diagonal [0..2888]
+ * @vert_coord: the vertical coordinate of points on the diagonal [0..2240]
+ */
+struct c3_isp_params_af_config {
+ struct c3_isp_params_block_header header;
+ __u8 tap_point;
+ __u8 horiz_zones_num;
+ __u8 vert_zones_num;
+ __u8 reserved[5];
+ __u16 horiz_coord[C3_ISP_AF_MAX_PT_NUM];
+ __u16 vert_coord[C3_ISP_AF_MAX_PT_NUM];
+} __attribute__((aligned(8)));
+
+/**
+ * struct c3_isp_params_pst_gamma - Post gamma configuration
+ *
+ * This struct allows the configuration of the look up table for
+ * post gamma. The gamma curve consists of 129 points, so all 129
+ * entries of @lut must be set.
+ *
+ * header.type should be set to C3_ISP_PARAMS_BLOCK_PST_GAMMA
+ * from :c:type:`c3_isp_params_block_type`
+ *
+ * @header: the C3 ISP parameters block header
+ * @lut: lookup table for P-Stitch gamma [0..1023]
+ * @reserved: applications must zero this array
+ */
+struct c3_isp_params_pst_gamma {
+ struct c3_isp_params_block_header header;
+ __u16 lut[129];
+ __u16 reserved[3];
+} __attribute__((aligned(8)));
+
+/**
+ * struct c3_isp_params_ccm - ISP CCM configuration
+ *
+ * This struct allows the configuration of the matrix for
+ * color correction. The matrix consists of 3 x 3 coefficients,
+ * so all nine entries of @matrix must be set.
+ *
+ * header.type should be set to C3_ISP_PARAMS_BLOCK_CCM
+ * from :c:type:`c3_isp_params_block_type`
+ *
+ * @header: the C3 ISP parameters block header
+ * @matrix: a 3 x 3 matrix used for color correction,
+ * the value of matrix[x][y] is orig_value x 256. [-4096..4095]
+ * @reserved: applications must zero this array
+ */
+struct c3_isp_params_ccm {
+ struct c3_isp_params_block_header header;
+ __s16 matrix[3][3];
+ __u16 reserved[3];
+} __attribute__((aligned(8)));
+
+/**
+ * struct c3_isp_params_csc - ISP Color Space Conversion configuration
+ *
+ * This struct allows the configuration of the matrix for color space
+ * conversion. The matrix consists of 3 x 3 coefficients, so all nine
+ * entries of @matrix must be set.
+ *
+ * header.type should be set to C3_ISP_PARAMS_BLOCK_CSC
+ * from :c:type:`c3_isp_params_block_type`
+ *
+ * @header: the C3 ISP parameters block header
+ * @matrix: a 3x3 matrix used for the color space conversion,
+ * the value of matrix[x][y] is orig_value x 256. [-4096..4095]
+ * @reserved: applications must zero this array
+ */
+struct c3_isp_params_csc {
+ struct c3_isp_params_block_header header;
+ __s16 matrix[3][3];
+ __u16 reserved[3];
+} __attribute__((aligned(8)));
+
+/**
+ * struct c3_isp_params_blc - ISP Black Level Correction configuration
+ *
+ * This struct allows the configuration of the black level offset for each
+ * color channel.
+ *
+ * header.type should be set to C3_ISP_PARAMS_BLOCK_BLC
+ * from :c:type:`c3_isp_params_block_type`
+ *
+ * @header: the C3 ISP parameters block header
+ * @gr_ofst: Gr blc offset (Q4.12 format)
+ * @r_ofst: R blc offset (Q4.12 format)
+ * @b_ofst: B blc offset (Q4.12 format)
+ * @gb_ofst: Gb blc offset(Q4.12 format)
+ */
+struct c3_isp_params_blc {
+ struct c3_isp_params_block_header header;
+ __u16 gr_ofst;
+ __u16 r_ofst;
+ __u16 b_ofst;
+ __u16 gb_ofst;
+};
+
+/**
+ * define C3_ISP_PARAMS_MAX_SIZE - Maximum size of all C3 ISP Parameters
+ *
+ * Though the parameters for the C3 ISP are passed as optional blocks, the
+ * driver still needs to know the absolute maximum size so that it can allocate
+ * a buffer sized appropriately to accommodate userspace attempting to set all
+ * possible parameters in a single frame.
+ */
+#define C3_ISP_PARAMS_MAX_SIZE \
+ (sizeof(struct c3_isp_params_awb_gains) + \
+ sizeof(struct c3_isp_params_awb_config) + \
+ sizeof(struct c3_isp_params_ae_config) + \
+ sizeof(struct c3_isp_params_af_config) + \
+ sizeof(struct c3_isp_params_pst_gamma) + \
+ sizeof(struct c3_isp_params_ccm) + \
+ sizeof(struct c3_isp_params_csc) + \
+ sizeof(struct c3_isp_params_blc))
+
+/**
+ * struct c3_isp_params_cfg - C3 ISP configuration parameters
+ *
+ * This struct contains the configuration parameters of the C3 ISP
+ * algorithms, serialized by userspace into an opaque data buffer. Each
+ * configuration parameter block is represented by a block-specific structure
+ * which contains a :c:type:`c3_isp_params_block_header` entry as its first
+ * member. Userspace populates the @data buffer with configuration parameters
+ * for the blocks that it intends to configure. As a consequence, the data
+ * buffer effective size changes according to the number of ISP blocks that
+ * userspace intends to configure.
+ *
+ * The parameters buffer is versioned by the @version field to allow modifying
+ * and extending its definition. Userspace should populate the @version field to
+ * inform the driver about the version it intends to use. The driver will parse
+ * and handle the @data buffer according to the data layout specific to the
+ * indicated revision and return an error if the desired revision is not
+ * supported.
+ *
+ * For each ISP block that userspace wants to configure, a block-specific
+ * structure is appended to the @data buffer, one after the other without gaps
+ * in between nor overlaps. Userspace shall populate the @total_size field with
+ * the effective size, in bytes, of the @data buffer.
+ *
+ * The expected memory layout of the parameters buffer is::
+ *
+ * +------------------- struct c3_isp_params_cfg ------------------+
+ * | version = C3_ISP_PARAMS_BUFFER_V0;                            |
+ * | data_size = sizeof(struct c3_isp_params_awb_gains) +          |
+ * |             sizeof(struct c3_isp_params_awb_config);          |
+ * | +--------------------------- data --------------------------+ |
+ * | | +------------ struct c3_isp_params_awb_gains ------------+ | |
+ * | | | +---- struct c3_isp_params_block_header header ---+ | | |
+ * | | | | type = C3_ISP_PARAMS_BLOCK_AWB_GAINS;             | | | |
+ * | | | | flags = C3_ISP_PARAMS_BLOCK_FL_ENABLE;            | | | |
+ * | | | | size = sizeof(struct c3_isp_params_awb_gains);    | | | |
+ * | | | +---------------------------------------------------+ | | |
+ * | | | gr_gain = ...;                                        | | |
+ * | | | r_gain = ...;                                         | | |
+ * | | | b_gain = ...;                                         | | |
+ * | | | gb_gain = ...;                                        | | |
+ * | | +----------- struct c3_isp_params_awb_config -----------+ | |
+ * | | | +---- struct c3_isp_params_block_header header ---+ | | |
+ * | | | | type = C3_ISP_PARAMS_BLOCK_AWB_CONFIG;            | | | |
+ * | | | | flags = C3_ISP_PARAMS_BLOCK_FL_ENABLE;            | | | |
+ * | | | | size = sizeof(struct c3_isp_params_awb_config);   | | | |
+ * | | | +---------------------------------------------------+ | | |
+ * | | | tap_point = ...;                                      | | |
+ * | | | satur_vald = ...;                                     | | |
+ * | | | horiz_zones_num = ...;                                | | |
+ * | | | vert_zones_num = ...;                                 | | |
+ * | | +--------------------------------------------------------+ | |
+ * | +------------------------------------------------------------+ |
+ * +----------------------------------------------------------------+
+ *
+ * @version: The C3 ISP parameters buffer version
+ * @data_size: The C3 ISP configuration data effective size, excluding this
+ * header
+ * @data: The C3 ISP configuration blocks data
+ */
+struct c3_isp_params_cfg {
+ __u32 version;
+ __u32 data_size;
+ __u8 data[C3_ISP_PARAMS_MAX_SIZE];
+};
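+
+/*
+ * A minimal sketch of populating the buffer with a single AWB gains block,
+ * matching the layout described above (buf points to a mapped parameters
+ * buffer):
+ *
+ *	struct c3_isp_params_cfg *cfg = buf;
+ *	struct c3_isp_params_awb_gains *gains = (void *)cfg->data;
+ *
+ *	cfg->version = C3_ISP_PARAMS_BUFFER_V0;
+ *	cfg->data_size = sizeof(*gains);
+ *
+ *	gains->header.type = C3_ISP_PARAMS_BLOCK_AWB_GAINS;
+ *	gains->header.flags = C3_ISP_PARAMS_BLOCK_FL_ENABLE;
+ *	gains->header.size = sizeof(*gains);
+ *	gains->gr_gain = 0x100;
+ *	gains->r_gain = 0x200;
+ *	gains->b_gain = 0x180;
+ *	gains->gb_gain = 0x100;
+ *
+ * where 0x100 encodes 1.0, 0x180 encodes 1.5 and 0x200 encodes 2.0 in Q4.8.
+ */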
+
+#endif
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index 5fc753c23734..78a362b80027 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -39,18 +39,21 @@ enum perf_type_id {
/*
* attr.config layout for type PERF_TYPE_HARDWARE and PERF_TYPE_HW_CACHE
+ *
* PERF_TYPE_HARDWARE: 0xEEEEEEEE000000AA
* AA: hardware event ID
* EEEEEEEE: PMU type ID
+ *
* PERF_TYPE_HW_CACHE: 0xEEEEEEEE00DDCCBB
* BB: hardware cache ID
* CC: hardware cache op ID
* DD: hardware cache op result ID
* EEEEEEEE: PMU type ID
- * If the PMU type ID is 0, the PERF_TYPE_RAW will be applied.
+ *
+ * If the PMU type ID is 0, PERF_TYPE_RAW will be applied.
*/
-#define PERF_PMU_TYPE_SHIFT 32
-#define PERF_HW_EVENT_MASK 0xffffffff
+#define PERF_PMU_TYPE_SHIFT 32
+#define PERF_HW_EVENT_MASK 0xffffffff
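As a hedged illustration of the encoding described in the comment above (the helper name is ours, not part of this header), composing an extended attr.config for PERF_TYPE_HARDWARE looks like:

#include <linux/perf_event.h>

/* Low 32 bits select the hardware event ID, high 32 bits select the PMU
 * type ID; a PMU type ID of 0 means PERF_TYPE_RAW semantics apply. */
static inline __u64 perf_hw_config(__u32 pmu_type, __u64 hw_event)
{
	return ((__u64)pmu_type << PERF_PMU_TYPE_SHIFT) |
	       (hw_event & PERF_HW_EVENT_MASK);
}

/* e.g. attr.config = perf_hw_config(pmu_type, PERF_COUNT_HW_CPU_CYCLES); */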
/*
* Generalized performance event event_id types, used by the
@@ -112,7 +115,7 @@ enum perf_hw_cache_op_result_id {
/*
* Special "software" events provided by the kernel, even if the hardware
* does not support performance events. These events measure various
- * physical and sw events of the kernel (and allow the profiling of them as
+ * physical and SW events of the kernel (and allow the profiling of them as
* well):
*/
enum perf_sw_ids {
@@ -167,8 +170,9 @@ enum perf_event_sample_format {
};
#define PERF_SAMPLE_WEIGHT_TYPE (PERF_SAMPLE_WEIGHT | PERF_SAMPLE_WEIGHT_STRUCT)
+
/*
- * values to program into branch_sample_type when PERF_SAMPLE_BRANCH is set
+ * Values to program into branch_sample_type when PERF_SAMPLE_BRANCH is set.
*
* If the user does not pass priv level information via branch_sample_type,
* the kernel uses the event's priv level. Branch and event priv levels do
@@ -178,20 +182,20 @@ enum perf_event_sample_format {
* of branches and therefore it supersedes all the other types.
*/
enum perf_branch_sample_type_shift {
- PERF_SAMPLE_BRANCH_USER_SHIFT = 0, /* user branches */
- PERF_SAMPLE_BRANCH_KERNEL_SHIFT = 1, /* kernel branches */
- PERF_SAMPLE_BRANCH_HV_SHIFT = 2, /* hypervisor branches */
-
- PERF_SAMPLE_BRANCH_ANY_SHIFT = 3, /* any branch types */
- PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT = 4, /* any call branch */
- PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT = 5, /* any return branch */
- PERF_SAMPLE_BRANCH_IND_CALL_SHIFT = 6, /* indirect calls */
- PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT = 7, /* transaction aborts */
- PERF_SAMPLE_BRANCH_IN_TX_SHIFT = 8, /* in transaction */
- PERF_SAMPLE_BRANCH_NO_TX_SHIFT = 9, /* not in transaction */
+ PERF_SAMPLE_BRANCH_USER_SHIFT = 0, /* user branches */
+ PERF_SAMPLE_BRANCH_KERNEL_SHIFT = 1, /* kernel branches */
+ PERF_SAMPLE_BRANCH_HV_SHIFT = 2, /* hypervisor branches */
+
+ PERF_SAMPLE_BRANCH_ANY_SHIFT = 3, /* any branch types */
+ PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT = 4, /* any call branch */
+ PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT = 5, /* any return branch */
+ PERF_SAMPLE_BRANCH_IND_CALL_SHIFT = 6, /* indirect calls */
+ PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT = 7, /* transaction aborts */
+ PERF_SAMPLE_BRANCH_IN_TX_SHIFT = 8, /* in transaction */
+ PERF_SAMPLE_BRANCH_NO_TX_SHIFT = 9, /* not in transaction */
PERF_SAMPLE_BRANCH_COND_SHIFT = 10, /* conditional branches */
- PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT = 11, /* call/ret stack */
+ PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT = 11, /* CALL/RET stack */
PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT = 12, /* indirect jumps */
PERF_SAMPLE_BRANCH_CALL_SHIFT = 13, /* direct call */
@@ -210,96 +214,95 @@ enum perf_branch_sample_type_shift {
};
enum perf_branch_sample_type {
- PERF_SAMPLE_BRANCH_USER = 1U << PERF_SAMPLE_BRANCH_USER_SHIFT,
- PERF_SAMPLE_BRANCH_KERNEL = 1U << PERF_SAMPLE_BRANCH_KERNEL_SHIFT,
- PERF_SAMPLE_BRANCH_HV = 1U << PERF_SAMPLE_BRANCH_HV_SHIFT,
+ PERF_SAMPLE_BRANCH_USER = 1U << PERF_SAMPLE_BRANCH_USER_SHIFT,
+ PERF_SAMPLE_BRANCH_KERNEL = 1U << PERF_SAMPLE_BRANCH_KERNEL_SHIFT,
+ PERF_SAMPLE_BRANCH_HV = 1U << PERF_SAMPLE_BRANCH_HV_SHIFT,
- PERF_SAMPLE_BRANCH_ANY = 1U << PERF_SAMPLE_BRANCH_ANY_SHIFT,
- PERF_SAMPLE_BRANCH_ANY_CALL = 1U << PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT,
- PERF_SAMPLE_BRANCH_ANY_RETURN = 1U << PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT,
- PERF_SAMPLE_BRANCH_IND_CALL = 1U << PERF_SAMPLE_BRANCH_IND_CALL_SHIFT,
- PERF_SAMPLE_BRANCH_ABORT_TX = 1U << PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT,
- PERF_SAMPLE_BRANCH_IN_TX = 1U << PERF_SAMPLE_BRANCH_IN_TX_SHIFT,
- PERF_SAMPLE_BRANCH_NO_TX = 1U << PERF_SAMPLE_BRANCH_NO_TX_SHIFT,
- PERF_SAMPLE_BRANCH_COND = 1U << PERF_SAMPLE_BRANCH_COND_SHIFT,
+ PERF_SAMPLE_BRANCH_ANY = 1U << PERF_SAMPLE_BRANCH_ANY_SHIFT,
+ PERF_SAMPLE_BRANCH_ANY_CALL = 1U << PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT,
+ PERF_SAMPLE_BRANCH_ANY_RETURN = 1U << PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT,
+ PERF_SAMPLE_BRANCH_IND_CALL = 1U << PERF_SAMPLE_BRANCH_IND_CALL_SHIFT,
+ PERF_SAMPLE_BRANCH_ABORT_TX = 1U << PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT,
+ PERF_SAMPLE_BRANCH_IN_TX = 1U << PERF_SAMPLE_BRANCH_IN_TX_SHIFT,
+ PERF_SAMPLE_BRANCH_NO_TX = 1U << PERF_SAMPLE_BRANCH_NO_TX_SHIFT,
+ PERF_SAMPLE_BRANCH_COND = 1U << PERF_SAMPLE_BRANCH_COND_SHIFT,
- PERF_SAMPLE_BRANCH_CALL_STACK = 1U << PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT,
- PERF_SAMPLE_BRANCH_IND_JUMP = 1U << PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT,
- PERF_SAMPLE_BRANCH_CALL = 1U << PERF_SAMPLE_BRANCH_CALL_SHIFT,
+ PERF_SAMPLE_BRANCH_CALL_STACK = 1U << PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT,
+ PERF_SAMPLE_BRANCH_IND_JUMP = 1U << PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT,
+ PERF_SAMPLE_BRANCH_CALL = 1U << PERF_SAMPLE_BRANCH_CALL_SHIFT,
- PERF_SAMPLE_BRANCH_NO_FLAGS = 1U << PERF_SAMPLE_BRANCH_NO_FLAGS_SHIFT,
- PERF_SAMPLE_BRANCH_NO_CYCLES = 1U << PERF_SAMPLE_BRANCH_NO_CYCLES_SHIFT,
+ PERF_SAMPLE_BRANCH_NO_FLAGS = 1U << PERF_SAMPLE_BRANCH_NO_FLAGS_SHIFT,
+ PERF_SAMPLE_BRANCH_NO_CYCLES = 1U << PERF_SAMPLE_BRANCH_NO_CYCLES_SHIFT,
- PERF_SAMPLE_BRANCH_TYPE_SAVE =
- 1U << PERF_SAMPLE_BRANCH_TYPE_SAVE_SHIFT,
+ PERF_SAMPLE_BRANCH_TYPE_SAVE = 1U << PERF_SAMPLE_BRANCH_TYPE_SAVE_SHIFT,
- PERF_SAMPLE_BRANCH_HW_INDEX = 1U << PERF_SAMPLE_BRANCH_HW_INDEX_SHIFT,
+ PERF_SAMPLE_BRANCH_HW_INDEX = 1U << PERF_SAMPLE_BRANCH_HW_INDEX_SHIFT,
- PERF_SAMPLE_BRANCH_PRIV_SAVE = 1U << PERF_SAMPLE_BRANCH_PRIV_SAVE_SHIFT,
+ PERF_SAMPLE_BRANCH_PRIV_SAVE = 1U << PERF_SAMPLE_BRANCH_PRIV_SAVE_SHIFT,
- PERF_SAMPLE_BRANCH_COUNTERS = 1U << PERF_SAMPLE_BRANCH_COUNTERS_SHIFT,
+ PERF_SAMPLE_BRANCH_COUNTERS = 1U << PERF_SAMPLE_BRANCH_COUNTERS_SHIFT,
- PERF_SAMPLE_BRANCH_MAX = 1U << PERF_SAMPLE_BRANCH_MAX_SHIFT,
+ PERF_SAMPLE_BRANCH_MAX = 1U << PERF_SAMPLE_BRANCH_MAX_SHIFT,
};
/*
- * Common flow change classification
+ * Common control flow change classifications:
*/
enum {
- PERF_BR_UNKNOWN = 0, /* unknown */
- PERF_BR_COND = 1, /* conditional */
- PERF_BR_UNCOND = 2, /* unconditional */
- PERF_BR_IND = 3, /* indirect */
- PERF_BR_CALL = 4, /* function call */
- PERF_BR_IND_CALL = 5, /* indirect function call */
- PERF_BR_RET = 6, /* function return */
- PERF_BR_SYSCALL = 7, /* syscall */
- PERF_BR_SYSRET = 8, /* syscall return */
- PERF_BR_COND_CALL = 9, /* conditional function call */
- PERF_BR_COND_RET = 10, /* conditional function return */
- PERF_BR_ERET = 11, /* exception return */
- PERF_BR_IRQ = 12, /* irq */
- PERF_BR_SERROR = 13, /* system error */
- PERF_BR_NO_TX = 14, /* not in transaction */
- PERF_BR_EXTEND_ABI = 15, /* extend ABI */
+ PERF_BR_UNKNOWN = 0, /* Unknown */
+ PERF_BR_COND = 1, /* Conditional */
+ PERF_BR_UNCOND = 2, /* Unconditional */
+ PERF_BR_IND = 3, /* Indirect */
+ PERF_BR_CALL = 4, /* Function call */
+ PERF_BR_IND_CALL = 5, /* Indirect function call */
+ PERF_BR_RET = 6, /* Function return */
+ PERF_BR_SYSCALL = 7, /* Syscall */
+ PERF_BR_SYSRET = 8, /* Syscall return */
+ PERF_BR_COND_CALL = 9, /* Conditional function call */
+ PERF_BR_COND_RET = 10, /* Conditional function return */
+ PERF_BR_ERET = 11, /* Exception return */
+ PERF_BR_IRQ = 12, /* IRQ */
+ PERF_BR_SERROR = 13, /* System error */
+ PERF_BR_NO_TX = 14, /* Not in transaction */
+ PERF_BR_EXTEND_ABI = 15, /* Extend ABI */
PERF_BR_MAX,
};
/*
- * Common branch speculation outcome classification
+ * Common branch speculation outcome classifications:
*/
enum {
- PERF_BR_SPEC_NA = 0, /* Not available */
- PERF_BR_SPEC_WRONG_PATH = 1, /* Speculative but on wrong path */
- PERF_BR_NON_SPEC_CORRECT_PATH = 2, /* Non-speculative but on correct path */
- PERF_BR_SPEC_CORRECT_PATH = 3, /* Speculative and on correct path */
+ PERF_BR_SPEC_NA = 0, /* Not available */
+ PERF_BR_SPEC_WRONG_PATH = 1, /* Speculative but on wrong path */
+ PERF_BR_NON_SPEC_CORRECT_PATH = 2, /* Non-speculative but on correct path */
+ PERF_BR_SPEC_CORRECT_PATH = 3, /* Speculative and on correct path */
PERF_BR_SPEC_MAX,
};
enum {
- PERF_BR_NEW_FAULT_ALGN = 0, /* Alignment fault */
- PERF_BR_NEW_FAULT_DATA = 1, /* Data fault */
- PERF_BR_NEW_FAULT_INST = 2, /* Inst fault */
- PERF_BR_NEW_ARCH_1 = 3, /* Architecture specific */
- PERF_BR_NEW_ARCH_2 = 4, /* Architecture specific */
- PERF_BR_NEW_ARCH_3 = 5, /* Architecture specific */
- PERF_BR_NEW_ARCH_4 = 6, /* Architecture specific */
- PERF_BR_NEW_ARCH_5 = 7, /* Architecture specific */
+ PERF_BR_NEW_FAULT_ALGN = 0, /* Alignment fault */
+ PERF_BR_NEW_FAULT_DATA = 1, /* Data fault */
+ PERF_BR_NEW_FAULT_INST = 2, /* Inst fault */
+ PERF_BR_NEW_ARCH_1 = 3, /* Architecture specific */
+ PERF_BR_NEW_ARCH_2 = 4, /* Architecture specific */
+ PERF_BR_NEW_ARCH_3 = 5, /* Architecture specific */
+ PERF_BR_NEW_ARCH_4 = 6, /* Architecture specific */
+ PERF_BR_NEW_ARCH_5 = 7, /* Architecture specific */
PERF_BR_NEW_MAX,
};
enum {
- PERF_BR_PRIV_UNKNOWN = 0,
- PERF_BR_PRIV_USER = 1,
- PERF_BR_PRIV_KERNEL = 2,
- PERF_BR_PRIV_HV = 3,
+ PERF_BR_PRIV_UNKNOWN = 0,
+ PERF_BR_PRIV_USER = 1,
+ PERF_BR_PRIV_KERNEL = 2,
+ PERF_BR_PRIV_HV = 3,
};
-#define PERF_BR_ARM64_FIQ PERF_BR_NEW_ARCH_1
-#define PERF_BR_ARM64_DEBUG_HALT PERF_BR_NEW_ARCH_2
-#define PERF_BR_ARM64_DEBUG_EXIT PERF_BR_NEW_ARCH_3
-#define PERF_BR_ARM64_DEBUG_INST PERF_BR_NEW_ARCH_4
-#define PERF_BR_ARM64_DEBUG_DATA PERF_BR_NEW_ARCH_5
+#define PERF_BR_ARM64_FIQ PERF_BR_NEW_ARCH_1
+#define PERF_BR_ARM64_DEBUG_HALT PERF_BR_NEW_ARCH_2
+#define PERF_BR_ARM64_DEBUG_EXIT PERF_BR_NEW_ARCH_3
+#define PERF_BR_ARM64_DEBUG_INST PERF_BR_NEW_ARCH_4
+#define PERF_BR_ARM64_DEBUG_DATA PERF_BR_NEW_ARCH_5
#define PERF_SAMPLE_BRANCH_PLM_ALL \
(PERF_SAMPLE_BRANCH_USER|\
@@ -310,9 +313,9 @@ enum {
* Values to determine ABI of the registers dump.
*/
enum perf_sample_regs_abi {
- PERF_SAMPLE_REGS_ABI_NONE = 0,
- PERF_SAMPLE_REGS_ABI_32 = 1,
- PERF_SAMPLE_REGS_ABI_64 = 2,
+ PERF_SAMPLE_REGS_ABI_NONE = 0,
+ PERF_SAMPLE_REGS_ABI_32 = 1,
+ PERF_SAMPLE_REGS_ABI_64 = 2,
};
/*
@@ -320,21 +323,21 @@ enum perf_sample_regs_abi {
* abort events. Multiple bits can be set.
*/
enum {
- PERF_TXN_ELISION = (1 << 0), /* From elision */
- PERF_TXN_TRANSACTION = (1 << 1), /* From transaction */
- PERF_TXN_SYNC = (1 << 2), /* Instruction is related */
- PERF_TXN_ASYNC = (1 << 3), /* Instruction not related */
- PERF_TXN_RETRY = (1 << 4), /* Retry possible */
- PERF_TXN_CONFLICT = (1 << 5), /* Conflict abort */
- PERF_TXN_CAPACITY_WRITE = (1 << 6), /* Capacity write abort */
- PERF_TXN_CAPACITY_READ = (1 << 7), /* Capacity read abort */
+ PERF_TXN_ELISION = (1 << 0), /* From elision */
+ PERF_TXN_TRANSACTION = (1 << 1), /* From transaction */
+ PERF_TXN_SYNC = (1 << 2), /* Instruction is related */
+ PERF_TXN_ASYNC = (1 << 3), /* Instruction is not related */
+ PERF_TXN_RETRY = (1 << 4), /* Retry possible */
+ PERF_TXN_CONFLICT = (1 << 5), /* Conflict abort */
+ PERF_TXN_CAPACITY_WRITE = (1 << 6), /* Capacity write abort */
+ PERF_TXN_CAPACITY_READ = (1 << 7), /* Capacity read abort */
- PERF_TXN_MAX = (1 << 8), /* non-ABI */
+ PERF_TXN_MAX = (1 << 8), /* non-ABI */
- /* bits 32..63 are reserved for the abort code */
+ /* Bits 32..63 are reserved for the abort code */
- PERF_TXN_ABORT_MASK = (0xffffffffULL << 32),
- PERF_TXN_ABORT_SHIFT = 32,
+ PERF_TXN_ABORT_MASK = (0xffffffffULL << 32),
+ PERF_TXN_ABORT_SHIFT = 32,
};
/*
@@ -369,24 +372,22 @@ enum perf_event_read_format {
PERF_FORMAT_MAX = 1U << 5, /* non-ABI */
};
-#define PERF_ATTR_SIZE_VER0 64 /* sizeof first published struct */
-#define PERF_ATTR_SIZE_VER1 72 /* add: config2 */
-#define PERF_ATTR_SIZE_VER2 80 /* add: branch_sample_type */
-#define PERF_ATTR_SIZE_VER3 96 /* add: sample_regs_user */
- /* add: sample_stack_user */
-#define PERF_ATTR_SIZE_VER4 104 /* add: sample_regs_intr */
-#define PERF_ATTR_SIZE_VER5 112 /* add: aux_watermark */
-#define PERF_ATTR_SIZE_VER6 120 /* add: aux_sample_size */
-#define PERF_ATTR_SIZE_VER7 128 /* add: sig_data */
-#define PERF_ATTR_SIZE_VER8 136 /* add: config3 */
+#define PERF_ATTR_SIZE_VER0 64 /* Size of first published 'struct perf_event_attr' */
+#define PERF_ATTR_SIZE_VER1 72 /* Add: config2 */
+#define PERF_ATTR_SIZE_VER2 80 /* Add: branch_sample_type */
+#define PERF_ATTR_SIZE_VER3 96 /* Add: sample_regs_user */
+ /* Add: sample_stack_user */
+#define PERF_ATTR_SIZE_VER4 104 /* Add: sample_regs_intr */
+#define PERF_ATTR_SIZE_VER5 112 /* Add: aux_watermark */
+#define PERF_ATTR_SIZE_VER6 120 /* Add: aux_sample_size */
+#define PERF_ATTR_SIZE_VER7 128 /* Add: sig_data */
+#define PERF_ATTR_SIZE_VER8 136 /* Add: config3 */
/*
- * Hardware event_id to monitor via a performance monitoring event:
- *
- * @sample_max_stack: Max number of frame pointers in a callchain,
- * should be < /proc/sys/kernel/perf_event_max_stack
- * Max number of entries of branch stack
- * should be < hardware limit
+ * 'struct perf_event_attr' contains various attributes that define
+ * a performance event - most of them hardware-related configuration
+ * details, but also a lot of behavioral switches and values implemented
+ * by the kernel.
*/
struct perf_event_attr {
@@ -396,7 +397,7 @@ struct perf_event_attr {
__u32 type;
/*
- * Size of the attr structure, for fwd/bwd compat.
+ * Size of the attr structure, for forward/backwards compatibility.
*/
__u32 size;
@@ -451,21 +452,21 @@ struct perf_event_attr {
comm_exec : 1, /* flag comm events that are due to an exec */
use_clockid : 1, /* use @clockid for time fields */
context_switch : 1, /* context switch data */
- write_backward : 1, /* Write ring buffer from end to beginning */
+ write_backward : 1, /* write ring buffer from end to beginning */
namespaces : 1, /* include namespaces data */
ksymbol : 1, /* include ksymbol events */
- bpf_event : 1, /* include bpf events */
+ bpf_event : 1, /* include BPF events */
aux_output : 1, /* generate AUX records instead of events */
cgroup : 1, /* include cgroup events */
text_poke : 1, /* include text poke events */
- build_id : 1, /* use build id in mmap2 events */
+ build_id : 1, /* use build ID in mmap2 events */
inherit_thread : 1, /* children only inherit if cloned with CLONE_THREAD */
remove_on_exec : 1, /* event is removed from task on exec */
sigtrap : 1, /* send synchronous SIGTRAP on event */
__reserved_1 : 26;
union {
- __u32 wakeup_events; /* wakeup every n events */
+ __u32 wakeup_events; /* wake up every n events */
__u32 wakeup_watermark; /* bytes before wakeup */
};
@@ -474,13 +475,13 @@ struct perf_event_attr {
__u64 bp_addr;
__u64 kprobe_func; /* for perf_kprobe */
__u64 uprobe_path; /* for perf_uprobe */
- __u64 config1; /* extension of config */
+ __u64 config1; /* extension of config */
};
union {
__u64 bp_len;
- __u64 kprobe_addr; /* when kprobe_func == NULL */
+ __u64 kprobe_addr; /* when kprobe_func == NULL */
__u64 probe_offset; /* for perf_[k,u]probe */
- __u64 config2; /* extension of config1 */
+ __u64 config2; /* extension of config1 */
};
__u64 branch_sample_type; /* enum perf_branch_sample_type */
@@ -510,7 +511,16 @@ struct perf_event_attr {
* Wakeup watermark for AUX area
*/
__u32 aux_watermark;
+
+ /*
+ * Max number of frame pointers in a callchain, should be
+ * lower than /proc/sys/kernel/perf_event_max_stack.
+ *
+ * Max number of entries of branch stack should be lower
+ * than the hardware limit.
+ */
__u16 sample_max_stack;
+
__u16 __reserved_2;
__u32 aux_sample_size;
@@ -537,7 +547,7 @@ struct perf_event_attr {
/*
* Structure used by below PERF_EVENT_IOC_QUERY_BPF command
- * to query bpf programs attached to the same perf tracepoint
+ * to query BPF programs attached to the same perf tracepoint
* as the given perf event.
*/
struct perf_event_query_bpf {
@@ -559,21 +569,21 @@ struct perf_event_query_bpf {
/*
* Ioctls that can be done on a perf event fd:
*/
-#define PERF_EVENT_IOC_ENABLE _IO ('$', 0)
-#define PERF_EVENT_IOC_DISABLE _IO ('$', 1)
-#define PERF_EVENT_IOC_REFRESH _IO ('$', 2)
-#define PERF_EVENT_IOC_RESET _IO ('$', 3)
-#define PERF_EVENT_IOC_PERIOD _IOW('$', 4, __u64)
-#define PERF_EVENT_IOC_SET_OUTPUT _IO ('$', 5)
-#define PERF_EVENT_IOC_SET_FILTER _IOW('$', 6, char *)
-#define PERF_EVENT_IOC_ID _IOR('$', 7, __u64 *)
-#define PERF_EVENT_IOC_SET_BPF _IOW('$', 8, __u32)
-#define PERF_EVENT_IOC_PAUSE_OUTPUT _IOW('$', 9, __u32)
+#define PERF_EVENT_IOC_ENABLE _IO ('$', 0)
+#define PERF_EVENT_IOC_DISABLE _IO ('$', 1)
+#define PERF_EVENT_IOC_REFRESH _IO ('$', 2)
+#define PERF_EVENT_IOC_RESET _IO ('$', 3)
+#define PERF_EVENT_IOC_PERIOD _IOW ('$', 4, __u64)
+#define PERF_EVENT_IOC_SET_OUTPUT _IO ('$', 5)
+#define PERF_EVENT_IOC_SET_FILTER _IOW ('$', 6, char *)
+#define PERF_EVENT_IOC_ID _IOR ('$', 7, __u64 *)
+#define PERF_EVENT_IOC_SET_BPF _IOW ('$', 8, __u32)
+#define PERF_EVENT_IOC_PAUSE_OUTPUT _IOW ('$', 9, __u32)
#define PERF_EVENT_IOC_QUERY_BPF _IOWR('$', 10, struct perf_event_query_bpf *)
-#define PERF_EVENT_IOC_MODIFY_ATTRIBUTES _IOW('$', 11, struct perf_event_attr *)
+#define PERF_EVENT_IOC_MODIFY_ATTRIBUTES _IOW ('$', 11, struct perf_event_attr *)
enum perf_event_ioc_flags {
- PERF_IOC_FLAG_GROUP = 1U << 0,
+ PERF_IOC_FLAG_GROUP = 1U << 0,
};
/*
@@ -584,7 +594,7 @@ struct perf_event_mmap_page {
__u32 compat_version; /* lowest version this is compat with */
/*
- * Bits needed to read the hw events in user-space.
+ * Bits needed to read the HW events in user-space.
*
* u32 seq, time_mult, time_shift, index, width;
* u64 count, enabled, running;
@@ -622,7 +632,7 @@ struct perf_event_mmap_page {
__u32 index; /* hardware event identifier */
__s64 offset; /* add to hardware event value */
__u64 time_enabled; /* time event active */
- __u64 time_running; /* time event on cpu */
+ __u64 time_running; /* time event on CPU */
union {
__u64 capabilities;
struct {
@@ -650,7 +660,7 @@ struct perf_event_mmap_page {
/*
* If cap_usr_time the below fields can be used to compute the time
- * delta since time_enabled (in ns) using rdtsc or similar.
+ * delta since time_enabled (in ns) using RDTSC or similar.
*
* u64 quot, rem;
* u64 delta;
@@ -723,7 +733,7 @@ struct perf_event_mmap_page {
* after reading this value.
*
* When the mapping is PROT_WRITE the @data_tail value should be
- * written by userspace to reflect the last read data, after issueing
+ * written by user-space to reflect the last read data, after issuing
* an smp_mb() to separate the data read from the ->data_tail store.
* In this case the kernel will not over-write unread data.
*
@@ -739,7 +749,7 @@ struct perf_event_mmap_page {
/*
* AUX area is defined by aux_{offset,size} fields that should be set
- * by the userspace, so that
+ * by the user-space, so that
*
* aux_offset >= data_offset + data_size
*
@@ -813,7 +823,7 @@ struct perf_event_mmap_page {
* Indicates that thread was preempted in TASK_RUNNING state.
*
* PERF_RECORD_MISC_MMAP_BUILD_ID:
- * Indicates that mmap2 event carries build id data.
+ * Indicates that mmap2 event carries build ID data.
*/
#define PERF_RECORD_MISC_EXACT_IP (1 << 14)
#define PERF_RECORD_MISC_SWITCH_OUT_PREEMPT (1 << 14)
@@ -824,26 +834,26 @@ struct perf_event_mmap_page {
#define PERF_RECORD_MISC_EXT_RESERVED (1 << 15)
struct perf_event_header {
- __u32 type;
- __u16 misc;
- __u16 size;
+ __u32 type;
+ __u16 misc;
+ __u16 size;
};
struct perf_ns_link_info {
- __u64 dev;
- __u64 ino;
+ __u64 dev;
+ __u64 ino;
};
enum {
- NET_NS_INDEX = 0,
- UTS_NS_INDEX = 1,
- IPC_NS_INDEX = 2,
- PID_NS_INDEX = 3,
- USER_NS_INDEX = 4,
- MNT_NS_INDEX = 5,
- CGROUP_NS_INDEX = 6,
-
- NR_NAMESPACES, /* number of available namespaces */
+ NET_NS_INDEX = 0,
+ UTS_NS_INDEX = 1,
+ IPC_NS_INDEX = 2,
+ PID_NS_INDEX = 3,
+ USER_NS_INDEX = 4,
+ MNT_NS_INDEX = 5,
+ CGROUP_NS_INDEX = 6,
+
+ NR_NAMESPACES, /* number of available namespaces */
};
enum perf_event_type {
@@ -859,11 +869,11 @@ enum perf_event_type {
* optional fields being ignored.
*
* struct sample_id {
- * { u32 pid, tid; } && PERF_SAMPLE_TID
- * { u64 time; } && PERF_SAMPLE_TIME
- * { u64 id; } && PERF_SAMPLE_ID
- * { u64 stream_id;} && PERF_SAMPLE_STREAM_ID
- * { u32 cpu, res; } && PERF_SAMPLE_CPU
+ * { u32 pid, tid; } && PERF_SAMPLE_TID
+ * { u64 time; } && PERF_SAMPLE_TIME
+ * { u64 id; } && PERF_SAMPLE_ID
+ * { u64 stream_id;} && PERF_SAMPLE_STREAM_ID
+ * { u32 cpu, res; } && PERF_SAMPLE_CPU
* { u64 id; } && PERF_SAMPLE_IDENTIFIER
* } && perf_event_attr::sample_id_all
*
@@ -874,7 +884,7 @@ enum perf_event_type {
/*
* The MMAP events record the PROT_EXEC mappings so that we can
- * correlate userspace IPs to code. They have the following structure:
+ * correlate user-space IPs to code. They have the following structure:
*
* struct {
* struct perf_event_header header;
@@ -884,7 +894,7 @@ enum perf_event_type {
* u64 len;
* u64 pgoff;
* char filename[];
- * struct sample_id sample_id;
+ * struct sample_id sample_id;
* };
*/
PERF_RECORD_MMAP = 1,
@@ -894,7 +904,7 @@ enum perf_event_type {
* struct perf_event_header header;
* u64 id;
* u64 lost;
- * struct sample_id sample_id;
+ * struct sample_id sample_id;
* };
*/
PERF_RECORD_LOST = 2,
@@ -905,7 +915,7 @@ enum perf_event_type {
*
* u32 pid, tid;
* char comm[];
- * struct sample_id sample_id;
+ * struct sample_id sample_id;
* };
*/
PERF_RECORD_COMM = 3,
@@ -916,7 +926,7 @@ enum perf_event_type {
* u32 pid, ppid;
* u32 tid, ptid;
* u64 time;
- * struct sample_id sample_id;
+ * struct sample_id sample_id;
* };
*/
PERF_RECORD_EXIT = 4,
@@ -927,7 +937,7 @@ enum perf_event_type {
* u64 time;
* u64 id;
* u64 stream_id;
- * struct sample_id sample_id;
+ * struct sample_id sample_id;
* };
*/
PERF_RECORD_THROTTLE = 5,
@@ -939,7 +949,7 @@ enum perf_event_type {
* u32 pid, ppid;
* u32 tid, ptid;
* u64 time;
- * struct sample_id sample_id;
+ * struct sample_id sample_id;
* };
*/
PERF_RECORD_FORK = 7,
@@ -950,7 +960,7 @@ enum perf_event_type {
* u32 pid, tid;
*
* struct read_format values;
- * struct sample_id sample_id;
+ * struct sample_id sample_id;
* };
*/
PERF_RECORD_READ = 8,
@@ -1005,12 +1015,12 @@ enum perf_event_type {
* { u64 counters; } cntr[nr] && PERF_SAMPLE_BRANCH_COUNTERS
* } && PERF_SAMPLE_BRANCH_STACK
*
- * { u64 abi; # enum perf_sample_regs_abi
- * u64 regs[weight(mask)]; } && PERF_SAMPLE_REGS_USER
+ * { u64 abi; # enum perf_sample_regs_abi
+ * u64 regs[weight(mask)]; } && PERF_SAMPLE_REGS_USER
*
- * { u64 size;
- * char data[size];
- * u64 dyn_size; } && PERF_SAMPLE_STACK_USER
+ * { u64 size;
+ * char data[size];
+ * u64 dyn_size; } && PERF_SAMPLE_STACK_USER
*
* { union perf_sample_weight
* {
@@ -1035,10 +1045,11 @@ enum perf_event_type {
* { u64 abi; # enum perf_sample_regs_abi
* u64 regs[weight(mask)]; } && PERF_SAMPLE_REGS_INTR
* { u64 phys_addr;} && PERF_SAMPLE_PHYS_ADDR
- * { u64 size;
- * char data[size]; } && PERF_SAMPLE_AUX
+ * { u64 cgroup;} && PERF_SAMPLE_CGROUP
* { u64 data_page_size;} && PERF_SAMPLE_DATA_PAGE_SIZE
* { u64 code_page_size;} && PERF_SAMPLE_CODE_PAGE_SIZE
+ * { u64 size;
+ * char data[size]; } && PERF_SAMPLE_AUX
* };
*/
PERF_RECORD_SAMPLE = 9,
@@ -1070,7 +1081,7 @@ enum perf_event_type {
* };
* u32 prot, flags;
* char filename[];
- * struct sample_id sample_id;
+ * struct sample_id sample_id;
* };
*/
PERF_RECORD_MMAP2 = 10,
@@ -1079,12 +1090,12 @@ enum perf_event_type {
* Records that new data landed in the AUX buffer part.
*
* struct {
- * struct perf_event_header header;
+ * struct perf_event_header header;
*
- * u64 aux_offset;
- * u64 aux_size;
+ * u64 aux_offset;
+ * u64 aux_size;
* u64 flags;
- * struct sample_id sample_id;
+ * struct sample_id sample_id;
* };
*/
PERF_RECORD_AUX = 11,
@@ -1167,7 +1178,7 @@ enum perf_event_type {
PERF_RECORD_KSYMBOL = 17,
/*
- * Record bpf events:
+ * Record BPF events:
* enum perf_bpf_event_type {
* PERF_BPF_EVENT_UNKNOWN = 0,
* PERF_BPF_EVENT_PROG_LOAD = 1,
@@ -1245,181 +1256,181 @@ enum perf_record_ksymbol_type {
#define PERF_RECORD_KSYMBOL_FLAGS_UNREGISTER (1 << 0)
enum perf_bpf_event_type {
- PERF_BPF_EVENT_UNKNOWN = 0,
- PERF_BPF_EVENT_PROG_LOAD = 1,
- PERF_BPF_EVENT_PROG_UNLOAD = 2,
- PERF_BPF_EVENT_MAX, /* non-ABI */
+ PERF_BPF_EVENT_UNKNOWN = 0,
+ PERF_BPF_EVENT_PROG_LOAD = 1,
+ PERF_BPF_EVENT_PROG_UNLOAD = 2,
+ PERF_BPF_EVENT_MAX, /* non-ABI */
};
-#define PERF_MAX_STACK_DEPTH 127
-#define PERF_MAX_CONTEXTS_PER_STACK 8
+#define PERF_MAX_STACK_DEPTH 127
+#define PERF_MAX_CONTEXTS_PER_STACK 8
enum perf_callchain_context {
- PERF_CONTEXT_HV = (__u64)-32,
- PERF_CONTEXT_KERNEL = (__u64)-128,
- PERF_CONTEXT_USER = (__u64)-512,
+ PERF_CONTEXT_HV = (__u64)-32,
+ PERF_CONTEXT_KERNEL = (__u64)-128,
+ PERF_CONTEXT_USER = (__u64)-512,
- PERF_CONTEXT_GUEST = (__u64)-2048,
- PERF_CONTEXT_GUEST_KERNEL = (__u64)-2176,
- PERF_CONTEXT_GUEST_USER = (__u64)-2560,
+ PERF_CONTEXT_GUEST = (__u64)-2048,
+ PERF_CONTEXT_GUEST_KERNEL = (__u64)-2176,
+ PERF_CONTEXT_GUEST_USER = (__u64)-2560,
- PERF_CONTEXT_MAX = (__u64)-4095,
+ PERF_CONTEXT_MAX = (__u64)-4095,
};
/**
* PERF_RECORD_AUX::flags bits
*/
-#define PERF_AUX_FLAG_TRUNCATED 0x01 /* record was truncated to fit */
-#define PERF_AUX_FLAG_OVERWRITE 0x02 /* snapshot from overwrite mode */
-#define PERF_AUX_FLAG_PARTIAL 0x04 /* record contains gaps */
-#define PERF_AUX_FLAG_COLLISION 0x08 /* sample collided with another */
+#define PERF_AUX_FLAG_TRUNCATED 0x0001 /* Record was truncated to fit */
+#define PERF_AUX_FLAG_OVERWRITE 0x0002 /* Snapshot from overwrite mode */
+#define PERF_AUX_FLAG_PARTIAL 0x0004 /* Record contains gaps */
+#define PERF_AUX_FLAG_COLLISION 0x0008 /* Sample collided with another */
#define PERF_AUX_FLAG_PMU_FORMAT_TYPE_MASK 0xff00 /* PMU specific trace format type */
/* CoreSight PMU AUX buffer formats */
-#define PERF_AUX_FLAG_CORESIGHT_FORMAT_CORESIGHT 0x0000 /* Default for backward compatibility */
-#define PERF_AUX_FLAG_CORESIGHT_FORMAT_RAW 0x0100 /* Raw format of the source */
+#define PERF_AUX_FLAG_CORESIGHT_FORMAT_CORESIGHT 0x0000 /* Default for backward compatibility */
+#define PERF_AUX_FLAG_CORESIGHT_FORMAT_RAW 0x0100 /* Raw format of the source */
-#define PERF_FLAG_FD_NO_GROUP (1UL << 0)
-#define PERF_FLAG_FD_OUTPUT (1UL << 1)
-#define PERF_FLAG_PID_CGROUP (1UL << 2) /* pid=cgroup id, per-cpu mode only */
-#define PERF_FLAG_FD_CLOEXEC (1UL << 3) /* O_CLOEXEC */
+#define PERF_FLAG_FD_NO_GROUP (1UL << 0)
+#define PERF_FLAG_FD_OUTPUT (1UL << 1)
+#define PERF_FLAG_PID_CGROUP (1UL << 2) /* pid=cgroup ID, per-CPU mode only */
+#define PERF_FLAG_FD_CLOEXEC (1UL << 3) /* O_CLOEXEC */
#if defined(__LITTLE_ENDIAN_BITFIELD)
union perf_mem_data_src {
__u64 val;
struct {
- __u64 mem_op:5, /* type of opcode */
- mem_lvl:14, /* memory hierarchy level */
- mem_snoop:5, /* snoop mode */
- mem_lock:2, /* lock instr */
- mem_dtlb:7, /* tlb access */
- mem_lvl_num:4, /* memory hierarchy level number */
- mem_remote:1, /* remote */
- mem_snoopx:2, /* snoop mode, ext */
- mem_blk:3, /* access blocked */
- mem_hops:3, /* hop level */
- mem_rsvd:18;
+ __u64 mem_op : 5, /* Type of opcode */
+ mem_lvl : 14, /* Memory hierarchy level */
+ mem_snoop : 5, /* Snoop mode */
+ mem_lock : 2, /* Lock instr */
+ mem_dtlb : 7, /* TLB access */
+ mem_lvl_num : 4, /* Memory hierarchy level number */
+ mem_remote : 1, /* Remote */
+ mem_snoopx : 2, /* Snoop mode, ext */
+ mem_blk : 3, /* Access blocked */
+ mem_hops : 3, /* Hop level */
+ mem_rsvd : 18;
};
};
#elif defined(__BIG_ENDIAN_BITFIELD)
union perf_mem_data_src {
__u64 val;
struct {
- __u64 mem_rsvd:18,
- mem_hops:3, /* hop level */
- mem_blk:3, /* access blocked */
- mem_snoopx:2, /* snoop mode, ext */
- mem_remote:1, /* remote */
- mem_lvl_num:4, /* memory hierarchy level number */
- mem_dtlb:7, /* tlb access */
- mem_lock:2, /* lock instr */
- mem_snoop:5, /* snoop mode */
- mem_lvl:14, /* memory hierarchy level */
- mem_op:5; /* type of opcode */
+ __u64 mem_rsvd : 18,
+ mem_hops : 3, /* Hop level */
+ mem_blk : 3, /* Access blocked */
+ mem_snoopx : 2, /* Snoop mode, ext */
+ mem_remote : 1, /* Remote */
+ mem_lvl_num : 4, /* Memory hierarchy level number */
+ mem_dtlb : 7, /* TLB access */
+ mem_lock : 2, /* Lock instr */
+ mem_snoop : 5, /* Snoop mode */
+ mem_lvl : 14, /* Memory hierarchy level */
+ mem_op : 5; /* Type of opcode */
};
};
#else
-#error "Unknown endianness"
+# error "Unknown endianness"
#endif
-/* type of opcode (load/store/prefetch,code) */
-#define PERF_MEM_OP_NA 0x01 /* not available */
-#define PERF_MEM_OP_LOAD 0x02 /* load instruction */
-#define PERF_MEM_OP_STORE 0x04 /* store instruction */
-#define PERF_MEM_OP_PFETCH 0x08 /* prefetch */
-#define PERF_MEM_OP_EXEC 0x10 /* code (execution) */
-#define PERF_MEM_OP_SHIFT 0
+/* Type of memory opcode: */
+#define PERF_MEM_OP_NA 0x0001 /* Not available */
+#define PERF_MEM_OP_LOAD 0x0002 /* Load instruction */
+#define PERF_MEM_OP_STORE 0x0004 /* Store instruction */
+#define PERF_MEM_OP_PFETCH 0x0008 /* Prefetch */
+#define PERF_MEM_OP_EXEC 0x0010 /* Code (execution) */
+#define PERF_MEM_OP_SHIFT 0
/*
- * PERF_MEM_LVL_* namespace being depricated to some extent in the
+ * The PERF_MEM_LVL_* namespace is being deprecated to some extent in
* favour of newer composite PERF_MEM_{LVLNUM_,REMOTE_,SNOOPX_} fields.
- * Supporting this namespace inorder to not break defined ABIs.
+ * We support this namespace in order to not break defined ABIs.
*
- * memory hierarchy (memory level, hit or miss)
+ * Memory hierarchy (memory level, hit or miss)
*/
-#define PERF_MEM_LVL_NA 0x01 /* not available */
-#define PERF_MEM_LVL_HIT 0x02 /* hit level */
-#define PERF_MEM_LVL_MISS 0x04 /* miss level */
-#define PERF_MEM_LVL_L1 0x08 /* L1 */
-#define PERF_MEM_LVL_LFB 0x10 /* Line Fill Buffer */
-#define PERF_MEM_LVL_L2 0x20 /* L2 */
-#define PERF_MEM_LVL_L3 0x40 /* L3 */
-#define PERF_MEM_LVL_LOC_RAM 0x80 /* Local DRAM */
-#define PERF_MEM_LVL_REM_RAM1 0x100 /* Remote DRAM (1 hop) */
-#define PERF_MEM_LVL_REM_RAM2 0x200 /* Remote DRAM (2 hops) */
-#define PERF_MEM_LVL_REM_CCE1 0x400 /* Remote Cache (1 hop) */
-#define PERF_MEM_LVL_REM_CCE2 0x800 /* Remote Cache (2 hops) */
-#define PERF_MEM_LVL_IO 0x1000 /* I/O memory */
-#define PERF_MEM_LVL_UNC 0x2000 /* Uncached memory */
-#define PERF_MEM_LVL_SHIFT 5
-
-#define PERF_MEM_REMOTE_REMOTE 0x01 /* Remote */
-#define PERF_MEM_REMOTE_SHIFT 37
-
-#define PERF_MEM_LVLNUM_L1 0x01 /* L1 */
-#define PERF_MEM_LVLNUM_L2 0x02 /* L2 */
-#define PERF_MEM_LVLNUM_L3 0x03 /* L3 */
-#define PERF_MEM_LVLNUM_L4 0x04 /* L4 */
-#define PERF_MEM_LVLNUM_L2_MHB 0x05 /* L2 Miss Handling Buffer */
-#define PERF_MEM_LVLNUM_MSC 0x06 /* Memory-side Cache */
-/* 0x7 available */
-#define PERF_MEM_LVLNUM_UNC 0x08 /* Uncached */
-#define PERF_MEM_LVLNUM_CXL 0x09 /* CXL */
-#define PERF_MEM_LVLNUM_IO 0x0a /* I/O */
-#define PERF_MEM_LVLNUM_ANY_CACHE 0x0b /* Any cache */
-#define PERF_MEM_LVLNUM_LFB 0x0c /* LFB / L1 Miss Handling Buffer */
-#define PERF_MEM_LVLNUM_RAM 0x0d /* RAM */
-#define PERF_MEM_LVLNUM_PMEM 0x0e /* PMEM */
-#define PERF_MEM_LVLNUM_NA 0x0f /* N/A */
-
-#define PERF_MEM_LVLNUM_SHIFT 33
-
-/* snoop mode */
-#define PERF_MEM_SNOOP_NA 0x01 /* not available */
-#define PERF_MEM_SNOOP_NONE 0x02 /* no snoop */
-#define PERF_MEM_SNOOP_HIT 0x04 /* snoop hit */
-#define PERF_MEM_SNOOP_MISS 0x08 /* snoop miss */
-#define PERF_MEM_SNOOP_HITM 0x10 /* snoop hit modified */
-#define PERF_MEM_SNOOP_SHIFT 19
-
-#define PERF_MEM_SNOOPX_FWD 0x01 /* forward */
-#define PERF_MEM_SNOOPX_PEER 0x02 /* xfer from peer */
-#define PERF_MEM_SNOOPX_SHIFT 38
-
-/* locked instruction */
-#define PERF_MEM_LOCK_NA 0x01 /* not available */
-#define PERF_MEM_LOCK_LOCKED 0x02 /* locked transaction */
-#define PERF_MEM_LOCK_SHIFT 24
+#define PERF_MEM_LVL_NA 0x0001 /* Not available */
+#define PERF_MEM_LVL_HIT 0x0002 /* Hit level */
+#define PERF_MEM_LVL_MISS 0x0004 /* Miss level */
+#define PERF_MEM_LVL_L1 0x0008 /* L1 */
+#define PERF_MEM_LVL_LFB 0x0010 /* Line Fill Buffer */
+#define PERF_MEM_LVL_L2 0x0020 /* L2 */
+#define PERF_MEM_LVL_L3 0x0040 /* L3 */
+#define PERF_MEM_LVL_LOC_RAM 0x0080 /* Local DRAM */
+#define PERF_MEM_LVL_REM_RAM1 0x0100 /* Remote DRAM (1 hop) */
+#define PERF_MEM_LVL_REM_RAM2 0x0200 /* Remote DRAM (2 hops) */
+#define PERF_MEM_LVL_REM_CCE1 0x0400 /* Remote Cache (1 hop) */
+#define PERF_MEM_LVL_REM_CCE2 0x0800 /* Remote Cache (2 hops) */
+#define PERF_MEM_LVL_IO 0x1000 /* I/O memory */
+#define PERF_MEM_LVL_UNC 0x2000 /* Uncached memory */
+#define PERF_MEM_LVL_SHIFT 5
+
+#define PERF_MEM_REMOTE_REMOTE 0x0001 /* Remote */
+#define PERF_MEM_REMOTE_SHIFT 37
+
+#define PERF_MEM_LVLNUM_L1 0x0001 /* L1 */
+#define PERF_MEM_LVLNUM_L2 0x0002 /* L2 */
+#define PERF_MEM_LVLNUM_L3 0x0003 /* L3 */
+#define PERF_MEM_LVLNUM_L4 0x0004 /* L4 */
+#define PERF_MEM_LVLNUM_L2_MHB 0x0005 /* L2 Miss Handling Buffer */
+#define PERF_MEM_LVLNUM_MSC 0x0006 /* Memory-side Cache */
+/* 0x0007 available */
+#define PERF_MEM_LVLNUM_UNC 0x0008 /* Uncached */
+#define PERF_MEM_LVLNUM_CXL 0x0009 /* CXL */
+#define PERF_MEM_LVLNUM_IO 0x000a /* I/O */
+#define PERF_MEM_LVLNUM_ANY_CACHE 0x000b /* Any cache */
+#define PERF_MEM_LVLNUM_LFB 0x000c /* LFB / L1 Miss Handling Buffer */
+#define PERF_MEM_LVLNUM_RAM 0x000d /* RAM */
+#define PERF_MEM_LVLNUM_PMEM 0x000e /* PMEM */
+#define PERF_MEM_LVLNUM_NA 0x000f /* N/A */
+
+#define PERF_MEM_LVLNUM_SHIFT 33
+
+/* Snoop mode */
+#define PERF_MEM_SNOOP_NA 0x0001 /* Not available */
+#define PERF_MEM_SNOOP_NONE 0x0002 /* No snoop */
+#define PERF_MEM_SNOOP_HIT 0x0004 /* Snoop hit */
+#define PERF_MEM_SNOOP_MISS 0x0008 /* Snoop miss */
+#define PERF_MEM_SNOOP_HITM 0x0010 /* Snoop hit modified */
+#define PERF_MEM_SNOOP_SHIFT 19
+
+#define PERF_MEM_SNOOPX_FWD 0x0001 /* Forward */
+#define PERF_MEM_SNOOPX_PEER 0x0002 /* Transfer from peer */
+#define PERF_MEM_SNOOPX_SHIFT 38
+
+/* Locked instruction */
+#define PERF_MEM_LOCK_NA 0x0001 /* Not available */
+#define PERF_MEM_LOCK_LOCKED 0x0002 /* Locked transaction */
+#define PERF_MEM_LOCK_SHIFT 24
/* TLB access */
-#define PERF_MEM_TLB_NA 0x01 /* not available */
-#define PERF_MEM_TLB_HIT 0x02 /* hit level */
-#define PERF_MEM_TLB_MISS 0x04 /* miss level */
-#define PERF_MEM_TLB_L1 0x08 /* L1 */
-#define PERF_MEM_TLB_L2 0x10 /* L2 */
-#define PERF_MEM_TLB_WK 0x20 /* Hardware Walker*/
-#define PERF_MEM_TLB_OS 0x40 /* OS fault handler */
-#define PERF_MEM_TLB_SHIFT 26
+#define PERF_MEM_TLB_NA 0x0001 /* Not available */
+#define PERF_MEM_TLB_HIT 0x0002 /* Hit level */
+#define PERF_MEM_TLB_MISS 0x0004 /* Miss level */
+#define PERF_MEM_TLB_L1 0x0008 /* L1 */
+#define PERF_MEM_TLB_L2 0x0010 /* L2 */
+#define PERF_MEM_TLB_WK			0x0020 /* Hardware Walker */
+#define PERF_MEM_TLB_OS 0x0040 /* OS fault handler */
+#define PERF_MEM_TLB_SHIFT 26
/* Access blocked */
-#define PERF_MEM_BLK_NA 0x01 /* not available */
-#define PERF_MEM_BLK_DATA 0x02 /* data could not be forwarded */
-#define PERF_MEM_BLK_ADDR 0x04 /* address conflict */
-#define PERF_MEM_BLK_SHIFT 40
-
-/* hop level */
-#define PERF_MEM_HOPS_0 0x01 /* remote core, same node */
-#define PERF_MEM_HOPS_1 0x02 /* remote node, same socket */
-#define PERF_MEM_HOPS_2 0x03 /* remote socket, same board */
-#define PERF_MEM_HOPS_3 0x04 /* remote board */
+#define PERF_MEM_BLK_NA 0x0001 /* Not available */
+#define PERF_MEM_BLK_DATA 0x0002 /* Data could not be forwarded */
+#define PERF_MEM_BLK_ADDR 0x0004 /* Address conflict */
+#define PERF_MEM_BLK_SHIFT 40
+
+/* Hop level */
+#define PERF_MEM_HOPS_0 0x0001 /* Remote core, same node */
+#define PERF_MEM_HOPS_1 0x0002 /* Remote node, same socket */
+#define PERF_MEM_HOPS_2 0x0003 /* Remote socket, same board */
+#define PERF_MEM_HOPS_3 0x0004 /* Remote board */
/* 5-7 available */
-#define PERF_MEM_HOPS_SHIFT 43
+#define PERF_MEM_HOPS_SHIFT 43
#define PERF_MEM_S(a, s) \
(((__u64)PERF_MEM_##a##_##s) << PERF_MEM_##a##_SHIFT)
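For example, composing a data-source value for a load that hit the L1 with no snoop, using only macros defined above (a sketch, not taken from kernel code):

/* "load, L1 hit, no snoop" encoded into a perf_mem_data_src value */
union perf_mem_data_src src = {
	.val = PERF_MEM_S(OP, LOAD)  |
	       PERF_MEM_S(LVL, HIT)  |
	       PERF_MEM_S(LVL, L1)   |
	       PERF_MEM_S(SNOOP, NONE),
};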
/*
- * single taken branch record layout:
+ * Layout of single taken branch records:
*
* from: source instruction (may not always be a branch insn)
* to: branch target
@@ -1438,37 +1449,37 @@ union perf_mem_data_src {
struct perf_branch_entry {
__u64 from;
__u64 to;
- __u64 mispred:1, /* target mispredicted */
- predicted:1,/* target predicted */
- in_tx:1, /* in transaction */
- abort:1, /* transaction abort */
- cycles:16, /* cycle count to last branch */
- type:4, /* branch type */
- spec:2, /* branch speculation info */
- new_type:4, /* additional branch type */
- priv:3, /* privilege level */
- reserved:31;
+ __u64 mispred : 1, /* target mispredicted */
+ predicted : 1, /* target predicted */
+ in_tx : 1, /* in transaction */
+ abort : 1, /* transaction abort */
+ cycles : 16, /* cycle count to last branch */
+ type : 4, /* branch type */
+ spec : 2, /* branch speculation info */
+ new_type : 4, /* additional branch type */
+ priv : 3, /* privilege level */
+ reserved : 31;
};
/* Size of used info bits in struct perf_branch_entry */
#define PERF_BRANCH_ENTRY_INFO_BITS_MAX 33
union perf_sample_weight {
- __u64 full;
+ __u64 full;
#if defined(__LITTLE_ENDIAN_BITFIELD)
struct {
- __u32 var1_dw;
- __u16 var2_w;
- __u16 var3_w;
+ __u32 var1_dw;
+ __u16 var2_w;
+ __u16 var3_w;
};
#elif defined(__BIG_ENDIAN_BITFIELD)
struct {
- __u16 var3_w;
- __u16 var2_w;
- __u32 var1_dw;
+ __u16 var3_w;
+ __u16 var2_w;
+ __u32 var1_dw;
};
#else
-#error "Unknown endianness"
+# error "Unknown endianness"
#endif
};
diff --git a/include/uapi/linux/pidfd.h b/include/uapi/linux/pidfd.h
index 2970ef44655a..c27a4e238e4b 100644
--- a/include/uapi/linux/pidfd.h
+++ b/include/uapi/linux/pidfd.h
@@ -12,7 +12,7 @@
#define PIDFD_THREAD O_EXCL
#ifdef __KERNEL__
#include <linux/sched.h>
-#define PIDFD_CLONE CLONE_PIDFD
+#define PIDFD_STALE CLONE_PIDFD
#endif
/* Flags for pidfd_send_signal(). */
@@ -25,10 +25,24 @@
#define PIDFD_INFO_CREDS (1UL << 1) /* Always returned, even if not requested */
#define PIDFD_INFO_CGROUPID (1UL << 2) /* Always returned if available, even if not requested */
#define PIDFD_INFO_EXIT (1UL << 3) /* Only returned if requested. */
+#define PIDFD_INFO_COREDUMP (1UL << 4) /* Only returned if requested. */
#define PIDFD_INFO_SIZE_VER0 64 /* sizeof first published struct */
/*
+ * Values for @coredump_mask in pidfd_info.
+ * Only valid if PIDFD_INFO_COREDUMP is set in @mask.
+ *
+ * Note, the @PIDFD_COREDUMP_ROOT flag indicates that the generated
+ * coredump should be treated as sensitive and access should only be
+ * granted to privileged users.
+ */
+#define PIDFD_COREDUMPED (1U << 0) /* Did crash and... */
+#define PIDFD_COREDUMP_SKIP	(1U << 1) /* coredump generation was skipped. */
+#define PIDFD_COREDUMP_USER (1U << 2) /* coredump was done as the user. */
+#define PIDFD_COREDUMP_ROOT (1U << 3) /* coredump was done as root. */
+
+/*
* The concept of process and threads in userland and the kernel is a confusing
* one - within the kernel every thread is a 'task' with its own individual PID,
* however from userland's point of view threads are grouped by a single PID,
@@ -92,6 +106,8 @@ struct pidfd_info {
__u32 fsuid;
__u32 fsgid;
__s32 exit_code;
+ __u32 coredump_mask;
+ __u32 __spare1;
};
#define PIDFS_IOCTL_MAGIC 0xFF
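A hedged user-space sketch of consuming the new flag; it assumes the struct's @mask request/reply field and the PIDFD_GET_INFO ioctl defined elsewhere in this header:

#include <sys/ioctl.h>
#include <linux/pidfd.h>

/* Returns 1 if the task behind 'pidfd' coredumped, 0 if not (or if the
 * kernel does not report coredump info), -1 on ioctl failure. */
static int pidfd_did_coredump(int pidfd)
{
	struct pidfd_info info = {
		.mask = PIDFD_INFO_COREDUMP,	/* only returned if requested */
	};

	if (ioctl(pidfd, PIDFD_GET_INFO, &info) < 0)
		return -1;
	if (!(info.mask & PIDFD_INFO_COREDUMP))
		return 0;
	return !!(info.coredump_mask & PIDFD_COREDUMPED);
}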
diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h
index 15c18ef4eb11..43dec6eed559 100644
--- a/include/uapi/linux/prctl.h
+++ b/include/uapi/linux/prctl.h
@@ -364,4 +364,11 @@ struct prctl_mm_map {
# define PR_TIMER_CREATE_RESTORE_IDS_ON 1
# define PR_TIMER_CREATE_RESTORE_IDS_GET 2
+/* FUTEX hash management */
+#define PR_FUTEX_HASH 78
+# define PR_FUTEX_HASH_SET_SLOTS 1
+# define FH_FLAG_IMMUTABLE (1ULL << 0)
+# define PR_FUTEX_HASH_GET_SLOTS 2
+# define PR_FUTEX_HASH_GET_IMMUTABLE 3
+
#endif /* _LINUX_PRCTL_H */
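A sketch of driving the new prctl from user space; the argument order (slot count in arg3, flags in arg4) is our reading of the sub-commands above and should be verified against prctl(2):

#include <sys/prctl.h>

/* Resize this process's private futex hash to 16 slots and make the
 * choice immutable, then read back the slot count chosen by the kernel. */
static long futex_hash_setup(void)
{
	if (prctl(PR_FUTEX_HASH, PR_FUTEX_HASH_SET_SLOTS, 16,
		  FH_FLAG_IMMUTABLE, 0) < 0)
		return -1;

	return prctl(PR_FUTEX_HASH, PR_FUTEX_HASH_GET_SLOTS, 0, 0, 0);
}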
diff --git a/include/uapi/linux/stat.h b/include/uapi/linux/stat.h
index f78ee3670dd5..1686861aae20 100644
--- a/include/uapi/linux/stat.h
+++ b/include/uapi/linux/stat.h
@@ -182,8 +182,12 @@ struct statx {
/* File offset alignment for direct I/O reads */
__u32 stx_dio_read_offset_align;
- /* 0xb8 */
- __u64 __spare3[9]; /* Spare space for future expansion */
+ /* Optimised max atomic write unit in bytes */
+ __u32 stx_atomic_write_unit_max_opt;
+ __u32 __spare2[1];
+
+ /* 0xc0 */
+ __u64 __spare3[8]; /* Spare space for future expansion */
/* 0x100 */
};
diff --git a/include/uapi/linux/taskstats.h b/include/uapi/linux/taskstats.h
index 95762232e018..5929030d4e8b 100644
--- a/include/uapi/linux/taskstats.h
+++ b/include/uapi/linux/taskstats.h
@@ -34,7 +34,7 @@
*/
-#define TASKSTATS_VERSION 15
+#define TASKSTATS_VERSION 16
#define TS_COMM_LEN 32 /* should be >= TASK_COMM_LEN
* in linux/sched.h */
@@ -72,8 +72,6 @@ struct taskstats {
*/
__u64 cpu_count __attribute__((aligned(8)));
__u64 cpu_delay_total;
- __u64 cpu_delay_max;
- __u64 cpu_delay_min;
/* Following four fields atomically updated using task->delays->lock */
@@ -82,14 +80,10 @@ struct taskstats {
*/
__u64 blkio_count;
__u64 blkio_delay_total;
- __u64 blkio_delay_max;
- __u64 blkio_delay_min;
/* Delay waiting for page fault I/O (swap in only) */
__u64 swapin_count;
__u64 swapin_delay_total;
- __u64 swapin_delay_max;
- __u64 swapin_delay_min;
/* cpu "wall-clock" running time
* On some architectures, value will adjust for cpu time stolen
@@ -172,14 +166,11 @@ struct taskstats {
/* Delay waiting for memory reclaim */
__u64 freepages_count;
__u64 freepages_delay_total;
- __u64 freepages_delay_max;
- __u64 freepages_delay_min;
+
/* Delay waiting for thrashing page */
__u64 thrashing_count;
__u64 thrashing_delay_total;
- __u64 thrashing_delay_max;
- __u64 thrashing_delay_min;
/* v10: 64-bit btime to avoid overflow */
__u64 ac_btime64; /* 64-bit begin time */
@@ -187,8 +178,6 @@ struct taskstats {
/* v11: Delay waiting for memory compact */
__u64 compact_count;
__u64 compact_delay_total;
- __u64 compact_delay_max;
- __u64 compact_delay_min;
/* v12 begin */
__u32 ac_tgid; /* thread group ID */
@@ -210,15 +199,37 @@ struct taskstats {
/* v13: Delay waiting for write-protect copy */
__u64 wpcopy_count;
__u64 wpcopy_delay_total;
- __u64 wpcopy_delay_max;
- __u64 wpcopy_delay_min;
/* v14: Delay waiting for IRQ/SOFTIRQ */
__u64 irq_count;
__u64 irq_delay_total;
- __u64 irq_delay_max;
- __u64 irq_delay_min;
- /* v15: add Delay max */
+
+ /* v15: add Delay max and Delay min */
+
+	/* v16: move Delay max and Delay min to the end of struct taskstats */
+ __u64 cpu_delay_max;
+ __u64 cpu_delay_min;
+
+ __u64 blkio_delay_max;
+ __u64 blkio_delay_min;
+
+ __u64 swapin_delay_max;
+ __u64 swapin_delay_min;
+
+ __u64 freepages_delay_max;
+ __u64 freepages_delay_min;
+
+ __u64 thrashing_delay_max;
+ __u64 thrashing_delay_min;
+
+ __u64 compact_delay_max;
+ __u64 compact_delay_min;
+
+ __u64 wpcopy_delay_max;
+ __u64 wpcopy_delay_min;
+
+ __u64 irq_delay_max;
+ __u64 irq_delay_min;
};
diff --git a/include/uapi/linux/ublk_cmd.h b/include/uapi/linux/ublk_cmd.h
index 583b86681c93..56c7e3fc666f 100644
--- a/include/uapi/linux/ublk_cmd.h
+++ b/include/uapi/linux/ublk_cmd.h
@@ -51,6 +51,10 @@
_IOR('u', 0x13, struct ublksrv_ctrl_cmd)
#define UBLK_U_CMD_DEL_DEV_ASYNC \
_IOR('u', 0x14, struct ublksrv_ctrl_cmd)
+#define UBLK_U_CMD_UPDATE_SIZE \
+ _IOWR('u', 0x15, struct ublksrv_ctrl_cmd)
+#define UBLK_U_CMD_QUIESCE_DEV \
+ _IOWR('u', 0x16, struct ublksrv_ctrl_cmd)
/*
* 64bits are enough now, and it should be easy to extend in case of
@@ -211,6 +215,63 @@
*/
#define UBLK_F_USER_RECOVERY_FAIL_IO (1ULL << 9)
+/*
+ * Resizing a block device is possible with UBLK_U_CMD_UPDATE_SIZE.
+ * The new size is passed in cmd->data[0] and is in units of sectors.
+ */
+#define UBLK_F_UPDATE_SIZE (1ULL << 10)
+
+/*
+ * The request buffer is registered automatically to uring_cmd's io_uring
+ * context before this io command is delivered to the ublk server, and it
+ * is unregistered automatically when the io command is completed.
+ *
+ * For using this feature:
+ *
+ * - the ublk server has to create a sparse buffer table on the same
+ *   `io_ring_ctx` used for issuing `UBLK_IO_FETCH_REQ` and
+ *   `UBLK_IO_COMMIT_AND_FETCH_REQ`. If the uring_cmd isn't issued on the
+ *   same `io_ring_ctx`, it is the ublk server's responsibility to
+ *   unregister the buffer manually by issuing `IO_UNREGISTER_IO_BUF`,
+ *   otherwise this ublk request won't complete.
+ *
+ * - the ublk server passes the auto buf register data via uring_cmd's
+ *   sqe->addr; `struct ublk_auto_buf_reg` is populated from sqe->addr,
+ *   see the definition of ublk_sqe_addr_to_auto_buf_reg()
+ *
+ * - pass the buffer index via `ublk_auto_buf_reg.index`
+ *
+ * - all reserved fields in `ublk_auto_buf_reg` need to be zeroed
+ *
+ * - pass flags via `ublk_auto_buf_reg.flags` if needed
+ *
+ * This avoids the extra cost of two uring_cmds and also simplifies the
+ * backend implementation, since the dependency on IO_REGISTER_IO_BUF and
+ * IO_UNREGISTER_IO_BUF is no longer necessary.
+ *
+ * If wrong data or flags are provided, both IO_FETCH_REQ and
+ * IO_COMMIT_AND_FETCH_REQ fail; for the latter, the ublk IO request
+ * won't be completed until a new IO_COMMIT_AND_FETCH_REQ command is
+ * issued successfully.
+ */
+#define UBLK_F_AUTO_BUF_REG (1ULL << 11)
+
+/*
+ * The control command `UBLK_U_CMD_QUIESCE_DEV` is added for quiescing the
+ * device, whose state can finally be transitioned to `UBLK_S_DEV_QUIESCED`
+ * or `UBLK_S_DEV_FAIL_IO`, and it needs ublk server cooperation for
+ * handling `UBLK_IO_RES_ABORT` correctly.
+ *
+ * The typical use case is upgrading the ublk server application while
+ * keeping the ublk block device persistent during the period.
+ *
+ * This feature is only available when UBLK_F_USER_RECOVERY is enabled.
+ *
+ * Note, this command returns -EBUSY in case IO commands being handled by
+ * the ublk server are not completed within the time period passed in the
+ * control command parameter.
+ */
+#define UBLK_F_QUIESCE (1ULL << 12)
+
/* device state */
#define UBLK_S_DEV_DEAD 0
#define UBLK_S_DEV_LIVE 1
@@ -297,6 +358,17 @@ struct ublksrv_ctrl_dev_info {
#define UBLK_IO_F_FUA (1U << 13)
#define UBLK_IO_F_NOUNMAP (1U << 15)
#define UBLK_IO_F_SWAP (1U << 16)
+/*
+ * For UBLK_F_AUTO_BUF_REG & UBLK_AUTO_BUF_REG_FALLBACK only.
+ *
+ * This flag is set if auto buffer registration fails and the ublk server
+ * passed UBLK_AUTO_BUF_REG_FALLBACK; if this flag is observed, the ublk
+ * server needs to register the io buffer manually in order to handle the
+ * delivered IO command.
+ *
+ * The ublk server has to check this flag if UBLK_AUTO_BUF_REG_FALLBACK is
+ * passed in.
+ */
+#define UBLK_IO_F_NEED_REG_BUF (1U << 17)
/*
* io cmd is described by this structure, and stored in share memory, indexed
@@ -331,6 +403,62 @@ static inline __u32 ublksrv_get_flags(const struct ublksrv_io_desc *iod)
return iod->op_flags >> 8;
}
+/*
+ * If this flag is set, fall back by completing the uring_cmd and setting
+ * `UBLK_IO_F_NEED_REG_BUF` in case of auto-buf-register failure;
+ * otherwise the client ublk request fails silently.
+ *
+ * If the ublk server passes this flag, it has to check whether
+ * UBLK_IO_F_NEED_REG_BUF is set in `ublksrv_io_desc.op_flags`; if it is,
+ * the ublk server needs to register the io buffer manually for handling
+ * the IO command.
+ */
+#define UBLK_AUTO_BUF_REG_FALLBACK (1 << 0)
+#define UBLK_AUTO_BUF_REG_F_MASK UBLK_AUTO_BUF_REG_FALLBACK
+
+struct ublk_auto_buf_reg {
+ /* index for registering the delivered request buffer */
+ __u16 index;
+ __u8 flags;
+ __u8 reserved0;
+
+	/*
+	 * The io_ring FD can be passed via the reserved field in the future,
+	 * to support registering the io buffer to an external io_uring.
+	 */
+ __u32 reserved1;
+};
+
+/*
+ * For UBLK_F_AUTO_BUF_REG, auto buffer register data is carried via
+ * uring_cmd's sqe->addr:
+ *
+ * - bit0 ~ bit15: buffer index
+ * - bit16 ~ bit23: flags
+ * - bit24 ~ bit31: reserved0
+ * - bit32 ~ bit63: reserved1
+ */
+static inline struct ublk_auto_buf_reg ublk_sqe_addr_to_auto_buf_reg(
+ __u64 sqe_addr)
+{
+ struct ublk_auto_buf_reg reg = {
+ .index = sqe_addr & 0xffff,
+ .flags = (sqe_addr >> 16) & 0xff,
+ .reserved0 = (sqe_addr >> 24) & 0xff,
+ .reserved1 = sqe_addr >> 32,
+ };
+
+ return reg;
+}
+
+static inline __u64
+ublk_auto_buf_reg_to_sqe_addr(const struct ublk_auto_buf_reg *buf)
+{
+ __u64 addr = buf->index | (__u64)buf->flags << 16 | (__u64)buf->reserved0 << 24 |
+ (__u64)buf->reserved1 << 32;
+
+ return addr;
+}
+
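The two helpers above are inverses of each other; a short usage sketch:

/* Encode an auto buffer registration request into sqe->addr, then
 * decode it back; purely illustrative. */
struct ublk_auto_buf_reg reg = {
	.index = 7,
	.flags = UBLK_AUTO_BUF_REG_FALLBACK,
};
__u64 sqe_addr = ublk_auto_buf_reg_to_sqe_addr(&reg);
struct ublk_auto_buf_reg decoded = ublk_sqe_addr_to_auto_buf_reg(sqe_addr);
/* decoded.index == 7 && decoded.flags == UBLK_AUTO_BUF_REG_FALLBACK */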
/* issued to ublk driver via /dev/ublkcN */
struct ublksrv_io_cmd {
__u16 q_id;
diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
index c8cb2796130f..9e3b366d5fc7 100644
--- a/include/uapi/linux/videodev2.h
+++ b/include/uapi/linux/videodev2.h
@@ -153,10 +153,18 @@ enum v4l2_buf_type {
V4L2_BUF_TYPE_SDR_OUTPUT = 12,
V4L2_BUF_TYPE_META_CAPTURE = 13,
V4L2_BUF_TYPE_META_OUTPUT = 14,
+ /*
+ * Note: V4L2_TYPE_IS_VALID and V4L2_TYPE_IS_OUTPUT must
+ * be updated if a new type is added.
+ */
/* Deprecated, do not use */
V4L2_BUF_TYPE_PRIVATE = 0x80,
};
+#define V4L2_TYPE_IS_VALID(type) \
+ ((type) >= V4L2_BUF_TYPE_VIDEO_CAPTURE &&\
+ (type) <= V4L2_BUF_TYPE_META_OUTPUT)
+
#define V4L2_TYPE_IS_MULTIPLANAR(type) \
((type) == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE \
|| (type) == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
@@ -164,14 +172,14 @@ enum v4l2_buf_type {
#define V4L2_TYPE_IS_OUTPUT(type) \
((type) == V4L2_BUF_TYPE_VIDEO_OUTPUT \
|| (type) == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE \
- || (type) == V4L2_BUF_TYPE_VIDEO_OVERLAY \
|| (type) == V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY \
|| (type) == V4L2_BUF_TYPE_VBI_OUTPUT \
|| (type) == V4L2_BUF_TYPE_SLICED_VBI_OUTPUT \
|| (type) == V4L2_BUF_TYPE_SDR_OUTPUT \
|| (type) == V4L2_BUF_TYPE_META_OUTPUT)
-#define V4L2_TYPE_IS_CAPTURE(type) (!V4L2_TYPE_IS_OUTPUT(type))
+#define V4L2_TYPE_IS_CAPTURE(type) \
+ (V4L2_TYPE_IS_VALID(type) && !V4L2_TYPE_IS_OUTPUT(type))
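A small sketch of the behavioural change: deprecated or out-of-range types are now classified as neither capture nor output, whereas the old V4L2_TYPE_IS_CAPTURE returned true for any non-output type.

#include <assert.h>
#include <linux/videodev2.h>

int main(void)
{
	/* V4L2_BUF_TYPE_PRIVATE (0x80) is outside the valid range... */
	assert(!V4L2_TYPE_IS_VALID(V4L2_BUF_TYPE_PRIVATE));
	/* ...so it is neither output nor, with the new definition, capture. */
	assert(!V4L2_TYPE_IS_OUTPUT(V4L2_BUF_TYPE_PRIVATE));
	assert(!V4L2_TYPE_IS_CAPTURE(V4L2_BUF_TYPE_PRIVATE));
	assert(V4L2_TYPE_IS_CAPTURE(V4L2_BUF_TYPE_VIDEO_CAPTURE));
	return 0;
}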
enum v4l2_tuner_type {
V4L2_TUNER_RADIO = 1,
@@ -643,8 +651,10 @@ struct v4l2_pix_format {
/* two planes -- one Y, one Cr + Cb interleaved */
#define V4L2_PIX_FMT_NV12 v4l2_fourcc('N', 'V', '1', '2') /* 12 Y/CbCr 4:2:0 */
#define V4L2_PIX_FMT_NV21 v4l2_fourcc('N', 'V', '2', '1') /* 12 Y/CrCb 4:2:0 */
+#define V4L2_PIX_FMT_NV15 v4l2_fourcc('N', 'V', '1', '5') /* 15 Y/CbCr 4:2:0 10-bit packed */
#define V4L2_PIX_FMT_NV16 v4l2_fourcc('N', 'V', '1', '6') /* 16 Y/CbCr 4:2:2 */
#define V4L2_PIX_FMT_NV61 v4l2_fourcc('N', 'V', '6', '1') /* 16 Y/CrCb 4:2:2 */
+#define V4L2_PIX_FMT_NV20 v4l2_fourcc('N', 'V', '2', '0') /* 20 Y/CbCr 4:2:2 10-bit packed */
#define V4L2_PIX_FMT_NV24 v4l2_fourcc('N', 'V', '2', '4') /* 24 Y/CbCr 4:4:4 */
#define V4L2_PIX_FMT_NV42 v4l2_fourcc('N', 'V', '4', '2') /* 24 Y/CrCb 4:4:4 */
#define V4L2_PIX_FMT_P010 v4l2_fourcc('P', '0', '1', '0') /* 24 Y/CbCr 4:2:0 10-bit per component */
@@ -858,6 +868,10 @@ struct v4l2_pix_format {
#define V4L2_META_FMT_RK_ISP1_STAT_3A v4l2_fourcc('R', 'K', '1', 'S') /* Rockchip ISP1 3A Statistics */
#define V4L2_META_FMT_RK_ISP1_EXT_PARAMS v4l2_fourcc('R', 'K', '1', 'E') /* Rockchip ISP1 3a Extensible Parameters */
+/* Vendor specific - used for C3_ISP */
+#define V4L2_META_FMT_C3ISP_PARAMS v4l2_fourcc('C', '3', 'P', 'M') /* Amlogic C3 ISP Parameters */
+#define V4L2_META_FMT_C3ISP_STATS v4l2_fourcc('C', '3', 'S', 'T') /* Amlogic C3 ISP Statistics */
+
/* Vendor specific - used for RaspberryPi PiSP */
#define V4L2_META_FMT_RPI_BE_CFG v4l2_fourcc('R', 'P', 'B', 'C') /* PiSP BE configuration */
#define V4L2_META_FMT_RPI_FE_CFG v4l2_fourcc('R', 'P', 'F', 'C') /* PiSP FE configuration */
diff --git a/include/uapi/linux/virtio_gpu.h b/include/uapi/linux/virtio_gpu.h
index bf2c9cabd207..be109777d10d 100644
--- a/include/uapi/linux/virtio_gpu.h
+++ b/include/uapi/linux/virtio_gpu.h
@@ -309,8 +309,9 @@ struct virtio_gpu_cmd_submit {
#define VIRTIO_GPU_CAPSET_VIRGL 1
#define VIRTIO_GPU_CAPSET_VIRGL2 2
-/* 3 is reserved for gfxstream */
+#define VIRTIO_GPU_CAPSET_GFXSTREAM_VULKAN 3
#define VIRTIO_GPU_CAPSET_VENUS 4
+#define VIRTIO_GPU_CAPSET_CROSS_DOMAIN 5
#define VIRTIO_GPU_CAPSET_DRM 6
/* VIRTIO_GPU_CMD_GET_CAPSET_INFO */
diff --git a/include/video/pixel_format.h b/include/video/pixel_format.h
new file mode 100644
index 000000000000..b5104b2a3a13
--- /dev/null
+++ b/include/video/pixel_format.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef VIDEO_PIXEL_FORMAT_H
+#define VIDEO_PIXEL_FORMAT_H
+
+struct pixel_format {
+ unsigned char bits_per_pixel;
+ bool indexed;
+ union {
+ struct {
+ struct {
+ unsigned char offset;
+ unsigned char length;
+ } alpha, red, green, blue;
+ };
+ struct {
+ unsigned char offset;
+ unsigned char length;
+ } index;
+ };
+};
+
+#define PIXEL_FORMAT_XRGB1555 \
+ { 16, false, { .alpha = {0, 0}, .red = {10, 5}, .green = {5, 5}, .blue = {0, 5} } }
+
+#define PIXEL_FORMAT_RGB565 \
+ { 16, false, { .alpha = {0, 0}, .red = {11, 5}, .green = {5, 6}, .blue = {0, 5} } }
+
+#define PIXEL_FORMAT_RGB888 \
+ { 24, false, { .alpha = {0, 0}, .red = {16, 8}, .green = {8, 8}, .blue = {0, 8} } }
+
+#define PIXEL_FORMAT_XRGB8888 \
+ { 32, false, { .alpha = {0, 0}, .red = {16, 8}, .green = {8, 8}, .blue = {0, 8} } }
+
+#define PIXEL_FORMAT_XBGR8888 \
+ { 32, false, { .alpha = {0, 0}, .red = {0, 8}, .green = {8, 8}, .blue = {16, 8} } }
+
+#define PIXEL_FORMAT_XRGB2101010 \
+ { 32, false, { .alpha = {0, 0}, .red = {20, 10}, .green = {10, 10}, .blue = {0, 10} } }
+
+#endif
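A hedged sketch of how a consumer might use these format descriptions; pack_pixel is illustrative (not part of the header) and assumes 8-bit input channels, so the 10-bit XRGB2101010 layout would need widening instead of the right shift:

#include <video/pixel_format.h>

/* Pack an 8-bit-per-channel RGB triple according to 'fmt'. */
static unsigned int pack_pixel(const struct pixel_format *fmt,
			       unsigned int r, unsigned int g, unsigned int b)
{
	return ((r >> (8 - fmt->red.length))   << fmt->red.offset)   |
	       ((g >> (8 - fmt->green.length)) << fmt->green.offset) |
	       ((b >> (8 - fmt->blue.length))  << fmt->blue.offset);
}

/*
 * static const struct pixel_format fmt = PIXEL_FORMAT_RGB565;
 * pack_pixel(&fmt, 0xff, 0x80, 0x00) == 0xfc00
 */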