98 files changed, 819 insertions, 330 deletions
diff --git a/MAINTAINERS b/MAINTAINERS index f6cda468095d..46bd8e033042 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -13425,9 +13425,12 @@ F:	mm/kasan/  F:	scripts/Makefile.kasan  KCONFIG +M:	Nathan Chancellor <nathan@kernel.org> +M:	Nicolas Schier <nsc@kernel.org>  L:	linux-kbuild@vger.kernel.org -S:	Orphan +S:	Odd Fixes  Q:	https://patchwork.kernel.org/project/linux-kbuild/list/ +T:	git git://git.kernel.org/pub/scm/linux/kernel/git/kbuild/linux.git  F:	Documentation/kbuild/kconfig*  F:	scripts/Kconfig.include  F:	scripts/kconfig/ @@ -2,7 +2,7 @@  VERSION = 6  PATCHLEVEL = 18  SUBLEVEL = 0 -EXTRAVERSION = -rc3 +EXTRAVERSION = -rc4  NAME = Baby Opossum Posse  # *DOCUMENTATION* diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c index ab83089c3d8f..0c9a50a1e73e 100644 --- a/arch/arm64/net/bpf_jit_comp.c +++ b/arch/arm64/net/bpf_jit_comp.c @@ -1213,6 +1213,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,  	u8 src = bpf2a64[insn->src_reg];  	const u8 tmp = bpf2a64[TMP_REG_1];  	const u8 tmp2 = bpf2a64[TMP_REG_2]; +	const u8 tmp3 = bpf2a64[TMP_REG_3];  	const u8 fp = bpf2a64[BPF_REG_FP];  	const u8 arena_vm_base = bpf2a64[ARENA_VM_START];  	const u8 priv_sp = bpf2a64[PRIVATE_SP]; @@ -1757,8 +1758,8 @@ emit_cond_jmp:  	case BPF_ST | BPF_PROBE_MEM32 | BPF_W:  	case BPF_ST | BPF_PROBE_MEM32 | BPF_DW:  		if (BPF_MODE(insn->code) == BPF_PROBE_MEM32) { -			emit(A64_ADD(1, tmp2, dst, arena_vm_base), ctx); -			dst = tmp2; +			emit(A64_ADD(1, tmp3, dst, arena_vm_base), ctx); +			dst = tmp3;  		}  		if (dst == fp) {  			dst_adj = ctx->priv_sp_used ? priv_sp : A64_SP; diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index c4145672ca34..df22b10d9141 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -158,7 +158,6 @@ config S390  	select ARCH_WANT_IRQS_OFF_ACTIVATE_MM  	select ARCH_WANT_KERNEL_PMD_MKWRITE  	select ARCH_WANT_LD_ORPHAN_WARN -	select ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP  	select ARCH_WANTS_THP_SWAP  	select BUILDTIME_TABLE_SORT  	select CLONE_BACKWARDS2 diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig index b31c1df90257..8433f769f7e1 100644 --- a/arch/s390/configs/debug_defconfig +++ b/arch/s390/configs/debug_defconfig @@ -101,6 +101,7 @@ CONFIG_SLUB_STATS=y  CONFIG_MEMORY_HOTPLUG=y  CONFIG_MEMORY_HOTREMOVE=y  CONFIG_KSM=y +CONFIG_PERSISTENT_HUGE_ZERO_FOLIO=y  CONFIG_TRANSPARENT_HUGEPAGE=y  CONFIG_CMA_DEBUGFS=y  CONFIG_CMA_SYSFS=y @@ -123,12 +124,12 @@ CONFIG_TLS_DEVICE=y  CONFIG_TLS_TOE=y  CONFIG_XFRM_USER=m  CONFIG_NET_KEY=m -CONFIG_XDP_SOCKETS=y -CONFIG_XDP_SOCKETS_DIAG=m -CONFIG_DIBS=y -CONFIG_DIBS_LO=y  CONFIG_SMC=m  CONFIG_SMC_DIAG=m +CONFIG_DIBS=y +CONFIG_DIBS_LO=y +CONFIG_XDP_SOCKETS=y +CONFIG_XDP_SOCKETS_DIAG=m  CONFIG_INET=y  CONFIG_IP_MULTICAST=y  CONFIG_IP_ADVANCED_ROUTER=y @@ -472,6 +473,7 @@ CONFIG_SCSI_DH_EMC=m  CONFIG_SCSI_DH_ALUA=m  CONFIG_MD=y  CONFIG_BLK_DEV_MD=y +CONFIG_MD_LLBITMAP=y  # CONFIG_MD_BITMAP_FILE is not set  CONFIG_MD_LINEAR=m  CONFIG_MD_CLUSTER=m @@ -654,9 +656,12 @@ CONFIG_JFS_POSIX_ACL=y  CONFIG_JFS_SECURITY=y  CONFIG_JFS_STATISTICS=y  CONFIG_XFS_FS=y +CONFIG_XFS_SUPPORT_V4=y +CONFIG_XFS_SUPPORT_ASCII_CI=y  CONFIG_XFS_QUOTA=y  CONFIG_XFS_POSIX_ACL=y  CONFIG_XFS_RT=y +# CONFIG_XFS_ONLINE_SCRUB is not set  CONFIG_XFS_DEBUG=y  CONFIG_GFS2_FS=m  CONFIG_GFS2_FS_LOCKING_DLM=y @@ -666,7 +671,6 @@ CONFIG_BTRFS_FS_POSIX_ACL=y  CONFIG_BTRFS_DEBUG=y  CONFIG_BTRFS_ASSERT=y  CONFIG_NILFS2_FS=m -CONFIG_FS_DAX=y  CONFIG_EXPORTFS_BLOCK_OPS=y  CONFIG_FS_ENCRYPTION=y  
CONFIG_FS_VERITY=y diff --git a/arch/s390/configs/defconfig b/arch/s390/configs/defconfig index 161dad7ef211..4414dabd04a6 100644 --- a/arch/s390/configs/defconfig +++ b/arch/s390/configs/defconfig @@ -94,6 +94,7 @@ CONFIG_SLAB_BUCKETS=y  CONFIG_MEMORY_HOTPLUG=y  CONFIG_MEMORY_HOTREMOVE=y  CONFIG_KSM=y +CONFIG_PERSISTENT_HUGE_ZERO_FOLIO=y  CONFIG_TRANSPARENT_HUGEPAGE=y  CONFIG_CMA_SYSFS=y  CONFIG_CMA_AREAS=7 @@ -114,12 +115,12 @@ CONFIG_TLS_DEVICE=y  CONFIG_TLS_TOE=y  CONFIG_XFRM_USER=m  CONFIG_NET_KEY=m -CONFIG_XDP_SOCKETS=y -CONFIG_XDP_SOCKETS_DIAG=m -CONFIG_DIBS=y -CONFIG_DIBS_LO=y  CONFIG_SMC=m  CONFIG_SMC_DIAG=m +CONFIG_DIBS=y +CONFIG_DIBS_LO=y +CONFIG_XDP_SOCKETS=y +CONFIG_XDP_SOCKETS_DIAG=m  CONFIG_INET=y  CONFIG_IP_MULTICAST=y  CONFIG_IP_ADVANCED_ROUTER=y @@ -462,6 +463,7 @@ CONFIG_SCSI_DH_EMC=m  CONFIG_SCSI_DH_ALUA=m  CONFIG_MD=y  CONFIG_BLK_DEV_MD=y +CONFIG_MD_LLBITMAP=y  # CONFIG_MD_BITMAP_FILE is not set  CONFIG_MD_LINEAR=m  CONFIG_MD_CLUSTER=m @@ -644,16 +646,18 @@ CONFIG_JFS_POSIX_ACL=y  CONFIG_JFS_SECURITY=y  CONFIG_JFS_STATISTICS=y  CONFIG_XFS_FS=y +CONFIG_XFS_SUPPORT_V4=y +CONFIG_XFS_SUPPORT_ASCII_CI=y  CONFIG_XFS_QUOTA=y  CONFIG_XFS_POSIX_ACL=y  CONFIG_XFS_RT=y +# CONFIG_XFS_ONLINE_SCRUB is not set  CONFIG_GFS2_FS=m  CONFIG_GFS2_FS_LOCKING_DLM=y  CONFIG_OCFS2_FS=m  CONFIG_BTRFS_FS=y  CONFIG_BTRFS_FS_POSIX_ACL=y  CONFIG_NILFS2_FS=m -CONFIG_FS_DAX=y  CONFIG_EXPORTFS_BLOCK_OPS=y  CONFIG_FS_ENCRYPTION=y  CONFIG_FS_VERITY=y diff --git a/arch/s390/configs/zfcpdump_defconfig b/arch/s390/configs/zfcpdump_defconfig index ed0b137353ad..b5478267d6a7 100644 --- a/arch/s390/configs/zfcpdump_defconfig +++ b/arch/s390/configs/zfcpdump_defconfig @@ -33,7 +33,6 @@ CONFIG_NET=y  CONFIG_DEVTMPFS=y  CONFIG_DEVTMPFS_SAFE=y  CONFIG_BLK_DEV_RAM=y -# CONFIG_DCSSBLK is not set  # CONFIG_DASD is not set  CONFIG_ENCLOSURE_SERVICES=y  CONFIG_SCSI=y diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h index 6890925d5587..a32f465ecf73 100644 --- a/arch/s390/include/asm/pci.h +++ b/arch/s390/include/asm/pci.h @@ -145,7 +145,6 @@ struct zpci_dev {  	u8		has_resources	: 1;  	u8		is_physfn	: 1;  	u8		util_str_avail	: 1; -	u8		irqs_registered	: 1;  	u8		tid_avail	: 1;  	u8		rtr_avail	: 1; /* Relaxed translation allowed */  	unsigned int	devfn;		/* DEVFN part of the RID*/ diff --git a/arch/s390/mm/dump_pagetables.c b/arch/s390/mm/dump_pagetables.c index 9af2aae0a515..528d7c70979f 100644 --- a/arch/s390/mm/dump_pagetables.c +++ b/arch/s390/mm/dump_pagetables.c @@ -291,16 +291,14 @@ static int ptdump_cmp(const void *a, const void *b)  static int add_marker(unsigned long start, unsigned long end, const char *name)  { -	size_t oldsize, newsize; - -	oldsize = markers_cnt * sizeof(*markers); -	newsize = oldsize + 2 * sizeof(*markers); -	if (!oldsize) -		markers = kvmalloc(newsize, GFP_KERNEL); -	else -		markers = kvrealloc(markers, newsize, GFP_KERNEL); -	if (!markers) -		goto error; +	struct addr_marker *new; +	size_t newsize; + +	newsize = (markers_cnt + 2) * sizeof(*markers); +	new = kvrealloc(markers, newsize, GFP_KERNEL); +	if (!new) +		return -ENOMEM; +	markers = new;  	markers[markers_cnt].is_start = 1;  	markers[markers_cnt].start_address = start;  	markers[markers_cnt].size = end - start; @@ -312,9 +310,6 @@ static int add_marker(unsigned long start, unsigned long end, const char *name)  	markers[markers_cnt].name = name;  	markers_cnt++;  	return 0; -error: -	markers_cnt = 0; -	return -ENOMEM;  }  static int pt_dump_init(void) diff --git a/arch/s390/pci/pci_event.c 
b/arch/s390/pci/pci_event.c index b95376041501..27db1e72c623 100644 --- a/arch/s390/pci/pci_event.c +++ b/arch/s390/pci/pci_event.c @@ -188,7 +188,7 @@ static pci_ers_result_t zpci_event_attempt_error_recovery(struct pci_dev *pdev)  	 * is unbound or probed and that userspace can't access its  	 * configuration space while we perform recovery.  	 */ -	pci_dev_lock(pdev); +	device_lock(&pdev->dev);  	if (pdev->error_state == pci_channel_io_perm_failure) {  		ers_res = PCI_ERS_RESULT_DISCONNECT;  		goto out_unlock; @@ -257,7 +257,7 @@ static pci_ers_result_t zpci_event_attempt_error_recovery(struct pci_dev *pdev)  		driver->err_handler->resume(pdev);  	pci_uevent_ers(pdev, PCI_ERS_RESULT_RECOVERED);  out_unlock: -	pci_dev_unlock(pdev); +	device_unlock(&pdev->dev);  	zpci_report_status(zdev, "recovery", status_str);  	return ers_res; diff --git a/arch/s390/pci/pci_irq.c b/arch/s390/pci/pci_irq.c index 84482a921332..e73be96ce5fe 100644 --- a/arch/s390/pci/pci_irq.c +++ b/arch/s390/pci/pci_irq.c @@ -107,9 +107,6 @@ static int zpci_set_irq(struct zpci_dev *zdev)  	else  		rc = zpci_set_airq(zdev); -	if (!rc) -		zdev->irqs_registered = 1; -  	return rc;  } @@ -123,9 +120,6 @@ static int zpci_clear_irq(struct zpci_dev *zdev)  	else  		rc = zpci_clear_airq(zdev); -	if (!rc) -		zdev->irqs_registered = 0; -  	return rc;  } @@ -427,8 +421,7 @@ bool arch_restore_msi_irqs(struct pci_dev *pdev)  {  	struct zpci_dev *zdev = to_zpci(pdev); -	if (!zdev->irqs_registered) -		zpci_set_irq(zdev); +	zpci_set_irq(zdev);  	return true;  } diff --git a/arch/x86/Makefile b/arch/x86/Makefile index 4db7e4bf69f5..8fbff3106c56 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile @@ -75,7 +75,7 @@ export BITS  #  #    https://gcc.gnu.org/bugzilla/show_bug.cgi?id=53383  # -KBUILD_CFLAGS += -mno-sse -mno-mmx -mno-sse2 -mno-3dnow -mno-avx +KBUILD_CFLAGS += -mno-sse -mno-mmx -mno-sse2 -mno-3dnow -mno-avx -mno-sse4a  KBUILD_RUSTFLAGS += --target=$(objtree)/scripts/target.json  KBUILD_RUSTFLAGS += -Ctarget-feature=-sse,-sse2,-sse3,-ssse3,-sse4.1,-sse4.2,-avx,-avx2 diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index 28f5468a6ea3..fe65be0b9d9c 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -7596,6 +7596,7 @@ __init int intel_pmu_init(void)  		break;  	case INTEL_PANTHERLAKE_L: +	case INTEL_WILDCATLAKE_L:  		pr_cont("Pantherlake Hybrid events, ");  		name = "pantherlake_hybrid";  		goto lnl_common; diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c index c0b7ac1c7594..01bc59e9286c 100644 --- a/arch/x86/events/intel/ds.c +++ b/arch/x86/events/intel/ds.c @@ -317,7 +317,8 @@ static u64 __grt_latency_data(struct perf_event *event, u64 status,  {  	u64 val; -	WARN_ON_ONCE(hybrid_pmu(event->pmu)->pmu_type == hybrid_big); +	WARN_ON_ONCE(is_hybrid() && +		     hybrid_pmu(event->pmu)->pmu_type == hybrid_big);  	dse &= PERF_PEBS_DATA_SOURCE_GRT_MASK;  	val = hybrid_var(event->pmu, pebs_data_source)[dse]; diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c index a762f7f5b161..d6c945cc5d07 100644 --- a/arch/x86/events/intel/uncore.c +++ b/arch/x86/events/intel/uncore.c @@ -1895,6 +1895,7 @@ static const struct x86_cpu_id intel_uncore_match[] __initconst = {  	X86_MATCH_VFM(INTEL_ARROWLAKE_H,	&mtl_uncore_init),  	X86_MATCH_VFM(INTEL_LUNARLAKE_M,	&lnl_uncore_init),  	X86_MATCH_VFM(INTEL_PANTHERLAKE_L,	&ptl_uncore_init), +	X86_MATCH_VFM(INTEL_WILDCATLAKE_L,	&ptl_uncore_init),  	X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X,	&spr_uncore_init),  	
X86_MATCH_VFM(INTEL_EMERALDRAPIDS_X,	&spr_uncore_init),  	X86_MATCH_VFM(INTEL_GRANITERAPIDS_X,	&gnr_uncore_init), diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h index f32a0eca2ae5..950bfd006905 100644 --- a/arch/x86/include/asm/intel-family.h +++ b/arch/x86/include/asm/intel-family.h @@ -150,12 +150,12 @@  #define INTEL_LUNARLAKE_M		IFM(6, 0xBD) /* Lion Cove / Skymont */ -#define INTEL_PANTHERLAKE_L		IFM(6, 0xCC) /* Cougar Cove / Crestmont */ +#define INTEL_PANTHERLAKE_L		IFM(6, 0xCC) /* Cougar Cove / Darkmont */  #define INTEL_WILDCATLAKE_L		IFM(6, 0xD5) -#define INTEL_NOVALAKE			IFM(18, 0x01) -#define INTEL_NOVALAKE_L		IFM(18, 0x03) +#define INTEL_NOVALAKE			IFM(18, 0x01) /* Coyote Cove / Arctic Wolf */ +#define INTEL_NOVALAKE_L		IFM(18, 0x03) /* Coyote Cove / Arctic Wolf */  /* "Small Core" Processors (Atom/E-Core) */ diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h index 015d23f3e01f..53f4089333f2 100644 --- a/arch/x86/include/asm/page_64.h +++ b/arch/x86/include/asm/page_64.h @@ -43,6 +43,9 @@ extern unsigned long __phys_addr_symbol(unsigned long);  void clear_page_orig(void *page);  void clear_page_rep(void *page);  void clear_page_erms(void *page); +KCFI_REFERENCE(clear_page_orig); +KCFI_REFERENCE(clear_page_rep); +KCFI_REFERENCE(clear_page_erms);  static inline void clear_page(void *page)  { diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index ccaa51ce63f6..8e36964a7721 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -516,7 +516,7 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)  			setup_force_cpu_cap(X86_FEATURE_ZEN5);  			break;  		case 0x50 ... 0x5f: -		case 0x90 ... 0xaf: +		case 0x80 ... 0xaf:  		case 0xc0 ... 0xcf:  			setup_force_cpu_cap(X86_FEATURE_ZEN6);  			break; @@ -1035,8 +1035,18 @@ static void init_amd_zen4(struct cpuinfo_x86 *c)  	}  } +static const struct x86_cpu_id zen5_rdseed_microcode[] = { +	ZEN_MODEL_STEP_UCODE(0x1a, 0x02, 0x1, 0x0b00215a), +	ZEN_MODEL_STEP_UCODE(0x1a, 0x11, 0x0, 0x0b101054), +}; +  static void init_amd_zen5(struct cpuinfo_x86 *c)  { +	if (!x86_match_min_microcode_rev(zen5_rdseed_microcode)) { +		clear_cpu_cap(c, X86_FEATURE_RDSEED); +		msr_clear_bit(MSR_AMD64_CPUID_FN_7, 18); +		pr_emerg_once("RDSEED32 is broken. 
Disabling the corresponding CPUID bit.\n"); +	}  }  static void init_amd(struct cpuinfo_x86 *c) diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c index 28ed8c089024..b7c797dc94f4 100644 --- a/arch/x86/kernel/cpu/microcode/amd.c +++ b/arch/x86/kernel/cpu/microcode/amd.c @@ -233,13 +233,31 @@ static bool need_sha_check(u32 cur_rev)  	return true;  } +static bool cpu_has_entrysign(void) +{ +	unsigned int fam   = x86_family(bsp_cpuid_1_eax); +	unsigned int model = x86_model(bsp_cpuid_1_eax); + +	if (fam == 0x17 || fam == 0x19) +		return true; + +	if (fam == 0x1a) { +		if (model <= 0x2f || +		    (0x40 <= model && model <= 0x4f) || +		    (0x60 <= model && model <= 0x6f)) +			return true; +	} + +	return false; +} +  static bool verify_sha256_digest(u32 patch_id, u32 cur_rev, const u8 *data, unsigned int len)  {  	struct patch_digest *pd = NULL;  	u8 digest[SHA256_DIGEST_SIZE];  	int i; -	if (x86_family(bsp_cpuid_1_eax) < 0x17) +	if (!cpu_has_entrysign())  		return true;  	if (!need_sha_check(cur_rev)) diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c index 1f71cc135e9a..e88eacb1b5bb 100644 --- a/arch/x86/kernel/fpu/core.c +++ b/arch/x86/kernel/fpu/core.c @@ -825,6 +825,9 @@ void fpu__clear_user_states(struct fpu *fpu)  	    !fpregs_state_valid(fpu, smp_processor_id()))  		os_xrstor_supervisor(fpu->fpstate); +	/* Ensure XFD state is in sync before reloading XSTATE */ +	xfd_update_state(fpu->fpstate); +  	/* Reset user states in registers. */  	restore_fpregs_from_init_fpstate(XFEATURE_MASK_USER_RESTORE); diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index d4c93d9e73e4..de5083cb1d37 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -2701,7 +2701,7 @@ emit_jmp:  			/* Update cleanup_addr */  			ctx->cleanup_addr = proglen;  			if (bpf_prog_was_classic(bpf_prog) && -			    !capable(CAP_SYS_ADMIN)) { +			    !ns_capable_noaudit(&init_user_ns, CAP_SYS_ADMIN)) {  				u8 *ip = image + addrs[i - 1];  				if (emit_spectre_bhb_barrier(&prog, ip, bpf_prog)) diff --git a/block/blk-crypto.c b/block/blk-crypto.c index 4b1ad84d1b5a..3e7bf1974cbd 100644 --- a/block/blk-crypto.c +++ b/block/blk-crypto.c @@ -292,7 +292,7 @@ bool __blk_crypto_bio_prep(struct bio **bio_ptr)  	}  	if (!bio_crypt_check_alignment(bio)) { -		bio->bi_status = BLK_STS_IOERR; +		bio->bi_status = BLK_STS_INVAL;  		goto fail;  	} diff --git a/drivers/base/regmap/regmap-slimbus.c b/drivers/base/regmap/regmap-slimbus.c index 54eb7d227cf4..e523fae73004 100644 --- a/drivers/base/regmap/regmap-slimbus.c +++ b/drivers/base/regmap/regmap-slimbus.c @@ -48,8 +48,7 @@ struct regmap *__regmap_init_slimbus(struct slim_device *slimbus,  	if (IS_ERR(bus))  		return ERR_CAST(bus); -	return __regmap_init(&slimbus->dev, bus, &slimbus->dev, config, -			     lock_key, lock_name); +	return __regmap_init(&slimbus->dev, bus, slimbus, config, lock_key, lock_name);  }  EXPORT_SYMBOL_GPL(__regmap_init_slimbus); @@ -63,8 +62,7 @@ struct regmap *__devm_regmap_init_slimbus(struct slim_device *slimbus,  	if (IS_ERR(bus))  		return ERR_CAST(bus); -	return __devm_regmap_init(&slimbus->dev, bus, &slimbus, config, -				  lock_key, lock_name); +	return __devm_regmap_init(&slimbus->dev, bus, slimbus, config, lock_key, lock_name);  }  EXPORT_SYMBOL_GPL(__devm_regmap_init_slimbus); diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c index f982027e8c85..0ee55f889cfd 100644 --- a/drivers/block/null_blk/main.c +++ b/drivers/block/null_blk/main.c @@ 
-1949,6 +1949,7 @@ static int null_add_dev(struct nullb_device *dev)  		.logical_block_size	= dev->blocksize,  		.physical_block_size	= dev->blocksize,  		.max_hw_sectors		= dev->max_sectors, +		.dma_alignment		= dev->blocksize - 1,  	};  	struct nullb *nullb; diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c index 3f78c56b58dc..39e6f93dc310 100644 --- a/drivers/dma-buf/dma-fence.c +++ b/drivers/dma-buf/dma-fence.c @@ -1141,7 +1141,7 @@ const char __rcu *dma_fence_timeline_name(struct dma_fence *fence)  			 "RCU protection is required for safe access to returned string");  	if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) -		return fence->ops->get_driver_name(fence); +		return fence->ops->get_timeline_name(fence);  	else  		return "signaled-timeline";  } diff --git a/drivers/edac/versalnet_edac.c b/drivers/edac/versalnet_edac.c index 7c5db8bf0595..1ded4c3f0213 100644 --- a/drivers/edac/versalnet_edac.c +++ b/drivers/edac/versalnet_edac.c @@ -433,7 +433,7 @@ static void handle_error(struct mc_priv  *priv, struct ecc_status *stat,  	phys_addr_t pfn;  	int err; -	if (WARN_ON_ONCE(ctl_num > NUM_CONTROLLERS)) +	if (WARN_ON_ONCE(ctl_num >= NUM_CONTROLLERS))  		return;  	mci = priv->mci[ctl_num]; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c index ef996493115f..425a3e564360 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +// SPDX-License-Identifier: MIT  /*   * Copyright 2025 Advanced Micro Devices, Inc.   * diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.h index bcb97d245673..353421807387 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: MIT */  /*   * Copyright 2025 Advanced Micro Devices, Inc.   
* diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c index 474bfe36c0c2..aa78c2ee9e21 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c @@ -322,6 +322,26 @@ static int vpe_early_init(struct amdgpu_ip_block *ip_block)  	return 0;  } +static bool vpe_need_dpm0_at_power_down(struct amdgpu_device *adev) +{ +	switch (amdgpu_ip_version(adev, VPE_HWIP, 0)) { +	case IP_VERSION(6, 1, 1): +		return adev->pm.fw_version < 0x0a640500; +	default: +		return false; +	} +} + +static int vpe_get_dpm_level(struct amdgpu_device *adev) +{ +	struct amdgpu_vpe *vpe = &adev->vpe; + +	if (!adev->pm.dpm_enabled) +		return 0; + +	return RREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_request_lv)); +} +  static void vpe_idle_work_handler(struct work_struct *work)  {  	struct amdgpu_device *adev = @@ -329,11 +349,17 @@ static void vpe_idle_work_handler(struct work_struct *work)  	unsigned int fences = 0;  	fences += amdgpu_fence_count_emitted(&adev->vpe.ring); +	if (fences) +		goto reschedule; -	if (fences == 0) -		amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VPE, AMD_PG_STATE_GATE); -	else -		schedule_delayed_work(&adev->vpe.idle_work, VPE_IDLE_TIMEOUT); +	if (vpe_need_dpm0_at_power_down(adev) && vpe_get_dpm_level(adev) != 0) +		goto reschedule; + +	amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VPE, AMD_PG_STATE_GATE); +	return; + +reschedule: +	schedule_delayed_work(&adev->vpe.idle_work, VPE_IDLE_TIMEOUT);  }  static int vpe_common_init(struct amdgpu_vpe *vpe) diff --git a/drivers/gpu/drm/amd/amdgpu/cyan_skillfish_reg_init.c b/drivers/gpu/drm/amd/amdgpu/cyan_skillfish_reg_init.c index 96616a865aac..ed1e25661706 100644 --- a/drivers/gpu/drm/amd/amdgpu/cyan_skillfish_reg_init.c +++ b/drivers/gpu/drm/amd/amdgpu/cyan_skillfish_reg_init.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +// SPDX-License-Identifier: MIT  /*   * Copyright 2018 Advanced Micro Devices, Inc.   
* diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c index 1ec9d03ad747..38f9ea313dcb 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c @@ -248,6 +248,8 @@ static void amdgpu_dm_crtc_vblank_control_worker(struct work_struct *work)  	struct vblank_control_work *vblank_work =  		container_of(work, struct vblank_control_work, work);  	struct amdgpu_display_manager *dm = vblank_work->dm; +	struct amdgpu_device *adev = drm_to_adev(dm->ddev); +	int r;  	mutex_lock(&dm->dc_lock); @@ -277,7 +279,16 @@ static void amdgpu_dm_crtc_vblank_control_worker(struct work_struct *work)  	if (dm->active_vblank_irq_count == 0) {  		dc_post_update_surfaces_to_stream(dm->dc); + +		r = amdgpu_dpm_pause_power_profile(adev, true); +		if (r) +			dev_warn(adev->dev, "failed to set default power profile mode\n"); +  		dc_allow_idle_optimizations(dm->dc, true); + +		r = amdgpu_dpm_pause_power_profile(adev, false); +		if (r) +			dev_warn(adev->dev, "failed to restore the power profile mode\n");  	}  	mutex_unlock(&dm->dc_lock); @@ -297,8 +308,12 @@ static inline int amdgpu_dm_crtc_set_vblank(struct drm_crtc *crtc, bool enable)  	int irq_type;  	int rc = 0; -	if (acrtc->otg_inst == -1) -		goto skip; +	if (enable && !acrtc->base.enabled) { +		drm_dbg_vbl(crtc->dev, +				"Reject vblank enable on unconfigured CRTC %d (enabled=%d)\n", +				acrtc->crtc_id, acrtc->base.enabled); +		return -EINVAL; +	}  	irq_type = amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id); @@ -383,7 +398,7 @@ static inline int amdgpu_dm_crtc_set_vblank(struct drm_crtc *crtc, bool enable)  			return rc;  	}  #endif -skip: +  	if (amdgpu_in_reset(adev))  		return 0; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c index fe100e4c9801..cc21337a182f 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c @@ -83,6 +83,7 @@ static void apply_edid_quirks(struct drm_device *dev, struct edid *edid, struct  		edid_caps->panel_patch.remove_sink_ext_caps = true;  		break;  	case drm_edid_encode_panel_id('S', 'D', 'C', 0x4154): +	case drm_edid_encode_panel_id('S', 'D', 'C', 0x4171):  		drm_dbg_driver(dev, "Disabling VSC on monitor with panel id %X\n", panel_id);  		edid_caps->panel_patch.disable_colorimetry = true;  		break; diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c index 09be2a90cc79..4f569cd8a5d6 100644 --- a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c @@ -578,9 +578,6 @@ static void dpp3_power_on_blnd_lut(  			dpp_base->ctx->dc->optimized_required = true;  			dpp_base->deferred_reg_writes.bits.disable_blnd_lut = true;  		} -	} else { -		REG_SET(CM_MEM_PWR_CTRL, 0, -				BLNDGAM_MEM_PWR_FORCE, power_on == true ? 0 : 1);  	}  } diff --git a/drivers/gpu/drm/amd/include/amd_cper.h b/drivers/gpu/drm/amd/include/amd_cper.h index 086869264425..a252ee4c7874 100644 --- a/drivers/gpu/drm/amd/include/amd_cper.h +++ b/drivers/gpu/drm/amd/include/amd_cper.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: MIT */  /*   * Copyright 2025 Advanced Micro Devices, Inc.   
* diff --git a/drivers/gpu/drm/amd/include/ivsrcid/vcn/irqsrcs_vcn_5_0.h b/drivers/gpu/drm/amd/include/ivsrcid/vcn/irqsrcs_vcn_5_0.h index 64b553e7de1a..e7fdcee22a71 100644 --- a/drivers/gpu/drm/amd/include/ivsrcid/vcn/irqsrcs_vcn_5_0.h +++ b/drivers/gpu/drm/amd/include/ivsrcid/vcn/irqsrcs_vcn_5_0.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: MIT */  /*   * Copyright 2024 Advanced Micro Devices, Inc. All rights reserved. diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c index d2dbd90bb427..0a876c840c79 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c @@ -2024,7 +2024,7 @@ static int fiji_init_smc_table(struct pp_hwmgr *hwmgr)  	table->VoltageResponseTime = 0;  	table->PhaseResponseTime = 0;  	table->MemoryThermThrottleEnable = 1; -	table->PCIeBootLinkLevel = 0;      /* 0:Gen1 1:Gen2 2:Gen3*/ +	table->PCIeBootLinkLevel = (uint8_t) (data->dpm_table.pcie_speed_table.count);  	table->PCIeGenInterval = 1;  	table->VRConfig = 0; diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c index 1f50f1e74c48..aa3ae9b115c4 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c @@ -2028,7 +2028,7 @@ static int iceland_init_smc_table(struct pp_hwmgr *hwmgr)  	table->VoltageResponseTime  = 0;  	table->PhaseResponseTime  = 0;  	table->MemoryThermThrottleEnable  = 1; -	table->PCIeBootLinkLevel = 0; +	table->PCIeBootLinkLevel = (uint8_t) (data->dpm_table.pcie_speed_table.count);  	table->PCIeGenInterval = 1;  	result = iceland_populate_smc_svi2_config(hwmgr, table); diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c index f532f7c69259..a8961a8f5c42 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c @@ -969,7 +969,7 @@ int smu_cmn_update_table(struct smu_context *smu,  						      table_index);  	uint32_t table_size;  	int ret = 0; -	if (!table_data || table_id >= SMU_TABLE_COUNT || table_id < 0) +	if (!table_data || table_index >= SMU_TABLE_COUNT || table_id < 0)  		return -EINVAL;  	table_size = smu_table->tables[table_index].size; diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h index c15aef014f69..d41bd876167c 100644 --- a/drivers/gpu/drm/ast/ast_drv.h +++ b/drivers/gpu/drm/ast/ast_drv.h @@ -282,13 +282,13 @@ static inline void __ast_write8_i(void __iomem *addr, u32 reg, u8 index, u8 val)  	__ast_write8(addr, reg + 1, val);  } -static inline void __ast_write8_i_masked(void __iomem *addr, u32 reg, u8 index, u8 read_mask, +static inline void __ast_write8_i_masked(void __iomem *addr, u32 reg, u8 index, u8 preserve_mask,  					 u8 val)  { -	u8 tmp = __ast_read8_i_masked(addr, reg, index, read_mask); +	u8 tmp = __ast_read8_i_masked(addr, reg, index, preserve_mask); -	tmp |= val; -	__ast_write8_i(addr, reg, index, tmp); +	val &= ~preserve_mask; +	__ast_write8_i(addr, reg, index, tmp | val);  }  static inline u32 ast_read32(struct ast_device *ast, u32 reg) diff --git a/drivers/gpu/drm/ci/gitlab-ci.yml b/drivers/gpu/drm/ci/gitlab-ci.yml index d502d146b177..56638814bb28 100644 --- a/drivers/gpu/drm/ci/gitlab-ci.yml +++ b/drivers/gpu/drm/ci/gitlab-ci.yml @@ -280,7 +280,7 @@ sanity:      GIT_STRATEGY: none    script:      # ci-fairy check-commits 
--junit-xml=check-commits.xml -    - ci-fairy check-merge-request --require-allow-collaboration --junit-xml=check-merge-request.xml +    # - ci-fairy check-merge-request --require-allow-collaboration --junit-xml=check-merge-request.xml      - |        set -eu        image_tags=( diff --git a/drivers/gpu/drm/drm_gem_atomic_helper.c b/drivers/gpu/drm/drm_gem_atomic_helper.c index ebf305fb24f0..6fb55601252f 100644 --- a/drivers/gpu/drm/drm_gem_atomic_helper.c +++ b/drivers/gpu/drm/drm_gem_atomic_helper.c @@ -310,8 +310,12 @@ EXPORT_SYMBOL(drm_gem_destroy_shadow_plane_state);  void __drm_gem_reset_shadow_plane(struct drm_plane *plane,  				  struct drm_shadow_plane_state *shadow_plane_state)  { -	__drm_atomic_helper_plane_reset(plane, &shadow_plane_state->base); -	drm_format_conv_state_init(&shadow_plane_state->fmtcnv_state); +	if (shadow_plane_state) { +		__drm_atomic_helper_plane_reset(plane, &shadow_plane_state->base); +		drm_format_conv_state_init(&shadow_plane_state->fmtcnv_state); +	} else { +		__drm_atomic_helper_plane_reset(plane, NULL); +	}  }  EXPORT_SYMBOL(__drm_gem_reset_shadow_plane); diff --git a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c index b13a17276d07..88385dc3b30d 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c @@ -347,7 +347,7 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,  	u32 link_target, link_dwords;  	bool switch_context = gpu->exec_state != exec_state;  	bool switch_mmu_context = gpu->mmu_context != mmu_context; -	unsigned int new_flush_seq = READ_ONCE(gpu->mmu_context->flush_seq); +	unsigned int new_flush_seq = READ_ONCE(mmu_context->flush_seq);  	bool need_flush = switch_mmu_context || gpu->flush_seq != new_flush_seq;  	bool has_blt = !!(gpu->identity.minor_features5 &  			  chipMinorFeatures5_BLT_ENGINE); diff --git a/drivers/gpu/drm/i915/display/intel_dmc.c b/drivers/gpu/drm/i915/display/intel_dmc.c index 77a0199f9ea5..4a4cace1f879 100644 --- a/drivers/gpu/drm/i915/display/intel_dmc.c +++ b/drivers/gpu/drm/i915/display/intel_dmc.c @@ -546,6 +546,36 @@ static bool is_event_handler(struct intel_display *display,  		REG_FIELD_GET(DMC_EVT_CTL_EVENT_ID_MASK, data) == event_id;  } +static bool fixup_dmc_evt(struct intel_display *display, +			  enum intel_dmc_id dmc_id, +			  i915_reg_t reg_ctl, u32 *data_ctl, +			  i915_reg_t reg_htp, u32 *data_htp) +{ +	if (!is_dmc_evt_ctl_reg(display, dmc_id, reg_ctl)) +		return false; + +	if (!is_dmc_evt_htp_reg(display, dmc_id, reg_htp)) +		return false; + +	/* make sure reg_ctl and reg_htp are for the same event */ +	if (i915_mmio_reg_offset(reg_ctl) - i915_mmio_reg_offset(DMC_EVT_CTL(display, dmc_id, 0)) != +	    i915_mmio_reg_offset(reg_htp) - i915_mmio_reg_offset(DMC_EVT_HTP(display, dmc_id, 0))) +		return false; + +	/* +	 * On ADL-S the HRR event handler is not restored after DC6. +	 * Clear it to zero from the beginning to avoid mismatches later. 
+	 */ +	if (display->platform.alderlake_s && dmc_id == DMC_FW_MAIN && +	    is_event_handler(display, dmc_id, MAINDMC_EVENT_VBLANK_A, reg_ctl, *data_ctl)) { +		*data_ctl = 0; +		*data_htp = 0; +		return true; +	} + +	return false; +} +  static bool disable_dmc_evt(struct intel_display *display,  			    enum intel_dmc_id dmc_id,  			    i915_reg_t reg, u32 data) @@ -1064,9 +1094,32 @@ static u32 parse_dmc_fw_header(struct intel_dmc *dmc,  	for (i = 0; i < mmio_count; i++) {  		dmc_info->mmioaddr[i] = _MMIO(mmioaddr[i]);  		dmc_info->mmiodata[i] = mmiodata[i]; +	} + +	for (i = 0; i < mmio_count - 1; i++) { +		u32 orig_mmiodata[2] = { +			dmc_info->mmiodata[i], +			dmc_info->mmiodata[i+1], +		}; + +		if (!fixup_dmc_evt(display, dmc_id, +				   dmc_info->mmioaddr[i], &dmc_info->mmiodata[i], +				   dmc_info->mmioaddr[i+1], &dmc_info->mmiodata[i+1])) +			continue; + +		drm_dbg_kms(display->drm, +			    " mmio[%d]: 0x%x = 0x%x->0x%x (EVT_CTL)\n", +			    i, i915_mmio_reg_offset(dmc_info->mmioaddr[i]), +			    orig_mmiodata[0], dmc_info->mmiodata[i]); +		drm_dbg_kms(display->drm, +			    " mmio[%d]: 0x%x = 0x%x->0x%x (EVT_HTP)\n", +			    i+1, i915_mmio_reg_offset(dmc_info->mmioaddr[i+1]), +			    orig_mmiodata[1], dmc_info->mmiodata[i+1]); +	} +	for (i = 0; i < mmio_count; i++) {  		drm_dbg_kms(display->drm, " mmio[%d]: 0x%x = 0x%x%s%s\n", -			    i, mmioaddr[i], mmiodata[i], +			    i, i915_mmio_reg_offset(dmc_info->mmioaddr[i]), dmc_info->mmiodata[i],  			    is_dmc_evt_ctl_reg(display, dmc_id, dmc_info->mmioaddr[i]) ? " (EVT_CTL)" :  			    is_dmc_evt_htp_reg(display, dmc_id, dmc_info->mmioaddr[i]) ? " (EVT_HTP)" : "",  			    disable_dmc_evt(display, dmc_id, dmc_info->mmioaddr[i], diff --git a/drivers/gpu/drm/imx/ipuv3/parallel-display.c b/drivers/gpu/drm/imx/ipuv3/parallel-display.c index 6d8325c76697..7fc6af703307 100644 --- a/drivers/gpu/drm/imx/ipuv3/parallel-display.c +++ b/drivers/gpu/drm/imx/ipuv3/parallel-display.c @@ -25,19 +25,18 @@  struct imx_parallel_display_encoder {  	struct drm_encoder encoder; -	struct drm_bridge bridge; -	struct imx_parallel_display *pd;  };  struct imx_parallel_display {  	struct device *dev;  	u32 bus_format;  	struct drm_bridge *next_bridge; +	struct drm_bridge bridge;  };  static inline struct imx_parallel_display *bridge_to_imxpd(struct drm_bridge *b)  { -	return container_of(b, struct imx_parallel_display_encoder, bridge)->pd; +	return container_of(b, struct imx_parallel_display, bridge);  }  static const u32 imx_pd_bus_fmts[] = { @@ -195,15 +194,13 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data)  	if (IS_ERR(imxpd_encoder))  		return PTR_ERR(imxpd_encoder); -	imxpd_encoder->pd = imxpd;  	encoder = &imxpd_encoder->encoder; -	bridge = &imxpd_encoder->bridge; +	bridge = &imxpd->bridge;  	ret = imx_drm_encoder_parse_of(drm, encoder, imxpd->dev->of_node);  	if (ret)  		return ret; -	bridge->funcs = &imx_pd_bridge_funcs;  	drm_bridge_attach(encoder, bridge, NULL, DRM_BRIDGE_ATTACH_NO_CONNECTOR);  	connector = drm_bridge_connector_init(drm, encoder); @@ -228,9 +225,10 @@ static int imx_pd_probe(struct platform_device *pdev)  	u32 bus_format = 0;  	const char *fmt; -	imxpd = devm_kzalloc(dev, sizeof(*imxpd), GFP_KERNEL); -	if (!imxpd) -		return -ENOMEM; +	imxpd = devm_drm_bridge_alloc(dev, struct imx_parallel_display, bridge, +				      &imx_pd_bridge_funcs); +	if (IS_ERR(imxpd)) +		return PTR_ERR(imxpd);  	/* port@1 is the output port */  	imxpd->next_bridge = devm_drm_of_get_bridge(dev, np, 1, 0); @@ -258,6 +256,8 @@ static 
int imx_pd_probe(struct platform_device *pdev)  	platform_set_drvdata(pdev, imxpd); +	devm_drm_bridge_add(dev, &imxpd->bridge); +  	return component_add(dev, &imx_pd_ops);  } diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c index eb5537f0ac90..31ff2922758a 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c @@ -686,10 +686,6 @@ err_free:  	for (i = 0; i < private->data->mmsys_dev_num; i++)  		private->all_drm_private[i]->drm = NULL;  err_put_dev: -	for (i = 0; i < private->data->mmsys_dev_num; i++) { -		/* For device_find_child in mtk_drm_get_all_priv() */ -		put_device(private->all_drm_private[i]->dev); -	}  	put_device(private->mutex_dev);  	return ret;  } @@ -697,18 +693,12 @@ err_put_dev:  static void mtk_drm_unbind(struct device *dev)  {  	struct mtk_drm_private *private = dev_get_drvdata(dev); -	int i;  	/* for multi mmsys dev, unregister drm dev in mmsys master */  	if (private->drm_master) {  		drm_dev_unregister(private->drm);  		mtk_drm_kms_deinit(private->drm);  		drm_dev_put(private->drm); - -		for (i = 0; i < private->data->mmsys_dev_num; i++) { -			/* For device_find_child in mtk_drm_get_all_priv() */ -			put_device(private->all_drm_private[i]->dev); -		}  		put_device(private->mutex_dev);  	}  	private->mtk_drm_bound = false; diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c index fc62fef2fed8..4e6dc16e4a4c 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c @@ -780,6 +780,9 @@ static bool fw_block_mem(struct a6xx_gmu_bo *bo, const struct block_header *blk)  	return true;  } +#define NEXT_BLK(blk) \ +	((const struct block_header *)((const char *)(blk) + sizeof(*(blk)) + (blk)->size)) +  static int a6xx_gmu_fw_load(struct a6xx_gmu *gmu)  {  	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); @@ -811,7 +814,7 @@ static int a6xx_gmu_fw_load(struct a6xx_gmu *gmu)  	for (blk = (const struct block_header *) fw_image->data;  	     (const u8*) blk < fw_image->data + fw_image->size; -	     blk = (const struct block_header *) &blk->data[blk->size >> 2]) { +	     blk = NEXT_BLK(blk)) {  		if (blk->size == 0)  			continue; diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index afaa3cfefd35..4b5a4edd0702 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c @@ -348,13 +348,6 @@ int adreno_fault_handler(struct msm_gpu *gpu, unsigned long iova, int flags,  	return 0;  } -static bool -adreno_smmu_has_prr(struct msm_gpu *gpu) -{ -	struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(&gpu->pdev->dev); -	return adreno_smmu && adreno_smmu->set_prr_addr; -} -  int adreno_get_param(struct msm_gpu *gpu, struct msm_context *ctx,  		     uint32_t param, uint64_t *value, uint32_t *len)  { diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c index 4b970a59deaf..2f8156051d9b 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c @@ -1545,6 +1545,9 @@ static enum drm_mode_status dpu_crtc_mode_valid(struct drm_crtc *crtc,  	adjusted_mode_clk = dpu_core_perf_adjusted_mode_clk(mode->clock,  							    dpu_kms->perf.perf_cfg); +	if (dpu_kms->catalog->caps->has_3d_merge) +		adjusted_mode_clk /= 2; +  	/*  	 * The given mode, adjusted for the perf clock factor, should not exceed  	 * the max core clock rate diff --git 
a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c index 6641455c4ec6..9f8d1bba9139 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c @@ -267,8 +267,8 @@ static const u32 wb2_formats_rgb_yuv[] = {  		.base = 0x200, .len = 0xa0,}, \  	.csc_blk = {.name = "csc", \  		.base = 0x320, .len = 0x100,}, \ -	.format_list = plane_formats_yuv, \ -	.num_formats = ARRAY_SIZE(plane_formats_yuv), \ +	.format_list = plane_formats, \ +	.num_formats = ARRAY_SIZE(plane_formats), \  	.rotation_cfg = NULL, \  	} diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c index f54cf0faa1c7..905524ceeb1f 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c @@ -500,13 +500,15 @@ static void _dpu_plane_setup_pixel_ext(struct dpu_hw_scaler3_cfg *scale_cfg,  	int i;  	for (i = 0; i < DPU_MAX_PLANES; i++) { +		uint32_t w = src_w, h = src_h; +  		if (i == DPU_SSPP_COMP_1_2 || i == DPU_SSPP_COMP_2) { -			src_w /= chroma_subsmpl_h; -			src_h /= chroma_subsmpl_v; +			w /= chroma_subsmpl_h; +			h /= chroma_subsmpl_v;  		} -		pixel_ext->num_ext_pxls_top[i] = src_h; -		pixel_ext->num_ext_pxls_left[i] = src_w; +		pixel_ext->num_ext_pxls_top[i] = h; +		pixel_ext->num_ext_pxls_left[i] = w;  	}  } @@ -740,7 +742,7 @@ static int dpu_plane_atomic_check_pipe(struct dpu_plane *pdpu,  	 * We already have verified scaling against platform limitations.  	 * Now check if the SSPP supports scaling at all.  	 */ -	if (!sblk->scaler_blk.len && +	if (!(sblk->scaler_blk.len && pipe->sspp->ops.setup_scaler) &&  	    ((drm_rect_width(&new_plane_state->src) >> 16 !=  	      drm_rect_width(&new_plane_state->dst)) ||  	     (drm_rect_height(&new_plane_state->src) >> 16 != @@ -1278,7 +1280,7 @@ int dpu_assign_plane_resources(struct dpu_global_state *global_state,  							     state, plane_state,  							     prev_adjacent_plane_state);  		if (ret) -			break; +			return ret;  		prev_adjacent_plane_state = plane_state;  	} diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c index 2c77c74fac0f..d9c3b0a1d091 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c @@ -842,7 +842,7 @@ struct dpu_hw_sspp *dpu_rm_reserve_sspp(struct dpu_rm *rm,  	if (!reqs->scale && !reqs->yuv)  		hw_sspp = dpu_rm_try_sspp(rm, global_state, crtc, reqs, SSPP_TYPE_DMA); -	if (!hw_sspp && reqs->scale) +	if (!hw_sspp && !reqs->yuv)  		hw_sspp = dpu_rm_try_sspp(rm, global_state, crtc, reqs, SSPP_TYPE_RGB);  	if (!hw_sspp)  		hw_sspp = dpu_rm_try_sspp(rm, global_state, crtc, reqs, SSPP_TYPE_VIG); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c index cd73468e369a..7545c0293efb 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c @@ -72,6 +72,9 @@ static int dpu_wb_conn_atomic_check(struct drm_connector *connector,  		DPU_ERROR("invalid fb w=%d, maxlinewidth=%u\n",  			  fb->width, dpu_wb_conn->maxlinewidth);  		return -EINVAL; +	} else if (fb->modifier != DRM_FORMAT_MOD_LINEAR) { +		DPU_ERROR("unsupported fb modifier:%#llx\n", fb->modifier); +		return -EINVAL;  	}  	return drm_atomic_helper_check_wb_connector_state(conn_state->connector, conn_state->state); diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h index e391505fdaf0..3cbf08231492 100644 
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h @@ -109,7 +109,6 @@ struct msm_dsi_phy {  	struct msm_dsi_dphy_timing timing;  	const struct msm_dsi_phy_cfg *cfg;  	void *tuning_cfg; -	void *pll_data;  	enum msm_dsi_phy_usecase usecase;  	bool regulator_ldo_mode; diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c index 32f06edd21a9..c5e1d2016bcc 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c @@ -426,11 +426,8 @@ static void dsi_pll_enable_pll_bias(struct dsi_pll_7nm *pll)  	u32 data;  	spin_lock_irqsave(&pll->pll_enable_lock, flags); -	if (pll->pll_enable_cnt++) { -		spin_unlock_irqrestore(&pll->pll_enable_lock, flags); -		WARN_ON(pll->pll_enable_cnt == INT_MAX); -		return; -	} +	pll->pll_enable_cnt++; +	WARN_ON(pll->pll_enable_cnt == INT_MAX);  	data = readl(pll->phy->base + REG_DSI_7nm_PHY_CMN_CTRL_0);  	data |= DSI_7nm_PHY_CMN_CTRL_0_PLL_SHUTDOWNB; @@ -876,7 +873,6 @@ static int dsi_pll_7nm_init(struct msm_dsi_phy *phy)  	spin_lock_init(&pll_7nm->pll_enable_lock);  	pll_7nm->phy = phy; -	phy->pll_data = pll_7nm;  	ret = pll_7nm_register(pll_7nm, phy->provided_clocks->hws);  	if (ret) { @@ -965,10 +961,8 @@ static int dsi_7nm_phy_enable(struct msm_dsi_phy *phy,  	u32 const delay_us = 5;  	u32 const timeout_us = 1000;  	struct msm_dsi_dphy_timing *timing = &phy->timing; -	struct dsi_pll_7nm *pll = phy->pll_data;  	void __iomem *base = phy->base;  	bool less_than_1500_mhz; -	unsigned long flags;  	u32 vreg_ctrl_0, vreg_ctrl_1, lane_ctrl0;  	u32 glbl_pemph_ctrl_0;  	u32 glbl_str_swi_cal_sel_ctrl, glbl_hstx_str_ctrl_0; @@ -1090,13 +1084,10 @@ static int dsi_7nm_phy_enable(struct msm_dsi_phy *phy,  		glbl_rescode_bot_ctrl = 0x3c;  	} -	spin_lock_irqsave(&pll->pll_enable_lock, flags); -	pll->pll_enable_cnt = 1;  	/* de-assert digital and pll power down */  	data = DSI_7nm_PHY_CMN_CTRL_0_DIGTOP_PWRDN_B |  	       DSI_7nm_PHY_CMN_CTRL_0_PLL_SHUTDOWNB;  	writel(data, base + REG_DSI_7nm_PHY_CMN_CTRL_0); -	spin_unlock_irqrestore(&pll->pll_enable_lock, flags);  	/* Assert PLL core reset */  	writel(0x00, base + REG_DSI_7nm_PHY_CMN_PLL_CNTRL); @@ -1209,9 +1200,7 @@ static bool dsi_7nm_set_continuous_clock(struct msm_dsi_phy *phy, bool enable)  static void dsi_7nm_phy_disable(struct msm_dsi_phy *phy)  { -	struct dsi_pll_7nm *pll = phy->pll_data;  	void __iomem *base = phy->base; -	unsigned long flags;  	u32 data;  	DBG(""); @@ -1238,11 +1227,8 @@ static void dsi_7nm_phy_disable(struct msm_dsi_phy *phy)  	writel(data, base + REG_DSI_7nm_PHY_CMN_CTRL_0);  	writel(0, base + REG_DSI_7nm_PHY_CMN_LANE_CTRL0); -	spin_lock_irqsave(&pll->pll_enable_lock, flags); -	pll->pll_enable_cnt = 0;  	/* Turn off all PHY blocks */  	writel(0x00, base + REG_DSI_7nm_PHY_CMN_CTRL_0); -	spin_unlock_irqrestore(&pll->pll_enable_lock, flags);  	/* make sure phy is turned off */  	wmb(); diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index 07d8cdd6bb2e..9f7fbe577abb 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -1120,12 +1120,16 @@ static void msm_gem_free_object(struct drm_gem_object *obj)  		put_pages(obj);  	} -	if (obj->resv != &obj->_resv) { +	/* +	 * In error paths, we could end up here before msm_gem_new_handle() +	 * has changed obj->resv to point to the shared resv.  In this case, +	 * we don't want to drop a ref to the shared r_obj that we haven't +	 * taken yet. 
+	 */ +	if ((msm_obj->flags & MSM_BO_NO_SHARE) && (obj->resv != &obj->_resv)) {  		struct drm_gem_object *r_obj =  			container_of(obj->resv, struct drm_gem_object, _resv); -		WARN_ON(!(msm_obj->flags & MSM_BO_NO_SHARE)); -  		/* Drop reference we hold to shared resv obj: */  		drm_gem_object_put(r_obj);  	} diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index 3ab3b27134f9..75d9f3574370 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c @@ -414,6 +414,11 @@ static void submit_attach_object_fences(struct msm_gem_submit *submit)  					 submit->user_fence,  					 DMA_RESV_USAGE_BOOKKEEP,  					 DMA_RESV_USAGE_BOOKKEEP); + +		last_fence = vm->last_fence; +		vm->last_fence = dma_fence_unwrap_merge(submit->user_fence, last_fence); +		dma_fence_put(last_fence); +  		return;  	} @@ -427,10 +432,6 @@ static void submit_attach_object_fences(struct msm_gem_submit *submit)  			dma_resv_add_fence(obj->resv, submit->user_fence,  					   DMA_RESV_USAGE_READ);  	} - -	last_fence = vm->last_fence; -	vm->last_fence = dma_fence_unwrap_merge(submit->user_fence, last_fence); -	dma_fence_put(last_fence);  }  static int submit_bo(struct msm_gem_submit *submit, uint32_t idx, diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c index 8316af1723c2..89a95977f41e 100644 --- a/drivers/gpu/drm/msm/msm_gem_vma.c +++ b/drivers/gpu/drm/msm/msm_gem_vma.c @@ -971,6 +971,7 @@ static int  lookup_op(struct msm_vm_bind_job *job, const struct drm_msm_vm_bind_op *op)  {  	struct drm_device *dev = job->vm->drm; +	struct msm_drm_private *priv = dev->dev_private;  	int i = job->nr_ops++;  	int ret = 0; @@ -1017,6 +1018,11 @@ lookup_op(struct msm_vm_bind_job *job, const struct drm_msm_vm_bind_op *op)  		break;  	} +	if ((op->op == MSM_VM_BIND_OP_MAP_NULL) && +	    !adreno_smmu_has_prr(priv->gpu)) { +		ret = UERR(EINVAL, dev, "PRR not supported\n"); +	} +  	return ret;  } @@ -1421,7 +1427,7 @@ msm_ioctl_vm_bind(struct drm_device *dev, void *data, struct drm_file *file)  	 * Maybe we could allow just UNMAP ops?  OTOH userspace should just  	 * immediately close the device file and all will be torn down.  	 
*/ -	if (to_msm_vm(ctx->vm)->unusable) +	if (to_msm_vm(msm_context_vm(dev, ctx))->unusable)  		return UERR(EPIPE, dev, "context is unusable");  	/* diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h index a597f2bee30b..2894fc118485 100644 --- a/drivers/gpu/drm/msm/msm_gpu.h +++ b/drivers/gpu/drm/msm/msm_gpu.h @@ -299,6 +299,17 @@ static inline struct msm_gpu *dev_to_gpu(struct device *dev)  	return container_of(adreno_smmu, struct msm_gpu, adreno_smmu);  } +static inline bool +adreno_smmu_has_prr(struct msm_gpu *gpu) +{ +	struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(&gpu->pdev->dev); + +	if (!adreno_smmu) +		return false; + +	return adreno_smmu && adreno_smmu->set_prr_addr; +} +  /* It turns out that all targets use the same ringbuffer size */  #define MSM_GPU_RINGBUFFER_SZ SZ_32K  #define MSM_GPU_RINGBUFFER_BLKSIZE 32 diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c index 0e18619f96cb..a188617653e8 100644 --- a/drivers/gpu/drm/msm/msm_iommu.c +++ b/drivers/gpu/drm/msm/msm_iommu.c @@ -338,6 +338,8 @@ msm_iommu_pagetable_prealloc_allocate(struct msm_mmu *mmu, struct msm_mmu_preall  	ret = kmem_cache_alloc_bulk(pt_cache, GFP_KERNEL, p->count, p->pages);  	if (ret != p->count) { +		kfree(p->pages); +		p->pages = NULL;  		p->count = ret;  		return -ENOMEM;  	} @@ -351,6 +353,9 @@ msm_iommu_pagetable_prealloc_cleanup(struct msm_mmu *mmu, struct msm_mmu_preallo  	struct kmem_cache *pt_cache = get_pt_cache(mmu);  	uint32_t remaining_pt_count = p->count - p->ptr; +	if (!p->pages) +		return; +  	if (p->count > 0)  		trace_msm_mmu_prealloc_cleanup(p->count, remaining_pt_count); diff --git a/drivers/gpu/drm/nouveau/nouveau_sched.c b/drivers/gpu/drm/nouveau/nouveau_sched.c index e60f7892f5ce..a7bf539e5d86 100644 --- a/drivers/gpu/drm/nouveau/nouveau_sched.c +++ b/drivers/gpu/drm/nouveau/nouveau_sched.c @@ -482,6 +482,17 @@ nouveau_sched_create(struct nouveau_sched **psched, struct nouveau_drm *drm,  	return 0;  } +static bool +nouveau_sched_job_list_empty(struct nouveau_sched *sched) +{ +	bool empty; + +	spin_lock(&sched->job.list.lock); +	empty = list_empty(&sched->job.list.head); +	spin_unlock(&sched->job.list.lock); + +	return empty; +}  static void  nouveau_sched_fini(struct nouveau_sched *sched) @@ -489,8 +500,7 @@ nouveau_sched_fini(struct nouveau_sched *sched)  	struct drm_gpu_scheduler *drm_sched = &sched->base;  	struct drm_sched_entity *entity = &sched->entity; -	rmb(); /* for list_empty to work without lock */ -	wait_event(sched->job.wq, list_empty(&sched->job.list.head)); +	wait_event(sched->job.wq, nouveau_sched_job_list_empty(sched));  	drm_sched_entity_fini(entity);  	drm_sched_fini(drm_sched); diff --git a/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c b/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c index 2fc7b0779b37..893af9b16756 100644 --- a/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c +++ b/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c @@ -359,7 +359,7 @@ static int kingdisplay_panel_probe(struct mipi_dsi_device *dsi)  	dsi->lanes = 4;  	dsi->format = MIPI_DSI_FMT_RGB888;  	dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST | -			  MIPI_DSI_MODE_LPM; +			  MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_NO_EOT_PACKET;  	kingdisplay = devm_drm_panel_alloc(&dsi->dev, __typeof(*kingdisplay), base,  					   &kingdisplay_panel_funcs, diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7789v.c b/drivers/gpu/drm/panel/panel-sitronix-st7789v.c index 04d91929eedd..d5f821d6b23c 100644 --- 
a/drivers/gpu/drm/panel/panel-sitronix-st7789v.c +++ b/drivers/gpu/drm/panel/panel-sitronix-st7789v.c @@ -249,6 +249,11 @@ static const struct drm_display_mode default_mode = {  	.flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,  }; +/* + * The mode data for this panel has been reverse engineered without access + * to the panel datasheet / manual. Using DRM_MODE_FLAG_PHSYNC like all + * other panels results in garbage data on the display. + */  static const struct drm_display_mode t28cp45tn89_mode = {  	.clock = 6008,  	.hdisplay = 240, @@ -261,7 +266,7 @@ static const struct drm_display_mode t28cp45tn89_mode = {  	.vtotal = 320 + 8 + 4 + 4,  	.width_mm = 43,  	.height_mm = 57, -	.flags = DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC, +	.flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC,  };  static const struct drm_display_mode et028013dma_mode = { diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index 88e821d67af7..9c8907bc61d9 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -314,17 +314,17 @@ static int radeon_pci_probe(struct pci_dev *pdev,  	ret = pci_enable_device(pdev);  	if (ret) -		goto err_free; +		return ret;  	pci_set_drvdata(pdev, ddev);  	ret = radeon_driver_load_kms(ddev, flags);  	if (ret) -		goto err_agp; +		goto err;  	ret = drm_dev_register(ddev, flags);  	if (ret) -		goto err_agp; +		goto err;  	if (rdev->mc.real_vram_size <= (8 * 1024 * 1024))  		format = drm_format_info(DRM_FORMAT_C8); @@ -337,30 +337,14 @@ static int radeon_pci_probe(struct pci_dev *pdev,  	return 0; -err_agp: +err:  	pci_disable_device(pdev); -err_free: -	drm_dev_put(ddev);  	return ret;  }  static void -radeon_pci_remove(struct pci_dev *pdev) -{ -	struct drm_device *dev = pci_get_drvdata(pdev); - -	drm_put_dev(dev); -} - -static void  radeon_pci_shutdown(struct pci_dev *pdev)  { -	/* if we are running in a VM, make sure the device -	 * torn down properly on reboot/shutdown -	 */ -	if (radeon_device_is_virtual()) -		radeon_pci_remove(pdev); -  #if defined(CONFIG_PPC64) || defined(CONFIG_MACH_LOONGSON64)  	/*  	 * Some adapters need to be suspended before a @@ -613,7 +597,6 @@ static struct pci_driver radeon_kms_pci_driver = {  	.name = DRIVER_NAME,  	.id_table = pciidlist,  	.probe = radeon_pci_probe, -	.remove = radeon_pci_remove,  	.shutdown = radeon_pci_shutdown,  	.driver.pm = &radeon_pm_ops,  }; diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index 645e33bf7947..ba1446acd703 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c @@ -84,7 +84,6 @@ void radeon_driver_unload_kms(struct drm_device *dev)  	rdev->agp = NULL;  done_free: -	kfree(rdev);  	dev->dev_private = NULL;  } diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c index 5a4697f636f2..c8e949f4a568 100644 --- a/drivers/gpu/drm/scheduler/sched_entity.c +++ b/drivers/gpu/drm/scheduler/sched_entity.c @@ -70,6 +70,7 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,  	entity->guilty = guilty;  	entity->num_sched_list = num_sched_list;  	entity->priority = priority; +	entity->last_user = current->group_leader;  	/*  	 * It's perfectly valid to initialize an entity without having a valid  	 * scheduler attached. It's just not valid to use the scheduler before it @@ -302,7 +303,7 @@ long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)  	/* For a killed process disallow further enqueueing of jobs. 
*/  	last_user = cmpxchg(&entity->last_user, current->group_leader, NULL); -	if ((!last_user || last_user == current->group_leader) && +	if (last_user == current->group_leader &&  	    (current->flags & PF_EXITING) && (current->exit_code == SIGKILL))  		drm_sched_entity_kill(entity); @@ -552,10 +553,11 @@ void drm_sched_entity_select_rq(struct drm_sched_entity *entity)  		drm_sched_rq_remove_entity(entity->rq, entity);  		entity->rq = rq;  	} -	spin_unlock(&entity->lock);  	if (entity->num_sched_list == 1)  		entity->sched_list = NULL; + +	spin_unlock(&entity->lock);  }  /** diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c index 3e0ad7e5b5df..6d3db5e55d98 100644 --- a/drivers/gpu/drm/xe/xe_gt.c +++ b/drivers/gpu/drm/xe/xe_gt.c @@ -813,12 +813,16 @@ static int gt_reset(struct xe_gt *gt)  	unsigned int fw_ref;  	int err; -	if (xe_device_wedged(gt_to_xe(gt))) -		return -ECANCELED; +	if (xe_device_wedged(gt_to_xe(gt))) { +		err = -ECANCELED; +		goto err_pm_put; +	}  	/* We only support GT resets with GuC submission */ -	if (!xe_device_uc_enabled(gt_to_xe(gt))) -		return -ENODEV; +	if (!xe_device_uc_enabled(gt_to_xe(gt))) { +		err = -ENODEV; +		goto err_pm_put; +	}  	xe_gt_info(gt, "reset started\n"); @@ -826,8 +830,6 @@ static int gt_reset(struct xe_gt *gt)  	if (!err)  		xe_gt_warn(gt, "reset block failed to get lifted"); -	xe_pm_runtime_get(gt_to_xe(gt)); -  	if (xe_fault_inject_gt_reset()) {  		err = -ECANCELED;  		goto err_fail; @@ -874,6 +876,7 @@ err_fail:  	xe_gt_err(gt, "reset failed (%pe)\n", ERR_PTR(err));  	xe_device_declare_wedged(gt_to_xe(gt)); +err_pm_put:  	xe_pm_runtime_put(gt_to_xe(gt));  	return err; @@ -895,7 +898,9 @@ void xe_gt_reset_async(struct xe_gt *gt)  		return;  	xe_gt_info(gt, "reset queued\n"); -	queue_work(gt->ordered_wq, >->reset.worker); +	xe_pm_runtime_get_noresume(gt_to_xe(gt)); +	if (!queue_work(gt->ordered_wq, >->reset.worker)) +		xe_pm_runtime_put(gt_to_xe(gt));  }  void xe_gt_suspend_prepare(struct xe_gt *gt) diff --git a/drivers/gpu/drm/xe/xe_validation.h b/drivers/gpu/drm/xe/xe_validation.h index fec331d791e7..b2d09c596714 100644 --- a/drivers/gpu/drm/xe/xe_validation.h +++ b/drivers/gpu/drm/xe/xe_validation.h @@ -166,10 +166,10 @@ xe_validation_device_init(struct xe_validation_device *val)   */  DEFINE_CLASS(xe_validation, struct xe_validation_ctx *,  	     if (_T) xe_validation_ctx_fini(_T);, -	     ({_ret = xe_validation_ctx_init(_ctx, _val, _exec, _flags); -	       _ret ? NULL : _ctx; }), +	     ({*_ret = xe_validation_ctx_init(_ctx, _val, _exec, _flags); +	       *_ret ? NULL : _ctx; }),  	     struct xe_validation_ctx *_ctx, struct xe_validation_device *_val, -	     struct drm_exec *_exec, const struct xe_val_flags _flags, int _ret); +	     struct drm_exec *_exec, const struct xe_val_flags _flags, int *_ret);  static inline void *class_xe_validation_lock_ptr(class_xe_validation_t *_T)  {return *_T; }  #define class_xe_validation_is_conditional true @@ -186,7 +186,7 @@ static inline void *class_xe_validation_lock_ptr(class_xe_validation_t *_T)   * exhaustive eviction.   
*/  #define xe_validation_guard(_ctx, _val, _exec, _flags, _ret)		\ -	scoped_guard(xe_validation, _ctx, _val, _exec, _flags, _ret) \ +	scoped_guard(xe_validation, _ctx, _val, _exec, _flags, &_ret) \  	drm_exec_until_all_locked(_exec)  #endif diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index c916176bd9f0..72fb675a696f 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -1042,7 +1042,7 @@ static blk_status_t nvme_map_data(struct request *req)  	return nvme_pci_setup_data_prp(req, &iter);  } -static blk_status_t nvme_pci_setup_meta_sgls(struct request *req) +static blk_status_t nvme_pci_setup_meta_iter(struct request *req)  {  	struct nvme_queue *nvmeq = req->mq_hctx->driver_data;  	unsigned int entries = req->nr_integrity_segments; @@ -1072,8 +1072,12 @@ static blk_status_t nvme_pci_setup_meta_sgls(struct request *req)  	 * descriptor provides an explicit length, so we're relying on that  	 * mechanism to catch any misunderstandings between the application and  	 * device. +	 * +	 * P2P DMA also needs to use the blk_dma_iter method, so mptr setup +	 * leverages this routine when that happens.  	 */ -	if (entries == 1 && !(nvme_req(req)->flags & NVME_REQ_USERCMD)) { +	if (!nvme_ctrl_meta_sgl_supported(&dev->ctrl) || +	    (entries == 1 && !(nvme_req(req)->flags & NVME_REQ_USERCMD))) {  		iod->cmd.common.metadata = cpu_to_le64(iter.addr);  		iod->meta_total_len = iter.len;  		iod->meta_dma = iter.addr; @@ -1114,6 +1118,9 @@ static blk_status_t nvme_pci_setup_meta_mptr(struct request *req)  	struct nvme_queue *nvmeq = req->mq_hctx->driver_data;  	struct bio_vec bv = rq_integrity_vec(req); +	if (is_pci_p2pdma_page(bv.bv_page)) +		return nvme_pci_setup_meta_iter(req); +  	iod->meta_dma = dma_map_bvec(nvmeq->dev->dev, &bv, rq_dma_dir(req), 0);  	if (dma_mapping_error(nvmeq->dev->dev, iod->meta_dma))  		return BLK_STS_IOERR; @@ -1128,7 +1135,7 @@ static blk_status_t nvme_map_metadata(struct request *req)  	if ((iod->cmd.common.flags & NVME_CMD_SGL_METABUF) &&  	    nvme_pci_metadata_use_sgls(req)) -		return nvme_pci_setup_meta_sgls(req); +		return nvme_pci_setup_meta_iter(req);  	return nvme_pci_setup_meta_mptr(req);  } diff --git a/drivers/nvme/target/auth.c b/drivers/nvme/target/auth.c index b340380f3892..ceba21684e82 100644 --- a/drivers/nvme/target/auth.c +++ b/drivers/nvme/target/auth.c @@ -298,7 +298,7 @@ int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response,  	const char *hash_name;  	u8 *challenge = req->sq->dhchap_c1;  	struct nvme_dhchap_key *transformed_key; -	u8 buf[4]; +	u8 buf[4], sc_c = ctrl->concat ? 
1 : 0;  	int ret;  	hash_name = nvme_auth_hmac_name(ctrl->shash_id); @@ -367,13 +367,14 @@ int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response,  	ret = crypto_shash_update(shash, buf, 2);  	if (ret)  		goto out; -	memset(buf, 0, 4); +	*buf = sc_c;  	ret = crypto_shash_update(shash, buf, 1);  	if (ret)  		goto out;  	ret = crypto_shash_update(shash, "HostHost", 8);  	if (ret)  		goto out; +	memset(buf, 0, 4);  	ret = crypto_shash_update(shash, ctrl->hostnqn, strlen(ctrl->hostnqn));  	if (ret)  		goto out; diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c index 6948824642dc..c48a20602d7f 100644 --- a/drivers/pci/controller/dwc/pcie-qcom.c +++ b/drivers/pci/controller/dwc/pcie-qcom.c @@ -247,6 +247,7 @@ struct qcom_pcie_ops {  	int (*get_resources)(struct qcom_pcie *pcie);  	int (*init)(struct qcom_pcie *pcie);  	int (*post_init)(struct qcom_pcie *pcie); +	void (*host_post_init)(struct qcom_pcie *pcie);  	void (*deinit)(struct qcom_pcie *pcie);  	void (*ltssm_enable)(struct qcom_pcie *pcie);  	int (*config_sid)(struct qcom_pcie *pcie); @@ -1038,6 +1039,25 @@ static int qcom_pcie_post_init_2_7_0(struct qcom_pcie *pcie)  	return 0;  } +static int qcom_pcie_enable_aspm(struct pci_dev *pdev, void *userdata) +{ +	/* +	 * Downstream devices need to be in D0 state before enabling PCI PM +	 * substates. +	 */ +	pci_set_power_state_locked(pdev, PCI_D0); +	pci_enable_link_state_locked(pdev, PCIE_LINK_STATE_ALL); + +	return 0; +} + +static void qcom_pcie_host_post_init_2_7_0(struct qcom_pcie *pcie) +{ +	struct dw_pcie_rp *pp = &pcie->pci->pp; + +	pci_walk_bus(pp->bridge->bus, qcom_pcie_enable_aspm, NULL); +} +  static void qcom_pcie_deinit_2_7_0(struct qcom_pcie *pcie)  {  	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0; @@ -1312,9 +1332,19 @@ static void qcom_pcie_host_deinit(struct dw_pcie_rp *pp)  	pcie->cfg->ops->deinit(pcie);  } +static void qcom_pcie_host_post_init(struct dw_pcie_rp *pp) +{ +	struct dw_pcie *pci = to_dw_pcie_from_pp(pp); +	struct qcom_pcie *pcie = to_qcom_pcie(pci); + +	if (pcie->cfg->ops->host_post_init) +		pcie->cfg->ops->host_post_init(pcie); +} +  static const struct dw_pcie_host_ops qcom_pcie_dw_ops = {  	.init		= qcom_pcie_host_init,  	.deinit		= qcom_pcie_host_deinit, +	.post_init	= qcom_pcie_host_post_init,  };  /* Qcom IP rev.: 2.1.0	Synopsys IP rev.: 4.01a */ @@ -1376,6 +1406,7 @@ static const struct qcom_pcie_ops ops_1_9_0 = {  	.get_resources = qcom_pcie_get_resources_2_7_0,  	.init = qcom_pcie_init_2_7_0,  	.post_init = qcom_pcie_post_init_2_7_0, +	.host_post_init = qcom_pcie_host_post_init_2_7_0,  	.deinit = qcom_pcie_deinit_2_7_0,  	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,  	.config_sid = qcom_pcie_config_sid_1_9_0, @@ -1386,6 +1417,7 @@ static const struct qcom_pcie_ops ops_1_21_0 = {  	.get_resources = qcom_pcie_get_resources_2_7_0,  	.init = qcom_pcie_init_2_7_0,  	.post_init = qcom_pcie_post_init_2_7_0, +	.host_post_init = qcom_pcie_host_post_init_2_7_0,  	.deinit = qcom_pcie_deinit_2_7_0,  	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,  }; diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c index 4a8735b275e4..3645f392a9fd 100644 --- a/drivers/pci/setup-bus.c +++ b/drivers/pci/setup-bus.c @@ -1604,7 +1604,7 @@ void __pci_bus_size_bridges(struct pci_bus *bus, struct list_head *realloc_head)  		pbus_size_io(bus, realloc_head ? 
0 : additional_io_size,  			     additional_io_size, realloc_head); -		if (pref) { +		if (pref && (pref->flags & IORESOURCE_PREFETCH)) {  			pbus_size_mem(bus,  				      IORESOURCE_MEM | IORESOURCE_PREFETCH |  				      (pref->flags & IORESOURCE_MEM_64), diff --git a/drivers/regulator/bd718x7-regulator.c b/drivers/regulator/bd718x7-regulator.c index 022d98f3c32a..ea9c4058ee6a 100644 --- a/drivers/regulator/bd718x7-regulator.c +++ b/drivers/regulator/bd718x7-regulator.c @@ -1613,6 +1613,8 @@ static int setup_feedback_loop(struct device *dev, struct device_node *np,  				step /= r1;  				new[j].min = min; +				new[j].min_sel = desc->linear_ranges[j].min_sel; +				new[j].max_sel = desc->linear_ranges[j].max_sel;  				new[j].step = step;  				dev_dbg(dev, "%s: old range min %d, step %d\n", diff --git a/drivers/spi/spi-intel-pci.c b/drivers/spi/spi-intel-pci.c index 7765fb27c37c..b8c572394aac 100644 --- a/drivers/spi/spi-intel-pci.c +++ b/drivers/spi/spi-intel-pci.c @@ -80,6 +80,7 @@ static const struct pci_device_id intel_spi_pci_ids[] = {  	{ PCI_VDEVICE(INTEL, 0x51a4), (unsigned long)&cnl_info },  	{ PCI_VDEVICE(INTEL, 0x54a4), (unsigned long)&cnl_info },  	{ PCI_VDEVICE(INTEL, 0x5794), (unsigned long)&cnl_info }, +	{ PCI_VDEVICE(INTEL, 0x5825), (unsigned long)&cnl_info },  	{ PCI_VDEVICE(INTEL, 0x7723), (unsigned long)&cnl_info },  	{ PCI_VDEVICE(INTEL, 0x7a24), (unsigned long)&cnl_info },  	{ PCI_VDEVICE(INTEL, 0x7aa4), (unsigned long)&cnl_info }, diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c index 916cad80941c..5167bec14e36 100644 --- a/drivers/vfio/vfio_iommu_type1.c +++ b/drivers/vfio/vfio_iommu_type1.c @@ -38,6 +38,7 @@  #include <linux/workqueue.h>  #include <linux/notifier.h>  #include <linux/mm_inline.h> +#include <linux/overflow.h>  #include "vfio.h"  #define DRIVER_VERSION  "0.2" @@ -167,12 +168,14 @@ static struct vfio_dma *vfio_find_dma(struct vfio_iommu *iommu,  {  	struct rb_node *node = iommu->dma_list.rb_node; +	WARN_ON(!size); +  	while (node) {  		struct vfio_dma *dma = rb_entry(node, struct vfio_dma, node); -		if (start + size <= dma->iova) +		if (start + size - 1 < dma->iova)  			node = node->rb_left; -		else if (start >= dma->iova + dma->size) +		else if (start > dma->iova + dma->size - 1)  			node = node->rb_right;  		else  			return dma; @@ -182,16 +185,19 @@ static struct vfio_dma *vfio_find_dma(struct vfio_iommu *iommu,  }  static struct rb_node *vfio_find_dma_first_node(struct vfio_iommu *iommu, -						dma_addr_t start, u64 size) +						dma_addr_t start, +						dma_addr_t end)  {  	struct rb_node *res = NULL;  	struct rb_node *node = iommu->dma_list.rb_node;  	struct vfio_dma *dma_res = NULL; +	WARN_ON(end < start); +  	while (node) {  		struct vfio_dma *dma = rb_entry(node, struct vfio_dma, node); -		if (start < dma->iova + dma->size) { +		if (start <= dma->iova + dma->size - 1) {  			res = node;  			dma_res = dma;  			if (start >= dma->iova) @@ -201,7 +207,7 @@ static struct rb_node *vfio_find_dma_first_node(struct vfio_iommu *iommu,  			node = node->rb_right;  		}  	} -	if (res && size && dma_res->iova >= start + size) +	if (res && dma_res->iova > end)  		res = NULL;  	return res;  } @@ -211,11 +217,13 @@ static void vfio_link_dma(struct vfio_iommu *iommu, struct vfio_dma *new)  	struct rb_node **link = &iommu->dma_list.rb_node, *parent = NULL;  	struct vfio_dma *dma; +	WARN_ON(new->size != 0); +  	while (*link) {  		parent = *link;  		dma = rb_entry(parent, struct vfio_dma, node); -		if (new->iova + new->size <= dma->iova) +		
if (new->iova <= dma->iova)  			link = &(*link)->rb_left;  		else  			link = &(*link)->rb_right; @@ -895,14 +903,20 @@ static int vfio_iommu_type1_pin_pages(void *iommu_data,  	unsigned long remote_vaddr;  	struct vfio_dma *dma;  	bool do_accounting; +	dma_addr_t iova_end; +	size_t iova_size; -	if (!iommu || !pages) +	if (!iommu || !pages || npage <= 0)  		return -EINVAL;  	/* Supported for v2 version only */  	if (!iommu->v2)  		return -EACCES; +	if (check_mul_overflow(npage, PAGE_SIZE, &iova_size) || +	    check_add_overflow(user_iova, iova_size - 1, &iova_end)) +		return -EOVERFLOW; +  	mutex_lock(&iommu->lock);  	if (WARN_ONCE(iommu->vaddr_invalid_count, @@ -1008,12 +1022,21 @@ static void vfio_iommu_type1_unpin_pages(void *iommu_data,  {  	struct vfio_iommu *iommu = iommu_data;  	bool do_accounting; +	dma_addr_t iova_end; +	size_t iova_size;  	int i;  	/* Supported for v2 version only */  	if (WARN_ON(!iommu->v2))  		return; +	if (WARN_ON(npage <= 0)) +		return; + +	if (WARN_ON(check_mul_overflow(npage, PAGE_SIZE, &iova_size) || +		    check_add_overflow(user_iova, iova_size - 1, &iova_end))) +		return; +  	mutex_lock(&iommu->lock);  	do_accounting = list_empty(&iommu->domain_list); @@ -1067,7 +1090,7 @@ static long vfio_sync_unpin(struct vfio_dma *dma, struct vfio_domain *domain,  #define VFIO_IOMMU_TLB_SYNC_MAX		512  static size_t unmap_unpin_fast(struct vfio_domain *domain, -			       struct vfio_dma *dma, dma_addr_t *iova, +			       struct vfio_dma *dma, dma_addr_t iova,  			       size_t len, phys_addr_t phys, long *unlocked,  			       struct list_head *unmapped_list,  			       int *unmapped_cnt, @@ -1077,18 +1100,17 @@ static size_t unmap_unpin_fast(struct vfio_domain *domain,  	struct vfio_regions *entry = kzalloc(sizeof(*entry), GFP_KERNEL);  	if (entry) { -		unmapped = iommu_unmap_fast(domain->domain, *iova, len, +		unmapped = iommu_unmap_fast(domain->domain, iova, len,  					    iotlb_gather);  		if (!unmapped) {  			kfree(entry);  		} else { -			entry->iova = *iova; +			entry->iova = iova;  			entry->phys = phys;  			entry->len  = unmapped;  			list_add_tail(&entry->list, unmapped_list); -			*iova += unmapped;  			(*unmapped_cnt)++;  		}  	} @@ -1107,18 +1129,17 @@ static size_t unmap_unpin_fast(struct vfio_domain *domain,  }  static size_t unmap_unpin_slow(struct vfio_domain *domain, -			       struct vfio_dma *dma, dma_addr_t *iova, +			       struct vfio_dma *dma, dma_addr_t iova,  			       size_t len, phys_addr_t phys,  			       long *unlocked)  { -	size_t unmapped = iommu_unmap(domain->domain, *iova, len); +	size_t unmapped = iommu_unmap(domain->domain, iova, len);  	if (unmapped) { -		*unlocked += vfio_unpin_pages_remote(dma, *iova, +		*unlocked += vfio_unpin_pages_remote(dma, iova,  						     phys >> PAGE_SHIFT,  						     unmapped >> PAGE_SHIFT,  						     false); -		*iova += unmapped;  		cond_resched();  	}  	return unmapped; @@ -1127,12 +1148,12 @@ static size_t unmap_unpin_slow(struct vfio_domain *domain,  static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma,  			     bool do_accounting)  { -	dma_addr_t iova = dma->iova, end = dma->iova + dma->size;  	struct vfio_domain *domain, *d;  	LIST_HEAD(unmapped_region_list);  	struct iommu_iotlb_gather iotlb_gather;  	int unmapped_region_cnt = 0;  	long unlocked = 0; +	size_t pos = 0;  	if (!dma->size)  		return 0; @@ -1156,13 +1177,14 @@ static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma,  	}  	iommu_iotlb_gather_init(&iotlb_gather); -	while (iova < end) { +	while 
(pos < dma->size) {  		size_t unmapped, len;  		phys_addr_t phys, next; +		dma_addr_t iova = dma->iova + pos;  		phys = iommu_iova_to_phys(domain->domain, iova);  		if (WARN_ON(!phys)) { -			iova += PAGE_SIZE; +			pos += PAGE_SIZE;  			continue;  		} @@ -1171,7 +1193,7 @@ static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma,  		 * may require hardware cache flushing, try to find the  		 * largest contiguous physical memory chunk to unmap.  		 */ -		for (len = PAGE_SIZE; iova + len < end; len += PAGE_SIZE) { +		for (len = PAGE_SIZE; pos + len < dma->size; len += PAGE_SIZE) {  			next = iommu_iova_to_phys(domain->domain, iova + len);  			if (next != phys + len)  				break; @@ -1181,16 +1203,18 @@ static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma,  		 * First, try to use fast unmap/unpin. In case of failure,  		 * switch to slow unmap/unpin path.  		 */ -		unmapped = unmap_unpin_fast(domain, dma, &iova, len, phys, +		unmapped = unmap_unpin_fast(domain, dma, iova, len, phys,  					    &unlocked, &unmapped_region_list,  					    &unmapped_region_cnt,  					    &iotlb_gather);  		if (!unmapped) { -			unmapped = unmap_unpin_slow(domain, dma, &iova, len, +			unmapped = unmap_unpin_slow(domain, dma, iova, len,  						    phys, &unlocked);  			if (WARN_ON(!unmapped))  				break;  		} + +		pos += unmapped;  	}  	dma->iommu_mapped = false; @@ -1282,7 +1306,7 @@ static int update_user_bitmap(u64 __user *bitmap, struct vfio_iommu *iommu,  }  static int vfio_iova_dirty_bitmap(u64 __user *bitmap, struct vfio_iommu *iommu, -				  dma_addr_t iova, size_t size, size_t pgsize) +				  dma_addr_t iova, dma_addr_t iova_end, size_t pgsize)  {  	struct vfio_dma *dma;  	struct rb_node *n; @@ -1299,8 +1323,8 @@ static int vfio_iova_dirty_bitmap(u64 __user *bitmap, struct vfio_iommu *iommu,  	if (dma && dma->iova != iova)  		return -EINVAL; -	dma = vfio_find_dma(iommu, iova + size - 1, 0); -	if (dma && dma->iova + dma->size != iova + size) +	dma = vfio_find_dma(iommu, iova_end, 1); +	if (dma && dma->iova + dma->size - 1 != iova_end)  		return -EINVAL;  	for (n = rb_first(&iommu->dma_list); n; n = rb_next(n)) { @@ -1309,7 +1333,7 @@ static int vfio_iova_dirty_bitmap(u64 __user *bitmap, struct vfio_iommu *iommu,  		if (dma->iova < iova)  			continue; -		if (dma->iova > iova + size - 1) +		if (dma->iova > iova_end)  			break;  		ret = update_user_bitmap(bitmap, iommu, dma, iova, pgsize); @@ -1374,7 +1398,8 @@ static int vfio_dma_do_unmap(struct vfio_iommu *iommu,  	int ret = -EINVAL, retries = 0;  	unsigned long pgshift;  	dma_addr_t iova = unmap->iova; -	u64 size = unmap->size; +	dma_addr_t iova_end; +	size_t size = unmap->size;  	bool unmap_all = unmap->flags & VFIO_DMA_UNMAP_FLAG_ALL;  	bool invalidate_vaddr = unmap->flags & VFIO_DMA_UNMAP_FLAG_VADDR;  	struct rb_node *n, *first_n; @@ -1387,6 +1412,11 @@ static int vfio_dma_do_unmap(struct vfio_iommu *iommu,  		goto unlock;  	} +	if (iova != unmap->iova || size != unmap->size) { +		ret = -EOVERFLOW; +		goto unlock; +	} +  	pgshift = __ffs(iommu->pgsize_bitmap);  	pgsize = (size_t)1 << pgshift; @@ -1396,10 +1426,15 @@ static int vfio_dma_do_unmap(struct vfio_iommu *iommu,  	if (unmap_all) {  		if (iova || size)  			goto unlock; -		size = U64_MAX; -	} else if (!size || size & (pgsize - 1) || -		   iova + size - 1 < iova || size > SIZE_MAX) { -		goto unlock; +		iova_end = ~(dma_addr_t)0; +	} else { +		if (!size || size & (pgsize - 1)) +			goto unlock; + +		if (check_add_overflow(iova, size - 1, &iova_end)) { +			ret = 
-EOVERFLOW; +			goto unlock; +		}  	}  	/* When dirty tracking is enabled, allow only min supported pgsize */ @@ -1446,17 +1481,17 @@ again:  		if (dma && dma->iova != iova)  			goto unlock; -		dma = vfio_find_dma(iommu, iova + size - 1, 0); -		if (dma && dma->iova + dma->size != iova + size) +		dma = vfio_find_dma(iommu, iova_end, 1); +		if (dma && dma->iova + dma->size - 1 != iova_end)  			goto unlock;  	}  	ret = 0; -	n = first_n = vfio_find_dma_first_node(iommu, iova, size); +	n = first_n = vfio_find_dma_first_node(iommu, iova, iova_end);  	while (n) {  		dma = rb_entry(n, struct vfio_dma, node); -		if (dma->iova >= iova + size) +		if (dma->iova > iova_end)  			break;  		if (!iommu->v2 && iova > dma->iova) @@ -1648,7 +1683,9 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,  {  	bool set_vaddr = map->flags & VFIO_DMA_MAP_FLAG_VADDR;  	dma_addr_t iova = map->iova; +	dma_addr_t iova_end;  	unsigned long vaddr = map->vaddr; +	unsigned long vaddr_end;  	size_t size = map->size;  	int ret = 0, prot = 0;  	size_t pgsize; @@ -1656,8 +1693,15 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,  	/* Verify that none of our __u64 fields overflow */  	if (map->size != size || map->vaddr != vaddr || map->iova != iova) +		return -EOVERFLOW; + +	if (!size)  		return -EINVAL; +	if (check_add_overflow(iova, size - 1, &iova_end) || +	    check_add_overflow(vaddr, size - 1, &vaddr_end)) +		return -EOVERFLOW; +  	/* READ/WRITE from device perspective */  	if (map->flags & VFIO_DMA_MAP_FLAG_WRITE)  		prot |= IOMMU_WRITE; @@ -1673,13 +1717,7 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,  	WARN_ON((pgsize - 1) & PAGE_MASK); -	if (!size || (size | iova | vaddr) & (pgsize - 1)) { -		ret = -EINVAL; -		goto out_unlock; -	} - -	/* Don't allow IOVA or virtual address wrap */ -	if (iova + size - 1 < iova || vaddr + size - 1 < vaddr) { +	if ((size | iova | vaddr) & (pgsize - 1)) {  		ret = -EINVAL;  		goto out_unlock;  	} @@ -1710,7 +1748,7 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,  		goto out_unlock;  	} -	if (!vfio_iommu_iova_dma_valid(iommu, iova, iova + size - 1)) { +	if (!vfio_iommu_iova_dma_valid(iommu, iova, iova_end)) {  		ret = -EINVAL;  		goto out_unlock;  	} @@ -1783,12 +1821,12 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,  	for (; n; n = rb_next(n)) {  		struct vfio_dma *dma; -		dma_addr_t iova; +		size_t pos = 0;  		dma = rb_entry(n, struct vfio_dma, node); -		iova = dma->iova; -		while (iova < dma->iova + dma->size) { +		while (pos < dma->size) { +			dma_addr_t iova = dma->iova + pos;  			phys_addr_t phys;  			size_t size; @@ -1804,14 +1842,14 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,  				phys = iommu_iova_to_phys(d->domain, iova);  				if (WARN_ON(!phys)) { -					iova += PAGE_SIZE; +					pos += PAGE_SIZE;  					continue;  				}  				size = PAGE_SIZE;  				p = phys + size;  				i = iova + size; -				while (i < dma->iova + dma->size && +				while (pos + size < dma->size &&  				       p == iommu_iova_to_phys(d->domain, i)) {  					size += PAGE_SIZE;  					p += PAGE_SIZE; @@ -1819,9 +1857,8 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,  				}  			} else {  				unsigned long pfn; -				unsigned long vaddr = dma->vaddr + -						     (iova - dma->iova); -				size_t n = dma->iova + dma->size - iova; +				unsigned long vaddr = dma->vaddr + pos; +				size_t n = dma->size - pos;  				long npage;  				npage = vfio_pin_pages_remote(dma, vaddr, @@ -1852,7 +1889,7 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,  				goto unwind;  	
		} -			iova += size; +			pos += size;  		}  	} @@ -1869,29 +1906,29 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,  unwind:  	for (; n; n = rb_prev(n)) {  		struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node); -		dma_addr_t iova; +		size_t pos = 0;  		if (dma->iommu_mapped) {  			iommu_unmap(domain->domain, dma->iova, dma->size);  			continue;  		} -		iova = dma->iova; -		while (iova < dma->iova + dma->size) { +		while (pos < dma->size) { +			dma_addr_t iova = dma->iova + pos;  			phys_addr_t phys, p;  			size_t size;  			dma_addr_t i;  			phys = iommu_iova_to_phys(domain->domain, iova);  			if (!phys) { -				iova += PAGE_SIZE; +				pos += PAGE_SIZE;  				continue;  			}  			size = PAGE_SIZE;  			p = phys + size;  			i = iova + size; -			while (i < dma->iova + dma->size && +			while (pos + size < dma->size &&  			       p == iommu_iova_to_phys(domain->domain, i)) {  				size += PAGE_SIZE;  				p += PAGE_SIZE; @@ -2977,7 +3014,8 @@ static int vfio_iommu_type1_dirty_pages(struct vfio_iommu *iommu,  		struct vfio_iommu_type1_dirty_bitmap_get range;  		unsigned long pgshift;  		size_t data_size = dirty.argsz - minsz; -		size_t iommu_pgsize; +		size_t size, iommu_pgsize; +		dma_addr_t iova, iova_end;  		if (!data_size || data_size < sizeof(range))  			return -EINVAL; @@ -2986,14 +3024,24 @@ static int vfio_iommu_type1_dirty_pages(struct vfio_iommu *iommu,  				   sizeof(range)))  			return -EFAULT; -		if (range.iova + range.size < range.iova) +		iova = range.iova; +		size = range.size; + +		if (iova != range.iova || size != range.size) +			return -EOVERFLOW; + +		if (!size)  			return -EINVAL; + +		if (check_add_overflow(iova, size - 1, &iova_end)) +			return -EOVERFLOW; +  		if (!access_ok((void __user *)range.bitmap.data,  			       range.bitmap.size))  			return -EINVAL;  		pgshift = __ffs(range.bitmap.pgsize); -		ret = verify_bitmap_size(range.size >> pgshift, +		ret = verify_bitmap_size(size >> pgshift,  					 range.bitmap.size);  		if (ret)  			return ret; @@ -3007,19 +3055,18 @@ static int vfio_iommu_type1_dirty_pages(struct vfio_iommu *iommu,  			ret = -EINVAL;  			goto out_unlock;  		} -		if (range.iova & (iommu_pgsize - 1)) { +		if (iova & (iommu_pgsize - 1)) {  			ret = -EINVAL;  			goto out_unlock;  		} -		if (!range.size || range.size & (iommu_pgsize - 1)) { +		if (size & (iommu_pgsize - 1)) {  			ret = -EINVAL;  			goto out_unlock;  		}  		if (iommu->dirty_page_tracking)  			ret = vfio_iova_dirty_bitmap(range.bitmap.data, -						     iommu, range.iova, -						     range.size, +						     iommu, iova, iova_end,  						     range.bitmap.pgsize);  		else  			ret = -EINVAL; diff --git a/fs/smb/client/cifsfs.c b/fs/smb/client/cifsfs.c index 4f959f1e08d2..185ac41bd7e9 100644 --- a/fs/smb/client/cifsfs.c +++ b/fs/smb/client/cifsfs.c @@ -173,7 +173,7 @@ module_param(enable_oplocks, bool, 0644);  MODULE_PARM_DESC(enable_oplocks, "Enable or disable oplocks. Default: y/Y/1");  module_param(enable_gcm_256, bool, 0644); -MODULE_PARM_DESC(enable_gcm_256, "Enable requesting strongest (256 bit) GCM encryption. Default: y/Y/0"); +MODULE_PARM_DESC(enable_gcm_256, "Enable requesting strongest (256 bit) GCM encryption. Default: y/Y/1");  module_param(require_gcm_256, bool, 0644);  MODULE_PARM_DESC(require_gcm_256, "Require strongest (256 bit) GCM encryption. 
Default: n/N/0"); diff --git a/fs/smb/client/cifsproto.h b/fs/smb/client/cifsproto.h index fb1813cbe0eb..3528c365a452 100644 --- a/fs/smb/client/cifsproto.h +++ b/fs/smb/client/cifsproto.h @@ -616,6 +616,8 @@ extern int E_md4hash(const unsigned char *passwd, unsigned char *p16,  extern struct TCP_Server_Info *  cifs_find_tcp_session(struct smb3_fs_context *ctx); +struct cifs_tcon *cifs_setup_ipc(struct cifs_ses *ses, bool seal); +  void __cifs_put_smb_ses(struct cifs_ses *ses);  extern struct cifs_ses * diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c index dd12f3eb61dc..55cb4b0cbd48 100644 --- a/fs/smb/client/connect.c +++ b/fs/smb/client/connect.c @@ -310,6 +310,8 @@ cifs_abort_connection(struct TCP_Server_Info *server)  			 server->ssocket->flags);  		sock_release(server->ssocket);  		server->ssocket = NULL; +	} else if (cifs_rdma_enabled(server)) { +		smbd_destroy(server);  	}  	server->sequence_number = 0;  	server->session_estab = false; @@ -338,12 +340,6 @@ cifs_abort_connection(struct TCP_Server_Info *server)  		mid_execute_callback(mid);  		release_mid(mid);  	} - -	if (cifs_rdma_enabled(server)) { -		cifs_server_lock(server); -		smbd_destroy(server); -		cifs_server_unlock(server); -	}  }  static bool cifs_tcp_ses_needs_reconnect(struct TCP_Server_Info *server, int num_targets) @@ -2015,39 +2011,31 @@ static int match_session(struct cifs_ses *ses,  /**   * cifs_setup_ipc - helper to setup the IPC tcon for the session   * @ses: smb session to issue the request on - * @ctx: the superblock configuration context to use for building the - *       new tree connection for the IPC (interprocess communication RPC) + * @seal: if encryption is requested   *   * A new IPC connection is made and stored in the session   * tcon_ipc. The IPC tcon has the same lifetime as the session.   */ -static int -cifs_setup_ipc(struct cifs_ses *ses, struct smb3_fs_context *ctx) +struct cifs_tcon *cifs_setup_ipc(struct cifs_ses *ses, bool seal)  {  	int rc = 0, xid;  	struct cifs_tcon *tcon;  	char unc[SERVER_NAME_LENGTH + sizeof("//x/IPC$")] = {0}; -	bool seal = false;  	struct TCP_Server_Info *server = ses->server;  	/*  	 * If the mount request that resulted in the creation of the  	 * session requires encryption, force IPC to be encrypted too.  	 
*/ -	if (ctx->seal) { -		if (server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION) -			seal = true; -		else { -			cifs_server_dbg(VFS, -				 "IPC: server doesn't support encryption\n"); -			return -EOPNOTSUPP; -		} +	if (seal && !(server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION)) { +		cifs_server_dbg(VFS, "IPC: server doesn't support encryption\n"); +		return ERR_PTR(-EOPNOTSUPP);  	}  	/* no need to setup directory caching on IPC share, so pass in false */  	tcon = tcon_info_alloc(false, netfs_trace_tcon_ref_new_ipc);  	if (tcon == NULL) -		return -ENOMEM; +		return ERR_PTR(-ENOMEM);  	spin_lock(&server->srv_lock);  	scnprintf(unc, sizeof(unc), "\\\\%s\\IPC$", server->hostname); @@ -2057,13 +2045,13 @@ cifs_setup_ipc(struct cifs_ses *ses, struct smb3_fs_context *ctx)  	tcon->ses = ses;  	tcon->ipc = true;  	tcon->seal = seal; -	rc = server->ops->tree_connect(xid, ses, unc, tcon, ctx->local_nls); +	rc = server->ops->tree_connect(xid, ses, unc, tcon, ses->local_nls);  	free_xid(xid);  	if (rc) { -		cifs_server_dbg(VFS, "failed to connect to IPC (rc=%d)\n", rc); +		cifs_server_dbg(VFS | ONCE, "failed to connect to IPC (rc=%d)\n", rc);  		tconInfoFree(tcon, netfs_trace_tcon_ref_free_ipc_fail); -		goto out; +		return ERR_PTR(rc);  	}  	cifs_dbg(FYI, "IPC tcon rc=%d ipc tid=0x%x\n", rc, tcon->tid); @@ -2071,9 +2059,7 @@ cifs_setup_ipc(struct cifs_ses *ses, struct smb3_fs_context *ctx)  	spin_lock(&tcon->tc_lock);  	tcon->status = TID_GOOD;  	spin_unlock(&tcon->tc_lock); -	ses->tcon_ipc = tcon; -out: -	return rc; +	return tcon;  }  static struct cifs_ses * @@ -2347,6 +2333,7 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)  {  	struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)&server->dstaddr;  	struct sockaddr_in *addr = (struct sockaddr_in *)&server->dstaddr; +	struct cifs_tcon *ipc;  	struct cifs_ses *ses;  	unsigned int xid;  	int retries = 0; @@ -2525,7 +2512,12 @@ retry_new_session:  	list_add(&ses->smb_ses_list, &server->smb_ses_list);  	spin_unlock(&cifs_tcp_ses_lock); -	cifs_setup_ipc(ses, ctx); +	ipc = cifs_setup_ipc(ses, ctx->seal); +	spin_lock(&cifs_tcp_ses_lock); +	spin_lock(&ses->ses_lock); +	ses->tcon_ipc = !IS_ERR(ipc) ? 
ipc : NULL; +	spin_unlock(&ses->ses_lock); +	spin_unlock(&cifs_tcp_ses_lock);  	free_xid(xid); diff --git a/fs/smb/client/dfs_cache.c b/fs/smb/client/dfs_cache.c index 4dada26d56b5..f2ad0ccd08a7 100644 --- a/fs/smb/client/dfs_cache.c +++ b/fs/smb/client/dfs_cache.c @@ -1120,24 +1120,63 @@ static bool target_share_equal(struct cifs_tcon *tcon, const char *s1)  	return match;  } -static bool is_ses_good(struct cifs_ses *ses) +static bool is_ses_good(struct cifs_tcon *tcon, struct cifs_ses *ses)  {  	struct TCP_Server_Info *server = ses->server; -	struct cifs_tcon *tcon = ses->tcon_ipc; +	struct cifs_tcon *ipc = NULL;  	bool ret; +	spin_lock(&cifs_tcp_ses_lock);  	spin_lock(&ses->ses_lock);  	spin_lock(&ses->chan_lock); +  	ret = !cifs_chan_needs_reconnect(ses, server) && -		ses->ses_status == SES_GOOD && -		!tcon->need_reconnect; +		ses->ses_status == SES_GOOD; +  	spin_unlock(&ses->chan_lock); + +	if (!ret) +		goto out; + +	if (likely(ses->tcon_ipc)) { +		if (ses->tcon_ipc->need_reconnect) { +			ret = false; +			goto out; +		} +	} else { +		spin_unlock(&ses->ses_lock); +		spin_unlock(&cifs_tcp_ses_lock); + +		ipc = cifs_setup_ipc(ses, tcon->seal); + +		spin_lock(&cifs_tcp_ses_lock); +		spin_lock(&ses->ses_lock); +		if (!IS_ERR(ipc)) { +			if (!ses->tcon_ipc) { +				ses->tcon_ipc = ipc; +				ipc = NULL; +			} +		} else { +			ret = false; +			ipc = NULL; +		} +	} + +out:  	spin_unlock(&ses->ses_lock); +	spin_unlock(&cifs_tcp_ses_lock); +	if (ipc && server->ops->tree_disconnect) { +		unsigned int xid = get_xid(); + +		(void)server->ops->tree_disconnect(xid, ipc); +		_free_xid(xid); +	} +	tconInfoFree(ipc, netfs_trace_tcon_ref_free_ipc);  	return ret;  }  /* Refresh dfs referral of @ses */ -static void refresh_ses_referral(struct cifs_ses *ses) +static void refresh_ses_referral(struct cifs_tcon *tcon, struct cifs_ses *ses)  {  	struct cache_entry *ce;  	unsigned int xid; @@ -1153,7 +1192,7 @@ static void refresh_ses_referral(struct cifs_ses *ses)  	}  	ses = CIFS_DFS_ROOT_SES(ses); -	if (!is_ses_good(ses)) { +	if (!is_ses_good(tcon, ses)) {  		cifs_dbg(FYI, "%s: skip cache refresh due to disconnected ipc\n",  			 __func__);  		goto out; @@ -1241,7 +1280,7 @@ static void refresh_tcon_referral(struct cifs_tcon *tcon, bool force_refresh)  	up_read(&htable_rw_lock);  	ses = CIFS_DFS_ROOT_SES(ses); -	if (!is_ses_good(ses)) { +	if (!is_ses_good(tcon, ses)) {  		cifs_dbg(FYI, "%s: skip cache refresh due to disconnected ipc\n",  			 __func__);  		goto out; @@ -1309,7 +1348,7 @@ void dfs_cache_refresh(struct work_struct *work)  	tcon = container_of(work, struct cifs_tcon, dfs_cache_work.work);  	list_for_each_entry(ses, &tcon->dfs_ses_list, dlist) -		refresh_ses_referral(ses); +		refresh_ses_referral(tcon, ses);  	refresh_tcon_referral(tcon, false);  	queue_delayed_work(dfscache_wq, &tcon->dfs_cache_work, diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c index 0f9130ef2e7d..1e39f2165e42 100644 --- a/fs/smb/client/smb2ops.c +++ b/fs/smb/client/smb2ops.c @@ -2799,11 +2799,12 @@ smb2_query_info_compound(const unsigned int xid, struct cifs_tcon *tcon,  	struct cifs_fid fid;  	int rc;  	__le16 *utf16_path; -	struct cached_fid *cfid = NULL; +	struct cached_fid *cfid;  	int retries = 0, cur_sleep = 1;  replay_again:  	/* reinitialize for possible replay */ +	cfid = NULL;  	flags = CIFS_CP_CREATE_CLOSE_OP;  	oplock = SMB2_OPLOCK_LEVEL_NONE;  	server = cifs_pick_channel(ses); diff --git a/fs/xfs/libxfs/xfs_rtgroup.h b/fs/xfs/libxfs/xfs_rtgroup.h index d36a6ae0abe5..d4fcf591e63d 100644 --- 
a/fs/xfs/libxfs/xfs_rtgroup.h +++ b/fs/xfs/libxfs/xfs_rtgroup.h @@ -50,6 +50,12 @@ struct xfs_rtgroup {  		uint8_t			*rtg_rsum_cache;  		struct xfs_open_zone	*rtg_open_zone;  	}; + +	/* +	 * Count of outstanding GC operations for zoned XFS.  Any RTG with a +	 * non-zero rtg_gccount will not be picked as new GC victim. +	 */ +	atomic_t		rtg_gccount;  };  /* diff --git a/fs/xfs/xfs_zone_alloc.c b/fs/xfs/xfs_zone_alloc.c index 23cdab4515bb..040402240807 100644 --- a/fs/xfs/xfs_zone_alloc.c +++ b/fs/xfs/xfs_zone_alloc.c @@ -246,6 +246,14 @@ xfs_zoned_map_extent(  	 * If a data write raced with this GC write, keep the existing data in  	 * the data fork, mark our newly written GC extent as reclaimable, then  	 * move on to the next extent. +	 * +	 * Note that this can also happen when racing with operations that do +	 * not actually invalidate the data, but just move it to a different +	 * inode (XFS_IOC_EXCHANGE_RANGE), or to a different offset inside the +	 * inode (FALLOC_FL_COLLAPSE_RANGE / FALLOC_FL_INSERT_RANGE).  If the +	 * data was just moved around, GC fails to free the zone, but the zone +	 * becomes a GC candidate again as soon as all previous GC I/O has +	 * finished and these blocks will be moved out eventually.  	 */  	if (old_startblock != NULLFSBLOCK &&  	    old_startblock != data.br_startblock) diff --git a/fs/xfs/xfs_zone_gc.c b/fs/xfs/xfs_zone_gc.c index 109877d9a6bf..4ade54445532 100644 --- a/fs/xfs/xfs_zone_gc.c +++ b/fs/xfs/xfs_zone_gc.c @@ -114,6 +114,8 @@ struct xfs_gc_bio {  	/* Open Zone being written to */  	struct xfs_open_zone		*oz; +	struct xfs_rtgroup		*victim_rtg; +  	/* Bio used for reads and writes, including the bvec used by it */  	struct bio_vec			bv;  	struct bio			bio;	/* must be last */ @@ -264,6 +266,7 @@ xfs_zone_gc_iter_init(  	iter->rec_count = 0;  	iter->rec_idx = 0;  	iter->victim_rtg = victim_rtg; +	atomic_inc(&victim_rtg->rtg_gccount);  }  /* @@ -362,6 +365,7 @@ xfs_zone_gc_query(  	return 0;  done: +	atomic_dec(&iter->victim_rtg->rtg_gccount);  	xfs_rtgroup_rele(iter->victim_rtg);  	iter->victim_rtg = NULL;  	return 0; @@ -451,6 +455,20 @@ xfs_zone_gc_pick_victim_from(  		if (!rtg)  			continue; +		/* +		 * If the zone is already undergoing GC, don't pick it again. +		 * +		 * This prevents us from picking one of the zones for which we +		 * already submitted GC I/O, but for which the remapping hasn't +		 * concluded yet.  This won't cause data corruption, but +		 * increases write amplification and slows down GC, so this is +		 * a bad thing. 
+		 */ +		if (atomic_read(&rtg->rtg_gccount)) { +			xfs_rtgroup_rele(rtg); +			continue; +		} +  		/* skip zones that are just waiting for a reset */  		if (rtg_rmap(rtg)->i_used_blocks == 0 ||  		    rtg_rmap(rtg)->i_used_blocks >= victim_used) { @@ -688,6 +706,9 @@ xfs_zone_gc_start_chunk(  	chunk->scratch = &data->scratch[data->scratch_idx];  	chunk->data = data;  	chunk->oz = oz; +	chunk->victim_rtg = iter->victim_rtg; +	atomic_inc(&chunk->victim_rtg->rtg_group.xg_active_ref); +	atomic_inc(&chunk->victim_rtg->rtg_gccount);  	bio->bi_iter.bi_sector = xfs_rtb_to_daddr(mp, chunk->old_startblock);  	bio->bi_end_io = xfs_zone_gc_end_io; @@ -710,6 +731,8 @@ static void  xfs_zone_gc_free_chunk(  	struct xfs_gc_bio	*chunk)  { +	atomic_dec(&chunk->victim_rtg->rtg_gccount); +	xfs_rtgroup_rele(chunk->victim_rtg);  	list_del(&chunk->entry);  	xfs_open_zone_put(chunk->oz);  	xfs_irele(chunk->ip); @@ -770,6 +793,10 @@ xfs_zone_gc_split_write(  	split_chunk->oz = chunk->oz;  	atomic_inc(&chunk->oz->oz_ref); +	split_chunk->victim_rtg = chunk->victim_rtg; +	atomic_inc(&chunk->victim_rtg->rtg_group.xg_active_ref); +	atomic_inc(&chunk->victim_rtg->rtg_gccount); +  	chunk->offset += split_len;  	chunk->len -= split_len;  	chunk->old_startblock += XFS_B_TO_FSB(data->mp, split_len); diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index 8a9a2e732a65..e04d56a5332e 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -832,7 +832,7 @@ defined(CONFIG_AUTOFDO_CLANG) || defined(CONFIG_PROPELLER_CLANG)  /* Required sections not related to debugging. */  #define ELF_DETAILS							\ -		.modinfo : { *(.modinfo) }				\ +		.modinfo : { *(.modinfo) . = ALIGN(8); }		\  		.comment 0 : { *(.comment) }				\  		.symtab 0 : { *(.symtab) }				\  		.strtab 0 : { *(.strtab) }				\ diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index 8e8d1cc8b06c..44c30183ecc3 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -341,15 +341,15 @@ enum req_op {  	/* write the zero filled sector many times */  	REQ_OP_WRITE_ZEROES	= (__force blk_opf_t)9,  	/* Open a zone */ -	REQ_OP_ZONE_OPEN	= (__force blk_opf_t)10, +	REQ_OP_ZONE_OPEN	= (__force blk_opf_t)11,  	/* Close a zone */ -	REQ_OP_ZONE_CLOSE	= (__force blk_opf_t)11, +	REQ_OP_ZONE_CLOSE	= (__force blk_opf_t)13,  	/* Transition a zone to full */ -	REQ_OP_ZONE_FINISH	= (__force blk_opf_t)13, +	REQ_OP_ZONE_FINISH	= (__force blk_opf_t)15,  	/* reset a zone write pointer */ -	REQ_OP_ZONE_RESET	= (__force blk_opf_t)15, +	REQ_OP_ZONE_RESET	= (__force blk_opf_t)17,  	/* reset all the zone present on the device */ -	REQ_OP_ZONE_RESET_ALL	= (__force blk_opf_t)17, +	REQ_OP_ZONE_RESET_ALL	= (__force blk_opf_t)19,  	/* Driver private requests */  	REQ_OP_DRV_IN		= (__force blk_opf_t)34, @@ -478,6 +478,7 @@ static inline bool op_is_zone_mgmt(enum req_op op)  {  	switch (op & REQ_OP_MASK) {  	case REQ_OP_ZONE_RESET: +	case REQ_OP_ZONE_RESET_ALL:  	case REQ_OP_ZONE_OPEN:  	case REQ_OP_ZONE_CLOSE:  	case REQ_OP_ZONE_FINISH: diff --git a/include/linux/regmap.h b/include/linux/regmap.h index 4e1ac1fbcec4..55343795644b 100644 --- a/include/linux/regmap.h +++ b/include/linux/regmap.h @@ -1643,7 +1643,7 @@ struct regmap_irq_chip_data;   * @status_invert: Inverted status register: cleared bits are active interrupts.   * @status_is_level: Status register is actuall signal level: Xor status   *		     register with previous value to get active interrupts. 
- * @wake_invert: Inverted wake register: cleared bits are wake enabled. + * @wake_invert: Inverted wake register: cleared bits are wake disabled.   * @type_in_mask: Use the mask registers for controlling irq type. Use this if   *		  the hardware provides separate bits for rising/falling edge   *		  or low/high level interrupts and they should be combined into diff --git a/include/linux/sched.h b/include/linux/sched.h index cbb7340c5866..b469878de25c 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -2407,12 +2407,12 @@ static inline void __migrate_enable(void) { }   * be defined in kernel/sched/core.c.   */  #ifndef INSTANTIATE_EXPORTED_MIGRATE_DISABLE -static inline void migrate_disable(void) +static __always_inline void migrate_disable(void)  {  	__migrate_disable();  } -static inline void migrate_enable(void) +static __always_inline void migrate_enable(void)  {  	__migrate_enable();  } diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c index 8eb117c52817..eb25e70e0bdc 100644 --- a/kernel/bpf/helpers.c +++ b/kernel/bpf/helpers.c @@ -4345,6 +4345,7 @@ BTF_ID_FLAGS(func, bpf_iter_kmem_cache_next, KF_ITER_NEXT | KF_RET_NULL | KF_SLE  BTF_ID_FLAGS(func, bpf_iter_kmem_cache_destroy, KF_ITER_DESTROY | KF_SLEEPABLE)  BTF_ID_FLAGS(func, bpf_local_irq_save)  BTF_ID_FLAGS(func, bpf_local_irq_restore) +#ifdef CONFIG_BPF_EVENTS  BTF_ID_FLAGS(func, bpf_probe_read_user_dynptr)  BTF_ID_FLAGS(func, bpf_probe_read_kernel_dynptr)  BTF_ID_FLAGS(func, bpf_probe_read_user_str_dynptr) @@ -4353,6 +4354,7 @@ BTF_ID_FLAGS(func, bpf_copy_from_user_dynptr, KF_SLEEPABLE)  BTF_ID_FLAGS(func, bpf_copy_from_user_str_dynptr, KF_SLEEPABLE)  BTF_ID_FLAGS(func, bpf_copy_from_user_task_dynptr, KF_SLEEPABLE | KF_TRUSTED_ARGS)  BTF_ID_FLAGS(func, bpf_copy_from_user_task_str_dynptr, KF_SLEEPABLE | KF_TRUSTED_ARGS) +#endif  #ifdef CONFIG_DMA_SHARED_BUFFER  BTF_ID_FLAGS(func, bpf_iter_dmabuf_new, KF_ITER_NEW | KF_SLEEPABLE)  BTF_ID_FLAGS(func, bpf_iter_dmabuf_next, KF_ITER_NEXT | KF_RET_NULL | KF_SLEEPABLE) diff --git a/kernel/bpf/ringbuf.c b/kernel/bpf/ringbuf.c index 719d73299397..d706c4b7f532 100644 --- a/kernel/bpf/ringbuf.c +++ b/kernel/bpf/ringbuf.c @@ -216,6 +216,8 @@ static struct bpf_map *ringbuf_map_alloc(union bpf_attr *attr)  static void bpf_ringbuf_free(struct bpf_ringbuf *rb)  { +	irq_work_sync(&rb->work); +  	/* copy pages pointer and nr_pages to local variable, as we are going  	 * to unmap rb itself with vunmap() below  	 */ diff --git a/lib/Kconfig.kmsan b/lib/Kconfig.kmsan index 7251b6b59e69..cae1ddcc18e1 100644 --- a/lib/Kconfig.kmsan +++ b/lib/Kconfig.kmsan @@ -3,7 +3,7 @@ config HAVE_ARCH_KMSAN  	bool  config HAVE_KMSAN_COMPILER -	def_bool CC_IS_CLANG +	def_bool $(cc-option,-fsanitize=kernel-memory)  config KMSAN  	bool "KMSAN: detector of uninitialized values use" diff --git a/net/core/filter.c b/net/core/filter.c index 76628df1fc82..fa06c5a08e22 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -3877,7 +3877,8 @@ static inline int __bpf_skb_change_head(struct sk_buff *skb, u32 head_room,  	u32 new_len = skb->len + head_room;  	int ret; -	if (unlikely(flags || (!skb_is_gso(skb) && new_len > max_len) || +	if (unlikely(flags || (int)head_room < 0 || +		     (!skb_is_gso(skb) && new_len > max_len) ||  		     new_len < skb->len))  		return -EINVAL; diff --git a/scripts/kconfig/mconf.c b/scripts/kconfig/mconf.c index 84ea9215c0a7..b8b7bba84a65 100644 --- a/scripts/kconfig/mconf.c +++ b/scripts/kconfig/mconf.c @@ -12,6 +12,7 @@  #include <errno.h>  #include <fcntl.h>  #include 
<limits.h> +#include <locale.h>  #include <stdarg.h>  #include <stdlib.h>  #include <string.h> @@ -931,6 +932,8 @@ int main(int ac, char **av)  	signal(SIGINT, sig_handler); +	setlocale(LC_ALL, ""); +  	if (ac > 1 && strcmp(av[1], "-s") == 0) {  		silent = 1;  		/* Silence conf_read() until the real callback is set up */ diff --git a/scripts/kconfig/nconf.c b/scripts/kconfig/nconf.c index ae1fe5f60327..521700ed7152 100644 --- a/scripts/kconfig/nconf.c +++ b/scripts/kconfig/nconf.c @@ -7,6 +7,7 @@  #ifndef _GNU_SOURCE  #define _GNU_SOURCE  #endif +#include <locale.h>  #include <string.h>  #include <strings.h>  #include <stdlib.h> @@ -1478,6 +1479,8 @@ int main(int ac, char **av)  	int lines, columns;  	char *mode; +	setlocale(LC_ALL, ""); +  	if (ac > 1 && strcmp(av[1], "-s") == 0) {  		/* Silence conf_read() until the real callback is set up */  		conf_set_message_callback(NULL); diff --git a/scripts/package/install-extmod-build b/scripts/package/install-extmod-build index b96538787f3d..054fdf45cc37 100755 --- a/scripts/package/install-extmod-build +++ b/scripts/package/install-extmod-build @@ -63,7 +63,7 @@ if [ "${CC}" != "${HOSTCC}" ]; then  	# Clear VPATH and srcroot because the source files reside in the output  	# directory.  	# shellcheck disable=SC2016 # $(MAKE) and $(build) will be expanded by Make -	"${MAKE}" run-command KBUILD_RUN_COMMAND='+$(MAKE) HOSTCC='"${CC}"' VPATH= srcroot=. $(build)='"$(realpath --relative-base=. "${destdir}")"/scripts +	"${MAKE}" run-command KBUILD_RUN_COMMAND='+$(MAKE) HOSTCC='"${CC}"' VPATH= srcroot=. $(build)='"$(realpath --relative-to=. "${destdir}")"/scripts  	rm -f "${destdir}/scripts/Kbuild"  fi diff --git a/tools/lib/bpf/bpf_tracing.h b/tools/lib/bpf/bpf_tracing.h index a8f6cd4841b0..dbe32a5d02cd 100644 --- a/tools/lib/bpf/bpf_tracing.h +++ b/tools/lib/bpf/bpf_tracing.h @@ -311,7 +311,7 @@ struct pt_regs___arm64 {  #define __PT_RET_REG regs[31]  #define __PT_FP_REG __unsupported__  #define __PT_RC_REG gpr[3] -#define __PT_SP_REG sp +#define __PT_SP_REG gpr[1]  #define __PT_IP_REG nip  #elif defined(bpf_target_sparc) diff --git a/tools/objtool/check.c b/tools/objtool/check.c index 620854fdaaf6..9004fbc06769 100644 --- a/tools/objtool/check.c +++ b/tools/objtool/check.c @@ -3516,8 +3516,11 @@ static bool skip_alt_group(struct instruction *insn)  {  	struct instruction *alt_insn = insn->alts ? 
insn->alts->insn : NULL; +	if (!insn->alt_group) +		return false; +  	/* ANNOTATE_IGNORE_ALTERNATIVE */ -	if (insn->alt_group && insn->alt_group->ignore) +	if (insn->alt_group->ignore)  		return true;  	/* diff --git a/tools/testing/selftests/vfio/lib/include/vfio_util.h b/tools/testing/selftests/vfio/lib/include/vfio_util.h index ed31606e01b7..240409bf5f8a 100644 --- a/tools/testing/selftests/vfio/lib/include/vfio_util.h +++ b/tools/testing/selftests/vfio/lib/include/vfio_util.h @@ -206,10 +206,29 @@ struct vfio_pci_device *vfio_pci_device_init(const char *bdf, const char *iommu_  void vfio_pci_device_cleanup(struct vfio_pci_device *device);  void vfio_pci_device_reset(struct vfio_pci_device *device); -void vfio_pci_dma_map(struct vfio_pci_device *device, -		      struct vfio_dma_region *region); -void vfio_pci_dma_unmap(struct vfio_pci_device *device, -			struct vfio_dma_region *region); +int __vfio_pci_dma_map(struct vfio_pci_device *device, +		       struct vfio_dma_region *region); +int __vfio_pci_dma_unmap(struct vfio_pci_device *device, +			 struct vfio_dma_region *region, +			 u64 *unmapped); +int __vfio_pci_dma_unmap_all(struct vfio_pci_device *device, u64 *unmapped); + +static inline void vfio_pci_dma_map(struct vfio_pci_device *device, +				    struct vfio_dma_region *region) +{ +	VFIO_ASSERT_EQ(__vfio_pci_dma_map(device, region), 0); +} + +static inline void vfio_pci_dma_unmap(struct vfio_pci_device *device, +				      struct vfio_dma_region *region) +{ +	VFIO_ASSERT_EQ(__vfio_pci_dma_unmap(device, region, NULL), 0); +} + +static inline void vfio_pci_dma_unmap_all(struct vfio_pci_device *device) +{ +	VFIO_ASSERT_EQ(__vfio_pci_dma_unmap_all(device, NULL), 0); +}  void vfio_pci_config_access(struct vfio_pci_device *device, bool write,  			    size_t config, size_t size, void *data); diff --git a/tools/testing/selftests/vfio/lib/vfio_pci_device.c b/tools/testing/selftests/vfio/lib/vfio_pci_device.c index 0921b2451ba5..a381fd253aa7 100644 --- a/tools/testing/selftests/vfio/lib/vfio_pci_device.c +++ b/tools/testing/selftests/vfio/lib/vfio_pci_device.c @@ -2,6 +2,7 @@  #include <dirent.h>  #include <fcntl.h>  #include <libgen.h> +#include <stdint.h>  #include <stdlib.h>  #include <string.h>  #include <unistd.h> @@ -141,7 +142,7 @@ static void vfio_pci_irq_get(struct vfio_pci_device *device, u32 index,  	ioctl_assert(device->fd, VFIO_DEVICE_GET_IRQ_INFO, irq_info);  } -static void vfio_iommu_dma_map(struct vfio_pci_device *device, +static int vfio_iommu_dma_map(struct vfio_pci_device *device,  			       struct vfio_dma_region *region)  {  	struct vfio_iommu_type1_dma_map args = { @@ -152,10 +153,13 @@ static void vfio_iommu_dma_map(struct vfio_pci_device *device,  		.size = region->size,  	}; -	ioctl_assert(device->container_fd, VFIO_IOMMU_MAP_DMA, &args); +	if (ioctl(device->container_fd, VFIO_IOMMU_MAP_DMA, &args)) +		return -errno; + +	return 0;  } -static void iommufd_dma_map(struct vfio_pci_device *device, +static int iommufd_dma_map(struct vfio_pci_device *device,  			    struct vfio_dma_region *region)  {  	struct iommu_ioas_map args = { @@ -169,54 +173,108 @@ static void iommufd_dma_map(struct vfio_pci_device *device,  		.ioas_id = device->ioas_id,  	}; -	ioctl_assert(device->iommufd, IOMMU_IOAS_MAP, &args); +	if (ioctl(device->iommufd, IOMMU_IOAS_MAP, &args)) +		return -errno; + +	return 0;  } -void vfio_pci_dma_map(struct vfio_pci_device *device, +int __vfio_pci_dma_map(struct vfio_pci_device *device,  		      struct vfio_dma_region *region)  { +	int ret; +  	if 
(device->iommufd) -		iommufd_dma_map(device, region); +		ret = iommufd_dma_map(device, region);  	else -		vfio_iommu_dma_map(device, region); +		ret = vfio_iommu_dma_map(device, region); + +	if (ret) +		return ret;  	list_add(®ion->link, &device->dma_regions); + +	return 0;  } -static void vfio_iommu_dma_unmap(struct vfio_pci_device *device, -				 struct vfio_dma_region *region) +static int vfio_iommu_dma_unmap(int fd, u64 iova, u64 size, u32 flags, +				u64 *unmapped)  {  	struct vfio_iommu_type1_dma_unmap args = {  		.argsz = sizeof(args), -		.iova = region->iova, -		.size = region->size, +		.iova = iova, +		.size = size, +		.flags = flags,  	}; -	ioctl_assert(device->container_fd, VFIO_IOMMU_UNMAP_DMA, &args); +	if (ioctl(fd, VFIO_IOMMU_UNMAP_DMA, &args)) +		return -errno; + +	if (unmapped) +		*unmapped = args.size; + +	return 0;  } -static void iommufd_dma_unmap(struct vfio_pci_device *device, -			      struct vfio_dma_region *region) +static int iommufd_dma_unmap(int fd, u64 iova, u64 length, u32 ioas_id, +			     u64 *unmapped)  {  	struct iommu_ioas_unmap args = {  		.size = sizeof(args), -		.iova = region->iova, -		.length = region->size, -		.ioas_id = device->ioas_id, +		.iova = iova, +		.length = length, +		.ioas_id = ioas_id,  	}; -	ioctl_assert(device->iommufd, IOMMU_IOAS_UNMAP, &args); +	if (ioctl(fd, IOMMU_IOAS_UNMAP, &args)) +		return -errno; + +	if (unmapped) +		*unmapped = args.length; + +	return 0;  } -void vfio_pci_dma_unmap(struct vfio_pci_device *device, -			struct vfio_dma_region *region) +int __vfio_pci_dma_unmap(struct vfio_pci_device *device, +			 struct vfio_dma_region *region, u64 *unmapped)  { +	int ret; + +	if (device->iommufd) +		ret = iommufd_dma_unmap(device->iommufd, region->iova, +					region->size, device->ioas_id, +					unmapped); +	else +		ret = vfio_iommu_dma_unmap(device->container_fd, region->iova, +					   region->size, 0, unmapped); + +	if (ret) +		return ret; + +	list_del_init(®ion->link); + +	return 0; +} + +int __vfio_pci_dma_unmap_all(struct vfio_pci_device *device, u64 *unmapped) +{ +	int ret; +	struct vfio_dma_region *curr, *next; +  	if (device->iommufd) -		iommufd_dma_unmap(device, region); +		ret = iommufd_dma_unmap(device->iommufd, 0, UINT64_MAX, +					device->ioas_id, unmapped);  	else -		vfio_iommu_dma_unmap(device, region); +		ret = vfio_iommu_dma_unmap(device->container_fd, 0, 0, +					   VFIO_DMA_UNMAP_FLAG_ALL, unmapped); + +	if (ret) +		return ret; + +	list_for_each_entry_safe(curr, next, &device->dma_regions, link) +		list_del_init(&curr->link); -	list_del(®ion->link); +	return 0;  }  static void vfio_pci_region_get(struct vfio_pci_device *device, int index, diff --git a/tools/testing/selftests/vfio/vfio_dma_mapping_test.c b/tools/testing/selftests/vfio/vfio_dma_mapping_test.c index ab19c54a774d..4f1ea79a200c 100644 --- a/tools/testing/selftests/vfio/vfio_dma_mapping_test.c +++ b/tools/testing/selftests/vfio/vfio_dma_mapping_test.c @@ -112,6 +112,8 @@ FIXTURE_VARIANT_ADD_ALL_IOMMU_MODES(anonymous, 0, 0);  FIXTURE_VARIANT_ADD_ALL_IOMMU_MODES(anonymous_hugetlb_2mb, SZ_2M, MAP_HUGETLB | MAP_HUGE_2MB);  FIXTURE_VARIANT_ADD_ALL_IOMMU_MODES(anonymous_hugetlb_1gb, SZ_1G, MAP_HUGETLB | MAP_HUGE_1GB); +#undef FIXTURE_VARIANT_ADD_IOMMU_MODE +  FIXTURE_SETUP(vfio_dma_mapping_test)  {  	self->device = vfio_pci_device_init(device_bdf, variant->iommu_mode); @@ -129,6 +131,7 @@ TEST_F(vfio_dma_mapping_test, dma_map_unmap)  	struct vfio_dma_region region;  	struct iommu_mapping mapping;  	u64 mapping_size = size; +	u64 unmapped;  	int rc;  	
region.vaddr = mmap(NULL, size, PROT_READ | PROT_WRITE, flags, -1, 0); @@ -184,7 +187,9 @@ TEST_F(vfio_dma_mapping_test, dma_map_unmap)  	}  unmap: -	vfio_pci_dma_unmap(self->device, ®ion); +	rc = __vfio_pci_dma_unmap(self->device, ®ion, &unmapped); +	ASSERT_EQ(rc, 0); +	ASSERT_EQ(unmapped, region.size);  	printf("Unmapped IOVA 0x%lx\n", region.iova);  	ASSERT_EQ(INVALID_IOVA, __to_iova(self->device, region.vaddr));  	ASSERT_NE(0, iommu_mapping_get(device_bdf, region.iova, &mapping)); @@ -192,6 +197,94 @@ unmap:  	ASSERT_TRUE(!munmap(region.vaddr, size));  } +FIXTURE(vfio_dma_map_limit_test) { +	struct vfio_pci_device *device; +	struct vfio_dma_region region; +	size_t mmap_size; +}; + +FIXTURE_VARIANT(vfio_dma_map_limit_test) { +	const char *iommu_mode; +}; + +#define FIXTURE_VARIANT_ADD_IOMMU_MODE(_iommu_mode)			       \ +FIXTURE_VARIANT_ADD(vfio_dma_map_limit_test, _iommu_mode) {		       \ +	.iommu_mode = #_iommu_mode,					       \ +} + +FIXTURE_VARIANT_ADD_ALL_IOMMU_MODES(); + +#undef FIXTURE_VARIANT_ADD_IOMMU_MODE + +FIXTURE_SETUP(vfio_dma_map_limit_test) +{ +	struct vfio_dma_region *region = &self->region; +	u64 region_size = getpagesize(); + +	/* +	 * Over-allocate mmap by double the size to provide enough backing vaddr +	 * for overflow tests +	 */ +	self->mmap_size = 2 * region_size; + +	self->device = vfio_pci_device_init(device_bdf, variant->iommu_mode); +	region->vaddr = mmap(NULL, self->mmap_size, PROT_READ | PROT_WRITE, +			     MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); +	ASSERT_NE(region->vaddr, MAP_FAILED); + +	/* One page prior to the end of address space */ +	region->iova = ~(iova_t)0 & ~(region_size - 1); +	region->size = region_size; +} + +FIXTURE_TEARDOWN(vfio_dma_map_limit_test) +{ +	vfio_pci_device_cleanup(self->device); +	ASSERT_EQ(munmap(self->region.vaddr, self->mmap_size), 0); +} + +TEST_F(vfio_dma_map_limit_test, unmap_range) +{ +	struct vfio_dma_region *region = &self->region; +	u64 unmapped; +	int rc; + +	vfio_pci_dma_map(self->device, region); +	ASSERT_EQ(region->iova, to_iova(self->device, region->vaddr)); + +	rc = __vfio_pci_dma_unmap(self->device, region, &unmapped); +	ASSERT_EQ(rc, 0); +	ASSERT_EQ(unmapped, region->size); +} + +TEST_F(vfio_dma_map_limit_test, unmap_all) +{ +	struct vfio_dma_region *region = &self->region; +	u64 unmapped; +	int rc; + +	vfio_pci_dma_map(self->device, region); +	ASSERT_EQ(region->iova, to_iova(self->device, region->vaddr)); + +	rc = __vfio_pci_dma_unmap_all(self->device, &unmapped); +	ASSERT_EQ(rc, 0); +	ASSERT_EQ(unmapped, region->size); +} + +TEST_F(vfio_dma_map_limit_test, overflow) +{ +	struct vfio_dma_region *region = &self->region; +	int rc; + +	region->size = self->mmap_size; + +	rc = __vfio_pci_dma_map(self->device, region); +	ASSERT_EQ(rc, -EOVERFLOW); + +	rc = __vfio_pci_dma_unmap(self->device, region, NULL); +	ASSERT_EQ(rc, -EOVERFLOW); +} +  int main(int argc, char *argv[])  {  	device_bdf = vfio_selftests_get_bdf(&argc, argv);  | 
