257 files changed, 2043 insertions(+), 854 deletions(-)
@@ -644,6 +644,7 @@ Qais Yousef <qyousef@layalina.io> <qais.yousef@arm.com>  Quentin Monnet <qmo@kernel.org> <quentin.monnet@netronome.com>  Quentin Monnet <qmo@kernel.org> <quentin@isovalent.com>  Quentin Perret <qperret@qperret.net> <quentin.perret@arm.com> +Rae Moar <raemoar63@gmail.com> <rmoar@google.com>  Rafael J. Wysocki <rjw@rjwysocki.net> <rjw@sisk.pl>  Rajeev Nandan <quic_rajeevny@quicinc.com> <rajeevny@codeaurora.org>  Rajendra Nayak <quic_rjendra@quicinc.com> <rnayak@codeaurora.org> @@ -2036,6 +2036,10 @@ S: Botanicka' 68a  S: 602 00 Brno  S: Czech Republic +N: Karsten Keil +E: isdn@linux-pingi.de +D: ISDN subsystem maintainer +  N: Jakob Kemi  E: jakob.kemi@telia.com  D: V4L W9966 Webcam driver diff --git a/Documentation/devicetree/bindings/net/microchip,sparx5-switch.yaml b/Documentation/devicetree/bindings/net/microchip,sparx5-switch.yaml index 5caa3779660d..5491d0775ede 100644 --- a/Documentation/devicetree/bindings/net/microchip,sparx5-switch.yaml +++ b/Documentation/devicetree/bindings/net/microchip,sparx5-switch.yaml @@ -180,9 +180,9 @@ allOf:      then:        properties:          reg: -          minItems: 2 +          maxItems: 2          reg-names: -          minItems: 2 +          maxItems: 2      else:        properties:          reg: diff --git a/Documentation/devicetree/bindings/sound/qcom,pm4125-sdw.yaml b/Documentation/devicetree/bindings/sound/qcom,pm4125-sdw.yaml index 23624f32ac30..769e4cb5b99b 100644 --- a/Documentation/devicetree/bindings/sound/qcom,pm4125-sdw.yaml +++ b/Documentation/devicetree/bindings/sound/qcom,pm4125-sdw.yaml @@ -32,7 +32,7 @@ properties:      $ref: /schemas/types.yaml#/definitions/uint32-array      minItems: 2 -    maxItems: 2 +    maxItems: 4      items:        enum: [1, 2, 3, 4] @@ -48,7 +48,7 @@ properties:      $ref: /schemas/types.yaml#/definitions/uint32-array      minItems: 2 -    maxItems: 2 +    maxItems: 5      items:        enum: [1, 2, 3, 4, 5] diff --git a/Documentation/netlink/specs/dpll.yaml b/Documentation/netlink/specs/dpll.yaml index cafb4ec20447..80728f6f9bc8 100644 --- a/Documentation/netlink/specs/dpll.yaml +++ b/Documentation/netlink/specs/dpll.yaml @@ -605,6 +605,8 @@ operations:          reply: &pin-attrs            attributes:              - id +            - module-name +            - clock-id              - board-label              - panel-label              - package-label diff --git a/Documentation/networking/netconsole.rst b/Documentation/networking/netconsole.rst index 59cb9982afe6..2555e75e5cc1 100644 --- a/Documentation/networking/netconsole.rst +++ b/Documentation/networking/netconsole.rst @@ -19,9 +19,6 @@ Userdata append support by Matthew Wood <thepacketgeek@gmail.com>, Jan 22 2024  Sysdata append support by Breno Leitao <leitao@debian.org>, Jan 15 2025 -Please send bug reports to Matt Mackall <mpm@selenic.com> -Satyam Sharma <satyam.sharma@gmail.com>, and Cong Wang <xiyou.wangcong@gmail.com> -  Introduction:  ============= diff --git a/MAINTAINERS b/MAINTAINERS index 3da2c26a796b..46bd8e033042 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -13260,10 +13260,8 @@ T:	git git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending.git mast  F:	drivers/infiniband/ulp/isert  ISDN/CMTP OVER BLUETOOTH -M:	Karsten Keil <isdn@linux-pingi.de> -L:	isdn4linux@listserv.isdn4linux.de (subscribers-only)  L:	netdev@vger.kernel.org -S:	Odd Fixes +S:	Orphan  W:	http://www.isdn4linux.de  F:	Documentation/isdn/  F:	drivers/isdn/capi/ @@ -13272,10 +13270,8 @@ F:	include/uapi/linux/isdn/  F:	net/bluetooth/cmtp/  
ISDN/mISDN SUBSYSTEM -M:	Karsten Keil <isdn@linux-pingi.de> -L:	isdn4linux@listserv.isdn4linux.de (subscribers-only)  L:	netdev@vger.kernel.org -S:	Maintained +S:	Orphan  W:	http://www.isdn4linux.de  F:	drivers/isdn/Kconfig  F:	drivers/isdn/Makefile @@ -13429,9 +13425,12 @@ F:	mm/kasan/  F:	scripts/Makefile.kasan  KCONFIG +M:	Nathan Chancellor <nathan@kernel.org> +M:	Nicolas Schier <nsc@kernel.org>  L:	linux-kbuild@vger.kernel.org -S:	Orphan +S:	Odd Fixes  Q:	https://patchwork.kernel.org/project/linux-kbuild/list/ +T:	git git://git.kernel.org/pub/scm/linux/kernel/git/kbuild/linux.git  F:	Documentation/kbuild/kconfig*  F:	scripts/Kconfig.include  F:	scripts/kconfig/ @@ -13616,7 +13615,7 @@ F:	fs/smb/server/  KERNEL UNIT TESTING FRAMEWORK (KUnit)  M:	Brendan Higgins <brendan.higgins@linux.dev>  M:	David Gow <davidgow@google.com> -R:	Rae Moar <rmoar@google.com> +R:	Rae Moar <raemoar63@gmail.com>  L:	linux-kselftest@vger.kernel.org  L:	kunit-dev@googlegroups.com  S:	Maintained @@ -21332,6 +21331,7 @@ F:	drivers/media/platform/qcom/venus/  QUALCOMM WCN36XX WIRELESS DRIVER  M:	Loic Poulain <loic.poulain@oss.qualcomm.com>  L:	wcn36xx@lists.infradead.org +L:	linux-wireless@vger.kernel.org  S:	Supported  W:	https://wireless.wiki.kernel.org/en/users/Drivers/wcn36xx  F:	drivers/net/wireless/ath/wcn36xx/ @@ -2,7 +2,7 @@  VERSION = 6  PATCHLEVEL = 18  SUBLEVEL = 0 -EXTRAVERSION = -rc3 +EXTRAVERSION = -rc4  NAME = Baby Opossum Posse  # *DOCUMENTATION* diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c index ab83089c3d8f..0c9a50a1e73e 100644 --- a/arch/arm64/net/bpf_jit_comp.c +++ b/arch/arm64/net/bpf_jit_comp.c @@ -1213,6 +1213,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,  	u8 src = bpf2a64[insn->src_reg];  	const u8 tmp = bpf2a64[TMP_REG_1];  	const u8 tmp2 = bpf2a64[TMP_REG_2]; +	const u8 tmp3 = bpf2a64[TMP_REG_3];  	const u8 fp = bpf2a64[BPF_REG_FP];  	const u8 arena_vm_base = bpf2a64[ARENA_VM_START];  	const u8 priv_sp = bpf2a64[PRIVATE_SP]; @@ -1757,8 +1758,8 @@ emit_cond_jmp:  	case BPF_ST | BPF_PROBE_MEM32 | BPF_W:  	case BPF_ST | BPF_PROBE_MEM32 | BPF_DW:  		if (BPF_MODE(insn->code) == BPF_PROBE_MEM32) { -			emit(A64_ADD(1, tmp2, dst, arena_vm_base), ctx); -			dst = tmp2; +			emit(A64_ADD(1, tmp3, dst, arena_vm_base), ctx); +			dst = tmp3;  		}  		if (dst == fp) {  			dst_adj = ctx->priv_sp_used ? 
priv_sp : A64_SP; diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index c4145672ca34..df22b10d9141 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -158,7 +158,6 @@ config S390  	select ARCH_WANT_IRQS_OFF_ACTIVATE_MM  	select ARCH_WANT_KERNEL_PMD_MKWRITE  	select ARCH_WANT_LD_ORPHAN_WARN -	select ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP  	select ARCH_WANTS_THP_SWAP  	select BUILDTIME_TABLE_SORT  	select CLONE_BACKWARDS2 diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig index b31c1df90257..8433f769f7e1 100644 --- a/arch/s390/configs/debug_defconfig +++ b/arch/s390/configs/debug_defconfig @@ -101,6 +101,7 @@ CONFIG_SLUB_STATS=y  CONFIG_MEMORY_HOTPLUG=y  CONFIG_MEMORY_HOTREMOVE=y  CONFIG_KSM=y +CONFIG_PERSISTENT_HUGE_ZERO_FOLIO=y  CONFIG_TRANSPARENT_HUGEPAGE=y  CONFIG_CMA_DEBUGFS=y  CONFIG_CMA_SYSFS=y @@ -123,12 +124,12 @@ CONFIG_TLS_DEVICE=y  CONFIG_TLS_TOE=y  CONFIG_XFRM_USER=m  CONFIG_NET_KEY=m -CONFIG_XDP_SOCKETS=y -CONFIG_XDP_SOCKETS_DIAG=m -CONFIG_DIBS=y -CONFIG_DIBS_LO=y  CONFIG_SMC=m  CONFIG_SMC_DIAG=m +CONFIG_DIBS=y +CONFIG_DIBS_LO=y +CONFIG_XDP_SOCKETS=y +CONFIG_XDP_SOCKETS_DIAG=m  CONFIG_INET=y  CONFIG_IP_MULTICAST=y  CONFIG_IP_ADVANCED_ROUTER=y @@ -472,6 +473,7 @@ CONFIG_SCSI_DH_EMC=m  CONFIG_SCSI_DH_ALUA=m  CONFIG_MD=y  CONFIG_BLK_DEV_MD=y +CONFIG_MD_LLBITMAP=y  # CONFIG_MD_BITMAP_FILE is not set  CONFIG_MD_LINEAR=m  CONFIG_MD_CLUSTER=m @@ -654,9 +656,12 @@ CONFIG_JFS_POSIX_ACL=y  CONFIG_JFS_SECURITY=y  CONFIG_JFS_STATISTICS=y  CONFIG_XFS_FS=y +CONFIG_XFS_SUPPORT_V4=y +CONFIG_XFS_SUPPORT_ASCII_CI=y  CONFIG_XFS_QUOTA=y  CONFIG_XFS_POSIX_ACL=y  CONFIG_XFS_RT=y +# CONFIG_XFS_ONLINE_SCRUB is not set  CONFIG_XFS_DEBUG=y  CONFIG_GFS2_FS=m  CONFIG_GFS2_FS_LOCKING_DLM=y @@ -666,7 +671,6 @@ CONFIG_BTRFS_FS_POSIX_ACL=y  CONFIG_BTRFS_DEBUG=y  CONFIG_BTRFS_ASSERT=y  CONFIG_NILFS2_FS=m -CONFIG_FS_DAX=y  CONFIG_EXPORTFS_BLOCK_OPS=y  CONFIG_FS_ENCRYPTION=y  CONFIG_FS_VERITY=y diff --git a/arch/s390/configs/defconfig b/arch/s390/configs/defconfig index 161dad7ef211..4414dabd04a6 100644 --- a/arch/s390/configs/defconfig +++ b/arch/s390/configs/defconfig @@ -94,6 +94,7 @@ CONFIG_SLAB_BUCKETS=y  CONFIG_MEMORY_HOTPLUG=y  CONFIG_MEMORY_HOTREMOVE=y  CONFIG_KSM=y +CONFIG_PERSISTENT_HUGE_ZERO_FOLIO=y  CONFIG_TRANSPARENT_HUGEPAGE=y  CONFIG_CMA_SYSFS=y  CONFIG_CMA_AREAS=7 @@ -114,12 +115,12 @@ CONFIG_TLS_DEVICE=y  CONFIG_TLS_TOE=y  CONFIG_XFRM_USER=m  CONFIG_NET_KEY=m -CONFIG_XDP_SOCKETS=y -CONFIG_XDP_SOCKETS_DIAG=m -CONFIG_DIBS=y -CONFIG_DIBS_LO=y  CONFIG_SMC=m  CONFIG_SMC_DIAG=m +CONFIG_DIBS=y +CONFIG_DIBS_LO=y +CONFIG_XDP_SOCKETS=y +CONFIG_XDP_SOCKETS_DIAG=m  CONFIG_INET=y  CONFIG_IP_MULTICAST=y  CONFIG_IP_ADVANCED_ROUTER=y @@ -462,6 +463,7 @@ CONFIG_SCSI_DH_EMC=m  CONFIG_SCSI_DH_ALUA=m  CONFIG_MD=y  CONFIG_BLK_DEV_MD=y +CONFIG_MD_LLBITMAP=y  # CONFIG_MD_BITMAP_FILE is not set  CONFIG_MD_LINEAR=m  CONFIG_MD_CLUSTER=m @@ -644,16 +646,18 @@ CONFIG_JFS_POSIX_ACL=y  CONFIG_JFS_SECURITY=y  CONFIG_JFS_STATISTICS=y  CONFIG_XFS_FS=y +CONFIG_XFS_SUPPORT_V4=y +CONFIG_XFS_SUPPORT_ASCII_CI=y  CONFIG_XFS_QUOTA=y  CONFIG_XFS_POSIX_ACL=y  CONFIG_XFS_RT=y +# CONFIG_XFS_ONLINE_SCRUB is not set  CONFIG_GFS2_FS=m  CONFIG_GFS2_FS_LOCKING_DLM=y  CONFIG_OCFS2_FS=m  CONFIG_BTRFS_FS=y  CONFIG_BTRFS_FS_POSIX_ACL=y  CONFIG_NILFS2_FS=m -CONFIG_FS_DAX=y  CONFIG_EXPORTFS_BLOCK_OPS=y  CONFIG_FS_ENCRYPTION=y  CONFIG_FS_VERITY=y diff --git a/arch/s390/configs/zfcpdump_defconfig b/arch/s390/configs/zfcpdump_defconfig index ed0b137353ad..b5478267d6a7 100644 --- 
a/arch/s390/configs/zfcpdump_defconfig +++ b/arch/s390/configs/zfcpdump_defconfig @@ -33,7 +33,6 @@ CONFIG_NET=y  CONFIG_DEVTMPFS=y  CONFIG_DEVTMPFS_SAFE=y  CONFIG_BLK_DEV_RAM=y -# CONFIG_DCSSBLK is not set  # CONFIG_DASD is not set  CONFIG_ENCLOSURE_SERVICES=y  CONFIG_SCSI=y diff --git a/arch/s390/crypto/phmac_s390.c b/arch/s390/crypto/phmac_s390.c index 7ecfdc4fba2d..89f3e6d8fd89 100644 --- a/arch/s390/crypto/phmac_s390.c +++ b/arch/s390/crypto/phmac_s390.c @@ -169,11 +169,18 @@ struct kmac_sha2_ctx {  	u64 buflen[2];  }; +enum async_op { +	OP_NOP = 0, +	OP_UPDATE, +	OP_FINAL, +	OP_FINUP, +}; +  /* phmac request context */  struct phmac_req_ctx {  	struct hash_walk_helper hwh;  	struct kmac_sha2_ctx kmac_ctx; -	bool final; +	enum async_op async_op;  };  /* @@ -610,6 +617,7 @@ static int phmac_update(struct ahash_request *req)  	 * using engine to serialize requests.  	 */  	if (rc == 0 || rc == -EKEYEXPIRED) { +		req_ctx->async_op = OP_UPDATE;  		atomic_inc(&tfm_ctx->via_engine_ctr);  		rc = crypto_transfer_hash_request_to_engine(phmac_crypto_engine, req);  		if (rc != -EINPROGRESS) @@ -647,8 +655,7 @@ static int phmac_final(struct ahash_request *req)  	 * using engine to serialize requests.  	 */  	if (rc == 0 || rc == -EKEYEXPIRED) { -		req->nbytes = 0; -		req_ctx->final = true; +		req_ctx->async_op = OP_FINAL;  		atomic_inc(&tfm_ctx->via_engine_ctr);  		rc = crypto_transfer_hash_request_to_engine(phmac_crypto_engine, req);  		if (rc != -EINPROGRESS) @@ -676,13 +683,16 @@ static int phmac_finup(struct ahash_request *req)  	if (rc)  		goto out; +	req_ctx->async_op = OP_FINUP; +  	/* Try synchronous operations if no active engine usage */  	if (!atomic_read(&tfm_ctx->via_engine_ctr)) {  		rc = phmac_kmac_update(req, false);  		if (rc == 0) -			req->nbytes = 0; +			req_ctx->async_op = OP_FINAL;  	} -	if (!rc && !req->nbytes && !atomic_read(&tfm_ctx->via_engine_ctr)) { +	if (!rc && req_ctx->async_op == OP_FINAL && +	    !atomic_read(&tfm_ctx->via_engine_ctr)) {  		rc = phmac_kmac_final(req, false);  		if (rc == 0)  			goto out; @@ -694,7 +704,7 @@ static int phmac_finup(struct ahash_request *req)  	 * using engine to serialize requests.  	 */  	if (rc == 0 || rc == -EKEYEXPIRED) { -		req_ctx->final = true; +		/* req->async_op has been set to either OP_FINUP or OP_FINAL */  		atomic_inc(&tfm_ctx->via_engine_ctr);  		rc = crypto_transfer_hash_request_to_engine(phmac_crypto_engine, req);  		if (rc != -EINPROGRESS) @@ -855,15 +865,16 @@ static int phmac_do_one_request(struct crypto_engine *engine, void *areq)  	/*  	 * Three kinds of requests come in here: -	 * update when req->nbytes > 0 and req_ctx->final is false -	 * final when req->nbytes = 0 and req_ctx->final is true -	 * finup when req->nbytes > 0 and req_ctx->final is true -	 * For update and finup the hwh walk needs to be prepared and -	 * up to date but the actual nr of bytes in req->nbytes may be -	 * any non zero number. For final there is no hwh walk needed. +	 * 1. req->async_op == OP_UPDATE with req->nbytes > 0 +	 * 2. req->async_op == OP_FINUP with req->nbytes > 0 +	 * 3. req->async_op == OP_FINAL +	 * For update and finup the hwh walk has already been prepared +	 * by the caller. For final there is no hwh walk needed.  	 
*/ -	if (req->nbytes) { +	switch (req_ctx->async_op) { +	case OP_UPDATE: +	case OP_FINUP:  		rc = phmac_kmac_update(req, true);  		if (rc == -EKEYEXPIRED) {  			/* @@ -880,10 +891,11 @@ static int phmac_do_one_request(struct crypto_engine *engine, void *areq)  			hwh_advance(hwh, rc);  			goto out;  		} -		req->nbytes = 0; -	} - -	if (req_ctx->final) { +		if (req_ctx->async_op == OP_UPDATE) +			break; +		req_ctx->async_op = OP_FINAL; +		fallthrough; +	case OP_FINAL:  		rc = phmac_kmac_final(req, true);  		if (rc == -EKEYEXPIRED) {  			/* @@ -897,10 +909,14 @@ static int phmac_do_one_request(struct crypto_engine *engine, void *areq)  			cond_resched();  			return -ENOSPC;  		} +		break; +	default: +		/* unknown/unsupported/unimplemented asynch op */ +		return -EOPNOTSUPP;  	}  out: -	if (rc || req_ctx->final) +	if (rc || req_ctx->async_op == OP_FINAL)  		memzero_explicit(kmac_ctx, sizeof(*kmac_ctx));  	pr_debug("request complete with rc=%d\n", rc);  	local_bh_disable(); diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h index 6890925d5587..a32f465ecf73 100644 --- a/arch/s390/include/asm/pci.h +++ b/arch/s390/include/asm/pci.h @@ -145,7 +145,6 @@ struct zpci_dev {  	u8		has_resources	: 1;  	u8		is_physfn	: 1;  	u8		util_str_avail	: 1; -	u8		irqs_registered	: 1;  	u8		tid_avail	: 1;  	u8		rtr_avail	: 1; /* Relaxed translation allowed */  	unsigned int	devfn;		/* DEVFN part of the RID*/ diff --git a/arch/s390/mm/dump_pagetables.c b/arch/s390/mm/dump_pagetables.c index 9af2aae0a515..528d7c70979f 100644 --- a/arch/s390/mm/dump_pagetables.c +++ b/arch/s390/mm/dump_pagetables.c @@ -291,16 +291,14 @@ static int ptdump_cmp(const void *a, const void *b)  static int add_marker(unsigned long start, unsigned long end, const char *name)  { -	size_t oldsize, newsize; - -	oldsize = markers_cnt * sizeof(*markers); -	newsize = oldsize + 2 * sizeof(*markers); -	if (!oldsize) -		markers = kvmalloc(newsize, GFP_KERNEL); -	else -		markers = kvrealloc(markers, newsize, GFP_KERNEL); -	if (!markers) -		goto error; +	struct addr_marker *new; +	size_t newsize; + +	newsize = (markers_cnt + 2) * sizeof(*markers); +	new = kvrealloc(markers, newsize, GFP_KERNEL); +	if (!new) +		return -ENOMEM; +	markers = new;  	markers[markers_cnt].is_start = 1;  	markers[markers_cnt].start_address = start;  	markers[markers_cnt].size = end - start; @@ -312,9 +310,6 @@ static int add_marker(unsigned long start, unsigned long end, const char *name)  	markers[markers_cnt].name = name;  	markers_cnt++;  	return 0; -error: -	markers_cnt = 0; -	return -ENOMEM;  }  static int pt_dump_init(void) diff --git a/arch/s390/pci/pci_event.c b/arch/s390/pci/pci_event.c index b95376041501..27db1e72c623 100644 --- a/arch/s390/pci/pci_event.c +++ b/arch/s390/pci/pci_event.c @@ -188,7 +188,7 @@ static pci_ers_result_t zpci_event_attempt_error_recovery(struct pci_dev *pdev)  	 * is unbound or probed and that userspace can't access its  	 * configuration space while we perform recovery.  	 
*/ -	pci_dev_lock(pdev); +	device_lock(&pdev->dev);  	if (pdev->error_state == pci_channel_io_perm_failure) {  		ers_res = PCI_ERS_RESULT_DISCONNECT;  		goto out_unlock; @@ -257,7 +257,7 @@ static pci_ers_result_t zpci_event_attempt_error_recovery(struct pci_dev *pdev)  		driver->err_handler->resume(pdev);  	pci_uevent_ers(pdev, PCI_ERS_RESULT_RECOVERED);  out_unlock: -	pci_dev_unlock(pdev); +	device_unlock(&pdev->dev);  	zpci_report_status(zdev, "recovery", status_str);  	return ers_res; diff --git a/arch/s390/pci/pci_irq.c b/arch/s390/pci/pci_irq.c index 84482a921332..e73be96ce5fe 100644 --- a/arch/s390/pci/pci_irq.c +++ b/arch/s390/pci/pci_irq.c @@ -107,9 +107,6 @@ static int zpci_set_irq(struct zpci_dev *zdev)  	else  		rc = zpci_set_airq(zdev); -	if (!rc) -		zdev->irqs_registered = 1; -  	return rc;  } @@ -123,9 +120,6 @@ static int zpci_clear_irq(struct zpci_dev *zdev)  	else  		rc = zpci_clear_airq(zdev); -	if (!rc) -		zdev->irqs_registered = 0; -  	return rc;  } @@ -427,8 +421,7 @@ bool arch_restore_msi_irqs(struct pci_dev *pdev)  {  	struct zpci_dev *zdev = to_zpci(pdev); -	if (!zdev->irqs_registered) -		zpci_set_irq(zdev); +	zpci_set_irq(zdev);  	return true;  } diff --git a/arch/x86/Makefile b/arch/x86/Makefile index 4db7e4bf69f5..8fbff3106c56 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile @@ -75,7 +75,7 @@ export BITS  #  #    https://gcc.gnu.org/bugzilla/show_bug.cgi?id=53383  # -KBUILD_CFLAGS += -mno-sse -mno-mmx -mno-sse2 -mno-3dnow -mno-avx +KBUILD_CFLAGS += -mno-sse -mno-mmx -mno-sse2 -mno-3dnow -mno-avx -mno-sse4a  KBUILD_RUSTFLAGS += --target=$(objtree)/scripts/target.json  KBUILD_RUSTFLAGS += -Ctarget-feature=-sse,-sse2,-sse3,-ssse3,-sse4.1,-sse4.2,-avx,-avx2 diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index 28f5468a6ea3..fe65be0b9d9c 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -7596,6 +7596,7 @@ __init int intel_pmu_init(void)  		break;  	case INTEL_PANTHERLAKE_L: +	case INTEL_WILDCATLAKE_L:  		pr_cont("Pantherlake Hybrid events, ");  		name = "pantherlake_hybrid";  		goto lnl_common; diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c index c0b7ac1c7594..01bc59e9286c 100644 --- a/arch/x86/events/intel/ds.c +++ b/arch/x86/events/intel/ds.c @@ -317,7 +317,8 @@ static u64 __grt_latency_data(struct perf_event *event, u64 status,  {  	u64 val; -	WARN_ON_ONCE(hybrid_pmu(event->pmu)->pmu_type == hybrid_big); +	WARN_ON_ONCE(is_hybrid() && +		     hybrid_pmu(event->pmu)->pmu_type == hybrid_big);  	dse &= PERF_PEBS_DATA_SOURCE_GRT_MASK;  	val = hybrid_var(event->pmu, pebs_data_source)[dse]; diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c index a762f7f5b161..d6c945cc5d07 100644 --- a/arch/x86/events/intel/uncore.c +++ b/arch/x86/events/intel/uncore.c @@ -1895,6 +1895,7 @@ static const struct x86_cpu_id intel_uncore_match[] __initconst = {  	X86_MATCH_VFM(INTEL_ARROWLAKE_H,	&mtl_uncore_init),  	X86_MATCH_VFM(INTEL_LUNARLAKE_M,	&lnl_uncore_init),  	X86_MATCH_VFM(INTEL_PANTHERLAKE_L,	&ptl_uncore_init), +	X86_MATCH_VFM(INTEL_WILDCATLAKE_L,	&ptl_uncore_init),  	X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X,	&spr_uncore_init),  	X86_MATCH_VFM(INTEL_EMERALDRAPIDS_X,	&spr_uncore_init),  	X86_MATCH_VFM(INTEL_GRANITERAPIDS_X,	&gnr_uncore_init), diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h index f32a0eca2ae5..950bfd006905 100644 --- a/arch/x86/include/asm/intel-family.h +++ b/arch/x86/include/asm/intel-family.h @@ -150,12 +150,12 @@  
#define INTEL_LUNARLAKE_M		IFM(6, 0xBD) /* Lion Cove / Skymont */ -#define INTEL_PANTHERLAKE_L		IFM(6, 0xCC) /* Cougar Cove / Crestmont */ +#define INTEL_PANTHERLAKE_L		IFM(6, 0xCC) /* Cougar Cove / Darkmont */  #define INTEL_WILDCATLAKE_L		IFM(6, 0xD5) -#define INTEL_NOVALAKE			IFM(18, 0x01) -#define INTEL_NOVALAKE_L		IFM(18, 0x03) +#define INTEL_NOVALAKE			IFM(18, 0x01) /* Coyote Cove / Arctic Wolf */ +#define INTEL_NOVALAKE_L		IFM(18, 0x03) /* Coyote Cove / Arctic Wolf */  /* "Small Core" Processors (Atom/E-Core) */ diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h index 015d23f3e01f..53f4089333f2 100644 --- a/arch/x86/include/asm/page_64.h +++ b/arch/x86/include/asm/page_64.h @@ -43,6 +43,9 @@ extern unsigned long __phys_addr_symbol(unsigned long);  void clear_page_orig(void *page);  void clear_page_rep(void *page);  void clear_page_erms(void *page); +KCFI_REFERENCE(clear_page_orig); +KCFI_REFERENCE(clear_page_rep); +KCFI_REFERENCE(clear_page_erms);  static inline void clear_page(void *page)  { diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index ccaa51ce63f6..8e36964a7721 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -516,7 +516,7 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)  			setup_force_cpu_cap(X86_FEATURE_ZEN5);  			break;  		case 0x50 ... 0x5f: -		case 0x90 ... 0xaf: +		case 0x80 ... 0xaf:  		case 0xc0 ... 0xcf:  			setup_force_cpu_cap(X86_FEATURE_ZEN6);  			break; @@ -1035,8 +1035,18 @@ static void init_amd_zen4(struct cpuinfo_x86 *c)  	}  } +static const struct x86_cpu_id zen5_rdseed_microcode[] = { +	ZEN_MODEL_STEP_UCODE(0x1a, 0x02, 0x1, 0x0b00215a), +	ZEN_MODEL_STEP_UCODE(0x1a, 0x11, 0x0, 0x0b101054), +}; +  static void init_amd_zen5(struct cpuinfo_x86 *c)  { +	if (!x86_match_min_microcode_rev(zen5_rdseed_microcode)) { +		clear_cpu_cap(c, X86_FEATURE_RDSEED); +		msr_clear_bit(MSR_AMD64_CPUID_FN_7, 18); +		pr_emerg_once("RDSEED32 is broken. Disabling the corresponding CPUID bit.\n"); +	}  }  static void init_amd(struct cpuinfo_x86 *c) diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c index 28ed8c089024..b7c797dc94f4 100644 --- a/arch/x86/kernel/cpu/microcode/amd.c +++ b/arch/x86/kernel/cpu/microcode/amd.c @@ -233,13 +233,31 @@ static bool need_sha_check(u32 cur_rev)  	return true;  } +static bool cpu_has_entrysign(void) +{ +	unsigned int fam   = x86_family(bsp_cpuid_1_eax); +	unsigned int model = x86_model(bsp_cpuid_1_eax); + +	if (fam == 0x17 || fam == 0x19) +		return true; + +	if (fam == 0x1a) { +		if (model <= 0x2f || +		    (0x40 <= model && model <= 0x4f) || +		    (0x60 <= model && model <= 0x6f)) +			return true; +	} + +	return false; +} +  static bool verify_sha256_digest(u32 patch_id, u32 cur_rev, const u8 *data, unsigned int len)  {  	struct patch_digest *pd = NULL;  	u8 digest[SHA256_DIGEST_SIZE];  	int i; -	if (x86_family(bsp_cpuid_1_eax) < 0x17) +	if (!cpu_has_entrysign())  		return true;  	if (!need_sha_check(cur_rev)) diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c index 1f71cc135e9a..e88eacb1b5bb 100644 --- a/arch/x86/kernel/fpu/core.c +++ b/arch/x86/kernel/fpu/core.c @@ -825,6 +825,9 @@ void fpu__clear_user_states(struct fpu *fpu)  	    !fpregs_state_valid(fpu, smp_processor_id()))  		os_xrstor_supervisor(fpu->fpstate); +	/* Ensure XFD state is in sync before reloading XSTATE */ +	xfd_update_state(fpu->fpstate); +  	/* Reset user states in registers. 
*/  	restore_fpregs_from_init_fpstate(XFEATURE_MASK_USER_RESTORE); diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index d4c93d9e73e4..de5083cb1d37 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -2701,7 +2701,7 @@ emit_jmp:  			/* Update cleanup_addr */  			ctx->cleanup_addr = proglen;  			if (bpf_prog_was_classic(bpf_prog) && -			    !capable(CAP_SYS_ADMIN)) { +			    !ns_capable_noaudit(&init_user_ns, CAP_SYS_ADMIN)) {  				u8 *ip = image + addrs[i - 1];  				if (emit_spectre_bhb_barrier(&prog, ip, bpf_prog)) diff --git a/block/blk-crypto.c b/block/blk-crypto.c index 4b1ad84d1b5a..3e7bf1974cbd 100644 --- a/block/blk-crypto.c +++ b/block/blk-crypto.c @@ -292,7 +292,7 @@ bool __blk_crypto_bio_prep(struct bio **bio_ptr)  	}  	if (!bio_crypt_check_alignment(bio)) { -		bio->bi_status = BLK_STS_IOERR; +		bio->bi_status = BLK_STS_INVAL;  		goto fail;  	} diff --git a/drivers/acpi/acpi_mrrm.c b/drivers/acpi/acpi_mrrm.c index 47ea3ccc2142..a6dbf623e557 100644 --- a/drivers/acpi/acpi_mrrm.c +++ b/drivers/acpi/acpi_mrrm.c @@ -63,6 +63,9 @@ static __init int acpi_parse_mrrm(struct acpi_table_header *table)  	if (!mrrm)  		return -ENODEV; +	if (mrrm->header.revision != 1) +		return -EINVAL; +  	if (mrrm->flags & ACPI_MRRM_FLAGS_REGION_ASSIGNMENT_OS)  		return -EOPNOTSUPP; diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c index 103f29661576..be8e7e18abca 100644 --- a/drivers/acpi/acpi_video.c +++ b/drivers/acpi/acpi_video.c @@ -1959,8 +1959,10 @@ static void acpi_video_bus_remove_notify_handler(struct acpi_video_bus *video)  	struct acpi_video_device *dev;  	mutex_lock(&video->device_list_lock); -	list_for_each_entry(dev, &video->video_device_list, entry) +	list_for_each_entry(dev, &video->video_device_list, entry) {  		acpi_video_dev_remove_notify_handler(dev); +		cancel_delayed_work_sync(&dev->switch_brightness_work); +	}  	mutex_unlock(&video->device_list_lock);  	acpi_video_bus_stop_devices(video); diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c index 0a7026040188..3c6dd9b4ba0a 100644 --- a/drivers/acpi/button.c +++ b/drivers/acpi/button.c @@ -619,8 +619,10 @@ static int acpi_button_add(struct acpi_device *device)  	input_set_drvdata(input, device);  	error = input_register_device(input); -	if (error) +	if (error) { +		input_free_device(input);  		goto err_remove_fs; +	}  	switch (device->device_type) {  	case ACPI_BUS_TYPE_POWER_BUTTON: diff --git a/drivers/acpi/fan.h b/drivers/acpi/fan.h index 8a28a72a7c6a..bedbab0e8e4e 100644 --- a/drivers/acpi/fan.h +++ b/drivers/acpi/fan.h @@ -49,6 +49,7 @@ struct acpi_fan_fst {  };  struct acpi_fan { +	acpi_handle handle;  	bool acpi4;  	bool has_fst;  	struct acpi_fan_fif fif; @@ -59,14 +60,14 @@ struct acpi_fan {  	struct device_attribute fine_grain_control;  }; -int acpi_fan_get_fst(struct acpi_device *device, struct acpi_fan_fst *fst); +int acpi_fan_get_fst(acpi_handle handle, struct acpi_fan_fst *fst);  int acpi_fan_create_attributes(struct acpi_device *device);  void acpi_fan_delete_attributes(struct acpi_device *device);  #if IS_REACHABLE(CONFIG_HWMON) -int devm_acpi_fan_create_hwmon(struct acpi_device *device); +int devm_acpi_fan_create_hwmon(struct device *dev);  #else -static inline int devm_acpi_fan_create_hwmon(struct acpi_device *device) { return 0; }; +static inline int devm_acpi_fan_create_hwmon(struct device *dev) { return 0; };  #endif  #endif diff --git a/drivers/acpi/fan_attr.c b/drivers/acpi/fan_attr.c index c1afb7b5ed3d..9b7fa52f3c2a 100644 --- 
a/drivers/acpi/fan_attr.c +++ b/drivers/acpi/fan_attr.c @@ -55,7 +55,7 @@ static ssize_t show_fan_speed(struct device *dev, struct device_attribute *attr,  	struct acpi_fan_fst fst;  	int status; -	status = acpi_fan_get_fst(acpi_dev, &fst); +	status = acpi_fan_get_fst(acpi_dev->handle, &fst);  	if (status)  		return status; diff --git a/drivers/acpi/fan_core.c b/drivers/acpi/fan_core.c index 04ff608f2ff0..46e7fe7a506d 100644 --- a/drivers/acpi/fan_core.c +++ b/drivers/acpi/fan_core.c @@ -44,25 +44,30 @@ static int fan_get_max_state(struct thermal_cooling_device *cdev, unsigned long  	return 0;  } -int acpi_fan_get_fst(struct acpi_device *device, struct acpi_fan_fst *fst) +int acpi_fan_get_fst(acpi_handle handle, struct acpi_fan_fst *fst)  {  	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };  	union acpi_object *obj;  	acpi_status status;  	int ret = 0; -	status = acpi_evaluate_object(device->handle, "_FST", NULL, &buffer); -	if (ACPI_FAILURE(status)) { -		dev_err(&device->dev, "Get fan state failed\n"); -		return -ENODEV; -	} +	status = acpi_evaluate_object(handle, "_FST", NULL, &buffer); +	if (ACPI_FAILURE(status)) +		return -EIO;  	obj = buffer.pointer; -	if (!obj || obj->type != ACPI_TYPE_PACKAGE || -	    obj->package.count != 3 || -	    obj->package.elements[1].type != ACPI_TYPE_INTEGER) { -		dev_err(&device->dev, "Invalid _FST data\n"); -		ret = -EINVAL; +	if (!obj) +		return -ENODATA; + +	if (obj->type != ACPI_TYPE_PACKAGE || obj->package.count != 3) { +		ret = -EPROTO; +		goto err; +	} + +	if (obj->package.elements[0].type != ACPI_TYPE_INTEGER || +	    obj->package.elements[1].type != ACPI_TYPE_INTEGER || +	    obj->package.elements[2].type != ACPI_TYPE_INTEGER) { +		ret = -EPROTO;  		goto err;  	} @@ -81,7 +86,7 @@ static int fan_get_state_acpi4(struct acpi_device *device, unsigned long *state)  	struct acpi_fan_fst fst;  	int status, i; -	status = acpi_fan_get_fst(device, &fst); +	status = acpi_fan_get_fst(device->handle, &fst);  	if (status)  		return status; @@ -311,11 +316,16 @@ static int acpi_fan_probe(struct platform_device *pdev)  	struct acpi_device *device = ACPI_COMPANION(&pdev->dev);  	char *name; +	if (!device) +		return -ENODEV; +  	fan = devm_kzalloc(&pdev->dev, sizeof(*fan), GFP_KERNEL);  	if (!fan) {  		dev_err(&device->dev, "No memory for fan\n");  		return -ENOMEM;  	} + +	fan->handle = device->handle;  	device->driver_data = fan;  	platform_set_drvdata(pdev, fan); @@ -337,7 +347,7 @@ static int acpi_fan_probe(struct platform_device *pdev)  	}  	if (fan->has_fst) { -		result = devm_acpi_fan_create_hwmon(device); +		result = devm_acpi_fan_create_hwmon(&pdev->dev);  		if (result)  			return result; diff --git a/drivers/acpi/fan_hwmon.c b/drivers/acpi/fan_hwmon.c index e8d90605106e..4b2c2007f2d7 100644 --- a/drivers/acpi/fan_hwmon.c +++ b/drivers/acpi/fan_hwmon.c @@ -93,13 +93,12 @@ static umode_t acpi_fan_hwmon_is_visible(const void *drvdata, enum hwmon_sensor_  static int acpi_fan_hwmon_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,  			       int channel, long *val)  { -	struct acpi_device *adev = to_acpi_device(dev->parent);  	struct acpi_fan *fan = dev_get_drvdata(dev);  	struct acpi_fan_fps *fps;  	struct acpi_fan_fst fst;  	int ret; -	ret = acpi_fan_get_fst(adev, &fst); +	ret = acpi_fan_get_fst(fan->handle, &fst);  	if (ret < 0)  		return ret; @@ -167,12 +166,12 @@ static const struct hwmon_chip_info acpi_fan_hwmon_chip_info = {  	.info = acpi_fan_hwmon_info,  }; -int devm_acpi_fan_create_hwmon(struct acpi_device *device) +int 
devm_acpi_fan_create_hwmon(struct device *dev)  { -	struct acpi_fan *fan = acpi_driver_data(device); +	struct acpi_fan *fan = dev_get_drvdata(dev);  	struct device *hdev; -	hdev = devm_hwmon_device_register_with_info(&device->dev, "acpi_fan", fan, -						    &acpi_fan_hwmon_chip_info, NULL); +	hdev = devm_hwmon_device_register_with_info(dev, "acpi_fan", fan, &acpi_fan_hwmon_chip_info, +						    NULL);  	return PTR_ERR_OR_ZERO(hdev);  } diff --git a/drivers/acpi/spcr.c b/drivers/acpi/spcr.c index d4d52d5e9016..73cb933fdc89 100644 --- a/drivers/acpi/spcr.c +++ b/drivers/acpi/spcr.c @@ -155,7 +155,7 @@ int __init acpi_parse_spcr(bool enable_earlycon, bool enable_console)  	 * Baud Rate field. If this field is zero or not present, Configured  	 * Baud Rate is used.  	 */ -	if (table->precise_baudrate) +	if (table->header.revision >= 4 && table->precise_baudrate)  		baud_rate = table->precise_baudrate;  	else switch (table->baud_rate) {  	case 0: diff --git a/drivers/base/regmap/regmap-slimbus.c b/drivers/base/regmap/regmap-slimbus.c index 54eb7d227cf4..e523fae73004 100644 --- a/drivers/base/regmap/regmap-slimbus.c +++ b/drivers/base/regmap/regmap-slimbus.c @@ -48,8 +48,7 @@ struct regmap *__regmap_init_slimbus(struct slim_device *slimbus,  	if (IS_ERR(bus))  		return ERR_CAST(bus); -	return __regmap_init(&slimbus->dev, bus, &slimbus->dev, config, -			     lock_key, lock_name); +	return __regmap_init(&slimbus->dev, bus, slimbus, config, lock_key, lock_name);  }  EXPORT_SYMBOL_GPL(__regmap_init_slimbus); @@ -63,8 +62,7 @@ struct regmap *__devm_regmap_init_slimbus(struct slim_device *slimbus,  	if (IS_ERR(bus))  		return ERR_CAST(bus); -	return __devm_regmap_init(&slimbus->dev, bus, &slimbus, config, -				  lock_key, lock_name); +	return __devm_regmap_init(&slimbus->dev, bus, slimbus, config, lock_key, lock_name);  }  EXPORT_SYMBOL_GPL(__devm_regmap_init_slimbus); diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c index 6ecfc821cf83..72f045e6ed51 100644 --- a/drivers/bcma/main.c +++ b/drivers/bcma/main.c @@ -294,6 +294,8 @@ static int bcma_register_devices(struct bcma_bus *bus)  	int err;  	list_for_each_entry(core, &bus->cores, list) { +		struct device_node *np; +  		/* We support that core ourselves */  		switch (core->id.id) {  		case BCMA_CORE_4706_CHIPCOMMON: @@ -311,6 +313,10 @@ static int bcma_register_devices(struct bcma_bus *bus)  		if (bcma_is_core_needed_early(core->id.id))  			continue; +		np = core->dev.of_node; +		if (np && !of_device_is_available(np)) +			continue; +  		/* Only first GMAC core on BCM4706 is connected and working */  		if (core->id.id == BCMA_CORE_4706_MAC_GBIT &&  		    core->core_unit > 0) diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c index f982027e8c85..0ee55f889cfd 100644 --- a/drivers/block/null_blk/main.c +++ b/drivers/block/null_blk/main.c @@ -1949,6 +1949,7 @@ static int null_add_dev(struct nullb_device *dev)  		.logical_block_size	= dev->blocksize,  		.physical_block_size	= dev->blocksize,  		.max_hw_sectors		= dev->max_sectors, +		.dma_alignment		= dev->blocksize - 1,  	};  	struct nullb *nullb; diff --git a/drivers/bluetooth/bpa10x.c b/drivers/bluetooth/bpa10x.c index b7ba667a3d09..e305d04aac9d 100644 --- a/drivers/bluetooth/bpa10x.c +++ b/drivers/bluetooth/bpa10x.c @@ -41,6 +41,7 @@ struct bpa10x_data {  	struct usb_anchor rx_anchor;  	struct sk_buff *rx_skb[2]; +	struct hci_uart hu;  };  static void bpa10x_tx_complete(struct urb *urb) @@ -96,7 +97,7 @@ static void bpa10x_rx_complete(struct urb *urb)  	if (urb->status == 
0) {  		bool idx = usb_pipebulk(urb->pipe); -		data->rx_skb[idx] = h4_recv_buf(hdev, data->rx_skb[idx], +		data->rx_skb[idx] = h4_recv_buf(&data->hu, data->rx_skb[idx],  						urb->transfer_buffer,  						urb->actual_length,  						bpa10x_recv_pkts, @@ -388,6 +389,7 @@ static int bpa10x_probe(struct usb_interface *intf,  	hci_set_drvdata(hdev, data);  	data->hdev = hdev; +	data->hu.hdev = hdev;  	SET_HCIDEV_DEV(hdev, &intf->dev); diff --git a/drivers/bluetooth/btintel_pcie.c b/drivers/bluetooth/btintel_pcie.c index 6d3963bd56a9..a075d8ec4677 100644 --- a/drivers/bluetooth/btintel_pcie.c +++ b/drivers/bluetooth/btintel_pcie.c @@ -1467,11 +1467,6 @@ static irqreturn_t btintel_pcie_irq_msix_handler(int irq, void *dev_id)  	if (intr_hw & BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP1)  		btintel_pcie_msix_gp1_handler(data); -	/* This interrupt is triggered by the firmware after updating -	 * boot_stage register and image_response register -	 */ -	if (intr_hw & BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP0) -		btintel_pcie_msix_gp0_handler(data);  	/* For TX */  	if (intr_fh & BTINTEL_PCIE_MSIX_FH_INT_CAUSES_0) { @@ -1487,6 +1482,12 @@ static irqreturn_t btintel_pcie_irq_msix_handler(int irq, void *dev_id)  			btintel_pcie_msix_tx_handle(data);  	} +	/* This interrupt is triggered by the firmware after updating +	 * boot_stage register and image_response register +	 */ +	if (intr_hw & BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP0) +		btintel_pcie_msix_gp0_handler(data); +  	/*  	 * Before sending the interrupt the HW disables it to prevent a nested  	 * interrupt. This is done by writing 1 to the corresponding bit in diff --git a/drivers/bluetooth/btmtksdio.c b/drivers/bluetooth/btmtksdio.c index 50abefba6d04..62db31bd6592 100644 --- a/drivers/bluetooth/btmtksdio.c +++ b/drivers/bluetooth/btmtksdio.c @@ -1270,6 +1270,12 @@ static void btmtksdio_reset(struct hci_dev *hdev)  	sdio_claim_host(bdev->func); +	/* set drv_pmctrl if BT is closed before doing reset */ +	if (!test_bit(BTMTKSDIO_FUNC_ENABLED, &bdev->tx_state)) { +		sdio_enable_func(bdev->func); +		btmtksdio_drv_pmctrl(bdev); +	} +  	sdio_writel(bdev->func, C_INT_EN_CLR, MTK_REG_CHLPCR, NULL);  	skb_queue_purge(&bdev->txq);  	cancel_work_sync(&bdev->txrx_work); @@ -1285,6 +1291,12 @@ static void btmtksdio_reset(struct hci_dev *hdev)  		goto err;  	} +	/* set fw_pmctrl back if BT is closed after doing reset */ +	if (!test_bit(BTMTKSDIO_FUNC_ENABLED, &bdev->tx_state)) { +		btmtksdio_fw_pmctrl(bdev); +		sdio_disable_func(bdev->func); +	} +  	clear_bit(BTMTKSDIO_PATCH_ENABLED, &bdev->tx_state);  err:  	sdio_release_host(bdev->func); diff --git a/drivers/bluetooth/btmtkuart.c b/drivers/bluetooth/btmtkuart.c index d9b90ea2ad38..27aa48ff3ac2 100644 --- a/drivers/bluetooth/btmtkuart.c +++ b/drivers/bluetooth/btmtkuart.c @@ -79,6 +79,7 @@ struct btmtkuart_dev {  	u16	stp_dlen;  	const struct btmtkuart_data *data; +	struct hci_uart hu;  };  #define btmtkuart_is_standalone(bdev)	\ @@ -368,7 +369,7 @@ static void btmtkuart_recv(struct hci_dev *hdev, const u8 *data, size_t count)  		sz_left -= adv;  		p_left += adv; -		bdev->rx_skb = h4_recv_buf(bdev->hdev, bdev->rx_skb, p_h4, +		bdev->rx_skb = h4_recv_buf(&bdev->hu, bdev->rx_skb, p_h4,  					   sz_h4, mtk_recv_pkts,  					   ARRAY_SIZE(mtk_recv_pkts));  		if (IS_ERR(bdev->rx_skb)) { @@ -858,6 +859,7 @@ static int btmtkuart_probe(struct serdev_device *serdev)  	}  	bdev->hdev = hdev; +	bdev->hu.hdev = hdev;  	hdev->bus = HCI_UART;  	hci_set_drvdata(hdev, bdev); diff --git a/drivers/bluetooth/btnxpuart.c 
b/drivers/bluetooth/btnxpuart.c index d5153fed0518..3b1e9224e965 100644 --- a/drivers/bluetooth/btnxpuart.c +++ b/drivers/bluetooth/btnxpuart.c @@ -212,6 +212,7 @@ struct btnxpuart_dev {  	struct ps_data psdata;  	struct btnxpuart_data *nxp_data;  	struct reset_control *pdn; +	struct hci_uart hu;  };  #define NXP_V1_FW_REQ_PKT	0xa5 @@ -1756,7 +1757,7 @@ static size_t btnxpuart_receive_buf(struct serdev_device *serdev,  	ps_start_timer(nxpdev); -	nxpdev->rx_skb = h4_recv_buf(nxpdev->hdev, nxpdev->rx_skb, data, count, +	nxpdev->rx_skb = h4_recv_buf(&nxpdev->hu, nxpdev->rx_skb, data, count,  				     nxp_recv_pkts, ARRAY_SIZE(nxp_recv_pkts));  	if (IS_ERR(nxpdev->rx_skb)) {  		int err = PTR_ERR(nxpdev->rx_skb); @@ -1875,6 +1876,7 @@ static int nxp_serdev_probe(struct serdev_device *serdev)  	reset_control_deassert(nxpdev->pdn);  	nxpdev->hdev = hdev; +	nxpdev->hu.hdev = hdev;  	hdev->bus = HCI_UART;  	hci_set_drvdata(hdev, nxpdev); diff --git a/drivers/bluetooth/hci_ag6xx.c b/drivers/bluetooth/hci_ag6xx.c index 2d40302409ff..94588676510f 100644 --- a/drivers/bluetooth/hci_ag6xx.c +++ b/drivers/bluetooth/hci_ag6xx.c @@ -105,7 +105,7 @@ static int ag6xx_recv(struct hci_uart *hu, const void *data, int count)  	if (!test_bit(HCI_UART_REGISTERED, &hu->flags))  		return -EUNATCH; -	ag6xx->rx_skb = h4_recv_buf(hu->hdev, ag6xx->rx_skb, data, count, +	ag6xx->rx_skb = h4_recv_buf(hu, ag6xx->rx_skb, data, count,  				    ag6xx_recv_pkts,  				    ARRAY_SIZE(ag6xx_recv_pkts));  	if (IS_ERR(ag6xx->rx_skb)) { diff --git a/drivers/bluetooth/hci_aml.c b/drivers/bluetooth/hci_aml.c index 707e90f80130..b1f32c5a8a3f 100644 --- a/drivers/bluetooth/hci_aml.c +++ b/drivers/bluetooth/hci_aml.c @@ -650,7 +650,7 @@ static int aml_recv(struct hci_uart *hu, const void *data, int count)  	struct aml_data *aml_data = hu->priv;  	int err; -	aml_data->rx_skb = h4_recv_buf(hu->hdev, aml_data->rx_skb, data, count, +	aml_data->rx_skb = h4_recv_buf(hu, aml_data->rx_skb, data, count,  				       aml_recv_pkts,  				       ARRAY_SIZE(aml_recv_pkts));  	if (IS_ERR(aml_data->rx_skb)) { diff --git a/drivers/bluetooth/hci_ath.c b/drivers/bluetooth/hci_ath.c index dbfe34664633..8d2b5e7f0d6a 100644 --- a/drivers/bluetooth/hci_ath.c +++ b/drivers/bluetooth/hci_ath.c @@ -191,7 +191,7 @@ static int ath_recv(struct hci_uart *hu, const void *data, int count)  {  	struct ath_struct *ath = hu->priv; -	ath->rx_skb = h4_recv_buf(hu->hdev, ath->rx_skb, data, count, +	ath->rx_skb = h4_recv_buf(hu, ath->rx_skb, data, count,  				  ath_recv_pkts, ARRAY_SIZE(ath_recv_pkts));  	if (IS_ERR(ath->rx_skb)) {  		int err = PTR_ERR(ath->rx_skb); diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c index f96617b85d87..fff845ed44e3 100644 --- a/drivers/bluetooth/hci_bcm.c +++ b/drivers/bluetooth/hci_bcm.c @@ -698,7 +698,7 @@ static int bcm_recv(struct hci_uart *hu, const void *data, int count)  	if (!test_bit(HCI_UART_REGISTERED, &hu->flags))  		return -EUNATCH; -	bcm->rx_skb = h4_recv_buf(hu->hdev, bcm->rx_skb, data, count, +	bcm->rx_skb = h4_recv_buf(hu, bcm->rx_skb, data, count,  				  bcm_recv_pkts, ARRAY_SIZE(bcm_recv_pkts));  	if (IS_ERR(bcm->rx_skb)) {  		int err = PTR_ERR(bcm->rx_skb); diff --git a/drivers/bluetooth/hci_h4.c b/drivers/bluetooth/hci_h4.c index 9070e31a68bf..ec017df8572c 100644 --- a/drivers/bluetooth/hci_h4.c +++ b/drivers/bluetooth/hci_h4.c @@ -112,7 +112,7 @@ static int h4_recv(struct hci_uart *hu, const void *data, int count)  	if (!test_bit(HCI_UART_REGISTERED, &hu->flags))  		return -EUNATCH; -	h4->rx_skb = 
h4_recv_buf(hu->hdev, h4->rx_skb, data, count, +	h4->rx_skb = h4_recv_buf(hu, h4->rx_skb, data, count,  				 h4_recv_pkts, ARRAY_SIZE(h4_recv_pkts));  	if (IS_ERR(h4->rx_skb)) {  		int err = PTR_ERR(h4->rx_skb); @@ -151,12 +151,12 @@ int __exit h4_deinit(void)  	return hci_uart_unregister_proto(&h4p);  } -struct sk_buff *h4_recv_buf(struct hci_dev *hdev, struct sk_buff *skb, +struct sk_buff *h4_recv_buf(struct hci_uart *hu, struct sk_buff *skb,  			    const unsigned char *buffer, int count,  			    const struct h4_recv_pkt *pkts, int pkts_count)  { -	struct hci_uart *hu = hci_get_drvdata(hdev);  	u8 alignment = hu->alignment ? hu->alignment : 1; +	struct hci_dev *hdev = hu->hdev;  	/* Check for error from previous call */  	if (IS_ERR(skb)) diff --git a/drivers/bluetooth/hci_intel.c b/drivers/bluetooth/hci_intel.c index 9b353c3d6442..1d6e09508f1f 100644 --- a/drivers/bluetooth/hci_intel.c +++ b/drivers/bluetooth/hci_intel.c @@ -972,7 +972,7 @@ static int intel_recv(struct hci_uart *hu, const void *data, int count)  	if (!test_bit(HCI_UART_REGISTERED, &hu->flags))  		return -EUNATCH; -	intel->rx_skb = h4_recv_buf(hu->hdev, intel->rx_skb, data, count, +	intel->rx_skb = h4_recv_buf(hu, intel->rx_skb, data, count,  				    intel_recv_pkts,  				    ARRAY_SIZE(intel_recv_pkts));  	if (IS_ERR(intel->rx_skb)) { diff --git a/drivers/bluetooth/hci_ll.c b/drivers/bluetooth/hci_ll.c index 7044c86325ce..6f4e25917b86 100644 --- a/drivers/bluetooth/hci_ll.c +++ b/drivers/bluetooth/hci_ll.c @@ -429,7 +429,7 @@ static int ll_recv(struct hci_uart *hu, const void *data, int count)  	if (!test_bit(HCI_UART_REGISTERED, &hu->flags))  		return -EUNATCH; -	ll->rx_skb = h4_recv_buf(hu->hdev, ll->rx_skb, data, count, +	ll->rx_skb = h4_recv_buf(hu, ll->rx_skb, data, count,  				 ll_recv_pkts, ARRAY_SIZE(ll_recv_pkts));  	if (IS_ERR(ll->rx_skb)) {  		int err = PTR_ERR(ll->rx_skb); diff --git a/drivers/bluetooth/hci_mrvl.c b/drivers/bluetooth/hci_mrvl.c index e08222395772..8767522ec4c6 100644 --- a/drivers/bluetooth/hci_mrvl.c +++ b/drivers/bluetooth/hci_mrvl.c @@ -264,9 +264,9 @@ static int mrvl_recv(struct hci_uart *hu, const void *data, int count)  				!test_bit(STATE_FW_LOADED, &mrvl->flags))  		return count; -	mrvl->rx_skb = h4_recv_buf(hu->hdev, mrvl->rx_skb, data, count, -				    mrvl_recv_pkts, -				    ARRAY_SIZE(mrvl_recv_pkts)); +	mrvl->rx_skb = h4_recv_buf(hu, mrvl->rx_skb, data, count, +				   mrvl_recv_pkts, +				   ARRAY_SIZE(mrvl_recv_pkts));  	if (IS_ERR(mrvl->rx_skb)) {  		int err = PTR_ERR(mrvl->rx_skb);  		bt_dev_err(hu->hdev, "Frame reassembly failed (%d)", err); diff --git a/drivers/bluetooth/hci_nokia.c b/drivers/bluetooth/hci_nokia.c index cd7575c20f65..1e65b541f8ad 100644 --- a/drivers/bluetooth/hci_nokia.c +++ b/drivers/bluetooth/hci_nokia.c @@ -624,8 +624,8 @@ static int nokia_recv(struct hci_uart *hu, const void *data, int count)  	if (!test_bit(HCI_UART_REGISTERED, &hu->flags))  		return -EUNATCH; -	btdev->rx_skb = h4_recv_buf(hu->hdev, btdev->rx_skb, data, count, -				  nokia_recv_pkts, ARRAY_SIZE(nokia_recv_pkts)); +	btdev->rx_skb = h4_recv_buf(hu, btdev->rx_skb, data, count, +				    nokia_recv_pkts, ARRAY_SIZE(nokia_recv_pkts));  	if (IS_ERR(btdev->rx_skb)) {  		err = PTR_ERR(btdev->rx_skb);  		dev_err(dev, "Frame reassembly failed (%d)", err); diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c index 4cff4d9be313..888176b0faa9 100644 --- a/drivers/bluetooth/hci_qca.c +++ b/drivers/bluetooth/hci_qca.c @@ -1277,7 +1277,7 @@ static int qca_recv(struct hci_uart *hu, 
const void *data, int count)  	if (!test_bit(HCI_UART_REGISTERED, &hu->flags))  		return -EUNATCH; -	qca->rx_skb = h4_recv_buf(hu->hdev, qca->rx_skb, data, count, +	qca->rx_skb = h4_recv_buf(hu, qca->rx_skb, data, count,  				  qca_recv_pkts, ARRAY_SIZE(qca_recv_pkts));  	if (IS_ERR(qca->rx_skb)) {  		int err = PTR_ERR(qca->rx_skb); diff --git a/drivers/bluetooth/hci_uart.h b/drivers/bluetooth/hci_uart.h index cbbe79b241ce..48ac7ca9334e 100644 --- a/drivers/bluetooth/hci_uart.h +++ b/drivers/bluetooth/hci_uart.h @@ -162,7 +162,7 @@ struct h4_recv_pkt {  int h4_init(void);  int h4_deinit(void); -struct sk_buff *h4_recv_buf(struct hci_dev *hdev, struct sk_buff *skb, +struct sk_buff *h4_recv_buf(struct hci_uart *hu, struct sk_buff *skb,  			    const unsigned char *buffer, int count,  			    const struct h4_recv_pkt *pkts, int pkts_count);  #endif diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c index 7d21fb5a72f4..23239b0c04f9 100644 --- a/drivers/cpuidle/governors/menu.c +++ b/drivers/cpuidle/governors/menu.c @@ -318,10 +318,13 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,  		/*  		 * Use a physical idle state, not busy polling, unless a timer -		 * is going to trigger soon enough. +		 * is going to trigger soon enough or the exit latency of the +		 * idle state in question is greater than the predicted idle +		 * duration.  		 */  		if ((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) && -		    s->target_residency_ns <= data->next_timer_ns) { +		    s->target_residency_ns <= data->next_timer_ns && +		    s->exit_latency_ns <= predicted_ns) {  			predicted_ns = s->target_residency_ns;  			idx = i;  			break; diff --git a/drivers/crypto/aspeed/aspeed-acry.c b/drivers/crypto/aspeed/aspeed-acry.c index 8d1c79aaca07..5993bcba9716 100644 --- a/drivers/crypto/aspeed/aspeed-acry.c +++ b/drivers/crypto/aspeed/aspeed-acry.c @@ -787,7 +787,6 @@ static int aspeed_acry_probe(struct platform_device *pdev)  err_engine_rsa_start:  	crypto_engine_exit(acry_dev->crypt_engine_rsa);  clk_exit: -	clk_disable_unprepare(acry_dev->clk);  	return rc;  } @@ -799,7 +798,6 @@ static void aspeed_acry_remove(struct platform_device *pdev)  	aspeed_acry_unregister(acry_dev);  	crypto_engine_exit(acry_dev->crypt_engine_rsa);  	tasklet_kill(&acry_dev->done_task); -	clk_disable_unprepare(acry_dev->clk);  }  MODULE_DEVICE_TABLE(of, aspeed_acry_of_matches); diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c index 3f78c56b58dc..39e6f93dc310 100644 --- a/drivers/dma-buf/dma-fence.c +++ b/drivers/dma-buf/dma-fence.c @@ -1141,7 +1141,7 @@ const char __rcu *dma_fence_timeline_name(struct dma_fence *fence)  			 "RCU protection is required for safe access to returned string");  	if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) -		return fence->ops->get_driver_name(fence); +		return fence->ops->get_timeline_name(fence);  	else  		return "signaled-timeline";  } diff --git a/drivers/dpll/dpll_netlink.c b/drivers/dpll/dpll_netlink.c index 74c1f0ca95f2..a4153bcb6dcf 100644 --- a/drivers/dpll/dpll_netlink.c +++ b/drivers/dpll/dpll_netlink.c @@ -1559,16 +1559,18 @@ int dpll_nl_pin_id_get_doit(struct sk_buff *skb, struct genl_info *info)  		return -EMSGSIZE;  	}  	pin = dpll_pin_find_from_nlattr(info); -	if (!IS_ERR(pin)) { -		if (!dpll_pin_available(pin)) { -			nlmsg_free(msg); -			return -ENODEV; -		} -		ret = dpll_msg_add_pin_handle(msg, pin); -		if (ret) { -			nlmsg_free(msg); -			return ret; -		} +	if (IS_ERR(pin)) { +		nlmsg_free(msg); +		
return PTR_ERR(pin); +	} +	if (!dpll_pin_available(pin)) { +		nlmsg_free(msg); +		return -ENODEV; +	} +	ret = dpll_msg_add_pin_handle(msg, pin); +	if (ret) { +		nlmsg_free(msg); +		return ret;  	}  	genlmsg_end(msg, hdr); @@ -1735,12 +1737,14 @@ int dpll_nl_device_id_get_doit(struct sk_buff *skb, struct genl_info *info)  	}  	dpll = dpll_device_find_from_nlattr(info); -	if (!IS_ERR(dpll)) { -		ret = dpll_msg_add_dev_handle(msg, dpll); -		if (ret) { -			nlmsg_free(msg); -			return ret; -		} +	if (IS_ERR(dpll)) { +		nlmsg_free(msg); +		return PTR_ERR(dpll); +	} +	ret = dpll_msg_add_dev_handle(msg, dpll); +	if (ret) { +		nlmsg_free(msg); +		return ret;  	}  	genlmsg_end(msg, hdr); diff --git a/drivers/dpll/zl3073x/dpll.c b/drivers/dpll/zl3073x/dpll.c index 93dc93eec79e..f93f9a458324 100644 --- a/drivers/dpll/zl3073x/dpll.c +++ b/drivers/dpll/zl3073x/dpll.c @@ -1904,7 +1904,7 @@ zl3073x_dpll_pin_is_registrable(struct zl3073x_dpll *zldpll,  		}  		is_diff = zl3073x_out_is_diff(zldev, out); -		is_enabled = zl3073x_out_is_enabled(zldev, out); +		is_enabled = zl3073x_output_pin_is_enabled(zldev, index);  	}  	/* Skip N-pin if the corresponding input/output is differential */ diff --git a/drivers/edac/versalnet_edac.c b/drivers/edac/versalnet_edac.c index 7c5db8bf0595..1ded4c3f0213 100644 --- a/drivers/edac/versalnet_edac.c +++ b/drivers/edac/versalnet_edac.c @@ -433,7 +433,7 @@ static void handle_error(struct mc_priv  *priv, struct ecc_status *stat,  	phys_addr_t pfn;  	int err; -	if (WARN_ON_ONCE(ctl_num > NUM_CONTROLLERS)) +	if (WARN_ON_ONCE(ctl_num >= NUM_CONTROLLERS))  		return;  	mci = priv->mci[ctl_num]; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c index ef996493115f..425a3e564360 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +// SPDX-License-Identifier: MIT  /*   * Copyright 2025 Advanced Micro Devices, Inc.   * diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.h index bcb97d245673..353421807387 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: MIT */  /*   * Copyright 2025 Advanced Micro Devices, Inc.   
* diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c index 474bfe36c0c2..aa78c2ee9e21 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c @@ -322,6 +322,26 @@ static int vpe_early_init(struct amdgpu_ip_block *ip_block)  	return 0;  } +static bool vpe_need_dpm0_at_power_down(struct amdgpu_device *adev) +{ +	switch (amdgpu_ip_version(adev, VPE_HWIP, 0)) { +	case IP_VERSION(6, 1, 1): +		return adev->pm.fw_version < 0x0a640500; +	default: +		return false; +	} +} + +static int vpe_get_dpm_level(struct amdgpu_device *adev) +{ +	struct amdgpu_vpe *vpe = &adev->vpe; + +	if (!adev->pm.dpm_enabled) +		return 0; + +	return RREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_request_lv)); +} +  static void vpe_idle_work_handler(struct work_struct *work)  {  	struct amdgpu_device *adev = @@ -329,11 +349,17 @@ static void vpe_idle_work_handler(struct work_struct *work)  	unsigned int fences = 0;  	fences += amdgpu_fence_count_emitted(&adev->vpe.ring); +	if (fences) +		goto reschedule; -	if (fences == 0) -		amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VPE, AMD_PG_STATE_GATE); -	else -		schedule_delayed_work(&adev->vpe.idle_work, VPE_IDLE_TIMEOUT); +	if (vpe_need_dpm0_at_power_down(adev) && vpe_get_dpm_level(adev) != 0) +		goto reschedule; + +	amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VPE, AMD_PG_STATE_GATE); +	return; + +reschedule: +	schedule_delayed_work(&adev->vpe.idle_work, VPE_IDLE_TIMEOUT);  }  static int vpe_common_init(struct amdgpu_vpe *vpe) diff --git a/drivers/gpu/drm/amd/amdgpu/cyan_skillfish_reg_init.c b/drivers/gpu/drm/amd/amdgpu/cyan_skillfish_reg_init.c index 96616a865aac..ed1e25661706 100644 --- a/drivers/gpu/drm/amd/amdgpu/cyan_skillfish_reg_init.c +++ b/drivers/gpu/drm/amd/amdgpu/cyan_skillfish_reg_init.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +// SPDX-License-Identifier: MIT  /*   * Copyright 2018 Advanced Micro Devices, Inc.   
* diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c index 1ec9d03ad747..38f9ea313dcb 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c @@ -248,6 +248,8 @@ static void amdgpu_dm_crtc_vblank_control_worker(struct work_struct *work)  	struct vblank_control_work *vblank_work =  		container_of(work, struct vblank_control_work, work);  	struct amdgpu_display_manager *dm = vblank_work->dm; +	struct amdgpu_device *adev = drm_to_adev(dm->ddev); +	int r;  	mutex_lock(&dm->dc_lock); @@ -277,7 +279,16 @@ static void amdgpu_dm_crtc_vblank_control_worker(struct work_struct *work)  	if (dm->active_vblank_irq_count == 0) {  		dc_post_update_surfaces_to_stream(dm->dc); + +		r = amdgpu_dpm_pause_power_profile(adev, true); +		if (r) +			dev_warn(adev->dev, "failed to set default power profile mode\n"); +  		dc_allow_idle_optimizations(dm->dc, true); + +		r = amdgpu_dpm_pause_power_profile(adev, false); +		if (r) +			dev_warn(adev->dev, "failed to restore the power profile mode\n");  	}  	mutex_unlock(&dm->dc_lock); @@ -297,8 +308,12 @@ static inline int amdgpu_dm_crtc_set_vblank(struct drm_crtc *crtc, bool enable)  	int irq_type;  	int rc = 0; -	if (acrtc->otg_inst == -1) -		goto skip; +	if (enable && !acrtc->base.enabled) { +		drm_dbg_vbl(crtc->dev, +				"Reject vblank enable on unconfigured CRTC %d (enabled=%d)\n", +				acrtc->crtc_id, acrtc->base.enabled); +		return -EINVAL; +	}  	irq_type = amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id); @@ -383,7 +398,7 @@ static inline int amdgpu_dm_crtc_set_vblank(struct drm_crtc *crtc, bool enable)  			return rc;  	}  #endif -skip: +  	if (amdgpu_in_reset(adev))  		return 0; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c index fe100e4c9801..cc21337a182f 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c @@ -83,6 +83,7 @@ static void apply_edid_quirks(struct drm_device *dev, struct edid *edid, struct  		edid_caps->panel_patch.remove_sink_ext_caps = true;  		break;  	case drm_edid_encode_panel_id('S', 'D', 'C', 0x4154): +	case drm_edid_encode_panel_id('S', 'D', 'C', 0x4171):  		drm_dbg_driver(dev, "Disabling VSC on monitor with panel id %X\n", panel_id);  		edid_caps->panel_patch.disable_colorimetry = true;  		break; diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c index 09be2a90cc79..4f569cd8a5d6 100644 --- a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c @@ -578,9 +578,6 @@ static void dpp3_power_on_blnd_lut(  			dpp_base->ctx->dc->optimized_required = true;  			dpp_base->deferred_reg_writes.bits.disable_blnd_lut = true;  		} -	} else { -		REG_SET(CM_MEM_PWR_CTRL, 0, -				BLNDGAM_MEM_PWR_FORCE, power_on == true ? 0 : 1);  	}  } diff --git a/drivers/gpu/drm/amd/include/amd_cper.h b/drivers/gpu/drm/amd/include/amd_cper.h index 086869264425..a252ee4c7874 100644 --- a/drivers/gpu/drm/amd/include/amd_cper.h +++ b/drivers/gpu/drm/amd/include/amd_cper.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: MIT */  /*   * Copyright 2025 Advanced Micro Devices, Inc.   
* diff --git a/drivers/gpu/drm/amd/include/ivsrcid/vcn/irqsrcs_vcn_5_0.h b/drivers/gpu/drm/amd/include/ivsrcid/vcn/irqsrcs_vcn_5_0.h index 64b553e7de1a..e7fdcee22a71 100644 --- a/drivers/gpu/drm/amd/include/ivsrcid/vcn/irqsrcs_vcn_5_0.h +++ b/drivers/gpu/drm/amd/include/ivsrcid/vcn/irqsrcs_vcn_5_0.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: MIT */  /*   * Copyright 2024 Advanced Micro Devices, Inc. All rights reserved. diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c index d2dbd90bb427..0a876c840c79 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c @@ -2024,7 +2024,7 @@ static int fiji_init_smc_table(struct pp_hwmgr *hwmgr)  	table->VoltageResponseTime = 0;  	table->PhaseResponseTime = 0;  	table->MemoryThermThrottleEnable = 1; -	table->PCIeBootLinkLevel = 0;      /* 0:Gen1 1:Gen2 2:Gen3*/ +	table->PCIeBootLinkLevel = (uint8_t) (data->dpm_table.pcie_speed_table.count);  	table->PCIeGenInterval = 1;  	table->VRConfig = 0; diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c index 1f50f1e74c48..aa3ae9b115c4 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c @@ -2028,7 +2028,7 @@ static int iceland_init_smc_table(struct pp_hwmgr *hwmgr)  	table->VoltageResponseTime  = 0;  	table->PhaseResponseTime  = 0;  	table->MemoryThermThrottleEnable  = 1; -	table->PCIeBootLinkLevel = 0; +	table->PCIeBootLinkLevel = (uint8_t) (data->dpm_table.pcie_speed_table.count);  	table->PCIeGenInterval = 1;  	result = iceland_populate_smc_svi2_config(hwmgr, table); diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c index f532f7c69259..a8961a8f5c42 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c @@ -969,7 +969,7 @@ int smu_cmn_update_table(struct smu_context *smu,  						      table_index);  	uint32_t table_size;  	int ret = 0; -	if (!table_data || table_id >= SMU_TABLE_COUNT || table_id < 0) +	if (!table_data || table_index >= SMU_TABLE_COUNT || table_id < 0)  		return -EINVAL;  	table_size = smu_table->tables[table_index].size; diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h index c15aef014f69..d41bd876167c 100644 --- a/drivers/gpu/drm/ast/ast_drv.h +++ b/drivers/gpu/drm/ast/ast_drv.h @@ -282,13 +282,13 @@ static inline void __ast_write8_i(void __iomem *addr, u32 reg, u8 index, u8 val)  	__ast_write8(addr, reg + 1, val);  } -static inline void __ast_write8_i_masked(void __iomem *addr, u32 reg, u8 index, u8 read_mask, +static inline void __ast_write8_i_masked(void __iomem *addr, u32 reg, u8 index, u8 preserve_mask,  					 u8 val)  { -	u8 tmp = __ast_read8_i_masked(addr, reg, index, read_mask); +	u8 tmp = __ast_read8_i_masked(addr, reg, index, preserve_mask); -	tmp |= val; -	__ast_write8_i(addr, reg, index, tmp); +	val &= ~preserve_mask; +	__ast_write8_i(addr, reg, index, tmp | val);  }  static inline u32 ast_read32(struct ast_device *ast, u32 reg) diff --git a/drivers/gpu/drm/ci/gitlab-ci.yml b/drivers/gpu/drm/ci/gitlab-ci.yml index d502d146b177..56638814bb28 100644 --- a/drivers/gpu/drm/ci/gitlab-ci.yml +++ b/drivers/gpu/drm/ci/gitlab-ci.yml @@ -280,7 +280,7 @@ sanity:      GIT_STRATEGY: none    script:      # ci-fairy check-commits 
--junit-xml=check-commits.xml -    - ci-fairy check-merge-request --require-allow-collaboration --junit-xml=check-merge-request.xml +    # - ci-fairy check-merge-request --require-allow-collaboration --junit-xml=check-merge-request.xml      - |        set -eu        image_tags=( diff --git a/drivers/gpu/drm/drm_gem_atomic_helper.c b/drivers/gpu/drm/drm_gem_atomic_helper.c index ebf305fb24f0..6fb55601252f 100644 --- a/drivers/gpu/drm/drm_gem_atomic_helper.c +++ b/drivers/gpu/drm/drm_gem_atomic_helper.c @@ -310,8 +310,12 @@ EXPORT_SYMBOL(drm_gem_destroy_shadow_plane_state);  void __drm_gem_reset_shadow_plane(struct drm_plane *plane,  				  struct drm_shadow_plane_state *shadow_plane_state)  { -	__drm_atomic_helper_plane_reset(plane, &shadow_plane_state->base); -	drm_format_conv_state_init(&shadow_plane_state->fmtcnv_state); +	if (shadow_plane_state) { +		__drm_atomic_helper_plane_reset(plane, &shadow_plane_state->base); +		drm_format_conv_state_init(&shadow_plane_state->fmtcnv_state); +	} else { +		__drm_atomic_helper_plane_reset(plane, NULL); +	}  }  EXPORT_SYMBOL(__drm_gem_reset_shadow_plane); diff --git a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c index b13a17276d07..88385dc3b30d 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c @@ -347,7 +347,7 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,  	u32 link_target, link_dwords;  	bool switch_context = gpu->exec_state != exec_state;  	bool switch_mmu_context = gpu->mmu_context != mmu_context; -	unsigned int new_flush_seq = READ_ONCE(gpu->mmu_context->flush_seq); +	unsigned int new_flush_seq = READ_ONCE(mmu_context->flush_seq);  	bool need_flush = switch_mmu_context || gpu->flush_seq != new_flush_seq;  	bool has_blt = !!(gpu->identity.minor_features5 &  			  chipMinorFeatures5_BLT_ENGINE); diff --git a/drivers/gpu/drm/i915/display/intel_dmc.c b/drivers/gpu/drm/i915/display/intel_dmc.c index 77a0199f9ea5..4a4cace1f879 100644 --- a/drivers/gpu/drm/i915/display/intel_dmc.c +++ b/drivers/gpu/drm/i915/display/intel_dmc.c @@ -546,6 +546,36 @@ static bool is_event_handler(struct intel_display *display,  		REG_FIELD_GET(DMC_EVT_CTL_EVENT_ID_MASK, data) == event_id;  } +static bool fixup_dmc_evt(struct intel_display *display, +			  enum intel_dmc_id dmc_id, +			  i915_reg_t reg_ctl, u32 *data_ctl, +			  i915_reg_t reg_htp, u32 *data_htp) +{ +	if (!is_dmc_evt_ctl_reg(display, dmc_id, reg_ctl)) +		return false; + +	if (!is_dmc_evt_htp_reg(display, dmc_id, reg_htp)) +		return false; + +	/* make sure reg_ctl and reg_htp are for the same event */ +	if (i915_mmio_reg_offset(reg_ctl) - i915_mmio_reg_offset(DMC_EVT_CTL(display, dmc_id, 0)) != +	    i915_mmio_reg_offset(reg_htp) - i915_mmio_reg_offset(DMC_EVT_HTP(display, dmc_id, 0))) +		return false; + +	/* +	 * On ADL-S the HRR event handler is not restored after DC6. +	 * Clear it to zero from the beginning to avoid mismatches later. 
+	 */ +	if (display->platform.alderlake_s && dmc_id == DMC_FW_MAIN && +	    is_event_handler(display, dmc_id, MAINDMC_EVENT_VBLANK_A, reg_ctl, *data_ctl)) { +		*data_ctl = 0; +		*data_htp = 0; +		return true; +	} + +	return false; +} +  static bool disable_dmc_evt(struct intel_display *display,  			    enum intel_dmc_id dmc_id,  			    i915_reg_t reg, u32 data) @@ -1064,9 +1094,32 @@ static u32 parse_dmc_fw_header(struct intel_dmc *dmc,  	for (i = 0; i < mmio_count; i++) {  		dmc_info->mmioaddr[i] = _MMIO(mmioaddr[i]);  		dmc_info->mmiodata[i] = mmiodata[i]; +	} + +	for (i = 0; i < mmio_count - 1; i++) { +		u32 orig_mmiodata[2] = { +			dmc_info->mmiodata[i], +			dmc_info->mmiodata[i+1], +		}; + +		if (!fixup_dmc_evt(display, dmc_id, +				   dmc_info->mmioaddr[i], &dmc_info->mmiodata[i], +				   dmc_info->mmioaddr[i+1], &dmc_info->mmiodata[i+1])) +			continue; + +		drm_dbg_kms(display->drm, +			    " mmio[%d]: 0x%x = 0x%x->0x%x (EVT_CTL)\n", +			    i, i915_mmio_reg_offset(dmc_info->mmioaddr[i]), +			    orig_mmiodata[0], dmc_info->mmiodata[i]); +		drm_dbg_kms(display->drm, +			    " mmio[%d]: 0x%x = 0x%x->0x%x (EVT_HTP)\n", +			    i+1, i915_mmio_reg_offset(dmc_info->mmioaddr[i+1]), +			    orig_mmiodata[1], dmc_info->mmiodata[i+1]); +	} +	for (i = 0; i < mmio_count; i++) {  		drm_dbg_kms(display->drm, " mmio[%d]: 0x%x = 0x%x%s%s\n", -			    i, mmioaddr[i], mmiodata[i], +			    i, i915_mmio_reg_offset(dmc_info->mmioaddr[i]), dmc_info->mmiodata[i],  			    is_dmc_evt_ctl_reg(display, dmc_id, dmc_info->mmioaddr[i]) ? " (EVT_CTL)" :  			    is_dmc_evt_htp_reg(display, dmc_id, dmc_info->mmioaddr[i]) ? " (EVT_HTP)" : "",  			    disable_dmc_evt(display, dmc_id, dmc_info->mmioaddr[i], diff --git a/drivers/gpu/drm/imx/ipuv3/parallel-display.c b/drivers/gpu/drm/imx/ipuv3/parallel-display.c index 6d8325c76697..7fc6af703307 100644 --- a/drivers/gpu/drm/imx/ipuv3/parallel-display.c +++ b/drivers/gpu/drm/imx/ipuv3/parallel-display.c @@ -25,19 +25,18 @@  struct imx_parallel_display_encoder {  	struct drm_encoder encoder; -	struct drm_bridge bridge; -	struct imx_parallel_display *pd;  };  struct imx_parallel_display {  	struct device *dev;  	u32 bus_format;  	struct drm_bridge *next_bridge; +	struct drm_bridge bridge;  };  static inline struct imx_parallel_display *bridge_to_imxpd(struct drm_bridge *b)  { -	return container_of(b, struct imx_parallel_display_encoder, bridge)->pd; +	return container_of(b, struct imx_parallel_display, bridge);  }  static const u32 imx_pd_bus_fmts[] = { @@ -195,15 +194,13 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data)  	if (IS_ERR(imxpd_encoder))  		return PTR_ERR(imxpd_encoder); -	imxpd_encoder->pd = imxpd;  	encoder = &imxpd_encoder->encoder; -	bridge = &imxpd_encoder->bridge; +	bridge = &imxpd->bridge;  	ret = imx_drm_encoder_parse_of(drm, encoder, imxpd->dev->of_node);  	if (ret)  		return ret; -	bridge->funcs = &imx_pd_bridge_funcs;  	drm_bridge_attach(encoder, bridge, NULL, DRM_BRIDGE_ATTACH_NO_CONNECTOR);  	connector = drm_bridge_connector_init(drm, encoder); @@ -228,9 +225,10 @@ static int imx_pd_probe(struct platform_device *pdev)  	u32 bus_format = 0;  	const char *fmt; -	imxpd = devm_kzalloc(dev, sizeof(*imxpd), GFP_KERNEL); -	if (!imxpd) -		return -ENOMEM; +	imxpd = devm_drm_bridge_alloc(dev, struct imx_parallel_display, bridge, +				      &imx_pd_bridge_funcs); +	if (IS_ERR(imxpd)) +		return PTR_ERR(imxpd);  	/* port@1 is the output port */  	imxpd->next_bridge = devm_drm_of_get_bridge(dev, np, 1, 0); @@ -258,6 +256,8 @@ static 
int imx_pd_probe(struct platform_device *pdev)  	platform_set_drvdata(pdev, imxpd); +	devm_drm_bridge_add(dev, &imxpd->bridge); +  	return component_add(dev, &imx_pd_ops);  } diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c index eb5537f0ac90..31ff2922758a 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c @@ -686,10 +686,6 @@ err_free:  	for (i = 0; i < private->data->mmsys_dev_num; i++)  		private->all_drm_private[i]->drm = NULL;  err_put_dev: -	for (i = 0; i < private->data->mmsys_dev_num; i++) { -		/* For device_find_child in mtk_drm_get_all_priv() */ -		put_device(private->all_drm_private[i]->dev); -	}  	put_device(private->mutex_dev);  	return ret;  } @@ -697,18 +693,12 @@ err_put_dev:  static void mtk_drm_unbind(struct device *dev)  {  	struct mtk_drm_private *private = dev_get_drvdata(dev); -	int i;  	/* for multi mmsys dev, unregister drm dev in mmsys master */  	if (private->drm_master) {  		drm_dev_unregister(private->drm);  		mtk_drm_kms_deinit(private->drm);  		drm_dev_put(private->drm); - -		for (i = 0; i < private->data->mmsys_dev_num; i++) { -			/* For device_find_child in mtk_drm_get_all_priv() */ -			put_device(private->all_drm_private[i]->dev); -		}  		put_device(private->mutex_dev);  	}  	private->mtk_drm_bound = false; diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c index fc62fef2fed8..4e6dc16e4a4c 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c @@ -780,6 +780,9 @@ static bool fw_block_mem(struct a6xx_gmu_bo *bo, const struct block_header *blk)  	return true;  } +#define NEXT_BLK(blk) \ +	((const struct block_header *)((const char *)(blk) + sizeof(*(blk)) + (blk)->size)) +  static int a6xx_gmu_fw_load(struct a6xx_gmu *gmu)  {  	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); @@ -811,7 +814,7 @@ static int a6xx_gmu_fw_load(struct a6xx_gmu *gmu)  	for (blk = (const struct block_header *) fw_image->data;  	     (const u8*) blk < fw_image->data + fw_image->size; -	     blk = (const struct block_header *) &blk->data[blk->size >> 2]) { +	     blk = NEXT_BLK(blk)) {  		if (blk->size == 0)  			continue; diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index afaa3cfefd35..4b5a4edd0702 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c @@ -348,13 +348,6 @@ int adreno_fault_handler(struct msm_gpu *gpu, unsigned long iova, int flags,  	return 0;  } -static bool -adreno_smmu_has_prr(struct msm_gpu *gpu) -{ -	struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(&gpu->pdev->dev); -	return adreno_smmu && adreno_smmu->set_prr_addr; -} -  int adreno_get_param(struct msm_gpu *gpu, struct msm_context *ctx,  		     uint32_t param, uint64_t *value, uint32_t *len)  { diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c index 4b970a59deaf..2f8156051d9b 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c @@ -1545,6 +1545,9 @@ static enum drm_mode_status dpu_crtc_mode_valid(struct drm_crtc *crtc,  	adjusted_mode_clk = dpu_core_perf_adjusted_mode_clk(mode->clock,  							    dpu_kms->perf.perf_cfg); +	if (dpu_kms->catalog->caps->has_3d_merge) +		adjusted_mode_clk /= 2; +  	/*  	 * The given mode, adjusted for the perf clock factor, should not exceed  	 * the max core clock rate diff --git 
a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c index 6641455c4ec6..9f8d1bba9139 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c @@ -267,8 +267,8 @@ static const u32 wb2_formats_rgb_yuv[] = {  		.base = 0x200, .len = 0xa0,}, \  	.csc_blk = {.name = "csc", \  		.base = 0x320, .len = 0x100,}, \ -	.format_list = plane_formats_yuv, \ -	.num_formats = ARRAY_SIZE(plane_formats_yuv), \ +	.format_list = plane_formats, \ +	.num_formats = ARRAY_SIZE(plane_formats), \  	.rotation_cfg = NULL, \  	} diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c index f54cf0faa1c7..905524ceeb1f 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c @@ -500,13 +500,15 @@ static void _dpu_plane_setup_pixel_ext(struct dpu_hw_scaler3_cfg *scale_cfg,  	int i;  	for (i = 0; i < DPU_MAX_PLANES; i++) { +		uint32_t w = src_w, h = src_h; +  		if (i == DPU_SSPP_COMP_1_2 || i == DPU_SSPP_COMP_2) { -			src_w /= chroma_subsmpl_h; -			src_h /= chroma_subsmpl_v; +			w /= chroma_subsmpl_h; +			h /= chroma_subsmpl_v;  		} -		pixel_ext->num_ext_pxls_top[i] = src_h; -		pixel_ext->num_ext_pxls_left[i] = src_w; +		pixel_ext->num_ext_pxls_top[i] = h; +		pixel_ext->num_ext_pxls_left[i] = w;  	}  } @@ -740,7 +742,7 @@ static int dpu_plane_atomic_check_pipe(struct dpu_plane *pdpu,  	 * We already have verified scaling against platform limitations.  	 * Now check if the SSPP supports scaling at all.  	 */ -	if (!sblk->scaler_blk.len && +	if (!(sblk->scaler_blk.len && pipe->sspp->ops.setup_scaler) &&  	    ((drm_rect_width(&new_plane_state->src) >> 16 !=  	      drm_rect_width(&new_plane_state->dst)) ||  	     (drm_rect_height(&new_plane_state->src) >> 16 != @@ -1278,7 +1280,7 @@ int dpu_assign_plane_resources(struct dpu_global_state *global_state,  							     state, plane_state,  							     prev_adjacent_plane_state);  		if (ret) -			break; +			return ret;  		prev_adjacent_plane_state = plane_state;  	} diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c index 2c77c74fac0f..d9c3b0a1d091 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c @@ -842,7 +842,7 @@ struct dpu_hw_sspp *dpu_rm_reserve_sspp(struct dpu_rm *rm,  	if (!reqs->scale && !reqs->yuv)  		hw_sspp = dpu_rm_try_sspp(rm, global_state, crtc, reqs, SSPP_TYPE_DMA); -	if (!hw_sspp && reqs->scale) +	if (!hw_sspp && !reqs->yuv)  		hw_sspp = dpu_rm_try_sspp(rm, global_state, crtc, reqs, SSPP_TYPE_RGB);  	if (!hw_sspp)  		hw_sspp = dpu_rm_try_sspp(rm, global_state, crtc, reqs, SSPP_TYPE_VIG); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c index cd73468e369a..7545c0293efb 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c @@ -72,6 +72,9 @@ static int dpu_wb_conn_atomic_check(struct drm_connector *connector,  		DPU_ERROR("invalid fb w=%d, maxlinewidth=%u\n",  			  fb->width, dpu_wb_conn->maxlinewidth);  		return -EINVAL; +	} else if (fb->modifier != DRM_FORMAT_MOD_LINEAR) { +		DPU_ERROR("unsupported fb modifier:%#llx\n", fb->modifier); +		return -EINVAL;  	}  	return drm_atomic_helper_check_wb_connector_state(conn_state->connector, conn_state->state); diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h index e391505fdaf0..3cbf08231492 100644 
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h @@ -109,7 +109,6 @@ struct msm_dsi_phy {  	struct msm_dsi_dphy_timing timing;  	const struct msm_dsi_phy_cfg *cfg;  	void *tuning_cfg; -	void *pll_data;  	enum msm_dsi_phy_usecase usecase;  	bool regulator_ldo_mode; diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c index 32f06edd21a9..c5e1d2016bcc 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c @@ -426,11 +426,8 @@ static void dsi_pll_enable_pll_bias(struct dsi_pll_7nm *pll)  	u32 data;  	spin_lock_irqsave(&pll->pll_enable_lock, flags); -	if (pll->pll_enable_cnt++) { -		spin_unlock_irqrestore(&pll->pll_enable_lock, flags); -		WARN_ON(pll->pll_enable_cnt == INT_MAX); -		return; -	} +	pll->pll_enable_cnt++; +	WARN_ON(pll->pll_enable_cnt == INT_MAX);  	data = readl(pll->phy->base + REG_DSI_7nm_PHY_CMN_CTRL_0);  	data |= DSI_7nm_PHY_CMN_CTRL_0_PLL_SHUTDOWNB; @@ -876,7 +873,6 @@ static int dsi_pll_7nm_init(struct msm_dsi_phy *phy)  	spin_lock_init(&pll_7nm->pll_enable_lock);  	pll_7nm->phy = phy; -	phy->pll_data = pll_7nm;  	ret = pll_7nm_register(pll_7nm, phy->provided_clocks->hws);  	if (ret) { @@ -965,10 +961,8 @@ static int dsi_7nm_phy_enable(struct msm_dsi_phy *phy,  	u32 const delay_us = 5;  	u32 const timeout_us = 1000;  	struct msm_dsi_dphy_timing *timing = &phy->timing; -	struct dsi_pll_7nm *pll = phy->pll_data;  	void __iomem *base = phy->base;  	bool less_than_1500_mhz; -	unsigned long flags;  	u32 vreg_ctrl_0, vreg_ctrl_1, lane_ctrl0;  	u32 glbl_pemph_ctrl_0;  	u32 glbl_str_swi_cal_sel_ctrl, glbl_hstx_str_ctrl_0; @@ -1090,13 +1084,10 @@ static int dsi_7nm_phy_enable(struct msm_dsi_phy *phy,  		glbl_rescode_bot_ctrl = 0x3c;  	} -	spin_lock_irqsave(&pll->pll_enable_lock, flags); -	pll->pll_enable_cnt = 1;  	/* de-assert digital and pll power down */  	data = DSI_7nm_PHY_CMN_CTRL_0_DIGTOP_PWRDN_B |  	       DSI_7nm_PHY_CMN_CTRL_0_PLL_SHUTDOWNB;  	writel(data, base + REG_DSI_7nm_PHY_CMN_CTRL_0); -	spin_unlock_irqrestore(&pll->pll_enable_lock, flags);  	/* Assert PLL core reset */  	writel(0x00, base + REG_DSI_7nm_PHY_CMN_PLL_CNTRL); @@ -1209,9 +1200,7 @@ static bool dsi_7nm_set_continuous_clock(struct msm_dsi_phy *phy, bool enable)  static void dsi_7nm_phy_disable(struct msm_dsi_phy *phy)  { -	struct dsi_pll_7nm *pll = phy->pll_data;  	void __iomem *base = phy->base; -	unsigned long flags;  	u32 data;  	DBG(""); @@ -1238,11 +1227,8 @@ static void dsi_7nm_phy_disable(struct msm_dsi_phy *phy)  	writel(data, base + REG_DSI_7nm_PHY_CMN_CTRL_0);  	writel(0, base + REG_DSI_7nm_PHY_CMN_LANE_CTRL0); -	spin_lock_irqsave(&pll->pll_enable_lock, flags); -	pll->pll_enable_cnt = 0;  	/* Turn off all PHY blocks */  	writel(0x00, base + REG_DSI_7nm_PHY_CMN_CTRL_0); -	spin_unlock_irqrestore(&pll->pll_enable_lock, flags);  	/* make sure phy is turned off */  	wmb(); diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index 07d8cdd6bb2e..9f7fbe577abb 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -1120,12 +1120,16 @@ static void msm_gem_free_object(struct drm_gem_object *obj)  		put_pages(obj);  	} -	if (obj->resv != &obj->_resv) { +	/* +	 * In error paths, we could end up here before msm_gem_new_handle() +	 * has changed obj->resv to point to the shared resv.  In this case, +	 * we don't want to drop a ref to the shared r_obj that we haven't +	 * taken yet. 
+	 */ +	if ((msm_obj->flags & MSM_BO_NO_SHARE) && (obj->resv != &obj->_resv)) {  		struct drm_gem_object *r_obj =  			container_of(obj->resv, struct drm_gem_object, _resv); -		WARN_ON(!(msm_obj->flags & MSM_BO_NO_SHARE)); -  		/* Drop reference we hold to shared resv obj: */  		drm_gem_object_put(r_obj);  	} diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index 3ab3b27134f9..75d9f3574370 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c @@ -414,6 +414,11 @@ static void submit_attach_object_fences(struct msm_gem_submit *submit)  					 submit->user_fence,  					 DMA_RESV_USAGE_BOOKKEEP,  					 DMA_RESV_USAGE_BOOKKEEP); + +		last_fence = vm->last_fence; +		vm->last_fence = dma_fence_unwrap_merge(submit->user_fence, last_fence); +		dma_fence_put(last_fence); +  		return;  	} @@ -427,10 +432,6 @@ static void submit_attach_object_fences(struct msm_gem_submit *submit)  			dma_resv_add_fence(obj->resv, submit->user_fence,  					   DMA_RESV_USAGE_READ);  	} - -	last_fence = vm->last_fence; -	vm->last_fence = dma_fence_unwrap_merge(submit->user_fence, last_fence); -	dma_fence_put(last_fence);  }  static int submit_bo(struct msm_gem_submit *submit, uint32_t idx, diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c index 8316af1723c2..89a95977f41e 100644 --- a/drivers/gpu/drm/msm/msm_gem_vma.c +++ b/drivers/gpu/drm/msm/msm_gem_vma.c @@ -971,6 +971,7 @@ static int  lookup_op(struct msm_vm_bind_job *job, const struct drm_msm_vm_bind_op *op)  {  	struct drm_device *dev = job->vm->drm; +	struct msm_drm_private *priv = dev->dev_private;  	int i = job->nr_ops++;  	int ret = 0; @@ -1017,6 +1018,11 @@ lookup_op(struct msm_vm_bind_job *job, const struct drm_msm_vm_bind_op *op)  		break;  	} +	if ((op->op == MSM_VM_BIND_OP_MAP_NULL) && +	    !adreno_smmu_has_prr(priv->gpu)) { +		ret = UERR(EINVAL, dev, "PRR not supported\n"); +	} +  	return ret;  } @@ -1421,7 +1427,7 @@ msm_ioctl_vm_bind(struct drm_device *dev, void *data, struct drm_file *file)  	 * Maybe we could allow just UNMAP ops?  OTOH userspace should just  	 * immediately close the device file and all will be torn down.  	 
*/ -	if (to_msm_vm(ctx->vm)->unusable) +	if (to_msm_vm(msm_context_vm(dev, ctx))->unusable)  		return UERR(EPIPE, dev, "context is unusable");  	/* diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h index a597f2bee30b..2894fc118485 100644 --- a/drivers/gpu/drm/msm/msm_gpu.h +++ b/drivers/gpu/drm/msm/msm_gpu.h @@ -299,6 +299,17 @@ static inline struct msm_gpu *dev_to_gpu(struct device *dev)  	return container_of(adreno_smmu, struct msm_gpu, adreno_smmu);  } +static inline bool +adreno_smmu_has_prr(struct msm_gpu *gpu) +{ +	struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(&gpu->pdev->dev); + +	if (!adreno_smmu) +		return false; + +	return adreno_smmu && adreno_smmu->set_prr_addr; +} +  /* It turns out that all targets use the same ringbuffer size */  #define MSM_GPU_RINGBUFFER_SZ SZ_32K  #define MSM_GPU_RINGBUFFER_BLKSIZE 32 diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c index 0e18619f96cb..a188617653e8 100644 --- a/drivers/gpu/drm/msm/msm_iommu.c +++ b/drivers/gpu/drm/msm/msm_iommu.c @@ -338,6 +338,8 @@ msm_iommu_pagetable_prealloc_allocate(struct msm_mmu *mmu, struct msm_mmu_preall  	ret = kmem_cache_alloc_bulk(pt_cache, GFP_KERNEL, p->count, p->pages);  	if (ret != p->count) { +		kfree(p->pages); +		p->pages = NULL;  		p->count = ret;  		return -ENOMEM;  	} @@ -351,6 +353,9 @@ msm_iommu_pagetable_prealloc_cleanup(struct msm_mmu *mmu, struct msm_mmu_preallo  	struct kmem_cache *pt_cache = get_pt_cache(mmu);  	uint32_t remaining_pt_count = p->count - p->ptr; +	if (!p->pages) +		return; +  	if (p->count > 0)  		trace_msm_mmu_prealloc_cleanup(p->count, remaining_pt_count); diff --git a/drivers/gpu/drm/nouveau/nouveau_sched.c b/drivers/gpu/drm/nouveau/nouveau_sched.c index e60f7892f5ce..a7bf539e5d86 100644 --- a/drivers/gpu/drm/nouveau/nouveau_sched.c +++ b/drivers/gpu/drm/nouveau/nouveau_sched.c @@ -482,6 +482,17 @@ nouveau_sched_create(struct nouveau_sched **psched, struct nouveau_drm *drm,  	return 0;  } +static bool +nouveau_sched_job_list_empty(struct nouveau_sched *sched) +{ +	bool empty; + +	spin_lock(&sched->job.list.lock); +	empty = list_empty(&sched->job.list.head); +	spin_unlock(&sched->job.list.lock); + +	return empty; +}  static void  nouveau_sched_fini(struct nouveau_sched *sched) @@ -489,8 +500,7 @@ nouveau_sched_fini(struct nouveau_sched *sched)  	struct drm_gpu_scheduler *drm_sched = &sched->base;  	struct drm_sched_entity *entity = &sched->entity; -	rmb(); /* for list_empty to work without lock */ -	wait_event(sched->job.wq, list_empty(&sched->job.list.head)); +	wait_event(sched->job.wq, nouveau_sched_job_list_empty(sched));  	drm_sched_entity_fini(entity);  	drm_sched_fini(drm_sched); diff --git a/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c b/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c index 2fc7b0779b37..893af9b16756 100644 --- a/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c +++ b/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c @@ -359,7 +359,7 @@ static int kingdisplay_panel_probe(struct mipi_dsi_device *dsi)  	dsi->lanes = 4;  	dsi->format = MIPI_DSI_FMT_RGB888;  	dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST | -			  MIPI_DSI_MODE_LPM; +			  MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_NO_EOT_PACKET;  	kingdisplay = devm_drm_panel_alloc(&dsi->dev, __typeof(*kingdisplay), base,  					   &kingdisplay_panel_funcs, diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7789v.c b/drivers/gpu/drm/panel/panel-sitronix-st7789v.c index 04d91929eedd..d5f821d6b23c 100644 --- 
a/drivers/gpu/drm/panel/panel-sitronix-st7789v.c +++ b/drivers/gpu/drm/panel/panel-sitronix-st7789v.c @@ -249,6 +249,11 @@ static const struct drm_display_mode default_mode = {  	.flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,  }; +/* + * The mode data for this panel has been reverse engineered without access + * to the panel datasheet / manual. Using DRM_MODE_FLAG_PHSYNC like all + * other panels results in garbage data on the display. + */  static const struct drm_display_mode t28cp45tn89_mode = {  	.clock = 6008,  	.hdisplay = 240, @@ -261,7 +266,7 @@ static const struct drm_display_mode t28cp45tn89_mode = {  	.vtotal = 320 + 8 + 4 + 4,  	.width_mm = 43,  	.height_mm = 57, -	.flags = DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC, +	.flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC,  };  static const struct drm_display_mode et028013dma_mode = { diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index 88e821d67af7..9c8907bc61d9 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -314,17 +314,17 @@ static int radeon_pci_probe(struct pci_dev *pdev,  	ret = pci_enable_device(pdev);  	if (ret) -		goto err_free; +		return ret;  	pci_set_drvdata(pdev, ddev);  	ret = radeon_driver_load_kms(ddev, flags);  	if (ret) -		goto err_agp; +		goto err;  	ret = drm_dev_register(ddev, flags);  	if (ret) -		goto err_agp; +		goto err;  	if (rdev->mc.real_vram_size <= (8 * 1024 * 1024))  		format = drm_format_info(DRM_FORMAT_C8); @@ -337,30 +337,14 @@ static int radeon_pci_probe(struct pci_dev *pdev,  	return 0; -err_agp: +err:  	pci_disable_device(pdev); -err_free: -	drm_dev_put(ddev);  	return ret;  }  static void -radeon_pci_remove(struct pci_dev *pdev) -{ -	struct drm_device *dev = pci_get_drvdata(pdev); - -	drm_put_dev(dev); -} - -static void  radeon_pci_shutdown(struct pci_dev *pdev)  { -	/* if we are running in a VM, make sure the device -	 * torn down properly on reboot/shutdown -	 */ -	if (radeon_device_is_virtual()) -		radeon_pci_remove(pdev); -  #if defined(CONFIG_PPC64) || defined(CONFIG_MACH_LOONGSON64)  	/*  	 * Some adapters need to be suspended before a @@ -613,7 +597,6 @@ static struct pci_driver radeon_kms_pci_driver = {  	.name = DRIVER_NAME,  	.id_table = pciidlist,  	.probe = radeon_pci_probe, -	.remove = radeon_pci_remove,  	.shutdown = radeon_pci_shutdown,  	.driver.pm = &radeon_pm_ops,  }; diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index 645e33bf7947..ba1446acd703 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c @@ -84,7 +84,6 @@ void radeon_driver_unload_kms(struct drm_device *dev)  	rdev->agp = NULL;  done_free: -	kfree(rdev);  	dev->dev_private = NULL;  } diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c index 5a4697f636f2..c8e949f4a568 100644 --- a/drivers/gpu/drm/scheduler/sched_entity.c +++ b/drivers/gpu/drm/scheduler/sched_entity.c @@ -70,6 +70,7 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,  	entity->guilty = guilty;  	entity->num_sched_list = num_sched_list;  	entity->priority = priority; +	entity->last_user = current->group_leader;  	/*  	 * It's perfectly valid to initialize an entity without having a valid  	 * scheduler attached. It's just not valid to use the scheduler before it @@ -302,7 +303,7 @@ long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)  	/* For a killed process disallow further enqueueing of jobs. 
*/  last_user = cmpxchg(&entity->last_user, current->group_leader, NULL); -	if ((!last_user || last_user == current->group_leader) && +	if (last_user == current->group_leader &&  	    (current->flags & PF_EXITING) && (current->exit_code == SIGKILL))  		drm_sched_entity_kill(entity); @@ -552,10 +553,11 @@ void drm_sched_entity_select_rq(struct drm_sched_entity *entity)  		drm_sched_rq_remove_entity(entity->rq, entity);  		entity->rq = rq;  	} -	spin_unlock(&entity->lock);  	if (entity->num_sched_list == 1)  		entity->sched_list = NULL; + +	spin_unlock(&entity->lock);  }  /** diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c index 3e0ad7e5b5df..6d3db5e55d98 100644 --- a/drivers/gpu/drm/xe/xe_gt.c +++ b/drivers/gpu/drm/xe/xe_gt.c @@ -813,12 +813,16 @@ static int gt_reset(struct xe_gt *gt)  	unsigned int fw_ref;  	int err; -	if (xe_device_wedged(gt_to_xe(gt))) -		return -ECANCELED; +	if (xe_device_wedged(gt_to_xe(gt))) { +		err = -ECANCELED; +		goto err_pm_put; +	}  	/* We only support GT resets with GuC submission */ -	if (!xe_device_uc_enabled(gt_to_xe(gt))) -		return -ENODEV; +	if (!xe_device_uc_enabled(gt_to_xe(gt))) { +		err = -ENODEV; +		goto err_pm_put; +	}  	xe_gt_info(gt, "reset started\n");  @@ -826,8 +830,6 @@ static int gt_reset(struct xe_gt *gt)  	if (!err)  		xe_gt_warn(gt, "reset block failed to get lifted");  -	xe_pm_runtime_get(gt_to_xe(gt)); -  	if (xe_fault_inject_gt_reset()) {  		err = -ECANCELED;  		goto err_fail; @@ -874,6 +876,7 @@ err_fail:  	xe_gt_err(gt, "reset failed (%pe)\n", ERR_PTR(err));  	xe_device_declare_wedged(gt_to_xe(gt)); +err_pm_put:  	xe_pm_runtime_put(gt_to_xe(gt));  	return err; @@ -895,7 +898,9 @@ void xe_gt_reset_async(struct xe_gt *gt)  		return;  	xe_gt_info(gt, "reset queued\n"); -	queue_work(gt->ordered_wq, &gt->reset.worker); +	xe_pm_runtime_get_noresume(gt_to_xe(gt)); +	if (!queue_work(gt->ordered_wq, &gt->reset.worker)) +		xe_pm_runtime_put(gt_to_xe(gt));  }  void xe_gt_suspend_prepare(struct xe_gt *gt) diff --git a/drivers/gpu/drm/xe/xe_validation.h b/drivers/gpu/drm/xe/xe_validation.h index fec331d791e7..b2d09c596714 100644 --- a/drivers/gpu/drm/xe/xe_validation.h +++ b/drivers/gpu/drm/xe/xe_validation.h @@ -166,10 +166,10 @@ xe_validation_device_init(struct xe_validation_device *val)   */  DEFINE_CLASS(xe_validation, struct xe_validation_ctx *,  	     if (_T) xe_validation_ctx_fini(_T);, -	     ({_ret = xe_validation_ctx_init(_ctx, _val, _exec, _flags); -	       _ret ? NULL : _ctx; }), +	     ({*_ret = xe_validation_ctx_init(_ctx, _val, _exec, _flags); +	       *_ret ? NULL : _ctx; }),  	     struct xe_validation_ctx *_ctx, struct xe_validation_device *_val, -	     struct drm_exec *_exec, const struct xe_val_flags _flags, int _ret); +	     struct drm_exec *_exec, const struct xe_val_flags _flags, int *_ret);  static inline void *class_xe_validation_lock_ptr(class_xe_validation_t *_T)  {return *_T; }  #define class_xe_validation_is_conditional true @@ -186,7 +186,7 @@ static inline void *class_xe_validation_lock_ptr(class_xe_validation_t *_T)   * exhaustive eviction.   
*/  #define xe_validation_guard(_ctx, _val, _exec, _flags, _ret)		\ -	scoped_guard(xe_validation, _ctx, _val, _exec, _flags, _ret) \ +	scoped_guard(xe_validation, _ctx, _val, _exec, _flags, &_ret) \  	drm_exec_until_all_locked(_exec)  #endif diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c b/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c index ecd9a0bd5e18..49b57bb5fac1 100644 --- a/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c +++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c @@ -290,9 +290,15 @@ static int ch_ipsec_xfrm_add_state(struct net_device *dev,  		return -EINVAL;  	} +	if (unlikely(!try_module_get(THIS_MODULE))) { +		NL_SET_ERR_MSG_MOD(extack, "Failed to acquire module reference"); +		return -ENODEV; +	} +  	sa_entry = kzalloc(sizeof(*sa_entry), GFP_KERNEL);  	if (!sa_entry) {  		res = -ENOMEM; +		module_put(THIS_MODULE);  		goto out;  	} @@ -301,7 +307,6 @@ static int ch_ipsec_xfrm_add_state(struct net_device *dev,  		sa_entry->esn = 1;  	ch_ipsec_setkey(x, sa_entry);  	x->xso.offload_handle = (unsigned long)sa_entry; -	try_module_get(THIS_MODULE);  out:  	return res;  } diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h index ea09a09c451b..2097e4c2b3d7 100644 --- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h @@ -17,6 +17,7 @@  #define HBG_PCU_CACHE_LINE_SIZE		32  #define HBG_TX_TIMEOUT_BUF_LEN		1024  #define HBG_RX_DESCR			0x01 +#define HBG_NO_PHY			0xFF  #define HBG_PACKET_HEAD_SIZE	((HBG_RX_SKIP1 + HBG_RX_SKIP2 + \  				  HBG_RX_DESCR) * HBG_PCU_CACHE_LINE_SIZE) diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c index 83cf75bf7a17..e11495b7ee98 100644 --- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c @@ -136,12 +136,11 @@ static pci_ers_result_t hbg_pci_err_detected(struct pci_dev *pdev,  {  	struct net_device *netdev = pci_get_drvdata(pdev); -	netif_device_detach(netdev); - -	if (state == pci_channel_io_perm_failure) +	if (state == pci_channel_io_perm_failure) { +		netif_device_detach(netdev);  		return PCI_ERS_RESULT_DISCONNECT; +	} -	pci_disable_device(pdev);  	return PCI_ERS_RESULT_NEED_RESET;  } @@ -150,6 +149,9 @@ static pci_ers_result_t hbg_pci_err_slot_reset(struct pci_dev *pdev)  	struct net_device *netdev = pci_get_drvdata(pdev);  	struct hbg_priv *priv = netdev_priv(netdev); +	netif_device_detach(netdev); +	pci_disable_device(pdev); +  	if (pci_enable_device(pdev)) {  		dev_err(&pdev->dev,  			"failed to re-enable PCI device after reset\n"); diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c index d0aa0661ecd4..d6e8ce8e351a 100644 --- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c @@ -244,6 +244,9 @@ void hbg_hw_adjust_link(struct hbg_priv *priv, u32 speed, u32 duplex)  	hbg_hw_mac_enable(priv, HBG_STATUS_ENABLE); +	if (priv->mac.phy_addr == HBG_NO_PHY) +		return; +  	/* wait MAC link up */  	ret = readl_poll_timeout(priv->io_base + HBG_REG_AN_NEG_STATE_ADDR,  				 link_status, diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.c index 8af0bc4cca21..ae4cb35186d8 100644 --- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.c +++ 
b/drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.c @@ -32,6 +32,7 @@ static void hbg_irq_handle_rx_buf_val(struct hbg_priv *priv,  				      const struct hbg_irq_info *irq_info)  {  	priv->stats.rx_fifo_less_empty_thrsld_cnt++; +	hbg_hw_irq_enable(priv, irq_info->mask, true);  }  #define HBG_IRQ_I(name, handle) \ diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c index 37791de47f6f..b6f0a2780ea8 100644 --- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c @@ -20,7 +20,6 @@  #define HBG_MDIO_OP_INTERVAL_US		(5 * 1000)  #define HBG_NP_LINK_FAIL_RETRY_TIMES	5 -#define HBG_NO_PHY			0xFF  static void hbg_mdio_set_command(struct hbg_mac *mac, u32 cmd)  { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 9d34d28ff168..782bb48c9f3d 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -9429,8 +9429,7 @@ static int hclge_mii_ioctl(struct hclge_dev *hdev, struct ifreq *ifr, int cmd)  		/* this command reads phy id and register at the same time */  		fallthrough;  	case SIOCGMIIREG: -		data->val_out = hclge_read_phy_reg(hdev, data->reg_num); -		return 0; +		return hclge_read_phy_reg(hdev, data->reg_num, &data->val_out);  	case SIOCSMIIREG:  		return hclge_write_phy_reg(hdev, data->reg_num, data->val_in); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c index 96553109f44c..cf881108fa57 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c @@ -274,7 +274,7 @@ void hclge_mac_stop_phy(struct hclge_dev *hdev)  	phy_stop(phydev);  } -u16 hclge_read_phy_reg(struct hclge_dev *hdev, u16 reg_addr) +int hclge_read_phy_reg(struct hclge_dev *hdev, u16 reg_addr, u16 *val)  {  	struct hclge_phy_reg_cmd *req;  	struct hclge_desc desc; @@ -286,11 +286,14 @@ u16 hclge_read_phy_reg(struct hclge_dev *hdev, u16 reg_addr)  	req->reg_addr = cpu_to_le16(reg_addr);  	ret = hclge_cmd_send(&hdev->hw, &desc, 1); -	if (ret) +	if (ret) {  		dev_err(&hdev->pdev->dev,  			"failed to read phy reg, ret = %d.\n", ret); +		return ret; +	} -	return le16_to_cpu(req->reg_val); +	*val = le16_to_cpu(req->reg_val); +	return 0;  }  int hclge_write_phy_reg(struct hclge_dev *hdev, u16 reg_addr, u16 val) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h index 4200d0b6d931..21d434c82475 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h @@ -13,7 +13,7 @@ int hclge_mac_connect_phy(struct hnae3_handle *handle);  void hclge_mac_disconnect_phy(struct hnae3_handle *handle);  void hclge_mac_start_phy(struct hclge_dev *hdev);  void hclge_mac_stop_phy(struct hclge_dev *hdev); -u16 hclge_read_phy_reg(struct hclge_dev *hdev, u16 reg_addr); +int hclge_read_phy_reg(struct hclge_dev *hdev, u16 reg_addr, u16 *val);  int hclge_write_phy_reg(struct hclge_dev *hdev, u16 reg_addr, u16 val);  #endif diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index 2250426ec91b..2532b6f82e97 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -4382,6 +4382,15 @@ int 
ice_get_phy_lane_number(struct ice_hw *hw)  	unsigned int lane;  	int err; +	/* E82X does not have sequential IDs, lane number is PF ID. +	 * For E825 device, the exception is the variant with external +	 * PHY (0x579F), in which there is also 1:1 pf_id -> lane_number +	 * mapping. +	 */ +	if (hw->mac_type == ICE_MAC_GENERIC || +	    hw->device_id == ICE_DEV_ID_E825C_SGMII) +		return hw->pf_id; +  	options = kcalloc(ICE_AQC_PORT_OPT_MAX, sizeof(*options), GFP_KERNEL);  	if (!options)  		return -ENOMEM; @@ -6497,6 +6506,28 @@ u32 ice_get_link_speed(u16 index)  }  /** + * ice_get_dest_cgu - get destination CGU dev for given HW + * @hw: pointer to the HW struct + * + * Get CGU client id for CGU register read/write operations. + * + * Return: CGU device id to use in SBQ transactions. + */ +static enum ice_sbq_dev_id ice_get_dest_cgu(struct ice_hw *hw) +{ +	/* On dual complex E825 only complex 0 has functional CGU powering all +	 * the PHYs. +	 * SBQ destination device cgu points to CGU on a current complex and to +	 * access primary CGU from the secondary complex, the driver should use +	 * cgu_peer as a destination device. +	 */ +	if (hw->mac_type == ICE_MAC_GENERIC_3K_E825 && ice_is_dual(hw) && +	    !ice_is_primary(hw)) +		return ice_sbq_dev_cgu_peer; +	return ice_sbq_dev_cgu; +} + +/**   * ice_read_cgu_reg - Read a CGU register   * @hw: Pointer to the HW struct   * @addr: Register address to read @@ -6510,8 +6541,8 @@ u32 ice_get_link_speed(u16 index)  int ice_read_cgu_reg(struct ice_hw *hw, u32 addr, u32 *val)  {  	struct ice_sbq_msg_input cgu_msg = { +		.dest_dev = ice_get_dest_cgu(hw),  		.opcode = ice_sbq_msg_rd, -		.dest_dev = ice_sbq_dev_cgu,  		.msg_addr_low = addr  	};  	int err; @@ -6542,8 +6573,8 @@ int ice_read_cgu_reg(struct ice_hw *hw, u32 addr, u32 *val)  int ice_write_cgu_reg(struct ice_hw *hw, u32 addr, u32 val)  {  	struct ice_sbq_msg_input cgu_msg = { +		.dest_dev = ice_get_dest_cgu(hw),  		.opcode = ice_sbq_msg_wr, -		.dest_dev = ice_sbq_dev_cgu,  		.msg_addr_low = addr,  		.data = val  	}; diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c index 363ae79a3620..013c93b6605e 100644 --- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c +++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c @@ -1479,7 +1479,7 @@ static void ice_init_prof_masks(struct ice_hw *hw, enum ice_block blk)  	per_pf = ICE_PROF_MASK_COUNT / hw->dev_caps.num_funcs;  	hw->blk[blk].masks.count = per_pf; -	hw->blk[blk].masks.first = hw->pf_id * per_pf; +	hw->blk[blk].masks.first = hw->logical_pf_id * per_pf;  	memset(hw->blk[blk].masks.masks, 0, sizeof(hw->blk[blk].masks.masks)); diff --git a/drivers/net/ethernet/intel/ice/ice_sbq_cmd.h b/drivers/net/ethernet/intel/ice/ice_sbq_cmd.h index 183dd5457d6a..21bb861febbf 100644 --- a/drivers/net/ethernet/intel/ice/ice_sbq_cmd.h +++ b/drivers/net/ethernet/intel/ice/ice_sbq_cmd.h @@ -50,6 +50,7 @@ enum ice_sbq_dev_id {  	ice_sbq_dev_phy_0	= 0x02,  	ice_sbq_dev_cgu		= 0x06,  	ice_sbq_dev_phy_0_peer	= 0x0D, +	ice_sbq_dev_cgu_peer	= 0x0F,  };  enum ice_sbq_msg_opcode { diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index f8a208c84f15..10e2445e0ded 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -2281,7 +2281,7 @@ static int igb_get_sset_count(struct net_device *netdev, int sset)  	case ETH_SS_PRIV_FLAGS:  		return IGB_PRIV_FLAGS_STR_LEN;  	default: -		return -ENOTSUPP; +		return -EOPNOTSUPP;  
	}  } diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c index f3e7218ba6f3..bb783042d1af 100644 --- a/drivers/net/ethernet/intel/igc/igc_ethtool.c +++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c @@ -810,7 +810,7 @@ static int igc_ethtool_get_sset_count(struct net_device *netdev, int sset)  	case ETH_SS_PRIV_FLAGS:  		return IGC_PRIV_FLAGS_STR_LEN;  	default: -		return -ENOTSUPP; +		return -EOPNOTSUPP;  	}  } @@ -2094,6 +2094,9 @@ static void igc_ethtool_diag_test(struct net_device *netdev,  		netdev_info(adapter->netdev, "Offline testing starting");  		set_bit(__IGC_TESTING, &adapter->state); +		/* power up PHY for link test */ +		igc_power_up_phy_copper(&adapter->hw); +  		/* Link test performed before hardware reset so autoneg doesn't  		 * interfere with test result  		 */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index ca1ccc630001..3190ce7e44c7 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -11507,10 +11507,10 @@ static int ixgbe_recovery_probe(struct ixgbe_adapter *adapter)  shutdown_aci:  	mutex_destroy(&adapter->hw.aci.lock);  	ixgbe_release_hw_control(adapter); -	devlink_free(adapter->devlink);  clean_up_probe:  	disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);  	free_netdev(netdev); +	devlink_free(adapter->devlink);  	pci_release_mem_regions(pdev);  	if (disable_dev)  		pci_disable_device(pdev); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c index 114dd88fc71c..6885d2343c48 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c @@ -641,7 +641,7 @@ static int ixgbe_ptp_feature_enable(struct ptp_clock_info *ptp,  	 * disabled  	 */  	if (rq->type != PTP_CLK_REQ_PPS || !adapter->ptp_setup_sdp) -		return -ENOTSUPP; +		return -EOPNOTSUPP;  	if (on)  		adapter->flags2 |= IXGBE_FLAG2_PTP_PPS_ENABLED; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c index d7a11ff9bbdb..da2d1eb52c13 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c @@ -320,7 +320,6 @@ err_dma_unmap:  err_free:  	kfree(buf);  err_out: -	priv_rx->rq_stats->tls_resync_req_skip++;  	return err;  } @@ -339,14 +338,19 @@ static void resync_handle_work(struct work_struct *work)  	if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags))) {  		mlx5e_ktls_priv_rx_put(priv_rx); +		priv_rx->rq_stats->tls_resync_req_skip++; +		tls_offload_rx_resync_async_request_cancel(&resync->core);  		return;  	}  	c = resync->priv->channels.c[priv_rx->rxq];  	sq = &c->async_icosq; -	if (resync_post_get_progress_params(sq, priv_rx)) +	if (resync_post_get_progress_params(sq, priv_rx)) { +		priv_rx->rq_stats->tls_resync_req_skip++; +		tls_offload_rx_resync_async_request_cancel(&resync->core);  		mlx5e_ktls_priv_rx_put(priv_rx); +	}  }  static void resync_init(struct mlx5e_ktls_rx_resync_ctx *resync, @@ -425,14 +429,21 @@ void mlx5e_ktls_handle_get_psv_completion(struct mlx5e_icosq_wqe_info *wi,  {  	struct mlx5e_ktls_rx_resync_buf *buf = wi->tls_get_params.buf;  	struct mlx5e_ktls_offload_context_rx *priv_rx; +	struct tls_offload_resync_async *async_resync; +	struct tls_offload_context_rx *rx_ctx;  	u8 tracker_state, auth_state, *ctx;  	struct device *dev;  
	u32 hw_seq;  	priv_rx = buf->priv_rx;  	dev = mlx5_core_dma_dev(sq->channel->mdev); -	if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags))) +	rx_ctx = tls_offload_ctx_rx(tls_get_ctx(priv_rx->sk)); +	async_resync = rx_ctx->resync_async; +	if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags))) { +		priv_rx->rq_stats->tls_resync_req_skip++; +		tls_offload_rx_resync_async_request_cancel(async_resync);  		goto out; +	}  	dma_sync_single_for_cpu(dev, buf->dma_addr, PROGRESS_PARAMS_PADDED_SIZE,  				DMA_FROM_DEVICE); @@ -443,11 +454,13 @@ void mlx5e_ktls_handle_get_psv_completion(struct mlx5e_icosq_wqe_info *wi,  	if (tracker_state != MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_TRACKING ||  	    auth_state != MLX5E_TLS_PROGRESS_PARAMS_AUTH_STATE_NO_OFFLOAD) {  		priv_rx->rq_stats->tls_resync_req_skip++; +		tls_offload_rx_resync_async_request_cancel(async_resync);  		goto out;  	}  	hw_seq = MLX5_GET(tls_progress_params, ctx, hw_resync_tcp_sn); -	tls_offload_rx_resync_async_request_end(priv_rx->sk, cpu_to_be32(hw_seq)); +	tls_offload_rx_resync_async_request_end(async_resync, +						cpu_to_be32(hw_seq));  	priv_rx->rq_stats->tls_resync_req_end++;  out:  	mlx5e_ktls_priv_rx_put(priv_rx); @@ -472,8 +485,10 @@ static bool resync_queue_get_psv(struct sock *sk)  	resync = &priv_rx->resync;  	mlx5e_ktls_priv_rx_get(priv_rx); -	if (unlikely(!queue_work(resync->priv->tls->rx_wq, &resync->work))) +	if (unlikely(!queue_work(resync->priv->tls->rx_wq, &resync->work))) {  		mlx5e_ktls_priv_rx_put(priv_rx); +		return false; +	}  	return true;  } @@ -482,6 +497,7 @@ static bool resync_queue_get_psv(struct sock *sk)  static void resync_update_sn(struct mlx5e_rq *rq, struct sk_buff *skb)  {  	struct ethhdr *eth = (struct ethhdr *)(skb->data); +	struct tls_offload_resync_async *resync_async;  	struct net_device *netdev = rq->netdev;  	struct net *net = dev_net(netdev);  	struct sock *sk = NULL; @@ -527,7 +543,8 @@ static void resync_update_sn(struct mlx5e_rq *rq, struct sk_buff *skb)  	seq = th->seq;  	datalen = skb->len - depth; -	tls_offload_rx_resync_async_request_start(sk, seq, datalen); +	resync_async = tls_offload_ctx_rx(tls_get_ctx(sk))->resync_async; +	tls_offload_rx_resync_async_request_start(resync_async, seq, datalen);  	rq->stats->tls_resync_req_start++;  unref: @@ -556,6 +573,18 @@ void mlx5e_ktls_rx_resync(struct net_device *netdev, struct sock *sk,  	resync_handle_seq_match(priv_rx, c);  } +void +mlx5e_ktls_rx_resync_async_request_cancel(struct mlx5e_icosq_wqe_info *wi) +{ +	struct mlx5e_ktls_offload_context_rx *priv_rx; +	struct mlx5e_ktls_rx_resync_buf *buf; + +	buf = wi->tls_get_params.buf; +	priv_rx = buf->priv_rx; +	priv_rx->rq_stats->tls_resync_req_skip++; +	tls_offload_rx_resync_async_request_cancel(&priv_rx->resync.core); +} +  /* End of resync section */  void mlx5e_ktls_handle_rx_skb(struct mlx5e_rq *rq, struct sk_buff *skb, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h index f87b65c560ea..cb08799769ee 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h @@ -29,6 +29,10 @@ void mlx5e_ktls_handle_get_psv_completion(struct mlx5e_icosq_wqe_info *wi,  void mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq,  					   struct mlx5e_tx_wqe_info *wi,  					   u32 *dma_fifo_cc); + +void +mlx5e_ktls_rx_resync_async_request_cancel(struct mlx5e_icosq_wqe_info *wi); +  static inline bool 
 mlx5e_ktls_tx_try_handle_resync_dump_comp(struct mlx5e_txqsq *sq,  					  struct mlx5e_tx_wqe_info *wi, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 1c79adc51a04..26621a2972ec 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -1036,6 +1036,10 @@ int mlx5e_poll_ico_cq(struct mlx5e_cq *cq)  				netdev_WARN_ONCE(cq->netdev,  						 "Bad OP in ICOSQ CQE: 0x%x\n",  						 get_cqe_opcode(cqe)); +#ifdef CONFIG_MLX5_EN_TLS +				if (wi->wqe_type == MLX5E_ICOSQ_WQE_GET_PSV_TLS) +					mlx5e_ktls_rx_resync_async_request_cancel(wi); +#endif  				mlx5e_dump_error_cqe(&sq->cq, sq->sqn,  						     (struct mlx5_err_cqe *)cqe);  				mlx5_wq_cyc_wqe_dump(&sq->wq, ci, wi->num_wqebbs); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c index 76382626ad41..929adeb50a98 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c @@ -66,7 +66,6 @@ static void esw_destroy_legacy_fdb_table(struct mlx5_eswitch *esw)  	esw->fdb_table.legacy.addr_grp = NULL;  	esw->fdb_table.legacy.allmulti_grp = NULL;  	esw->fdb_table.legacy.promisc_grp = NULL; -	atomic64_set(&esw->user_count, 0);  }  static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 34749814f19b..44a142a041b2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -1978,7 +1978,6 @@ static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)  	/* Holds true only as long as DMFS is the default */  	mlx5_flow_namespace_set_mode(esw->fdb_table.offloads.ns,  				     MLX5_FLOW_STEERING_MODE_DMFS); -	atomic64_set(&esw->user_count, 0);  }  static int esw_get_nr_ft_offloads_steering_src_ports(struct mlx5_eswitch *esw) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index 132626a3f9f7..9ef72f294117 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -2557,14 +2557,16 @@ nfp_net_alloc(struct pci_dev *pdev, const struct nfp_dev_info *dev_info,  	err = nfp_net_tlv_caps_parse(&nn->pdev->dev, nn->dp.ctrl_bar,  				     &nn->tlv_caps);  	if (err) -		goto err_free_nn; +		goto err_free_xsk_pools;  	err = nfp_ccm_mbox_alloc(nn);  	if (err) -		goto err_free_nn; +		goto err_free_xsk_pools;  	return nn; +err_free_xsk_pools: +	kfree(nn->dp.xsk_pools);  err_free_nn:  	if (nn->dp.netdev)  		free_netdev(nn->dp.netdev); diff --git a/drivers/net/ethernet/sfc/mae.c b/drivers/net/ethernet/sfc/mae.c index 6fd0c1e9a7d5..7cfd9000f79d 100644 --- a/drivers/net/ethernet/sfc/mae.c +++ b/drivers/net/ethernet/sfc/mae.c @@ -1090,6 +1090,9 @@ void efx_mae_remove_mport(void *desc, void *arg)  	kfree(mport);  } +/* + * Takes ownership of @desc, even if it returns an error + */  static int efx_mae_process_mport(struct efx_nic *efx,  				 struct mae_mport_desc *desc)  { @@ -1100,6 +1103,7 @@ static int efx_mae_process_mport(struct efx_nic *efx,  	if (!IS_ERR_OR_NULL(mport)) {  		netif_err(efx, drv, efx->net_dev,  			  "mport with id %u does exist!!!\n", desc->mport_id); +		kfree(desc);  		return -EEXIST;  	} diff --git 
a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 650d75b73e0b..7b90ecd3a55e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -4089,18 +4089,11 @@ static int stmmac_release(struct net_device *dev)  static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,  			       struct stmmac_tx_queue *tx_q)  { -	u16 tag = 0x0, inner_tag = 0x0; -	u32 inner_type = 0x0;  	struct dma_desc *p; +	u16 tag = 0x0; -	if (!priv->dma_cap.vlins) +	if (!priv->dma_cap.vlins || !skb_vlan_tag_present(skb))  		return false; -	if (!skb_vlan_tag_present(skb)) -		return false; -	if (skb->vlan_proto == htons(ETH_P_8021AD)) { -		inner_tag = skb_vlan_tag_get(skb); -		inner_type = STMMAC_VLAN_INSERT; -	}  	tag = skb_vlan_tag_get(skb); @@ -4109,7 +4102,7 @@ static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,  	else  		p = &tx_q->dma_tx[tx_q->cur_tx]; -	if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type)) +	if (stmmac_set_desc_vlan_tag(priv, p, tag, 0x0, 0x0))  		return false;  	stmmac_set_tx_owner(priv, p); @@ -4507,6 +4500,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)  	bool has_vlan, set_ic;  	int entry, first_tx;  	dma_addr_t des; +	u32 sdu_len;  	tx_q = &priv->dma_conf.tx_queue[queue];  	txq_stats = &priv->xstats.txq_stats[queue]; @@ -4524,10 +4518,15 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)  	}  	if (priv->est && priv->est->enable && -	    priv->est->max_sdu[queue] && -	    skb->len > priv->est->max_sdu[queue]){ -		priv->xstats.max_sdu_txq_drop[queue]++; -		goto max_sdu_err; +	    priv->est->max_sdu[queue]) { +		sdu_len = skb->len; +		/* Add VLAN tag length if VLAN tag insertion offload is requested */ +		if (priv->dma_cap.vlins && skb_vlan_tag_present(skb)) +			sdu_len += VLAN_HLEN; +		if (sdu_len > priv->est->max_sdu[queue]) { +			priv->xstats.max_sdu_txq_drop[queue]++; +			goto max_sdu_err; +		}  	}  	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) { @@ -7573,11 +7572,8 @@ int stmmac_dvr_probe(struct device *device,  		ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;  		ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;  	} -	if (priv->dma_cap.vlins) { +	if (priv->dma_cap.vlins)  		ndev->features |= NETIF_F_HW_VLAN_CTAG_TX; -		if (priv->dma_cap.dvlan) -			ndev->features |= NETIF_F_HW_VLAN_STAG_TX; -	}  #endif  	priv->msg_enable = netif_msg_init(debug, default_msg_level); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c index 97e89a604abd..3b4d4696afe9 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c @@ -981,7 +981,7 @@ static int tc_taprio_configure(struct stmmac_priv *priv,  	if (qopt->cmd == TAPRIO_CMD_DESTROY)  		goto disable; -	if (qopt->num_entries >= dep) +	if (qopt->num_entries > dep)  		return -EINVAL;  	if (!qopt->cycle_time)  		return -ERANGE; @@ -1012,7 +1012,7 @@ static int tc_taprio_configure(struct stmmac_priv *priv,  		s64 delta_ns = qopt->entries[i].interval;  		u32 gates = qopt->entries[i].gate_mask; -		if (delta_ns > GENMASK(wid, 0)) +		if (delta_ns > GENMASK(wid - 1, 0))  			return -ERANGE;  		if (gates > GENMASK(31 - wid, 0))  			return -ERANGE; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.c index 0b6f6228ae35..ff02a79c00d4 100644 --- 
a/drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.c @@ -212,7 +212,7 @@ static void vlan_enable(struct mac_device_info *hw, u32 type)  	value = readl(ioaddr + VLAN_INCL);  	value |= VLAN_VLTI; -	value |= VLAN_CSVL; /* Only use SVLAN */ +	value &= ~VLAN_CSVL; /* Only use CVLAN */  	value &= ~VLAN_VLC;  	value |= (type << VLAN_VLC_SHIFT) & VLAN_VLC;  	writel(value, ioaddr + VLAN_INCL); diff --git a/drivers/net/mctp/mctp-usb.c b/drivers/net/mctp/mctp-usb.c index 36ccc53b1797..ef860cfc629f 100644 --- a/drivers/net/mctp/mctp-usb.c +++ b/drivers/net/mctp/mctp-usb.c @@ -96,11 +96,13 @@ static netdev_tx_t mctp_usb_start_xmit(struct sk_buff *skb,  			  skb->data, skb->len,  			  mctp_usb_out_complete, skb); +	/* Stops TX queue first to prevent race condition with URB complete */ +	netif_stop_queue(dev);  	rc = usb_submit_urb(urb, GFP_ATOMIC); -	if (rc) +	if (rc) { +		netif_wake_queue(dev);  		goto err_drop; -	else -		netif_stop_queue(dev); +	}  	return NETDEV_TX_OK; diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c index 194570443493..5d8d0214786c 100644 --- a/drivers/net/netconsole.c +++ b/drivers/net/netconsole.c @@ -886,8 +886,11 @@ static ssize_t userdatum_value_show(struct config_item *item, char *buf)  static void update_userdata(struct netconsole_target *nt)  { -	int complete_idx = 0, child_count = 0;  	struct list_head *entry; +	int child_count = 0; +	unsigned long flags; + +	spin_lock_irqsave(&target_list_lock, flags);  	/* Clear the current string in case the last userdatum was deleted */  	nt->userdata_length = 0; @@ -897,8 +900,11 @@ static void update_userdata(struct netconsole_target *nt)  		struct userdatum *udm_item;  		struct config_item *item; -		if (WARN_ON_ONCE(child_count >= MAX_EXTRADATA_ITEMS)) -			break; +		if (child_count >= MAX_EXTRADATA_ITEMS) { +			spin_unlock_irqrestore(&target_list_lock, flags); +			WARN_ON_ONCE(1); +			return; +		}  		child_count++;  		item = container_of(entry, struct config_item, ci_entry); @@ -912,12 +918,11 @@ static void update_userdata(struct netconsole_target *nt)  		 * one entry length (1/MAX_EXTRADATA_ITEMS long), entry count is  		 * checked to not exceed MAX items with child_count above  		 */ -		complete_idx += scnprintf(&nt->extradata_complete[complete_idx], -					  MAX_EXTRADATA_ENTRY_LEN, " %s=%s\n", -					  item->ci_name, udm_item->value); +		nt->userdata_length += scnprintf(&nt->extradata_complete[nt->userdata_length], +						 MAX_EXTRADATA_ENTRY_LEN, " %s=%s\n", +						 item->ci_name, udm_item->value);  	} -	nt->userdata_length = strnlen(nt->extradata_complete, -				      sizeof(nt->extradata_complete)); +	spin_unlock_irqrestore(&target_list_lock, flags);  }  static ssize_t userdatum_value_store(struct config_item *item, const char *buf, diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c index deeefb962566..36a0c1b7f59c 100644 --- a/drivers/net/phy/dp83867.c +++ b/drivers/net/phy/dp83867.c @@ -738,6 +738,12 @@ static int dp83867_config_init(struct phy_device *phydev)  			return ret;  	} +	/* Although the DP83867 reports EEE capability through the +	 * MDIO_PCS_EEE_ABLE and MDIO_AN_EEE_ADV registers, the feature +	 * is not actually implemented in hardware. 
+	 */ +	phy_disable_eee(phydev); +  	if (phy_interface_is_rgmii(phydev) ||  	    phydev->interface == PHY_INTERFACE_MODE_SGMII) {  		val = phy_read(phydev, MII_DP83867_PHYCTRL); diff --git a/drivers/net/phy/dp83869.c b/drivers/net/phy/dp83869.c index a2cd1cc35cde..1f381d7b13ff 100644 --- a/drivers/net/phy/dp83869.c +++ b/drivers/net/phy/dp83869.c @@ -84,7 +84,7 @@  #define DP83869_CLK_DELAY_DEF			7  /* STRAP_STS1 bits */ -#define DP83869_STRAP_OP_MODE_MASK		GENMASK(2, 0) +#define DP83869_STRAP_OP_MODE_MASK		GENMASK(11, 9)  #define DP83869_STRAP_STS1_RESERVED		BIT(11)  #define DP83869_STRAP_MIRROR_ENABLED           BIT(12) @@ -528,7 +528,7 @@ static int dp83869_set_strapped_mode(struct phy_device *phydev)  	if (val < 0)  		return val; -	dp83869->mode = val & DP83869_STRAP_OP_MODE_MASK; +	dp83869->mode = FIELD_GET(DP83869_STRAP_OP_MODE_MASK, val);  	return 0;  } diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c index 85bd5d845409..232bbd79a4de 100644 --- a/drivers/net/usb/asix_devices.c +++ b/drivers/net/usb/asix_devices.c @@ -230,7 +230,9 @@ static int ax88172_bind(struct usbnet *dev, struct usb_interface *intf)  	int i;  	unsigned long gpio_bits = dev->driver_info->data; -	usbnet_get_endpoints(dev,intf); +	ret = usbnet_get_endpoints(dev, intf); +	if (ret) +		goto out;  	/* Toggle the GPIOs in a manufacturer/model specific way */  	for (i = 2; i >= 0; i--) { @@ -848,7 +850,9 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)  	dev->driver_priv = priv; -	usbnet_get_endpoints(dev, intf); +	ret = usbnet_get_endpoints(dev, intf); +	if (ret) +		return ret;  	/* Maybe the boot loader passed the MAC address via device tree */  	if (!eth_platform_get_mac_address(&dev->udev->dev, buf)) { @@ -1281,7 +1285,9 @@ static int ax88178_bind(struct usbnet *dev, struct usb_interface *intf)  	int ret;  	u8 buf[ETH_ALEN] = {0}; -	usbnet_get_endpoints(dev,intf); +	ret = usbnet_get_endpoints(dev, intf); +	if (ret) +		return ret;  	/* Get the MAC address */  	ret = asix_read_cmd(dev, AX_CMD_READ_NODE_ID, 0, 0, ETH_ALEN, buf, 0); diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index bf01f2728531..697cd9d866d3 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -1659,6 +1659,8 @@ void usbnet_disconnect (struct usb_interface *intf)  	net = dev->net;  	unregister_netdev (net); +	cancel_work_sync(&dev->kevent); +  	while ((urb = usb_get_from_anchor(&dev->deferred))) {  		dev_kfree_skb(urb->context);  		kfree(urb->sg); diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index a757cbcab87f..8e8a179aaa49 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -1379,9 +1379,14 @@ static struct sk_buff *virtnet_receive_xsk_merge(struct net_device *dev, struct  	ret = XDP_PASS;  	rcu_read_lock();  	prog = rcu_dereference(rq->xdp_prog); -	/* TODO: support multi buffer. */ -	if (prog && num_buf == 1) -		ret = virtnet_xdp_handler(prog, xdp, dev, xdp_xmit, stats); +	if (prog) { +		/* TODO: support multi buffer. 
*/ +		if (num_buf == 1) +			ret = virtnet_xdp_handler(prog, xdp, dev, xdp_xmit, +						  stats); +		else +			ret = XDP_ABORTED; +	}  	rcu_read_unlock();  	switch (ret) { diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c index e595b0979a56..b3b00d324075 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.c +++ b/drivers/net/wireless/ath/ath10k/wmi.c @@ -1937,6 +1937,7 @@ int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id)  	if (cmd_id == WMI_CMD_UNSUPPORTED) {  		ath10k_warn(ar, "wmi command %d is not supported by firmware\n",  			    cmd_id); +		dev_kfree_skb_any(skb);  		return ret;  	} diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c index 2810752260f2..812686173ac8 100644 --- a/drivers/net/wireless/ath/ath11k/core.c +++ b/drivers/net/wireless/ath/ath11k/core.c @@ -912,42 +912,84 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {  static const struct dmi_system_id ath11k_pm_quirk_table[] = {  	{  		.driver_data = (void *)ATH11K_PM_WOW, -		.matches = { +		.matches = { /* X13 G4 AMD #1 */ +			DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), +			DMI_MATCH(DMI_PRODUCT_NAME, "21J3"), +		}, +	}, +	{ +		.driver_data = (void *)ATH11K_PM_WOW, +		.matches = { /* X13 G4 AMD #2 */  			DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),  			DMI_MATCH(DMI_PRODUCT_NAME, "21J4"),  		},  	},  	{  		.driver_data = (void *)ATH11K_PM_WOW, -		.matches = { +		.matches = { /* T14 G4 AMD #1 */ +			DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), +			DMI_MATCH(DMI_PRODUCT_NAME, "21K3"), +		}, +	}, +	{ +		.driver_data = (void *)ATH11K_PM_WOW, +		.matches = { /* T14 G4 AMD #2 */  			DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),  			DMI_MATCH(DMI_PRODUCT_NAME, "21K4"),  		},  	},  	{  		.driver_data = (void *)ATH11K_PM_WOW, -		.matches = { +		.matches = { /* P14s G4 AMD #1 */ +			DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), +			DMI_MATCH(DMI_PRODUCT_NAME, "21K5"), +		}, +	}, +	{ +		.driver_data = (void *)ATH11K_PM_WOW, +		.matches = { /* P14s G4 AMD #2 */  			DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),  			DMI_MATCH(DMI_PRODUCT_NAME, "21K6"),  		},  	},  	{  		.driver_data = (void *)ATH11K_PM_WOW, -		.matches = { +		.matches = { /* T16 G2 AMD #1 */ +			DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), +			DMI_MATCH(DMI_PRODUCT_NAME, "21K7"), +		}, +	}, +	{ +		.driver_data = (void *)ATH11K_PM_WOW, +		.matches = { /* T16 G2 AMD #2 */  			DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),  			DMI_MATCH(DMI_PRODUCT_NAME, "21K8"),  		},  	},  	{  		.driver_data = (void *)ATH11K_PM_WOW, -		.matches = { +		.matches = { /* P16s G2 AMD #1 */ +			DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), +			DMI_MATCH(DMI_PRODUCT_NAME, "21K9"), +		}, +	}, +	{ +		.driver_data = (void *)ATH11K_PM_WOW, +		.matches = { /* P16s G2 AMD #2 */  			DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),  			DMI_MATCH(DMI_PRODUCT_NAME, "21KA"),  		},  	},  	{  		.driver_data = (void *)ATH11K_PM_WOW, -		.matches = { +		.matches = { /* T14s G4 AMD #1 */ +			DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), +			DMI_MATCH(DMI_PRODUCT_NAME, "21F8"), +		}, +	}, +	{ +		.driver_data = (void *)ATH11K_PM_WOW, +		.matches = { /* T14s G4 AMD #2 */  			DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),  			DMI_MATCH(DMI_PRODUCT_NAME, "21F9"),  		}, diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c index 106e2530b64e..0e41b5a91d66 100644 --- a/drivers/net/wireless/ath/ath11k/mac.c +++ b/drivers/net/wireless/ath/ath11k/mac.c @@ -1,7 +1,7 @@  // SPDX-License-Identifier: BSD-3-Clause-Clear  /*   * Copyright (c) 2018-2019 The 
Linux Foundation. All rights reserved. - * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.   */  #include <net/mac80211.h> @@ -4417,9 +4417,9 @@ static int ath11k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,  	}  	if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) -		flags |= WMI_KEY_PAIRWISE; +		flags = WMI_KEY_PAIRWISE;  	else -		flags |= WMI_KEY_GROUP; +		flags = WMI_KEY_GROUP;  	ath11k_dbg(ar->ab, ATH11K_DBG_MAC,  		   "%s for peer %pM on vdev %d flags 0x%X, type = %d, num_sta %d\n", @@ -4456,7 +4456,7 @@ static int ath11k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,  	is_ap_with_no_sta = (vif->type == NL80211_IFTYPE_AP &&  			     !arvif->num_stations); -	if ((flags & WMI_KEY_PAIRWISE) || cmd == SET_KEY || is_ap_with_no_sta) { +	if (flags == WMI_KEY_PAIRWISE || cmd == SET_KEY || is_ap_with_no_sta) {  		ret = ath11k_install_key(arvif, key, cmd, peer_addr, flags);  		if (ret) {  			ath11k_warn(ab, "ath11k_install_key failed (%d)\n", ret); @@ -4470,7 +4470,7 @@ static int ath11k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,  			goto exit;  		} -		if ((flags & WMI_KEY_GROUP) && cmd == SET_KEY && is_ap_with_no_sta) +		if (flags == WMI_KEY_GROUP && cmd == SET_KEY && is_ap_with_no_sta)  			arvif->reinstall_group_keys = true;  	} diff --git a/drivers/net/wireless/ath/ath12k/mac.c b/drivers/net/wireless/ath/ath12k/mac.c index 1d7b60aa5cb0..eacab798630a 100644 --- a/drivers/net/wireless/ath/ath12k/mac.c +++ b/drivers/net/wireless/ath/ath12k/mac.c @@ -8290,23 +8290,32 @@ static void ath12k_mgmt_over_wmi_tx_drop(struct ath12k *ar, struct sk_buff *skb)  		wake_up(&ar->txmgmt_empty_waitq);  } -int ath12k_mac_tx_mgmt_pending_free(int buf_id, void *skb, void *ctx) +static void ath12k_mac_tx_mgmt_free(struct ath12k *ar, int buf_id)  { -	struct sk_buff *msdu = skb; +	struct sk_buff *msdu;  	struct ieee80211_tx_info *info; -	struct ath12k *ar = ctx; -	struct ath12k_base *ab = ar->ab;  	spin_lock_bh(&ar->txmgmt_idr_lock); -	idr_remove(&ar->txmgmt_idr, buf_id); +	msdu = idr_remove(&ar->txmgmt_idr, buf_id);  	spin_unlock_bh(&ar->txmgmt_idr_lock); -	dma_unmap_single(ab->dev, ATH12K_SKB_CB(msdu)->paddr, msdu->len, + +	if (!msdu) +		return; + +	dma_unmap_single(ar->ab->dev, ATH12K_SKB_CB(msdu)->paddr, msdu->len,  			 DMA_TO_DEVICE);  	info = IEEE80211_SKB_CB(msdu);  	memset(&info->status, 0, sizeof(info->status)); -	ath12k_mgmt_over_wmi_tx_drop(ar, skb); +	ath12k_mgmt_over_wmi_tx_drop(ar, msdu); +} + +int ath12k_mac_tx_mgmt_pending_free(int buf_id, void *skb, void *ctx) +{ +	struct ath12k *ar = ctx; + +	ath12k_mac_tx_mgmt_free(ar, buf_id);  	return 0;  } @@ -8315,17 +8324,10 @@ static int ath12k_mac_vif_txmgmt_idr_remove(int buf_id, void *skb, void *ctx)  {  	struct ieee80211_vif *vif = ctx;  	struct ath12k_skb_cb *skb_cb = ATH12K_SKB_CB(skb); -	struct sk_buff *msdu = skb;  	struct ath12k *ar = skb_cb->ar; -	struct ath12k_base *ab = ar->ab; -	if (skb_cb->vif == vif) { -		spin_lock_bh(&ar->txmgmt_idr_lock); -		idr_remove(&ar->txmgmt_idr, buf_id); -		spin_unlock_bh(&ar->txmgmt_idr_lock); -		dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, -				 DMA_TO_DEVICE); -	} +	if (skb_cb->vif == vif) +		ath12k_mac_tx_mgmt_free(ar, buf_id);  	return 0;  } diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index 8afaffe31031..bb96b87b2a6e 100644 --- 
a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c @@ -5627,8 +5627,7 @@ brcmf_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,  			  *cookie, le16_to_cpu(action_frame->len),  			  le32_to_cpu(af_params->channel)); -		ack = brcmf_p2p_send_action_frame(cfg, cfg_to_ndev(cfg), -						  af_params); +		ack = brcmf_p2p_send_action_frame(vif->ifp, af_params);  		cfg80211_mgmt_tx_status(wdev, *cookie, buf, len, ack,  					GFP_KERNEL); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c index 0dc9d28cd77b..e1752a513c73 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c @@ -1529,6 +1529,7 @@ int brcmf_p2p_notify_action_tx_complete(struct brcmf_if *ifp,  /**   * brcmf_p2p_tx_action_frame() - send action frame over fil.   * + * @ifp: interface to transmit on.   * @p2p: p2p info struct for vif.   * @af_params: action frame data/info.   * @@ -1538,12 +1539,11 @@ int brcmf_p2p_notify_action_tx_complete(struct brcmf_if *ifp,   * The WLC_E_ACTION_FRAME_COMPLETE event will be received when the action   * frame is transmitted.   */ -static s32 brcmf_p2p_tx_action_frame(struct brcmf_p2p_info *p2p, +static s32 brcmf_p2p_tx_action_frame(struct brcmf_if *ifp, +				     struct brcmf_p2p_info *p2p,  				     struct brcmf_fil_af_params_le *af_params)  {  	struct brcmf_pub *drvr = p2p->cfg->pub; -	struct brcmf_cfg80211_vif *vif; -	struct brcmf_p2p_action_frame *p2p_af;  	s32 err = 0;  	brcmf_dbg(TRACE, "Enter\n"); @@ -1552,14 +1552,7 @@ static s32 brcmf_p2p_tx_action_frame(struct brcmf_p2p_info *p2p,  	clear_bit(BRCMF_P2P_STATUS_ACTION_TX_COMPLETED, &p2p->status);  	clear_bit(BRCMF_P2P_STATUS_ACTION_TX_NOACK, &p2p->status); -	/* check if it is a p2p_presence response */ -	p2p_af = (struct brcmf_p2p_action_frame *)af_params->action_frame.data; -	if (p2p_af->subtype == P2P_AF_PRESENCE_RSP) -		vif = p2p->bss_idx[P2PAPI_BSSCFG_CONNECTION].vif; -	else -		vif = p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif; - -	err = brcmf_fil_bsscfg_data_set(vif->ifp, "actframe", af_params, +	err = brcmf_fil_bsscfg_data_set(ifp, "actframe", af_params,  					sizeof(*af_params));  	if (err) {  		bphy_err(drvr, " sending action frame has failed\n"); @@ -1711,16 +1704,14 @@ static bool brcmf_p2p_check_dwell_overflow(u32 requested_dwell,  /**   * brcmf_p2p_send_action_frame() - send action frame .   * - * @cfg: driver private data for cfg80211 interface. - * @ndev: net device to transmit on. + * @ifp: interface to transmit on.   * @af_params: configuration data for action frame.   
*/ -bool brcmf_p2p_send_action_frame(struct brcmf_cfg80211_info *cfg, -				 struct net_device *ndev, +bool brcmf_p2p_send_action_frame(struct brcmf_if *ifp,  				 struct brcmf_fil_af_params_le *af_params)  { +	struct brcmf_cfg80211_info *cfg = ifp->drvr->config;  	struct brcmf_p2p_info *p2p = &cfg->p2p; -	struct brcmf_if *ifp = netdev_priv(ndev);  	struct brcmf_fil_action_frame_le *action_frame;  	struct brcmf_config_af_params config_af_params;  	struct afx_hdl *afx_hdl = &p2p->afx_hdl; @@ -1857,7 +1848,7 @@ bool brcmf_p2p_send_action_frame(struct brcmf_cfg80211_info *cfg,  		if (af_params->channel)  			msleep(P2P_AF_RETRY_DELAY_TIME); -		ack = !brcmf_p2p_tx_action_frame(p2p, af_params); +		ack = !brcmf_p2p_tx_action_frame(ifp, p2p, af_params);  		tx_retry++;  		dwell_overflow = brcmf_p2p_check_dwell_overflow(requested_dwell,  								dwell_jiffies); @@ -2217,7 +2208,6 @@ static struct wireless_dev *brcmf_p2p_create_p2pdev(struct brcmf_p2p_info *p2p,  	WARN_ON(p2p_ifp->bsscfgidx != bsscfgidx); -	init_completion(&p2p->send_af_done);  	INIT_WORK(&p2p->afx_hdl.afx_work, brcmf_p2p_afx_handler);  	init_completion(&p2p->afx_hdl.act_frm_scan);  	init_completion(&p2p->wait_next_af); @@ -2513,6 +2503,8 @@ s32 brcmf_p2p_attach(struct brcmf_cfg80211_info *cfg, bool p2pdev_forced)  	pri_ifp = brcmf_get_ifp(cfg->pub, 0);  	p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif = pri_ifp->vif; +	init_completion(&p2p->send_af_done); +  	if (p2pdev_forced) {  		err_ptr = brcmf_p2p_create_p2pdev(p2p, NULL, NULL);  		if (IS_ERR(err_ptr)) { diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h index d2ecee565bf2..d3137ebd7158 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h @@ -168,8 +168,7 @@ int brcmf_p2p_notify_action_frame_rx(struct brcmf_if *ifp,  int brcmf_p2p_notify_action_tx_complete(struct brcmf_if *ifp,  					const struct brcmf_event_msg *e,  					void *data); -bool brcmf_p2p_send_action_frame(struct brcmf_cfg80211_info *cfg, -				 struct net_device *ndev, +bool brcmf_p2p_send_action_frame(struct brcmf_if *ifp,  				 struct brcmf_fil_af_params_le *af_params);  bool brcmf_p2p_scan_finding_common_channel(struct brcmf_cfg80211_info *cfg,  					   struct brcmf_bss_info_le *bi); diff --git a/drivers/net/wireless/intel/iwlwifi/mld/link.c b/drivers/net/wireless/intel/iwlwifi/mld/link.c index 738f80fe0c50..60d814bf5779 100644 --- a/drivers/net/wireless/intel/iwlwifi/mld/link.c +++ b/drivers/net/wireless/intel/iwlwifi/mld/link.c @@ -501,6 +501,7 @@ void iwl_mld_remove_link(struct iwl_mld *mld,  	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(bss_conf->vif);  	struct iwl_mld_link *link = iwl_mld_link_from_mac80211(bss_conf);  	bool is_deflink = link == &mld_vif->deflink; +	u8 fw_id = link->fw_id;  	if (WARN_ON(!link || link->active))  		return; @@ -513,10 +514,10 @@ void iwl_mld_remove_link(struct iwl_mld *mld,  	RCU_INIT_POINTER(mld_vif->link[bss_conf->link_id], NULL); -	if (WARN_ON(link->fw_id >= mld->fw->ucode_capa.num_links)) +	if (WARN_ON(fw_id >= mld->fw->ucode_capa.num_links))  		return; -	RCU_INIT_POINTER(mld->fw_id_to_bss_conf[link->fw_id], NULL); +	RCU_INIT_POINTER(mld->fw_id_to_bss_conf[fw_id], NULL);  }  void iwl_mld_handle_missed_beacon_notif(struct iwl_mld *mld, diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index c916176bd9f0..72fb675a696f 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -1042,7 +1042,7 @@ static 
blk_status_t nvme_map_data(struct request *req)  	return nvme_pci_setup_data_prp(req, &iter);  } -static blk_status_t nvme_pci_setup_meta_sgls(struct request *req) +static blk_status_t nvme_pci_setup_meta_iter(struct request *req)  {  	struct nvme_queue *nvmeq = req->mq_hctx->driver_data;  	unsigned int entries = req->nr_integrity_segments; @@ -1072,8 +1072,12 @@ static blk_status_t nvme_pci_setup_meta_sgls(struct request *req)  	 * descriptor provides an explicit length, so we're relying on that  	 * mechanism to catch any misunderstandings between the application and  	 * device. +	 * +	 * P2P DMA also needs to use the blk_dma_iter method, so mptr setup +	 * leverages this routine when that happens.  	 */ -	if (entries == 1 && !(nvme_req(req)->flags & NVME_REQ_USERCMD)) { +	if (!nvme_ctrl_meta_sgl_supported(&dev->ctrl) || +	    (entries == 1 && !(nvme_req(req)->flags & NVME_REQ_USERCMD))) {  		iod->cmd.common.metadata = cpu_to_le64(iter.addr);  		iod->meta_total_len = iter.len;  		iod->meta_dma = iter.addr; @@ -1114,6 +1118,9 @@ static blk_status_t nvme_pci_setup_meta_mptr(struct request *req)  	struct nvme_queue *nvmeq = req->mq_hctx->driver_data;  	struct bio_vec bv = rq_integrity_vec(req); +	if (is_pci_p2pdma_page(bv.bv_page)) +		return nvme_pci_setup_meta_iter(req); +  	iod->meta_dma = dma_map_bvec(nvmeq->dev->dev, &bv, rq_dma_dir(req), 0);  	if (dma_mapping_error(nvmeq->dev->dev, iod->meta_dma))  		return BLK_STS_IOERR; @@ -1128,7 +1135,7 @@ static blk_status_t nvme_map_metadata(struct request *req)  	if ((iod->cmd.common.flags & NVME_CMD_SGL_METABUF) &&  	    nvme_pci_metadata_use_sgls(req)) -		return nvme_pci_setup_meta_sgls(req); +		return nvme_pci_setup_meta_iter(req);  	return nvme_pci_setup_meta_mptr(req);  } diff --git a/drivers/nvme/target/auth.c b/drivers/nvme/target/auth.c index b340380f3892..ceba21684e82 100644 --- a/drivers/nvme/target/auth.c +++ b/drivers/nvme/target/auth.c @@ -298,7 +298,7 @@ int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response,  	const char *hash_name;  	u8 *challenge = req->sq->dhchap_c1;  	struct nvme_dhchap_key *transformed_key; -	u8 buf[4]; +	u8 buf[4], sc_c = ctrl->concat ? 1 : 0;  	int ret;  	hash_name = nvme_auth_hmac_name(ctrl->shash_id); @@ -367,13 +367,14 @@ int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response,  	ret = crypto_shash_update(shash, buf, 2);  	if (ret)  		goto out; -	memset(buf, 0, 4); +	*buf = sc_c;  	ret = crypto_shash_update(shash, buf, 1);  	if (ret)  		goto out;  	ret = crypto_shash_update(shash, "HostHost", 8);  	if (ret)  		goto out; +	memset(buf, 0, 4);  	ret = crypto_shash_update(shash, ctrl->hostnqn, strlen(ctrl->hostnqn));  	if (ret)  		goto out; diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c index 6948824642dc..c48a20602d7f 100644 --- a/drivers/pci/controller/dwc/pcie-qcom.c +++ b/drivers/pci/controller/dwc/pcie-qcom.c @@ -247,6 +247,7 @@ struct qcom_pcie_ops {  	int (*get_resources)(struct qcom_pcie *pcie);  	int (*init)(struct qcom_pcie *pcie);  	int (*post_init)(struct qcom_pcie *pcie); +	void (*host_post_init)(struct qcom_pcie *pcie);  	void (*deinit)(struct qcom_pcie *pcie);  	void (*ltssm_enable)(struct qcom_pcie *pcie);  	int (*config_sid)(struct qcom_pcie *pcie); @@ -1038,6 +1039,25 @@ static int qcom_pcie_post_init_2_7_0(struct qcom_pcie *pcie)  	return 0;  } +static int qcom_pcie_enable_aspm(struct pci_dev *pdev, void *userdata) +{ +	/* +	 * Downstream devices need to be in D0 state before enabling PCI PM +	 * substates. 
+	 */ +	pci_set_power_state_locked(pdev, PCI_D0); +	pci_enable_link_state_locked(pdev, PCIE_LINK_STATE_ALL); + +	return 0; +} + +static void qcom_pcie_host_post_init_2_7_0(struct qcom_pcie *pcie) +{ +	struct dw_pcie_rp *pp = &pcie->pci->pp; + +	pci_walk_bus(pp->bridge->bus, qcom_pcie_enable_aspm, NULL); +} +  static void qcom_pcie_deinit_2_7_0(struct qcom_pcie *pcie)  {  	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0; @@ -1312,9 +1332,19 @@ static void qcom_pcie_host_deinit(struct dw_pcie_rp *pp)  	pcie->cfg->ops->deinit(pcie);  } +static void qcom_pcie_host_post_init(struct dw_pcie_rp *pp) +{ +	struct dw_pcie *pci = to_dw_pcie_from_pp(pp); +	struct qcom_pcie *pcie = to_qcom_pcie(pci); + +	if (pcie->cfg->ops->host_post_init) +		pcie->cfg->ops->host_post_init(pcie); +} +  static const struct dw_pcie_host_ops qcom_pcie_dw_ops = {  	.init		= qcom_pcie_host_init,  	.deinit		= qcom_pcie_host_deinit, +	.post_init	= qcom_pcie_host_post_init,  };  /* Qcom IP rev.: 2.1.0	Synopsys IP rev.: 4.01a */ @@ -1376,6 +1406,7 @@ static const struct qcom_pcie_ops ops_1_9_0 = {  	.get_resources = qcom_pcie_get_resources_2_7_0,  	.init = qcom_pcie_init_2_7_0,  	.post_init = qcom_pcie_post_init_2_7_0, +	.host_post_init = qcom_pcie_host_post_init_2_7_0,  	.deinit = qcom_pcie_deinit_2_7_0,  	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,  	.config_sid = qcom_pcie_config_sid_1_9_0, @@ -1386,6 +1417,7 @@ static const struct qcom_pcie_ops ops_1_21_0 = {  	.get_resources = qcom_pcie_get_resources_2_7_0,  	.init = qcom_pcie_init_2_7_0,  	.post_init = qcom_pcie_post_init_2_7_0, +	.host_post_init = qcom_pcie_host_post_init_2_7_0,  	.deinit = qcom_pcie_deinit_2_7_0,  	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,  }; diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c index 4a8735b275e4..3645f392a9fd 100644 --- a/drivers/pci/setup-bus.c +++ b/drivers/pci/setup-bus.c @@ -1604,7 +1604,7 @@ void __pci_bus_size_bridges(struct pci_bus *bus, struct list_head *realloc_head)  		pbus_size_io(bus, realloc_head ? 
0 : additional_io_size,  			     additional_io_size, realloc_head); -		if (pref) { +		if (pref && (pref->flags & IORESOURCE_PREFETCH)) {  			pbus_size_mem(bus,  				      IORESOURCE_MEM | IORESOURCE_PREFETCH |  				      (pref->flags & IORESOURCE_MEM_64), diff --git a/drivers/regulator/bd718x7-regulator.c b/drivers/regulator/bd718x7-regulator.c index 022d98f3c32a..ea9c4058ee6a 100644 --- a/drivers/regulator/bd718x7-regulator.c +++ b/drivers/regulator/bd718x7-regulator.c @@ -1613,6 +1613,8 @@ static int setup_feedback_loop(struct device *dev, struct device_node *np,  				step /= r1;  				new[j].min = min; +				new[j].min_sel = desc->linear_ranges[j].min_sel; +				new[j].max_sel = desc->linear_ranges[j].max_sel;  				new[j].step = step;  				dev_dbg(dev, "%s: old range min %d, step %d\n", diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c index cc5d05dc395c..17173239301e 100644 --- a/drivers/scsi/hosts.c +++ b/drivers/scsi/hosts.c @@ -611,8 +611,9 @@ int scsi_host_busy(struct Scsi_Host *shost)  {  	int cnt = 0; -	blk_mq_tagset_busy_iter(&shost->tag_set, -				scsi_host_check_in_flight, &cnt); +	if (shost->tag_set.ops) +		blk_mq_tagset_busy_iter(&shost->tag_set, +					scsi_host_check_in_flight, &cnt);  	return cnt;  }  EXPORT_SYMBOL(scsi_host_busy); diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c index 746ff6a1f309..1c13812a3f03 100644 --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c @@ -554,9 +554,9 @@ enum scsi_disposition scsi_check_sense(struct scsi_cmnd *scmd)  		 * happened, even if someone else gets the sense data.  		 */  		if (sshdr.asc == 0x28) -			scmd->device->ua_new_media_ctr++; +			atomic_inc(&sdev->ua_new_media_ctr);  		else if (sshdr.asc == 0x29) -			scmd->device->ua_por_ctr++; +			atomic_inc(&sdev->ua_por_ctr);  	}  	if (scsi_sense_is_deferred(&sshdr)) diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c index 8339fec975b9..9ca27de4767a 100644 --- a/drivers/ufs/core/ufshcd.c +++ b/drivers/ufs/core/ufshcd.c @@ -4282,8 +4282,8 @@ int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,  			get, UIC_GET_ATTR_ID(attr_sel),  			UFS_UIC_COMMAND_RETRIES - retries); -	if (mib_val && !ret) -		*mib_val = uic_cmd.argument3; +	if (mib_val) +		*mib_val = ret == 0 ? uic_cmd.argument3 : 0;  	if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)  	    && pwr_mode_change) @@ -4999,7 +4999,7 @@ EXPORT_SYMBOL_GPL(ufshcd_hba_enable);  static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer)  { -	int tx_lanes = 0, i, err = 0; +	int tx_lanes, i, err = 0;  	if (!peer)  		ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), @@ -6673,6 +6673,20 @@ static void ufshcd_err_handler(struct work_struct *work)  		 hba->saved_uic_err, hba->force_reset,  		 ufshcd_is_link_broken(hba) ? "; link is broken" : ""); +	/* +	 * Use ufshcd_rpm_get_noresume() here to safely perform link recovery +	 * even if an error occurs during runtime suspend or runtime resume. +	 * This avoids potential deadlocks that could happen if we tried to +	 * resume the device while a PM operation is already in progress. 
+	 */ +	ufshcd_rpm_get_noresume(hba); +	if (hba->pm_op_in_progress) { +		ufshcd_link_recovery(hba); +		ufshcd_rpm_put(hba); +		return; +	} +	ufshcd_rpm_put(hba); +  	down(&hba->host_sem);  	spin_lock_irqsave(hba->host->host_lock, flags);  	if (ufshcd_err_handling_should_stop(hba)) { @@ -6684,14 +6698,6 @@ static void ufshcd_err_handler(struct work_struct *work)  	}  	spin_unlock_irqrestore(hba->host->host_lock, flags); -	ufshcd_rpm_get_noresume(hba); -	if (hba->pm_op_in_progress) { -		ufshcd_link_recovery(hba); -		ufshcd_rpm_put(hba); -		return; -	} -	ufshcd_rpm_put(hba); -  	ufshcd_err_handling_prepare(hba);  	spin_lock_irqsave(hba->host->host_lock, flags); diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c index 916cad80941c..5167bec14e36 100644 --- a/drivers/vfio/vfio_iommu_type1.c +++ b/drivers/vfio/vfio_iommu_type1.c @@ -38,6 +38,7 @@  #include <linux/workqueue.h>  #include <linux/notifier.h>  #include <linux/mm_inline.h> +#include <linux/overflow.h>  #include "vfio.h"  #define DRIVER_VERSION  "0.2" @@ -167,12 +168,14 @@ static struct vfio_dma *vfio_find_dma(struct vfio_iommu *iommu,  {  	struct rb_node *node = iommu->dma_list.rb_node; +	WARN_ON(!size); +  	while (node) {  		struct vfio_dma *dma = rb_entry(node, struct vfio_dma, node); -		if (start + size <= dma->iova) +		if (start + size - 1 < dma->iova)  			node = node->rb_left; -		else if (start >= dma->iova + dma->size) +		else if (start > dma->iova + dma->size - 1)  			node = node->rb_right;  		else  			return dma; @@ -182,16 +185,19 @@ static struct vfio_dma *vfio_find_dma(struct vfio_iommu *iommu,  }  static struct rb_node *vfio_find_dma_first_node(struct vfio_iommu *iommu, -						dma_addr_t start, u64 size) +						dma_addr_t start, +						dma_addr_t end)  {  	struct rb_node *res = NULL;  	struct rb_node *node = iommu->dma_list.rb_node;  	struct vfio_dma *dma_res = NULL; +	WARN_ON(end < start); +  	while (node) {  		struct vfio_dma *dma = rb_entry(node, struct vfio_dma, node); -		if (start < dma->iova + dma->size) { +		if (start <= dma->iova + dma->size - 1) {  			res = node;  			dma_res = dma;  			if (start >= dma->iova) @@ -201,7 +207,7 @@ static struct rb_node *vfio_find_dma_first_node(struct vfio_iommu *iommu,  			node = node->rb_right;  		}  	} -	if (res && size && dma_res->iova >= start + size) +	if (res && dma_res->iova > end)  		res = NULL;  	return res;  } @@ -211,11 +217,13 @@ static void vfio_link_dma(struct vfio_iommu *iommu, struct vfio_dma *new)  	struct rb_node **link = &iommu->dma_list.rb_node, *parent = NULL;  	struct vfio_dma *dma; +	WARN_ON(new->size != 0); +  	while (*link) {  		parent = *link;  		dma = rb_entry(parent, struct vfio_dma, node); -		if (new->iova + new->size <= dma->iova) +		if (new->iova <= dma->iova)  			link = &(*link)->rb_left;  		else  			link = &(*link)->rb_right; @@ -895,14 +903,20 @@ static int vfio_iommu_type1_pin_pages(void *iommu_data,  	unsigned long remote_vaddr;  	struct vfio_dma *dma;  	bool do_accounting; +	dma_addr_t iova_end; +	size_t iova_size; -	if (!iommu || !pages) +	if (!iommu || !pages || npage <= 0)  		return -EINVAL;  	/* Supported for v2 version only */  	if (!iommu->v2)  		return -EACCES; +	if (check_mul_overflow(npage, PAGE_SIZE, &iova_size) || +	    check_add_overflow(user_iova, iova_size - 1, &iova_end)) +		return -EOVERFLOW; +  	mutex_lock(&iommu->lock);  	if (WARN_ONCE(iommu->vaddr_invalid_count, @@ -1008,12 +1022,21 @@ static void vfio_iommu_type1_unpin_pages(void *iommu_data,  {  	struct vfio_iommu *iommu = iommu_data;  	bool 
do_accounting; +	dma_addr_t iova_end; +	size_t iova_size;  	int i;  	/* Supported for v2 version only */  	if (WARN_ON(!iommu->v2))  		return; +	if (WARN_ON(npage <= 0)) +		return; + +	if (WARN_ON(check_mul_overflow(npage, PAGE_SIZE, &iova_size) || +		    check_add_overflow(user_iova, iova_size - 1, &iova_end))) +		return; +  	mutex_lock(&iommu->lock);  	do_accounting = list_empty(&iommu->domain_list); @@ -1067,7 +1090,7 @@ static long vfio_sync_unpin(struct vfio_dma *dma, struct vfio_domain *domain,  #define VFIO_IOMMU_TLB_SYNC_MAX		512  static size_t unmap_unpin_fast(struct vfio_domain *domain, -			       struct vfio_dma *dma, dma_addr_t *iova, +			       struct vfio_dma *dma, dma_addr_t iova,  			       size_t len, phys_addr_t phys, long *unlocked,  			       struct list_head *unmapped_list,  			       int *unmapped_cnt, @@ -1077,18 +1100,17 @@ static size_t unmap_unpin_fast(struct vfio_domain *domain,  	struct vfio_regions *entry = kzalloc(sizeof(*entry), GFP_KERNEL);  	if (entry) { -		unmapped = iommu_unmap_fast(domain->domain, *iova, len, +		unmapped = iommu_unmap_fast(domain->domain, iova, len,  					    iotlb_gather);  		if (!unmapped) {  			kfree(entry);  		} else { -			entry->iova = *iova; +			entry->iova = iova;  			entry->phys = phys;  			entry->len  = unmapped;  			list_add_tail(&entry->list, unmapped_list); -			*iova += unmapped;  			(*unmapped_cnt)++;  		}  	} @@ -1107,18 +1129,17 @@ static size_t unmap_unpin_fast(struct vfio_domain *domain,  }  static size_t unmap_unpin_slow(struct vfio_domain *domain, -			       struct vfio_dma *dma, dma_addr_t *iova, +			       struct vfio_dma *dma, dma_addr_t iova,  			       size_t len, phys_addr_t phys,  			       long *unlocked)  { -	size_t unmapped = iommu_unmap(domain->domain, *iova, len); +	size_t unmapped = iommu_unmap(domain->domain, iova, len);  	if (unmapped) { -		*unlocked += vfio_unpin_pages_remote(dma, *iova, +		*unlocked += vfio_unpin_pages_remote(dma, iova,  						     phys >> PAGE_SHIFT,  						     unmapped >> PAGE_SHIFT,  						     false); -		*iova += unmapped;  		cond_resched();  	}  	return unmapped; @@ -1127,12 +1148,12 @@ static size_t unmap_unpin_slow(struct vfio_domain *domain,  static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma,  			     bool do_accounting)  { -	dma_addr_t iova = dma->iova, end = dma->iova + dma->size;  	struct vfio_domain *domain, *d;  	LIST_HEAD(unmapped_region_list);  	struct iommu_iotlb_gather iotlb_gather;  	int unmapped_region_cnt = 0;  	long unlocked = 0; +	size_t pos = 0;  	if (!dma->size)  		return 0; @@ -1156,13 +1177,14 @@ static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma,  	}  	iommu_iotlb_gather_init(&iotlb_gather); -	while (iova < end) { +	while (pos < dma->size) {  		size_t unmapped, len;  		phys_addr_t phys, next; +		dma_addr_t iova = dma->iova + pos;  		phys = iommu_iova_to_phys(domain->domain, iova);  		if (WARN_ON(!phys)) { -			iova += PAGE_SIZE; +			pos += PAGE_SIZE;  			continue;  		} @@ -1171,7 +1193,7 @@ static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma,  		 * may require hardware cache flushing, try to find the  		 * largest contiguous physical memory chunk to unmap.  		 
*/ -		for (len = PAGE_SIZE; iova + len < end; len += PAGE_SIZE) { +		for (len = PAGE_SIZE; pos + len < dma->size; len += PAGE_SIZE) {  			next = iommu_iova_to_phys(domain->domain, iova + len);  			if (next != phys + len)  				break; @@ -1181,16 +1203,18 @@ static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma,  		 * First, try to use fast unmap/unpin. In case of failure,  		 * switch to slow unmap/unpin path.  		 */ -		unmapped = unmap_unpin_fast(domain, dma, &iova, len, phys, +		unmapped = unmap_unpin_fast(domain, dma, iova, len, phys,  					    &unlocked, &unmapped_region_list,  					    &unmapped_region_cnt,  					    &iotlb_gather);  		if (!unmapped) { -			unmapped = unmap_unpin_slow(domain, dma, &iova, len, +			unmapped = unmap_unpin_slow(domain, dma, iova, len,  						    phys, &unlocked);  			if (WARN_ON(!unmapped))  				break;  		} + +		pos += unmapped;  	}  	dma->iommu_mapped = false; @@ -1282,7 +1306,7 @@ static int update_user_bitmap(u64 __user *bitmap, struct vfio_iommu *iommu,  }  static int vfio_iova_dirty_bitmap(u64 __user *bitmap, struct vfio_iommu *iommu, -				  dma_addr_t iova, size_t size, size_t pgsize) +				  dma_addr_t iova, dma_addr_t iova_end, size_t pgsize)  {  	struct vfio_dma *dma;  	struct rb_node *n; @@ -1299,8 +1323,8 @@ static int vfio_iova_dirty_bitmap(u64 __user *bitmap, struct vfio_iommu *iommu,  	if (dma && dma->iova != iova)  		return -EINVAL; -	dma = vfio_find_dma(iommu, iova + size - 1, 0); -	if (dma && dma->iova + dma->size != iova + size) +	dma = vfio_find_dma(iommu, iova_end, 1); +	if (dma && dma->iova + dma->size - 1 != iova_end)  		return -EINVAL;  	for (n = rb_first(&iommu->dma_list); n; n = rb_next(n)) { @@ -1309,7 +1333,7 @@ static int vfio_iova_dirty_bitmap(u64 __user *bitmap, struct vfio_iommu *iommu,  		if (dma->iova < iova)  			continue; -		if (dma->iova > iova + size - 1) +		if (dma->iova > iova_end)  			break;  		ret = update_user_bitmap(bitmap, iommu, dma, iova, pgsize); @@ -1374,7 +1398,8 @@ static int vfio_dma_do_unmap(struct vfio_iommu *iommu,  	int ret = -EINVAL, retries = 0;  	unsigned long pgshift;  	dma_addr_t iova = unmap->iova; -	u64 size = unmap->size; +	dma_addr_t iova_end; +	size_t size = unmap->size;  	bool unmap_all = unmap->flags & VFIO_DMA_UNMAP_FLAG_ALL;  	bool invalidate_vaddr = unmap->flags & VFIO_DMA_UNMAP_FLAG_VADDR;  	struct rb_node *n, *first_n; @@ -1387,6 +1412,11 @@ static int vfio_dma_do_unmap(struct vfio_iommu *iommu,  		goto unlock;  	} +	if (iova != unmap->iova || size != unmap->size) { +		ret = -EOVERFLOW; +		goto unlock; +	} +  	pgshift = __ffs(iommu->pgsize_bitmap);  	pgsize = (size_t)1 << pgshift; @@ -1396,10 +1426,15 @@ static int vfio_dma_do_unmap(struct vfio_iommu *iommu,  	if (unmap_all) {  		if (iova || size)  			goto unlock; -		size = U64_MAX; -	} else if (!size || size & (pgsize - 1) || -		   iova + size - 1 < iova || size > SIZE_MAX) { -		goto unlock; +		iova_end = ~(dma_addr_t)0; +	} else { +		if (!size || size & (pgsize - 1)) +			goto unlock; + +		if (check_add_overflow(iova, size - 1, &iova_end)) { +			ret = -EOVERFLOW; +			goto unlock; +		}  	}  	/* When dirty tracking is enabled, allow only min supported pgsize */ @@ -1446,17 +1481,17 @@ again:  		if (dma && dma->iova != iova)  			goto unlock; -		dma = vfio_find_dma(iommu, iova + size - 1, 0); -		if (dma && dma->iova + dma->size != iova + size) +		dma = vfio_find_dma(iommu, iova_end, 1); +		if (dma && dma->iova + dma->size - 1 != iova_end)  			goto unlock;  	}  	ret = 0; -	n = first_n = vfio_find_dma_first_node(iommu, 
iova, size); +	n = first_n = vfio_find_dma_first_node(iommu, iova, iova_end);  	while (n) {  		dma = rb_entry(n, struct vfio_dma, node); -		if (dma->iova >= iova + size) +		if (dma->iova > iova_end)  			break;  		if (!iommu->v2 && iova > dma->iova) @@ -1648,7 +1683,9 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,  {  	bool set_vaddr = map->flags & VFIO_DMA_MAP_FLAG_VADDR;  	dma_addr_t iova = map->iova; +	dma_addr_t iova_end;  	unsigned long vaddr = map->vaddr; +	unsigned long vaddr_end;  	size_t size = map->size;  	int ret = 0, prot = 0;  	size_t pgsize; @@ -1656,8 +1693,15 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,  	/* Verify that none of our __u64 fields overflow */  	if (map->size != size || map->vaddr != vaddr || map->iova != iova) +		return -EOVERFLOW; + +	if (!size)  		return -EINVAL; +	if (check_add_overflow(iova, size - 1, &iova_end) || +	    check_add_overflow(vaddr, size - 1, &vaddr_end)) +		return -EOVERFLOW; +  	/* READ/WRITE from device perspective */  	if (map->flags & VFIO_DMA_MAP_FLAG_WRITE)  		prot |= IOMMU_WRITE; @@ -1673,13 +1717,7 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,  	WARN_ON((pgsize - 1) & PAGE_MASK); -	if (!size || (size | iova | vaddr) & (pgsize - 1)) { -		ret = -EINVAL; -		goto out_unlock; -	} - -	/* Don't allow IOVA or virtual address wrap */ -	if (iova + size - 1 < iova || vaddr + size - 1 < vaddr) { +	if ((size | iova | vaddr) & (pgsize - 1)) {  		ret = -EINVAL;  		goto out_unlock;  	} @@ -1710,7 +1748,7 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,  		goto out_unlock;  	} -	if (!vfio_iommu_iova_dma_valid(iommu, iova, iova + size - 1)) { +	if (!vfio_iommu_iova_dma_valid(iommu, iova, iova_end)) {  		ret = -EINVAL;  		goto out_unlock;  	} @@ -1783,12 +1821,12 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,  	for (; n; n = rb_next(n)) {  		struct vfio_dma *dma; -		dma_addr_t iova; +		size_t pos = 0;  		dma = rb_entry(n, struct vfio_dma, node); -		iova = dma->iova; -		while (iova < dma->iova + dma->size) { +		while (pos < dma->size) { +			dma_addr_t iova = dma->iova + pos;  			phys_addr_t phys;  			size_t size; @@ -1804,14 +1842,14 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,  				phys = iommu_iova_to_phys(d->domain, iova);  				if (WARN_ON(!phys)) { -					iova += PAGE_SIZE; +					pos += PAGE_SIZE;  					continue;  				}  				size = PAGE_SIZE;  				p = phys + size;  				i = iova + size; -				while (i < dma->iova + dma->size && +				while (pos + size < dma->size &&  				       p == iommu_iova_to_phys(d->domain, i)) {  					size += PAGE_SIZE;  					p += PAGE_SIZE; @@ -1819,9 +1857,8 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,  				}  			} else {  				unsigned long pfn; -				unsigned long vaddr = dma->vaddr + -						     (iova - dma->iova); -				size_t n = dma->iova + dma->size - iova; +				unsigned long vaddr = dma->vaddr + pos; +				size_t n = dma->size - pos;  				long npage;  				npage = vfio_pin_pages_remote(dma, vaddr, @@ -1852,7 +1889,7 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,  				goto unwind;  			} -			iova += size; +			pos += size;  		}  	} @@ -1869,29 +1906,29 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,  unwind:  	for (; n; n = rb_prev(n)) {  		struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node); -		dma_addr_t iova; +		size_t pos = 0;  		if (dma->iommu_mapped) {  			iommu_unmap(domain->domain, dma->iova, dma->size);  			continue;  		} -		iova = dma->iova; -		while (iova < dma->iova + dma->size) { +		while (pos < dma->size) { +			
dma_addr_t iova = dma->iova + pos;  			phys_addr_t phys, p;  			size_t size;  			dma_addr_t i;  			phys = iommu_iova_to_phys(domain->domain, iova);  			if (!phys) { -				iova += PAGE_SIZE; +				pos += PAGE_SIZE;  				continue;  			}  			size = PAGE_SIZE;  			p = phys + size;  			i = iova + size; -			while (i < dma->iova + dma->size && +			while (pos + size < dma->size &&  			       p == iommu_iova_to_phys(domain->domain, i)) {  				size += PAGE_SIZE;  				p += PAGE_SIZE; @@ -2977,7 +3014,8 @@ static int vfio_iommu_type1_dirty_pages(struct vfio_iommu *iommu,  		struct vfio_iommu_type1_dirty_bitmap_get range;  		unsigned long pgshift;  		size_t data_size = dirty.argsz - minsz; -		size_t iommu_pgsize; +		size_t size, iommu_pgsize; +		dma_addr_t iova, iova_end;  		if (!data_size || data_size < sizeof(range))  			return -EINVAL; @@ -2986,14 +3024,24 @@ static int vfio_iommu_type1_dirty_pages(struct vfio_iommu *iommu,  				   sizeof(range)))  			return -EFAULT; -		if (range.iova + range.size < range.iova) +		iova = range.iova; +		size = range.size; + +		if (iova != range.iova || size != range.size) +			return -EOVERFLOW; + +		if (!size)  			return -EINVAL; + +		if (check_add_overflow(iova, size - 1, &iova_end)) +			return -EOVERFLOW; +  		if (!access_ok((void __user *)range.bitmap.data,  			       range.bitmap.size))  			return -EINVAL;  		pgshift = __ffs(range.bitmap.pgsize); -		ret = verify_bitmap_size(range.size >> pgshift, +		ret = verify_bitmap_size(size >> pgshift,  					 range.bitmap.size);  		if (ret)  			return ret; @@ -3007,19 +3055,18 @@ static int vfio_iommu_type1_dirty_pages(struct vfio_iommu *iommu,  			ret = -EINVAL;  			goto out_unlock;  		} -		if (range.iova & (iommu_pgsize - 1)) { +		if (iova & (iommu_pgsize - 1)) {  			ret = -EINVAL;  			goto out_unlock;  		} -		if (!range.size || range.size & (iommu_pgsize - 1)) { +		if (size & (iommu_pgsize - 1)) {  			ret = -EINVAL;  			goto out_unlock;  		}  		if (iommu->dirty_page_tracking)  			ret = vfio_iova_dirty_bitmap(range.bitmap.data, -						     iommu, range.iova, -						     range.size, +						     iommu, iova, iova_end,  						     range.bitmap.pgsize);  		else  			ret = -EINVAL; diff --git a/drivers/video/fbdev/aty/atyfb_base.c b/drivers/video/fbdev/aty/atyfb_base.c index 210fd3ac18a4..56ef1d88e003 100644 --- a/drivers/video/fbdev/aty/atyfb_base.c +++ b/drivers/video/fbdev/aty/atyfb_base.c @@ -2614,8 +2614,12 @@ static int aty_init(struct fb_info *info)  		pr_cont("\n");  	}  #endif -	if (par->pll_ops->init_pll) -		par->pll_ops->init_pll(info, &par->pll); +	if (par->pll_ops->init_pll) { +		ret = par->pll_ops->init_pll(info, &par->pll); +		if (ret) +			return ret; +	} +  	if (par->pll_ops->resume_pll)  		par->pll_ops->resume_pll(info, &par->pll); diff --git a/drivers/video/fbdev/core/bitblit.c b/drivers/video/fbdev/core/bitblit.c index a9ec7f488522..dc5ad3fcc7be 100644 --- a/drivers/video/fbdev/core/bitblit.c +++ b/drivers/video/fbdev/core/bitblit.c @@ -79,12 +79,16 @@ static inline void bit_putcs_aligned(struct vc_data *vc, struct fb_info *info,  				     struct fb_image *image, u8 *buf, u8 *dst)  {  	u16 charmask = vc->vc_hi_font_mask ? 
0x1ff : 0xff; +	unsigned int charcnt = vc->vc_font.charcount;  	u32 idx = vc->vc_font.width >> 3;  	u8 *src;  	while (cnt--) { -		src = vc->vc_font.data + (scr_readw(s++)& -					  charmask)*cellsize; +		u16 ch = scr_readw(s++) & charmask; + +		if (ch >= charcnt) +			ch = 0; +		src = vc->vc_font.data + (unsigned int)ch * cellsize;  		if (attr) {  			update_attr(buf, src, attr, vc); @@ -112,14 +116,18 @@ static inline void bit_putcs_unaligned(struct vc_data *vc,  				       u8 *dst)  {  	u16 charmask = vc->vc_hi_font_mask ? 0x1ff : 0xff; +	unsigned int charcnt = vc->vc_font.charcount;  	u32 shift_low = 0, mod = vc->vc_font.width % 8;  	u32 shift_high = 8;  	u32 idx = vc->vc_font.width >> 3;  	u8 *src;  	while (cnt--) { -		src = vc->vc_font.data + (scr_readw(s++)& -					  charmask)*cellsize; +		u16 ch = scr_readw(s++) & charmask; + +		if (ch >= charcnt) +			ch = 0; +		src = vc->vc_font.data + (unsigned int)ch * cellsize;  		if (attr) {  			update_attr(buf, src, attr, vc); diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c index 96cc9b389246..9bd3c3814b5c 100644 --- a/drivers/video/fbdev/core/fbcon.c +++ b/drivers/video/fbdev/core/fbcon.c @@ -2810,6 +2810,25 @@ int fbcon_mode_deleted(struct fb_info *info,  	return found;  } +static void fbcon_delete_mode(struct fb_videomode *m) +{ +	struct fbcon_display *p; + +	for (int i = first_fb_vc; i <= last_fb_vc; i++) { +		p = &fb_display[i]; +		if (p->mode == m) +			p->mode = NULL; +	} +} + +void fbcon_delete_modelist(struct list_head *head) +{ +	struct fb_modelist *modelist; + +	list_for_each_entry(modelist, head, list) +		fbcon_delete_mode(&modelist->mode); +} +  #ifdef CONFIG_VT_HW_CONSOLE_BINDING  static void fbcon_unbind(void)  { diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c index 53f1719b1ae1..eff757ebbed1 100644 --- a/drivers/video/fbdev/core/fbmem.c +++ b/drivers/video/fbdev/core/fbmem.c @@ -544,6 +544,7 @@ static void do_unregister_framebuffer(struct fb_info *fb_info)  		fb_info->pixmap.addr = NULL;  	} +	fbcon_delete_modelist(&fb_info->modelist);  	fb_destroy_modelist(&fb_info->modelist);  	registered_fb[fb_info->node] = NULL;  	num_registered_fb--; diff --git a/drivers/video/fbdev/pvr2fb.c b/drivers/video/fbdev/pvr2fb.c index cbdb1caf61bd..0b8d23c12b77 100644 --- a/drivers/video/fbdev/pvr2fb.c +++ b/drivers/video/fbdev/pvr2fb.c @@ -192,7 +192,7 @@ static unsigned long pvr2fb_map;  #ifdef CONFIG_PVR2_DMA  static unsigned int shdma = PVR2_CASCADE_CHAN; -static unsigned int pvr2dma = ONCHIP_NR_DMA_CHANNELS; +static unsigned int pvr2dma = CONFIG_NR_ONCHIP_DMA_CHANNELS;  #endif  static struct fb_videomode pvr2_modedb[] = { diff --git a/drivers/video/fbdev/valkyriefb.c b/drivers/video/fbdev/valkyriefb.c index 91d070ef6989..6ff059ee1694 100644 --- a/drivers/video/fbdev/valkyriefb.c +++ b/drivers/video/fbdev/valkyriefb.c @@ -329,11 +329,13 @@ static int __init valkyriefb_init(void)  		if (of_address_to_resource(dp, 0, &r)) {  			printk(KERN_ERR "can't find address for valkyrie\n"); +			of_node_put(dp);  			return 0;  		}  		frame_buffer_phys = r.start;  		cmap_regs_phys = r.start + 0x304000; +		of_node_put(dp);  	}  #endif /* ppc (!CONFIG_MAC) */ diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 755ec6dfd51c..23273d0e6f22 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -2228,6 +2228,14 @@ static noinline_for_stack void write_one_eb(struct extent_buffer *eb,  		wbc_account_cgroup_owner(wbc, folio, range_len);  		folio_unlock(folio);  	} +	/* +	 * If the fs 
is already in error status, do not submit any writeback +	 * but immediately finish it. +	 */ +	if (unlikely(BTRFS_FS_ERROR(fs_info))) { +		btrfs_bio_end_io(bbio, errno_to_blk_status(BTRFS_FS_ERROR(fs_info))); +		return; +	}  	btrfs_submit_bbio(bbio, 0);  } diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 7efd1f8a1912..fa82def46e39 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -2854,12 +2854,22 @@ static int btrfs_fallocate_update_isize(struct inode *inode,  {  	struct btrfs_trans_handle *trans;  	struct btrfs_root *root = BTRFS_I(inode)->root; +	u64 range_start; +	u64 range_end;  	int ret;  	int ret2;  	if (mode & FALLOC_FL_KEEP_SIZE || end <= i_size_read(inode))  		return 0; +	range_start = round_down(i_size_read(inode), root->fs_info->sectorsize); +	range_end = round_up(end, root->fs_info->sectorsize); + +	ret = btrfs_inode_set_file_extent_range(BTRFS_I(inode), range_start, +						range_end - range_start); +	if (ret) +		return ret; +  	trans = btrfs_start_transaction(root, 1);  	if (IS_ERR(trans))  		return PTR_ERR(trans); diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 3b1b3a0553ee..3df5f36185a0 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -6873,7 +6873,6 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,  	BTRFS_I(inode)->dir_index = 0ULL;  	inode_inc_iversion(inode);  	inode_set_ctime_current(inode); -	set_bit(BTRFS_INODE_COPY_EVERYTHING, &BTRFS_I(inode)->runtime_flags);  	ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode),  			     &fname.disk_name, 1, index); diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c index 1175b8192cd7..31ad8580322a 100644 --- a/fs/btrfs/qgroup.c +++ b/fs/btrfs/qgroup.c @@ -1539,8 +1539,10 @@ int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans, u64 src, u64 dst  	ASSERT(prealloc);  	/* Check the level of src and dst first */ -	if (btrfs_qgroup_level(src) >= btrfs_qgroup_level(dst)) +	if (btrfs_qgroup_level(src) >= btrfs_qgroup_level(dst)) { +		kfree(prealloc);  		return -EINVAL; +	}  	mutex_lock(&fs_info->qgroup_ioctl_lock);  	if (!fs_info->quota_root) { diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 621e0df097e3..c90b2d2cb08f 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -7910,6 +7910,9 @@ void btrfs_log_new_name(struct btrfs_trans_handle *trans,  	bool log_pinned = false;  	int ret; +	/* The inode has a new name (ref/extref), so make sure we log it. 
*/ +	set_bit(BTRFS_INODE_COPY_EVERYTHING, &inode->runtime_flags); +  	btrfs_init_log_ctx(&ctx, inode);  	ctx.logging_new_name = true; diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c index e466cf52d7d7..7f7e6bb23a90 100644 --- a/fs/nfsd/nfs4proc.c +++ b/fs/nfsd/nfs4proc.c @@ -988,10 +988,11 @@ nfsd4_read(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,  static void  nfsd4_read_release(union nfsd4_op_u *u)  { -	if (u->read.rd_nf) +	if (u->read.rd_nf) { +		trace_nfsd_read_done(u->read.rd_rqstp, u->read.rd_fhp, +				     u->read.rd_offset, u->read.rd_length);  		nfsd_file_put(u->read.rd_nf); -	trace_nfsd_read_done(u->read.rd_rqstp, u->read.rd_fhp, -			     u->read.rd_offset, u->read.rd_length); +	}  }  static __be32 @@ -2892,10 +2893,20 @@ nfsd4_proc_compound(struct svc_rqst *rqstp)  	rqstp->rq_lease_breaker = (void **)&cstate->clp; -	trace_nfsd_compound(rqstp, args->tag, args->taglen, args->opcnt); +	trace_nfsd_compound(rqstp, args->tag, args->taglen, args->client_opcnt);  	while (!status && resp->opcnt < args->opcnt) {  		op = &args->ops[resp->opcnt++]; +		if (unlikely(resp->opcnt == NFSD_MAX_OPS_PER_COMPOUND)) { +			/* If there are still more operations to process, +			 * stop here and report NFS4ERR_RESOURCE. */ +			if (cstate->minorversion == 0 && +			    args->client_opcnt > resp->opcnt) { +				op->status = nfserr_resource; +				goto encode_op; +			} +		} +  		/*  		 * The XDR decode routines may have pre-set op->status;  		 * for example, if there is a miscellaneous XDR error @@ -2972,7 +2983,7 @@ encode_op:  			status = op->status;  		} -		trace_nfsd_compound_status(args->opcnt, resp->opcnt, +		trace_nfsd_compound_status(args->client_opcnt, resp->opcnt,  					   status, nfsd4_op_name(op->opnum));  		nfsd4_cstate_clear_replay(cstate); diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 81fa7cc6c77b..c1b54322c412 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -3902,6 +3902,7 @@ static __be32 check_forechannel_attrs(struct nfsd4_channel_attrs *ca, struct nfs  	ca->headerpadsz = 0;  	ca->maxreq_sz = min_t(u32, ca->maxreq_sz, maxrpc);  	ca->maxresp_sz = min_t(u32, ca->maxresp_sz, maxrpc); +	ca->maxops = min_t(u32, ca->maxops, NFSD_MAX_OPS_PER_COMPOUND);  	ca->maxresp_cached = min_t(u32, ca->maxresp_cached,  			NFSD_SLOT_CACHE_SIZE + NFSD_MIN_HDR_SEQ_SZ);  	ca->maxreqs = min_t(u32, ca->maxreqs, NFSD_MAX_SLOTS_PER_SESSION); diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c index c0a3c6a7c8bb..6040a6145dad 100644 --- a/fs/nfsd/nfs4xdr.c +++ b/fs/nfsd/nfs4xdr.c @@ -2488,8 +2488,10 @@ nfsd4_decode_compound(struct nfsd4_compoundargs *argp)  	if (xdr_stream_decode_u32(argp->xdr, &argp->minorversion) < 0)  		return false; -	if (xdr_stream_decode_u32(argp->xdr, &argp->opcnt) < 0) +	if (xdr_stream_decode_u32(argp->xdr, &argp->client_opcnt) < 0)  		return false; +	argp->opcnt = min_t(u32, argp->client_opcnt, +			    NFSD_MAX_OPS_PER_COMPOUND);  	if (argp->opcnt > ARRAY_SIZE(argp->iops)) {  		argp->ops = vcalloc(argp->opcnt, sizeof(*argp->ops)); @@ -2628,10 +2630,8 @@ static __be32 nfsd4_encode_components_esc(struct xdr_stream *xdr, char sep,  	__be32 *p;  	__be32 pathlen;  	int pathlen_offset; -	int strlen, count=0;  	char *str, *end, *next; - -	dprintk("nfsd4_encode_components(%s)\n", components); +	int count = 0;  	pathlen_offset = xdr->buf->len;  	p = xdr_reserve_space(xdr, 4); @@ -2658,9 +2658,8 @@ static __be32 nfsd4_encode_components_esc(struct xdr_stream *xdr, char sep,  			for (; *end && (*end != sep); end++)  				/* find sep or end of string */; -		
strlen = end - str; -		if (strlen) { -			if (xdr_stream_encode_opaque(xdr, str, strlen) < 0) +		if (end > str) { +			if (xdr_stream_encode_opaque(xdr, str, end - str) < 0)  				return nfserr_resource;  			count++;  		} else @@ -2939,6 +2938,12 @@ struct nfsd4_fattr_args {  typedef __be32(*nfsd4_enc_attr)(struct xdr_stream *xdr,  				const struct nfsd4_fattr_args *args); +static __be32 nfsd4_encode_fattr4__inval(struct xdr_stream *xdr, +					 const struct nfsd4_fattr_args *args) +{ +	return nfserr_inval; +} +  static __be32 nfsd4_encode_fattr4__noop(struct xdr_stream *xdr,  					const struct nfsd4_fattr_args *args)  { @@ -3560,6 +3565,8 @@ static const nfsd4_enc_attr nfsd4_enc_fattr4_encode_ops[] = {  	[FATTR4_MODE_UMASK]		= nfsd4_encode_fattr4__noop,  	[FATTR4_XATTR_SUPPORT]		= nfsd4_encode_fattr4_xattr_support, +	[FATTR4_TIME_DELEG_ACCESS]	= nfsd4_encode_fattr4__inval, +	[FATTR4_TIME_DELEG_MODIFY]	= nfsd4_encode_fattr4__inval,  	[FATTR4_OPEN_ARGUMENTS]		= nfsd4_encode_fattr4_open_arguments,  }; diff --git a/fs/nfsd/nfsd.h b/fs/nfsd/nfsd.h index ea87b42894dd..f19320018639 100644 --- a/fs/nfsd/nfsd.h +++ b/fs/nfsd/nfsd.h @@ -57,6 +57,9 @@ struct readdir_cd {  	__be32			err;	/* 0, nfserr, or nfserr_eof */  }; +/* Maximum number of operations per session compound */ +#define NFSD_MAX_OPS_PER_COMPOUND	200 +  struct nfsd_genl_rqstp {  	struct sockaddr		rq_daddr;  	struct sockaddr		rq_saddr; diff --git a/fs/nfsd/xdr4.h b/fs/nfsd/xdr4.h index d4b48602b2b0..ee0570cbdd9e 100644 --- a/fs/nfsd/xdr4.h +++ b/fs/nfsd/xdr4.h @@ -903,6 +903,7 @@ struct nfsd4_compoundargs {  	char *				tag;  	u32				taglen;  	u32				minorversion; +	u32				client_opcnt;  	u32				opcnt;  	bool				splice_ok;  	struct nfsd4_op			*ops; diff --git a/fs/smb/client/cifsfs.c b/fs/smb/client/cifsfs.c index 4f959f1e08d2..185ac41bd7e9 100644 --- a/fs/smb/client/cifsfs.c +++ b/fs/smb/client/cifsfs.c @@ -173,7 +173,7 @@ module_param(enable_oplocks, bool, 0644);  MODULE_PARM_DESC(enable_oplocks, "Enable or disable oplocks. Default: y/Y/1");  module_param(enable_gcm_256, bool, 0644); -MODULE_PARM_DESC(enable_gcm_256, "Enable requesting strongest (256 bit) GCM encryption. Default: y/Y/0"); +MODULE_PARM_DESC(enable_gcm_256, "Enable requesting strongest (256 bit) GCM encryption. Default: y/Y/1");  module_param(require_gcm_256, bool, 0644);  MODULE_PARM_DESC(require_gcm_256, "Require strongest (256 bit) GCM encryption. 
Default: n/N/0"); diff --git a/fs/smb/client/cifsproto.h b/fs/smb/client/cifsproto.h index fb1813cbe0eb..3528c365a452 100644 --- a/fs/smb/client/cifsproto.h +++ b/fs/smb/client/cifsproto.h @@ -616,6 +616,8 @@ extern int E_md4hash(const unsigned char *passwd, unsigned char *p16,  extern struct TCP_Server_Info *  cifs_find_tcp_session(struct smb3_fs_context *ctx); +struct cifs_tcon *cifs_setup_ipc(struct cifs_ses *ses, bool seal); +  void __cifs_put_smb_ses(struct cifs_ses *ses);  extern struct cifs_ses * diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c index dd12f3eb61dc..55cb4b0cbd48 100644 --- a/fs/smb/client/connect.c +++ b/fs/smb/client/connect.c @@ -310,6 +310,8 @@ cifs_abort_connection(struct TCP_Server_Info *server)  			 server->ssocket->flags);  		sock_release(server->ssocket);  		server->ssocket = NULL; +	} else if (cifs_rdma_enabled(server)) { +		smbd_destroy(server);  	}  	server->sequence_number = 0;  	server->session_estab = false; @@ -338,12 +340,6 @@ cifs_abort_connection(struct TCP_Server_Info *server)  		mid_execute_callback(mid);  		release_mid(mid);  	} - -	if (cifs_rdma_enabled(server)) { -		cifs_server_lock(server); -		smbd_destroy(server); -		cifs_server_unlock(server); -	}  }  static bool cifs_tcp_ses_needs_reconnect(struct TCP_Server_Info *server, int num_targets) @@ -2015,39 +2011,31 @@ static int match_session(struct cifs_ses *ses,  /**   * cifs_setup_ipc - helper to setup the IPC tcon for the session   * @ses: smb session to issue the request on - * @ctx: the superblock configuration context to use for building the - *       new tree connection for the IPC (interprocess communication RPC) + * @seal: if encryption is requested   *   * A new IPC connection is made and stored in the session   * tcon_ipc. The IPC tcon has the same lifetime as the session.   */ -static int -cifs_setup_ipc(struct cifs_ses *ses, struct smb3_fs_context *ctx) +struct cifs_tcon *cifs_setup_ipc(struct cifs_ses *ses, bool seal)  {  	int rc = 0, xid;  	struct cifs_tcon *tcon;  	char unc[SERVER_NAME_LENGTH + sizeof("//x/IPC$")] = {0}; -	bool seal = false;  	struct TCP_Server_Info *server = ses->server;  	/*  	 * If the mount request that resulted in the creation of the  	 * session requires encryption, force IPC to be encrypted too.  	 
*/ -	if (ctx->seal) { -		if (server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION) -			seal = true; -		else { -			cifs_server_dbg(VFS, -				 "IPC: server doesn't support encryption\n"); -			return -EOPNOTSUPP; -		} +	if (seal && !(server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION)) { +		cifs_server_dbg(VFS, "IPC: server doesn't support encryption\n"); +		return ERR_PTR(-EOPNOTSUPP);  	}  	/* no need to setup directory caching on IPC share, so pass in false */  	tcon = tcon_info_alloc(false, netfs_trace_tcon_ref_new_ipc);  	if (tcon == NULL) -		return -ENOMEM; +		return ERR_PTR(-ENOMEM);  	spin_lock(&server->srv_lock);  	scnprintf(unc, sizeof(unc), "\\\\%s\\IPC$", server->hostname); @@ -2057,13 +2045,13 @@ cifs_setup_ipc(struct cifs_ses *ses, struct smb3_fs_context *ctx)  	tcon->ses = ses;  	tcon->ipc = true;  	tcon->seal = seal; -	rc = server->ops->tree_connect(xid, ses, unc, tcon, ctx->local_nls); +	rc = server->ops->tree_connect(xid, ses, unc, tcon, ses->local_nls);  	free_xid(xid);  	if (rc) { -		cifs_server_dbg(VFS, "failed to connect to IPC (rc=%d)\n", rc); +		cifs_server_dbg(VFS | ONCE, "failed to connect to IPC (rc=%d)\n", rc);  		tconInfoFree(tcon, netfs_trace_tcon_ref_free_ipc_fail); -		goto out; +		return ERR_PTR(rc);  	}  	cifs_dbg(FYI, "IPC tcon rc=%d ipc tid=0x%x\n", rc, tcon->tid); @@ -2071,9 +2059,7 @@ cifs_setup_ipc(struct cifs_ses *ses, struct smb3_fs_context *ctx)  	spin_lock(&tcon->tc_lock);  	tcon->status = TID_GOOD;  	spin_unlock(&tcon->tc_lock); -	ses->tcon_ipc = tcon; -out: -	return rc; +	return tcon;  }  static struct cifs_ses * @@ -2347,6 +2333,7 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)  {  	struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)&server->dstaddr;  	struct sockaddr_in *addr = (struct sockaddr_in *)&server->dstaddr; +	struct cifs_tcon *ipc;  	struct cifs_ses *ses;  	unsigned int xid;  	int retries = 0; @@ -2525,7 +2512,12 @@ retry_new_session:  	list_add(&ses->smb_ses_list, &server->smb_ses_list);  	spin_unlock(&cifs_tcp_ses_lock); -	cifs_setup_ipc(ses, ctx); +	ipc = cifs_setup_ipc(ses, ctx->seal); +	spin_lock(&cifs_tcp_ses_lock); +	spin_lock(&ses->ses_lock); +	ses->tcon_ipc = !IS_ERR(ipc) ? 
ipc : NULL; +	spin_unlock(&ses->ses_lock); +	spin_unlock(&cifs_tcp_ses_lock);  	free_xid(xid); diff --git a/fs/smb/client/dfs_cache.c b/fs/smb/client/dfs_cache.c index 4dada26d56b5..f2ad0ccd08a7 100644 --- a/fs/smb/client/dfs_cache.c +++ b/fs/smb/client/dfs_cache.c @@ -1120,24 +1120,63 @@ static bool target_share_equal(struct cifs_tcon *tcon, const char *s1)  	return match;  } -static bool is_ses_good(struct cifs_ses *ses) +static bool is_ses_good(struct cifs_tcon *tcon, struct cifs_ses *ses)  {  	struct TCP_Server_Info *server = ses->server; -	struct cifs_tcon *tcon = ses->tcon_ipc; +	struct cifs_tcon *ipc = NULL;  	bool ret; +	spin_lock(&cifs_tcp_ses_lock);  	spin_lock(&ses->ses_lock);  	spin_lock(&ses->chan_lock); +  	ret = !cifs_chan_needs_reconnect(ses, server) && -		ses->ses_status == SES_GOOD && -		!tcon->need_reconnect; +		ses->ses_status == SES_GOOD; +  	spin_unlock(&ses->chan_lock); + +	if (!ret) +		goto out; + +	if (likely(ses->tcon_ipc)) { +		if (ses->tcon_ipc->need_reconnect) { +			ret = false; +			goto out; +		} +	} else { +		spin_unlock(&ses->ses_lock); +		spin_unlock(&cifs_tcp_ses_lock); + +		ipc = cifs_setup_ipc(ses, tcon->seal); + +		spin_lock(&cifs_tcp_ses_lock); +		spin_lock(&ses->ses_lock); +		if (!IS_ERR(ipc)) { +			if (!ses->tcon_ipc) { +				ses->tcon_ipc = ipc; +				ipc = NULL; +			} +		} else { +			ret = false; +			ipc = NULL; +		} +	} + +out:  	spin_unlock(&ses->ses_lock); +	spin_unlock(&cifs_tcp_ses_lock); +	if (ipc && server->ops->tree_disconnect) { +		unsigned int xid = get_xid(); + +		(void)server->ops->tree_disconnect(xid, ipc); +		_free_xid(xid); +	} +	tconInfoFree(ipc, netfs_trace_tcon_ref_free_ipc);  	return ret;  }  /* Refresh dfs referral of @ses */ -static void refresh_ses_referral(struct cifs_ses *ses) +static void refresh_ses_referral(struct cifs_tcon *tcon, struct cifs_ses *ses)  {  	struct cache_entry *ce;  	unsigned int xid; @@ -1153,7 +1192,7 @@ static void refresh_ses_referral(struct cifs_ses *ses)  	}  	ses = CIFS_DFS_ROOT_SES(ses); -	if (!is_ses_good(ses)) { +	if (!is_ses_good(tcon, ses)) {  		cifs_dbg(FYI, "%s: skip cache refresh due to disconnected ipc\n",  			 __func__);  		goto out; @@ -1241,7 +1280,7 @@ static void refresh_tcon_referral(struct cifs_tcon *tcon, bool force_refresh)  	up_read(&htable_rw_lock);  	ses = CIFS_DFS_ROOT_SES(ses); -	if (!is_ses_good(ses)) { +	if (!is_ses_good(tcon, ses)) {  		cifs_dbg(FYI, "%s: skip cache refresh due to disconnected ipc\n",  			 __func__);  		goto out; @@ -1309,7 +1348,7 @@ void dfs_cache_refresh(struct work_struct *work)  	tcon = container_of(work, struct cifs_tcon, dfs_cache_work.work);  	list_for_each_entry(ses, &tcon->dfs_ses_list, dlist) -		refresh_ses_referral(ses); +		refresh_ses_referral(tcon, ses);  	refresh_tcon_referral(tcon, false);  	queue_delayed_work(dfscache_wq, &tcon->dfs_cache_work, diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c index 0f9130ef2e7d..1e39f2165e42 100644 --- a/fs/smb/client/smb2ops.c +++ b/fs/smb/client/smb2ops.c @@ -2799,11 +2799,12 @@ smb2_query_info_compound(const unsigned int xid, struct cifs_tcon *tcon,  	struct cifs_fid fid;  	int rc;  	__le16 *utf16_path; -	struct cached_fid *cfid = NULL; +	struct cached_fid *cfid;  	int retries = 0, cur_sleep = 1;  replay_again:  	/* reinitialize for possible replay */ +	cfid = NULL;  	flags = CIFS_CP_CREATE_CLOSE_OP;  	oplock = SMB2_OPLOCK_LEVEL_NONE;  	server = cifs_pick_channel(ses); diff --git a/fs/smb/server/transport_ipc.c b/fs/smb/server/transport_ipc.c index 46f87fd1ce1c..2c08cccfa680 100644 --- 
a/fs/smb/server/transport_ipc.c +++ b/fs/smb/server/transport_ipc.c @@ -263,10 +263,16 @@ static void ipc_msg_handle_free(int handle)  static int handle_response(int type, void *payload, size_t sz)  { -	unsigned int handle = *(unsigned int *)payload; +	unsigned int handle;  	struct ipc_msg_table_entry *entry;  	int ret = 0; +	/* Prevent 4-byte read beyond declared payload size */ +	if (sz < sizeof(unsigned int)) +		return -EINVAL; + +	handle = *(unsigned int *)payload; +  	ipc_update_last_active();  	down_read(&ipc_msg_table_lock);  	hash_for_each_possible(ipc_msg_table, entry, ipc_table_hlist, handle) { diff --git a/fs/smb/server/transport_rdma.c b/fs/smb/server/transport_rdma.c index 89b02efdba0c..7d86553fcc7c 100644 --- a/fs/smb/server/transport_rdma.c +++ b/fs/smb/server/transport_rdma.c @@ -418,9 +418,6 @@ static struct smb_direct_transport *alloc_transport(struct rdma_cm_id *cm_id)  	sc->ib.dev = sc->rdma.cm_id->device; -	INIT_WORK(&sc->recv_io.posted.refill_work, -		  smb_direct_post_recv_credits); -	INIT_WORK(&sc->idle.immediate_work, smb_direct_send_immediate_work);  	INIT_DELAYED_WORK(&sc->idle.timer_work, smb_direct_idle_connection_timer);  	conn = ksmbd_conn_alloc(); @@ -469,6 +466,9 @@ static void free_transport(struct smb_direct_transport *t)  	disable_delayed_work_sync(&sc->idle.timer_work);  	disable_work_sync(&sc->idle.immediate_work); +	if (sc->rdma.cm_id) +		rdma_lock_handler(sc->rdma.cm_id); +  	if (sc->ib.qp) {  		ib_drain_qp(sc->ib.qp);  		sc->ib.qp = NULL; @@ -497,8 +497,10 @@ static void free_transport(struct smb_direct_transport *t)  		ib_free_cq(sc->ib.recv_cq);  	if (sc->ib.pd)  		ib_dealloc_pd(sc->ib.pd); -	if (sc->rdma.cm_id) +	if (sc->rdma.cm_id) { +		rdma_unlock_handler(sc->rdma.cm_id);  		rdma_destroy_id(sc->rdma.cm_id); +	}  	smb_direct_destroy_pools(sc);  	ksmbd_conn_free(KSMBD_TRANS(t)->conn); @@ -1727,10 +1729,10 @@ static int smb_direct_cm_handler(struct rdma_cm_id *cm_id,  	}  	case RDMA_CM_EVENT_DEVICE_REMOVAL:  	case RDMA_CM_EVENT_DISCONNECTED: { -		ib_drain_qp(sc->ib.qp); -  		sc->status = SMBDIRECT_SOCKET_DISCONNECTED;  		smb_direct_disconnect_rdma_work(&sc->disconnect_work); +		if (sc->ib.qp) +			ib_drain_qp(sc->ib.qp);  		break;  	}  	case RDMA_CM_EVENT_CONNECT_ERROR: { @@ -1904,7 +1906,6 @@ static int smb_direct_prepare_negotiation(struct smbdirect_socket *sc)  		goto out_err;  	} -	smb_direct_post_recv_credits(&sc->recv_io.posted.refill_work);  	return 0;  out_err:  	put_recvmsg(sc, recvmsg); @@ -2249,8 +2250,8 @@ static int smb_direct_prepare(struct ksmbd_transport *t)  		return -ECONNABORTED;  	ret = smb_direct_check_recvmsg(recvmsg); -	if (ret == -ECONNABORTED) -		goto out; +	if (ret) +		goto put;  	req = (struct smbdirect_negotiate_req *)recvmsg->packet;  	sp->max_recv_size = min_t(int, sp->max_recv_size, @@ -2265,14 +2266,38 @@ static int smb_direct_prepare(struct ksmbd_transport *t)  	sc->recv_io.credits.target = min_t(u16, sc->recv_io.credits.target, sp->recv_credit_max);  	sc->recv_io.credits.target = max_t(u16, sc->recv_io.credits.target, 1); -	ret = smb_direct_send_negotiate_response(sc, ret); -out: +put:  	spin_lock_irqsave(&sc->recv_io.reassembly.lock, flags);  	sc->recv_io.reassembly.queue_length--;  	list_del(&recvmsg->list);  	spin_unlock_irqrestore(&sc->recv_io.reassembly.lock, flags);  	put_recvmsg(sc, recvmsg); +	if (ret == -ECONNABORTED) +		return ret; + +	if (ret) +		goto respond; + +	/* +	 * We negotiated with success, so we need to refill the recv queue. 
+	 * We do that with sc->idle.immediate_work still being disabled +	 * via smbdirect_socket_init(), so that queue_work(sc->workqueue, +	 * &sc->idle.immediate_work) in smb_direct_post_recv_credits() +	 * is a no-op. +	 * +	 * The message that grants the credits to the client is +	 * the negotiate response. +	 */ +	INIT_WORK(&sc->recv_io.posted.refill_work, smb_direct_post_recv_credits); +	smb_direct_post_recv_credits(&sc->recv_io.posted.refill_work); +	if (unlikely(sc->first_error)) +		return sc->first_error; +	INIT_WORK(&sc->idle.immediate_work, smb_direct_send_immediate_work); + +respond: +	ret = smb_direct_send_negotiate_response(sc, ret); +  	return ret;  } diff --git a/fs/xfs/libxfs/xfs_rtgroup.h b/fs/xfs/libxfs/xfs_rtgroup.h index d36a6ae0abe5..d4fcf591e63d 100644 --- a/fs/xfs/libxfs/xfs_rtgroup.h +++ b/fs/xfs/libxfs/xfs_rtgroup.h @@ -50,6 +50,12 @@ struct xfs_rtgroup {  		uint8_t			*rtg_rsum_cache;  		struct xfs_open_zone	*rtg_open_zone;  	}; + +	/* +	 * Count of outstanding GC operations for zoned XFS.  Any RTG with a +	 * non-zero rtg_gccount will not be picked as new GC victim. +	 */ +	atomic_t		rtg_gccount;  };  /* diff --git a/fs/xfs/xfs_zone_alloc.c b/fs/xfs/xfs_zone_alloc.c index 23cdab4515bb..040402240807 100644 --- a/fs/xfs/xfs_zone_alloc.c +++ b/fs/xfs/xfs_zone_alloc.c @@ -246,6 +246,14 @@ xfs_zoned_map_extent(  	 * If a data write raced with this GC write, keep the existing data in  	 * the data fork, mark our newly written GC extent as reclaimable, then  	 * move on to the next extent. +	 * +	 * Note that this can also happen when racing with operations that do +	 * not actually invalidate the data, but just move it to a different +	 * inode (XFS_IOC_EXCHANGE_RANGE), or to a different offset inside the +	 * inode (FALLOC_FL_COLLAPSE_RANGE / FALLOC_FL_INSERT_RANGE).  If the +	 * data was just moved around, GC fails to free the zone, but the zone +	 * becomes a GC candidate again as soon as all previous GC I/O has +	 * finished and these blocks will be moved out eventually.  	 */  	if (old_startblock != NULLFSBLOCK &&  	    old_startblock != data.br_startblock) diff --git a/fs/xfs/xfs_zone_gc.c b/fs/xfs/xfs_zone_gc.c index 109877d9a6bf..4ade54445532 100644 --- a/fs/xfs/xfs_zone_gc.c +++ b/fs/xfs/xfs_zone_gc.c @@ -114,6 +114,8 @@ struct xfs_gc_bio {  	/* Open Zone being written to */  	struct xfs_open_zone		*oz; +	struct xfs_rtgroup		*victim_rtg; +  	/* Bio used for reads and writes, including the bvec used by it */  	struct bio_vec			bv;  	struct bio			bio;	/* must be last */ @@ -264,6 +266,7 @@ xfs_zone_gc_iter_init(  	iter->rec_count = 0;  	iter->rec_idx = 0;  	iter->victim_rtg = victim_rtg; +	atomic_inc(&victim_rtg->rtg_gccount);  }  /* @@ -362,6 +365,7 @@ xfs_zone_gc_query(  	return 0;  done: +	atomic_dec(&iter->victim_rtg->rtg_gccount);  	xfs_rtgroup_rele(iter->victim_rtg);  	iter->victim_rtg = NULL;  	return 0; @@ -451,6 +455,20 @@ xfs_zone_gc_pick_victim_from(  		if (!rtg)  			continue; +		/* +		 * If the zone is already undergoing GC, don't pick it again. +		 * +		 * This prevents us from picking one of the zones for which we +		 * already submitted GC I/O, but for which the remapping hasn't +		 * concluded yet.  This won't cause data corruption, but +		 * increases write amplification and slows down GC, so this is +		 * a bad thing. 
+		 */ +		if (atomic_read(&rtg->rtg_gccount)) { +			xfs_rtgroup_rele(rtg); +			continue; +		} +  		/* skip zones that are just waiting for a reset */  		if (rtg_rmap(rtg)->i_used_blocks == 0 ||  		    rtg_rmap(rtg)->i_used_blocks >= victim_used) { @@ -688,6 +706,9 @@ xfs_zone_gc_start_chunk(  	chunk->scratch = &data->scratch[data->scratch_idx];  	chunk->data = data;  	chunk->oz = oz; +	chunk->victim_rtg = iter->victim_rtg; +	atomic_inc(&chunk->victim_rtg->rtg_group.xg_active_ref); +	atomic_inc(&chunk->victim_rtg->rtg_gccount);  	bio->bi_iter.bi_sector = xfs_rtb_to_daddr(mp, chunk->old_startblock);  	bio->bi_end_io = xfs_zone_gc_end_io; @@ -710,6 +731,8 @@ static void  xfs_zone_gc_free_chunk(  	struct xfs_gc_bio	*chunk)  { +	atomic_dec(&chunk->victim_rtg->rtg_gccount); +	xfs_rtgroup_rele(chunk->victim_rtg);  	list_del(&chunk->entry);  	xfs_open_zone_put(chunk->oz);  	xfs_irele(chunk->ip); @@ -770,6 +793,10 @@ xfs_zone_gc_split_write(  	split_chunk->oz = chunk->oz;  	atomic_inc(&chunk->oz->oz_ref); +	split_chunk->victim_rtg = chunk->victim_rtg; +	atomic_inc(&chunk->victim_rtg->rtg_group.xg_active_ref); +	atomic_inc(&chunk->victim_rtg->rtg_gccount); +  	chunk->offset += split_len;  	chunk->len -= split_len;  	chunk->old_startblock += XFS_B_TO_FSB(data->mp, split_len); diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index 8a9a2e732a65..e04d56a5332e 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -832,7 +832,7 @@ defined(CONFIG_AUTOFDO_CLANG) || defined(CONFIG_PROPELLER_CLANG)  /* Required sections not related to debugging. */  #define ELF_DETAILS							\ -		.modinfo : { *(.modinfo) }				\ +		.modinfo : { *(.modinfo) . = ALIGN(8); }		\  		.comment 0 : { *(.comment) }				\  		.symtab 0 : { *(.symtab) }				\  		.strtab 0 : { *(.strtab) }				\ diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index 8e8d1cc8b06c..44c30183ecc3 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -341,15 +341,15 @@ enum req_op {  	/* write the zero filled sector many times */  	REQ_OP_WRITE_ZEROES	= (__force blk_opf_t)9,  	/* Open a zone */ -	REQ_OP_ZONE_OPEN	= (__force blk_opf_t)10, +	REQ_OP_ZONE_OPEN	= (__force blk_opf_t)11,  	/* Close a zone */ -	REQ_OP_ZONE_CLOSE	= (__force blk_opf_t)11, +	REQ_OP_ZONE_CLOSE	= (__force blk_opf_t)13,  	/* Transition a zone to full */ -	REQ_OP_ZONE_FINISH	= (__force blk_opf_t)13, +	REQ_OP_ZONE_FINISH	= (__force blk_opf_t)15,  	/* reset a zone write pointer */ -	REQ_OP_ZONE_RESET	= (__force blk_opf_t)15, +	REQ_OP_ZONE_RESET	= (__force blk_opf_t)17,  	/* reset all the zone present on the device */ -	REQ_OP_ZONE_RESET_ALL	= (__force blk_opf_t)17, +	REQ_OP_ZONE_RESET_ALL	= (__force blk_opf_t)19,  	/* Driver private requests */  	REQ_OP_DRV_IN		= (__force blk_opf_t)34, @@ -478,6 +478,7 @@ static inline bool op_is_zone_mgmt(enum req_op op)  {  	switch (op & REQ_OP_MASK) {  	case REQ_OP_ZONE_RESET: +	case REQ_OP_ZONE_RESET_ALL:  	case REQ_OP_ZONE_OPEN:  	case REQ_OP_ZONE_CLOSE:  	case REQ_OP_ZONE_FINISH: diff --git a/include/linux/fbcon.h b/include/linux/fbcon.h index 81f0e698acbf..f206370060e1 100644 --- a/include/linux/fbcon.h +++ b/include/linux/fbcon.h @@ -18,6 +18,7 @@ void fbcon_suspended(struct fb_info *info);  void fbcon_resumed(struct fb_info *info);  int fbcon_mode_deleted(struct fb_info *info,  		       struct fb_videomode *mode); +void fbcon_delete_modelist(struct list_head *head);  void fbcon_new_modelist(struct fb_info *info);  void fbcon_get_requirement(struct 
fb_info *info,  			   struct fb_blit_caps *caps); @@ -38,6 +39,7 @@ static inline void fbcon_suspended(struct fb_info *info) {}  static inline void fbcon_resumed(struct fb_info *info) {}  static inline int fbcon_mode_deleted(struct fb_info *info,  				     struct fb_videomode *mode) { return 0; } +static inline void fbcon_delete_modelist(struct list_head *head) {}  static inline void fbcon_new_modelist(struct fb_info *info) {}  static inline void fbcon_get_requirement(struct fb_info *info,  					 struct fb_blit_caps *caps) {} diff --git a/include/linux/regmap.h b/include/linux/regmap.h index 4e1ac1fbcec4..55343795644b 100644 --- a/include/linux/regmap.h +++ b/include/linux/regmap.h @@ -1643,7 +1643,7 @@ struct regmap_irq_chip_data;   * @status_invert: Inverted status register: cleared bits are active interrupts.   * @status_is_level: Status register is actuall signal level: Xor status   *		     register with previous value to get active interrupts. - * @wake_invert: Inverted wake register: cleared bits are wake enabled. + * @wake_invert: Inverted wake register: cleared bits are wake disabled.   * @type_in_mask: Use the mask registers for controlling irq type. Use this if   *		  the hardware provides separate bits for rising/falling edge   *		  or low/high level interrupts and they should be combined into diff --git a/include/linux/sched.h b/include/linux/sched.h index cbb7340c5866..b469878de25c 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -2407,12 +2407,12 @@ static inline void __migrate_enable(void) { }   * be defined in kernel/sched/core.c.   */  #ifndef INSTANTIATE_EXPORTED_MIGRATE_DISABLE -static inline void migrate_disable(void) +static __always_inline void migrate_disable(void)  {  	__migrate_disable();  } -static inline void migrate_enable(void) +static __always_inline void migrate_enable(void)  {  	__migrate_enable();  } diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h index 9ecc70baaca9..8d0e703bc929 100644 --- a/include/net/bluetooth/hci.h +++ b/include/net/bluetooth/hci.h @@ -434,6 +434,7 @@ enum {  	HCI_USER_CHANNEL,  	HCI_EXT_CONFIGURED,  	HCI_LE_ADV, +	HCI_LE_ADV_0,  	HCI_LE_PER_ADV,  	HCI_LE_SCAN,  	HCI_SSP_ENABLED, diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index 2924c2bf2a98..b8100dbfe5d7 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -244,6 +244,7 @@ struct adv_info {  	bool	enabled;  	bool	pending;  	bool	periodic; +	bool	periodic_enabled;  	__u8	mesh;  	__u8	instance;  	__u8	handle; diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h index 4bb0eaedda18..00e182a22720 100644 --- a/include/net/bluetooth/l2cap.h +++ b/include/net/bluetooth/l2cap.h @@ -38,8 +38,8 @@  #define L2CAP_DEFAULT_TX_WINDOW		63  #define L2CAP_DEFAULT_EXT_WINDOW	0x3FFF  #define L2CAP_DEFAULT_MAX_TX		3 -#define L2CAP_DEFAULT_RETRANS_TO	2    /* seconds */ -#define L2CAP_DEFAULT_MONITOR_TO	12   /* seconds */ +#define L2CAP_DEFAULT_RETRANS_TO	2000    /* 2 seconds */ +#define L2CAP_DEFAULT_MONITOR_TO	12000   /* 12 seconds */  #define L2CAP_DEFAULT_MAX_PDU_SIZE	1492    /* Sized for AMP packet */  #define L2CAP_DEFAULT_ACK_TO		200  #define L2CAP_DEFAULT_MAX_SDU_SIZE	0xFFFF diff --git a/include/net/bluetooth/mgmt.h b/include/net/bluetooth/mgmt.h index 74edea06985b..bca0333f1e99 100644 --- a/include/net/bluetooth/mgmt.h +++ b/include/net/bluetooth/mgmt.h @@ -853,7 +853,7 @@ struct mgmt_cp_set_mesh {  	__le16 window;  	__le16 period;  	__u8   num_ad_types; -	__u8   
ad_types[]; +	__u8   ad_types[] __counted_by(num_ad_types);  } __packed;  #define MGMT_SET_MESH_RECEIVER_SIZE	6 diff --git a/include/net/tcp.h b/include/net/tcp.h index 5ca230ed526a..ab20f549b8f9 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -370,7 +370,7 @@ void tcp_delack_timer_handler(struct sock *sk);  int tcp_ioctl(struct sock *sk, int cmd, int *karg);  enum skb_drop_reason tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb);  void tcp_rcv_established(struct sock *sk, struct sk_buff *skb); -void tcp_rcvbuf_grow(struct sock *sk); +void tcp_rcvbuf_grow(struct sock *sk, u32 newval);  void tcp_rcv_space_adjust(struct sock *sk);  int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);  void tcp_twsk_destructor(struct sock *sk); diff --git a/include/net/tls.h b/include/net/tls.h index 857340338b69..c7bcdb3afad7 100644 --- a/include/net/tls.h +++ b/include/net/tls.h @@ -451,25 +451,26 @@ static inline void tls_offload_rx_resync_request(struct sock *sk, __be32 seq)  /* Log all TLS record header TCP sequences in [seq, seq+len] */  static inline void -tls_offload_rx_resync_async_request_start(struct sock *sk, __be32 seq, u16 len) +tls_offload_rx_resync_async_request_start(struct tls_offload_resync_async *resync_async, +					  __be32 seq, u16 len)  { -	struct tls_context *tls_ctx = tls_get_ctx(sk); -	struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx); - -	atomic64_set(&rx_ctx->resync_async->req, ((u64)ntohl(seq) << 32) | +	atomic64_set(&resync_async->req, ((u64)ntohl(seq) << 32) |  		     ((u64)len << 16) | RESYNC_REQ | RESYNC_REQ_ASYNC); -	rx_ctx->resync_async->loglen = 0; -	rx_ctx->resync_async->rcd_delta = 0; +	resync_async->loglen = 0; +	resync_async->rcd_delta = 0;  }  static inline void -tls_offload_rx_resync_async_request_end(struct sock *sk, __be32 seq) +tls_offload_rx_resync_async_request_end(struct tls_offload_resync_async *resync_async, +					__be32 seq)  { -	struct tls_context *tls_ctx = tls_get_ctx(sk); -	struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx); +	atomic64_set(&resync_async->req, ((u64)ntohl(seq) << 32) | RESYNC_REQ); +} -	atomic64_set(&rx_ctx->resync_async->req, -		     ((u64)ntohl(seq) << 32) | RESYNC_REQ); +static inline void +tls_offload_rx_resync_async_request_cancel(struct tls_offload_resync_async *resync_async) +{ +	atomic64_set(&resync_async->req, 0);  }  static inline void diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h index 6d6500148c4b..993008cdea65 100644 --- a/include/scsi/scsi_device.h +++ b/include/scsi/scsi_device.h @@ -252,8 +252,8 @@ struct scsi_device {  	unsigned int queue_stopped;	/* request queue is quiesced */  	bool offline_already;		/* Device offline message logged */ -	unsigned int ua_new_media_ctr;	/* Counter for New Media UNIT ATTENTIONs */ -	unsigned int ua_por_ctr;	/* Counter for Power On / Reset UAs */ +	atomic_t ua_new_media_ctr;	/* Counter for New Media UNIT ATTENTIONs */ +	atomic_t ua_por_ctr;		/* Counter for Power On / Reset UAs */  	atomic_t disk_events_disable_depth; /* disable depth for disk events */ @@ -693,10 +693,8 @@ static inline int scsi_device_busy(struct scsi_device *sdev)  }  /* Macros to access the UNIT ATTENTION counters */ -#define scsi_get_ua_new_media_ctr(sdev) \ -	((const unsigned int)(sdev->ua_new_media_ctr)) -#define scsi_get_ua_por_ctr(sdev) \ -	((const unsigned int)(sdev->ua_por_ctr)) +#define scsi_get_ua_new_media_ctr(sdev)	atomic_read(&sdev->ua_new_media_ctr) +#define scsi_get_ua_por_ctr(sdev)	atomic_read(&sdev->ua_por_ctr)  
#define MODULE_ALIAS_SCSI_DEVICE(type) \  	MODULE_ALIAS("scsi:t-" __stringify(type) "*") diff --git a/include/trace/events/tcp.h b/include/trace/events/tcp.h index 9d2c36c6a0ed..6757233bd064 100644 --- a/include/trace/events/tcp.h +++ b/include/trace/events/tcp.h @@ -218,6 +218,9 @@ TRACE_EVENT(tcp_rcvbuf_grow,  		__field(__u32, space)  		__field(__u32, ooo_space)  		__field(__u32, rcvbuf) +		__field(__u32, rcv_ssthresh) +		__field(__u32, window_clamp) +		__field(__u32, rcv_wnd)  		__field(__u8, scaling_ratio)  		__field(__u16, sport)  		__field(__u16, dport) @@ -245,6 +248,9 @@ TRACE_EVENT(tcp_rcvbuf_grow,  				     tp->rcv_nxt;  		__entry->rcvbuf = sk->sk_rcvbuf; +		__entry->rcv_ssthresh = tp->rcv_ssthresh; +		__entry->window_clamp = tp->window_clamp; +		__entry->rcv_wnd = tp->rcv_wnd;  		__entry->scaling_ratio = tp->scaling_ratio;  		__entry->sport = ntohs(inet->inet_sport);  		__entry->dport = ntohs(inet->inet_dport); @@ -264,11 +270,14 @@ TRACE_EVENT(tcp_rcvbuf_grow,  	),  	TP_printk("time=%u rtt_us=%u copied=%u inq=%u space=%u ooo=%u scaling_ratio=%u rcvbuf=%u " +		  "rcv_ssthresh=%u window_clamp=%u rcv_wnd=%u "  		  "family=%s sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 "  		  "saddrv6=%pI6c daddrv6=%pI6c skaddr=%p sock_cookie=%llx",  		  __entry->time, __entry->rtt_us, __entry->copied,  		  __entry->inq, __entry->space, __entry->ooo_space,  		  __entry->scaling_ratio, __entry->rcvbuf, +		  __entry->rcv_ssthresh, __entry->window_clamp, +		  __entry->rcv_wnd,  		  show_family_name(__entry->family),  		  __entry->sport, __entry->dport,  		  __entry->saddr, __entry->daddr, diff --git a/include/uapi/linux/fb.h b/include/uapi/linux/fb.h index cde8f173f566..22acaaec7b1c 100644 --- a/include/uapi/linux/fb.h +++ b/include/uapi/linux/fb.h @@ -319,7 +319,7 @@ enum {  #define FB_VBLANK_HAVE_VCOUNT	0x020	/* the vcount field is valid */  #define FB_VBLANK_HAVE_HCOUNT	0x040	/* the hcount field is valid */  #define FB_VBLANK_VSYNCING	0x080	/* currently in a vsync */ -#define FB_VBLANK_HAVE_VSYNC	0x100	/* verical syncs can be detected */ +#define FB_VBLANK_HAVE_VSYNC	0x100	/* vertical syncs can be detected */  struct fb_vblank {  	__u32 flags;			/* FB_VBLANK flags */ diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c index 8eb117c52817..eb25e70e0bdc 100644 --- a/kernel/bpf/helpers.c +++ b/kernel/bpf/helpers.c @@ -4345,6 +4345,7 @@ BTF_ID_FLAGS(func, bpf_iter_kmem_cache_next, KF_ITER_NEXT | KF_RET_NULL | KF_SLE  BTF_ID_FLAGS(func, bpf_iter_kmem_cache_destroy, KF_ITER_DESTROY | KF_SLEEPABLE)  BTF_ID_FLAGS(func, bpf_local_irq_save)  BTF_ID_FLAGS(func, bpf_local_irq_restore) +#ifdef CONFIG_BPF_EVENTS  BTF_ID_FLAGS(func, bpf_probe_read_user_dynptr)  BTF_ID_FLAGS(func, bpf_probe_read_kernel_dynptr)  BTF_ID_FLAGS(func, bpf_probe_read_user_str_dynptr) @@ -4353,6 +4354,7 @@ BTF_ID_FLAGS(func, bpf_copy_from_user_dynptr, KF_SLEEPABLE)  BTF_ID_FLAGS(func, bpf_copy_from_user_str_dynptr, KF_SLEEPABLE)  BTF_ID_FLAGS(func, bpf_copy_from_user_task_dynptr, KF_SLEEPABLE | KF_TRUSTED_ARGS)  BTF_ID_FLAGS(func, bpf_copy_from_user_task_str_dynptr, KF_SLEEPABLE | KF_TRUSTED_ARGS) +#endif  #ifdef CONFIG_DMA_SHARED_BUFFER  BTF_ID_FLAGS(func, bpf_iter_dmabuf_new, KF_ITER_NEW | KF_SLEEPABLE)  BTF_ID_FLAGS(func, bpf_iter_dmabuf_next, KF_ITER_NEXT | KF_RET_NULL | KF_SLEEPABLE) diff --git a/kernel/bpf/ringbuf.c b/kernel/bpf/ringbuf.c index 719d73299397..d706c4b7f532 100644 --- a/kernel/bpf/ringbuf.c +++ b/kernel/bpf/ringbuf.c @@ -216,6 +216,8 @@ static struct bpf_map *ringbuf_map_alloc(union bpf_attr *attr)  static void 
bpf_ringbuf_free(struct bpf_ringbuf *rb)  { +	irq_work_sync(&rb->work); +  	/* copy pages pointer and nr_pages to local variable, as we are going  	 * to unmap rb itself with vunmap() below  	 */ diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c index 14e85ff23551..53166ef86ba4 100644 --- a/kernel/power/hibernate.c +++ b/kernel/power/hibernate.c @@ -706,7 +706,6 @@ static void power_down(void)  #ifdef CONFIG_SUSPEND  	if (hibernation_mode == HIBERNATION_SUSPEND) { -		pm_restore_gfp_mask();  		error = suspend_devices_and_enter(mem_sleep_current);  		if (!error)  			goto exit; @@ -746,9 +745,6 @@ static void power_down(void)  		cpu_relax();  exit: -	/* Match the pm_restore_gfp_mask() call in hibernate(). */ -	pm_restrict_gfp_mask(); -  	/* Restore swap signature. */  	error = swsusp_unmark();  	if (error) diff --git a/kernel/power/main.c b/kernel/power/main.c index 3cf2d7e72567..549f51ca3a1e 100644 --- a/kernel/power/main.c +++ b/kernel/power/main.c @@ -31,23 +31,35 @@   * held, unless the suspend/hibernate code is guaranteed not to run in parallel   * with that modification).   */ +static unsigned int saved_gfp_count;  static gfp_t saved_gfp_mask;  void pm_restore_gfp_mask(void)  {  	WARN_ON(!mutex_is_locked(&system_transition_mutex)); -	if (saved_gfp_mask) { -		gfp_allowed_mask = saved_gfp_mask; -		saved_gfp_mask = 0; -	} + +	if (WARN_ON(!saved_gfp_count) || --saved_gfp_count) +		return; + +	gfp_allowed_mask = saved_gfp_mask; +	saved_gfp_mask = 0; + +	pm_pr_dbg("GFP mask restored\n");  }  void pm_restrict_gfp_mask(void)  {  	WARN_ON(!mutex_is_locked(&system_transition_mutex)); -	WARN_ON(saved_gfp_mask); + +	if (saved_gfp_count++) { +		WARN_ON((saved_gfp_mask & ~(__GFP_IO | __GFP_FS)) != gfp_allowed_mask); +		return; +	} +  	saved_gfp_mask = gfp_allowed_mask;  	gfp_allowed_mask &= ~(__GFP_IO | __GFP_FS); + +	pm_pr_dbg("GFP mask restricted\n");  }  unsigned int lock_system_sleep(void) diff --git a/kernel/power/process.c b/kernel/power/process.c index 8ff68ebaa1e0..dc0dfc349f22 100644 --- a/kernel/power/process.c +++ b/kernel/power/process.c @@ -132,6 +132,7 @@ int freeze_processes(void)  	if (!pm_freezing)  		static_branch_inc(&freezer_active); +	pm_wakeup_clear(0);  	pm_freezing = true;  	error = try_to_freeze_tasks(true);  	if (!error) diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c index 4bb4686c1c08..b4ca17c2fecf 100644 --- a/kernel/power/suspend.c +++ b/kernel/power/suspend.c @@ -595,7 +595,6 @@ static int enter_state(suspend_state_t state)  	}  	pm_pr_dbg("Preparing system for sleep (%s)\n", mem_sleep_labels[state]); -	pm_wakeup_clear(0);  	pm_suspend_clear_flags();  	error = suspend_prepare(state);  	if (error) diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c index 2b0e88206d07..ecb251e883ea 100644 --- a/kernel/sched/ext.c +++ b/kernel/sched/ext.c @@ -67,8 +67,19 @@ static unsigned long scx_watchdog_timestamp = INITIAL_JIFFIES;  static struct delayed_work scx_watchdog_work; -/* for %SCX_KICK_WAIT */ -static unsigned long __percpu *scx_kick_cpus_pnt_seqs; +/* + * For %SCX_KICK_WAIT: Each CPU has a pointer to an array of pick_task sequence + * numbers. The arrays are allocated with kvzalloc() as size can exceed percpu + * allocator limits on large machines. O(nr_cpu_ids^2) allocation, allocated + * lazily when enabling and freed when disabling to avoid waste when sched_ext + * isn't active. 
+ */ +struct scx_kick_pseqs { +	struct rcu_head		rcu; +	unsigned long		seqs[]; +}; + +static DEFINE_PER_CPU(struct scx_kick_pseqs __rcu *, scx_kick_pseqs);  /*   * Direct dispatch marker. @@ -780,13 +791,23 @@ static void schedule_deferred(struct rq *rq)  	if (rq->scx.flags & SCX_RQ_IN_WAKEUP)  		return; +	/* Don't do anything if there already is a deferred operation. */ +	if (rq->scx.flags & SCX_RQ_BAL_CB_PENDING) +		return; +  	/*  	 * If in balance, the balance callbacks will be called before rq lock is  	 * released. Schedule one. +	 * +	 * +	 * We can't directly insert the callback into the +	 * rq's list: The call can drop its lock and make the pending balance +	 * callback visible to unrelated code paths that call rq_pin_lock(). +	 * +	 * Just let balance_one() know that it must do it itself.  	 */  	if (rq->scx.flags & SCX_RQ_IN_BALANCE) { -		queue_balance_callback(rq, &rq->scx.deferred_bal_cb, -				       deferred_bal_cb_workfn); +		rq->scx.flags |= SCX_RQ_BAL_CB_PENDING;  		return;  	} @@ -2003,6 +2024,19 @@ static void flush_dispatch_buf(struct scx_sched *sch, struct rq *rq)  	dspc->cursor = 0;  } +static inline void maybe_queue_balance_callback(struct rq *rq) +{ +	lockdep_assert_rq_held(rq); + +	if (!(rq->scx.flags & SCX_RQ_BAL_CB_PENDING)) +		return; + +	queue_balance_callback(rq, &rq->scx.deferred_bal_cb, +				deferred_bal_cb_workfn); + +	rq->scx.flags &= ~SCX_RQ_BAL_CB_PENDING; +} +  static int balance_one(struct rq *rq, struct task_struct *prev)  {  	struct scx_sched *sch = scx_root; @@ -2150,6 +2184,8 @@ static int balance_scx(struct rq *rq, struct task_struct *prev,  #endif  	rq_repin_lock(rq, rf); +	maybe_queue_balance_callback(rq); +  	return ret;  } @@ -3471,7 +3507,9 @@ static void scx_sched_free_rcu_work(struct work_struct *work)  	struct scx_dispatch_q *dsq;  	int node; +	irq_work_sync(&sch->error_irq_work);  	kthread_stop(sch->helper->task); +  	free_percpu(sch->pcpu);  	for_each_node_state(node, N_POSSIBLE) @@ -3850,6 +3888,27 @@ static const char *scx_exit_reason(enum scx_exit_kind kind)  	}  } +static void free_kick_pseqs_rcu(struct rcu_head *rcu) +{ +	struct scx_kick_pseqs *pseqs = container_of(rcu, struct scx_kick_pseqs, rcu); + +	kvfree(pseqs); +} + +static void free_kick_pseqs(void) +{ +	int cpu; + +	for_each_possible_cpu(cpu) { +		struct scx_kick_pseqs **pseqs = per_cpu_ptr(&scx_kick_pseqs, cpu); +		struct scx_kick_pseqs *to_free; + +		to_free = rcu_replace_pointer(*pseqs, NULL, true); +		if (to_free) +			call_rcu(&to_free->rcu, free_kick_pseqs_rcu); +	} +} +  static void scx_disable_workfn(struct kthread_work *work)  {  	struct scx_sched *sch = container_of(work, struct scx_sched, disable_work); @@ -3986,6 +4045,7 @@ static void scx_disable_workfn(struct kthread_work *work)  	free_percpu(scx_dsp_ctx);  	scx_dsp_ctx = NULL;  	scx_dsp_max_batch = 0; +	free_kick_pseqs();  	mutex_unlock(&scx_enable_mutex); @@ -4348,6 +4408,33 @@ static void scx_vexit(struct scx_sched *sch,  	irq_work_queue(&sch->error_irq_work);  } +static int alloc_kick_pseqs(void) +{ +	int cpu; + +	/* +	 * Allocate per-CPU arrays sized by nr_cpu_ids. Use kvzalloc as size +	 * can exceed percpu allocator limits on large machines. 
+	 */ +	for_each_possible_cpu(cpu) { +		struct scx_kick_pseqs **pseqs = per_cpu_ptr(&scx_kick_pseqs, cpu); +		struct scx_kick_pseqs *new_pseqs; + +		WARN_ON_ONCE(rcu_access_pointer(*pseqs)); + +		new_pseqs = kvzalloc_node(struct_size(new_pseqs, seqs, nr_cpu_ids), +					  GFP_KERNEL, cpu_to_node(cpu)); +		if (!new_pseqs) { +			free_kick_pseqs(); +			return -ENOMEM; +		} + +		rcu_assign_pointer(*pseqs, new_pseqs); +	} + +	return 0; +} +  static struct scx_sched *scx_alloc_and_add_sched(struct sched_ext_ops *ops)  {  	struct scx_sched *sch; @@ -4495,10 +4582,14 @@ static int scx_enable(struct sched_ext_ops *ops, struct bpf_link *link)  		goto err_unlock;  	} +	ret = alloc_kick_pseqs(); +	if (ret) +		goto err_unlock; +  	sch = scx_alloc_and_add_sched(ops);  	if (IS_ERR(sch)) {  		ret = PTR_ERR(sch); -		goto err_unlock; +		goto err_free_pseqs;  	}  	/* @@ -4701,6 +4792,8 @@ static int scx_enable(struct sched_ext_ops *ops, struct bpf_link *link)  	return 0; +err_free_pseqs: +	free_kick_pseqs();  err_unlock:  	mutex_unlock(&scx_enable_mutex);  	return ret; @@ -5082,10 +5175,18 @@ static void kick_cpus_irq_workfn(struct irq_work *irq_work)  {  	struct rq *this_rq = this_rq();  	struct scx_rq *this_scx = &this_rq->scx; -	unsigned long *pseqs = this_cpu_ptr(scx_kick_cpus_pnt_seqs); +	struct scx_kick_pseqs __rcu *pseqs_pcpu = __this_cpu_read(scx_kick_pseqs);  	bool should_wait = false; +	unsigned long *pseqs;  	s32 cpu; +	if (unlikely(!pseqs_pcpu)) { +		pr_warn_once("kick_cpus_irq_workfn() called with NULL scx_kick_pseqs"); +		return; +	} + +	pseqs = rcu_dereference_bh(pseqs_pcpu)->seqs; +  	for_each_cpu(cpu, this_scx->cpus_to_kick) {  		should_wait |= kick_one_cpu(cpu, this_rq, pseqs);  		cpumask_clear_cpu(cpu, this_scx->cpus_to_kick); @@ -5208,11 +5309,6 @@ void __init init_sched_ext_class(void)  	scx_idle_init_masks(); -	scx_kick_cpus_pnt_seqs = -		__alloc_percpu(sizeof(scx_kick_cpus_pnt_seqs[0]) * nr_cpu_ids, -			       __alignof__(scx_kick_cpus_pnt_seqs[0])); -	BUG_ON(!scx_kick_cpus_pnt_seqs); -  	for_each_possible_cpu(cpu) {  		struct rq *rq = cpu_rq(cpu);  		int  n = cpu_to_node(cpu); @@ -5688,8 +5784,8 @@ BTF_KFUNCS_START(scx_kfunc_ids_dispatch)  BTF_ID_FLAGS(func, scx_bpf_dispatch_nr_slots)  BTF_ID_FLAGS(func, scx_bpf_dispatch_cancel)  BTF_ID_FLAGS(func, scx_bpf_dsq_move_to_local) -BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_slice) -BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_vtime) +BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_slice, KF_RCU) +BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_vtime, KF_RCU)  BTF_ID_FLAGS(func, scx_bpf_dsq_move, KF_RCU)  BTF_ID_FLAGS(func, scx_bpf_dsq_move_vtime, KF_RCU)  BTF_KFUNCS_END(scx_kfunc_ids_dispatch) @@ -5820,8 +5916,8 @@ __bpf_kfunc_end_defs();  BTF_KFUNCS_START(scx_kfunc_ids_unlocked)  BTF_ID_FLAGS(func, scx_bpf_create_dsq, KF_SLEEPABLE) -BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_slice) -BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_vtime) +BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_slice, KF_RCU) +BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_vtime, KF_RCU)  BTF_ID_FLAGS(func, scx_bpf_dsq_move, KF_RCU)  BTF_ID_FLAGS(func, scx_bpf_dsq_move_vtime, KF_RCU)  BTF_KFUNCS_END(scx_kfunc_ids_unlocked) diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 361f9101cef9..adfb6e3409d7 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -784,6 +784,7 @@ enum scx_rq_flags {  	SCX_RQ_BAL_KEEP		= 1 << 3, /* balance decided to keep current */  	SCX_RQ_BYPASSING	= 1 << 4,  	SCX_RQ_CLK_VALID	= 1 << 5, /* RQ clock is fresh and valid */ +	SCX_RQ_BAL_CB_PENDING	= 1 << 6, /* must queue a 
cb after dispatching */  	SCX_RQ_IN_WAKEUP	= 1 << 16,  	SCX_RQ_IN_BALANCE	= 1 << 17, diff --git a/lib/Kconfig.kmsan b/lib/Kconfig.kmsan index 7251b6b59e69..cae1ddcc18e1 100644 --- a/lib/Kconfig.kmsan +++ b/lib/Kconfig.kmsan @@ -3,7 +3,7 @@ config HAVE_ARCH_KMSAN  	bool  config HAVE_KMSAN_COMPILER -	def_bool CC_IS_CLANG +	def_bool $(cc-option,-fsanitize=kernel-memory)  config KMSAN  	bool "KMSAN: detector of uninitialized values use" diff --git a/lib/kunit/kunit-test.c b/lib/kunit/kunit-test.c index 8c01eabd4eaf..63130a48e237 100644 --- a/lib/kunit/kunit-test.c +++ b/lib/kunit/kunit-test.c @@ -739,7 +739,7 @@ static struct kunit_case kunit_current_test_cases[] = {  static void test_dev_action(void *priv)  { -	*(void **)priv = (void *)1; +	*(long *)priv = 1;  }  static void kunit_device_test(struct kunit *test) diff --git a/lib/kunit/test.c b/lib/kunit/test.c index bb66ea1a3eac..62eb529824c6 100644 --- a/lib/kunit/test.c +++ b/lib/kunit/test.c @@ -745,7 +745,8 @@ int kunit_run_tests(struct kunit_suite *suite)  					.param_index = ++test.param_index,  					.parent = &test,  				}; -				kunit_init_test(¶m_test, test_case->name, test_case->log); +				kunit_init_test(¶m_test, test_case->name, NULL); +				param_test.log = test_case->log;  				kunit_run_case_catch_errors(suite, test_case, ¶m_test);  				if (param_desc[0] == '\0') { diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c index c84420cb410d..a662408ad867 100644 --- a/net/batman-adv/originator.c +++ b/net/batman-adv/originator.c @@ -763,11 +763,16 @@ int batadv_hardif_neigh_dump(struct sk_buff *msg, struct netlink_callback *cb)  	bat_priv = netdev_priv(mesh_iface);  	primary_if = batadv_primary_if_get_selected(bat_priv); -	if (!primary_if || primary_if->if_status != BATADV_IF_ACTIVE) { +	if (!primary_if) {  		ret = -ENOENT;  		goto out_put_mesh_iface;  	} +	if (primary_if->if_status != BATADV_IF_ACTIVE) { +		ret = -ENOENT; +		goto out_put_primary_if; +	} +  	hard_iface = batadv_netlink_get_hardif(bat_priv, cb);  	if (IS_ERR(hard_iface) && PTR_ERR(hard_iface) != -ENONET) {  		ret = PTR_ERR(hard_iface); @@ -1327,11 +1332,16 @@ int batadv_orig_dump(struct sk_buff *msg, struct netlink_callback *cb)  	bat_priv = netdev_priv(mesh_iface);  	primary_if = batadv_primary_if_get_selected(bat_priv); -	if (!primary_if || primary_if->if_status != BATADV_IF_ACTIVE) { +	if (!primary_if) {  		ret = -ENOENT;  		goto out_put_mesh_iface;  	} +	if (primary_if->if_status != BATADV_IF_ACTIVE) { +		ret = -ENOENT; +		goto out_put_primary_if; +	} +  	hard_iface = batadv_netlink_get_hardif(bat_priv, cb);  	if (IS_ERR(hard_iface) && PTR_ERR(hard_iface) != -ENONET) {  		ret = PTR_ERR(hard_iface); diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c index 111f0e37b672..c5dedf39a129 100644 --- a/net/bluetooth/hci_conn.c +++ b/net/bluetooth/hci_conn.c @@ -843,6 +843,13 @@ static void bis_cleanup(struct hci_conn *conn)  		if (bis)  			return; +		bis = hci_conn_hash_lookup_big_state(hdev, +						     conn->iso_qos.bcast.big, +						     BT_OPEN, +						     HCI_ROLE_MASTER); +		if (bis) +			return; +  		hci_le_terminate_big(hdev, conn);  	} else {  		hci_le_big_terminate(hdev, conn->iso_qos.bcast.big, diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index d790b0d4eb9a..d37db364acf7 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -1607,8 +1607,10 @@ static u8 hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev, void *data,  		hci_dev_set_flag(hdev, HCI_LE_ADV); -		if (adv && !adv->periodic) +		if 
(adv)  			adv->enabled = true; +		else if (!set->handle) +			hci_dev_set_flag(hdev, HCI_LE_ADV_0);  		conn = hci_lookup_le_connect(hdev);  		if (conn) @@ -1619,6 +1621,8 @@ static u8 hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev, void *data,  		if (cp->num_of_sets) {  			if (adv)  				adv->enabled = false; +			else if (!set->handle) +				hci_dev_clear_flag(hdev, HCI_LE_ADV_0);  			/* If just one instance was disabled check if there are  			 * any other instance enabled before clearing HCI_LE_ADV @@ -3959,8 +3963,11 @@ static u8 hci_cc_le_set_per_adv_enable(struct hci_dev *hdev, void *data,  		hci_dev_set_flag(hdev, HCI_LE_PER_ADV);  		if (adv) -			adv->enabled = true; +			adv->periodic_enabled = true;  	} else { +		if (adv) +			adv->periodic_enabled = false; +  		/* If just one instance was disabled check if there are  		 * any other instance enabled before clearing HCI_LE_PER_ADV.  		 * The current periodic adv instance will be marked as diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c index eefdb6134ca5..73fc41b68b68 100644 --- a/net/bluetooth/hci_sync.c +++ b/net/bluetooth/hci_sync.c @@ -863,11 +863,17 @@ bool hci_cmd_sync_dequeue_once(struct hci_dev *hdev,  {  	struct hci_cmd_sync_work_entry *entry; -	entry = hci_cmd_sync_lookup_entry(hdev, func, data, destroy); -	if (!entry) +	mutex_lock(&hdev->cmd_sync_work_lock); + +	entry = _hci_cmd_sync_lookup_entry(hdev, func, data, destroy); +	if (!entry) { +		mutex_unlock(&hdev->cmd_sync_work_lock);  		return false; +	} -	hci_cmd_sync_cancel_entry(hdev, entry); +	_hci_cmd_sync_cancel_entry(hdev, entry, -ECANCELED); + +	mutex_unlock(&hdev->cmd_sync_work_lock);  	return true;  } @@ -1601,7 +1607,7 @@ int hci_disable_per_advertising_sync(struct hci_dev *hdev, u8 instance)  	/* If periodic advertising already disabled there is nothing to do. */  	adv = hci_find_adv_instance(hdev, instance); -	if (!adv || !adv->periodic || !adv->enabled) +	if (!adv || !adv->periodic_enabled)  		return 0;  	memset(&cp, 0, sizeof(cp)); @@ -1666,7 +1672,7 @@ static int hci_enable_per_advertising_sync(struct hci_dev *hdev, u8 instance)  	/* If periodic advertising already enabled there is nothing to do. */  	adv = hci_find_adv_instance(hdev, instance); -	if (adv && adv->periodic && adv->enabled) +	if (adv && adv->periodic_enabled)  		return 0;  	memset(&cp, 0, sizeof(cp)); @@ -2600,9 +2606,8 @@ static int hci_resume_advertising_sync(struct hci_dev *hdev)  		/* If current advertising instance is set to instance 0x00  		 * then we need to re-enable it.  		 
*/ -		if (!hdev->cur_adv_instance) -			err = hci_enable_ext_advertising_sync(hdev, -							      hdev->cur_adv_instance); +		if (hci_dev_test_and_clear_flag(hdev, HCI_LE_ADV_0)) +			err = hci_enable_ext_advertising_sync(hdev, 0x00);  	} else {  		/* Schedule for most recent instance to be restarted and begin  		 * the software rotation loop diff --git a/net/bluetooth/iso.c b/net/bluetooth/iso.c index 9b263d061e05..3d98cb6291da 100644 --- a/net/bluetooth/iso.c +++ b/net/bluetooth/iso.c @@ -2032,7 +2032,7 @@ static void iso_conn_ready(struct iso_conn *conn)  		 */  		if (!bacmp(&hcon->dst, BDADDR_ANY)) {  			bacpy(&hcon->dst, &iso_pi(parent)->dst); -			hcon->dst_type = iso_pi(parent)->dst_type; +			hcon->dst_type = le_addr_type(iso_pi(parent)->dst_type);  		}  		if (test_bit(HCI_CONN_PA_SYNC, &hcon->flags)) { @@ -2046,7 +2046,13 @@ static void iso_conn_ready(struct iso_conn *conn)  		}  		bacpy(&iso_pi(sk)->dst, &hcon->dst); -		iso_pi(sk)->dst_type = hcon->dst_type; + +		/* Convert from HCI to three-value type */ +		if (hcon->dst_type == ADDR_LE_DEV_PUBLIC) +			iso_pi(sk)->dst_type = BDADDR_LE_PUBLIC; +		else +			iso_pi(sk)->dst_type = BDADDR_LE_RANDOM; +  		iso_pi(sk)->sync_handle = iso_pi(parent)->sync_handle;  		memcpy(iso_pi(sk)->base, iso_pi(parent)->base, iso_pi(parent)->base_len);  		iso_pi(sk)->base_len = iso_pi(parent)->base_len; diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c index 805c752ac0a9..d08320380ad6 100644 --- a/net/bluetooth/l2cap_core.c +++ b/net/bluetooth/l2cap_core.c @@ -282,7 +282,7 @@ static void __set_retrans_timer(struct l2cap_chan *chan)  	if (!delayed_work_pending(&chan->monitor_timer) &&  	    chan->retrans_timeout) {  		l2cap_set_timer(chan, &chan->retrans_timer, -				secs_to_jiffies(chan->retrans_timeout)); +				msecs_to_jiffies(chan->retrans_timeout));  	}  } @@ -291,7 +291,7 @@ static void __set_monitor_timer(struct l2cap_chan *chan)  	__clear_retrans_timer(chan);  	if (chan->monitor_timeout) {  		l2cap_set_timer(chan, &chan->monitor_timer, -				secs_to_jiffies(chan->monitor_timeout)); +				msecs_to_jiffies(chan->monitor_timeout));  	}  } diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index a3d16eece0d2..24e335e3a727 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@ -2175,19 +2175,24 @@ static void set_mesh_complete(struct hci_dev *hdev, void *data, int err)  	sk = cmd->sk;  	if (status) { +		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, +				status);  		mgmt_pending_foreach(MGMT_OP_SET_MESH_RECEIVER, hdev, true,  				     cmd_status_rsp, &status); -		return; +		goto done;  	} -	mgmt_pending_remove(cmd);  	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, 0, NULL, 0); + +done: +	mgmt_pending_free(cmd);  }  static int set_mesh_sync(struct hci_dev *hdev, void *data)  {  	struct mgmt_pending_cmd *cmd = data; -	struct mgmt_cp_set_mesh cp; +	DEFINE_FLEX(struct mgmt_cp_set_mesh, cp, ad_types, num_ad_types, +		    sizeof(hdev->mesh_ad_types));  	size_t len;  	mutex_lock(&hdev->mgmt_pending_lock); @@ -2197,27 +2202,26 @@ static int set_mesh_sync(struct hci_dev *hdev, void *data)  		return -ECANCELED;  	} -	memcpy(&cp, cmd->param, sizeof(cp)); +	len = cmd->param_len; +	memcpy(cp, cmd->param, min(__struct_size(cp), len));  	mutex_unlock(&hdev->mgmt_pending_lock); -	len = cmd->param_len; -  	memset(hdev->mesh_ad_types, 0, sizeof(hdev->mesh_ad_types)); -	if (cp.enable) +	if (cp->enable)  		hci_dev_set_flag(hdev, HCI_MESH);  	else  		hci_dev_clear_flag(hdev, HCI_MESH); -	hdev->le_scan_interval = 
__le16_to_cpu(cp.period); -	hdev->le_scan_window = __le16_to_cpu(cp.window); +	hdev->le_scan_interval = __le16_to_cpu(cp->period); +	hdev->le_scan_window = __le16_to_cpu(cp->window); -	len -= sizeof(cp); +	len -= sizeof(struct mgmt_cp_set_mesh);  	/* If filters don't fit, forward all adv pkts */  	if (len <= sizeof(hdev->mesh_ad_types)) -		memcpy(hdev->mesh_ad_types, cp.ad_types, len); +		memcpy(hdev->mesh_ad_types, cp->ad_types, len);  	hci_update_passive_scan_sync(hdev);  	return 0; diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c index 376ce6de84be..b783526ab588 100644 --- a/net/bluetooth/rfcomm/tty.c +++ b/net/bluetooth/rfcomm/tty.c @@ -643,8 +643,8 @@ static void rfcomm_dev_modem_status(struct rfcomm_dlc *dlc, u8 v24_sig)  		tty_port_tty_hangup(&dev->port, true);  	dev->modem_status = -		((v24_sig & RFCOMM_V24_RTC) ? (TIOCM_DSR | TIOCM_DTR) : 0) | -		((v24_sig & RFCOMM_V24_RTR) ? (TIOCM_RTS | TIOCM_CTS) : 0) | +		((v24_sig & RFCOMM_V24_RTC) ? TIOCM_DSR : 0) | +		((v24_sig & RFCOMM_V24_RTR) ? TIOCM_CTS : 0) |  		((v24_sig & RFCOMM_V24_IC)  ? TIOCM_RI : 0) |  		((v24_sig & RFCOMM_V24_DV)  ? TIOCM_CD : 0);  } @@ -1055,10 +1055,14 @@ static void rfcomm_tty_hangup(struct tty_struct *tty)  static int rfcomm_tty_tiocmget(struct tty_struct *tty)  {  	struct rfcomm_dev *dev = tty->driver_data; +	struct rfcomm_dlc *dlc = dev->dlc; +	u8 v24_sig;  	BT_DBG("tty %p dev %p", tty, dev); -	return dev->modem_status; +	rfcomm_dlc_get_modem_status(dlc, &v24_sig); + +	return (v24_sig & (TIOCM_DTR | TIOCM_RTS)) | dev->modem_status;  }  static int rfcomm_tty_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear) @@ -1071,23 +1075,15 @@ static int rfcomm_tty_tiocmset(struct tty_struct *tty, unsigned int set, unsigne  	rfcomm_dlc_get_modem_status(dlc, &v24_sig); -	if (set & TIOCM_DSR || set & TIOCM_DTR) +	if (set & TIOCM_DTR)  		v24_sig |= RFCOMM_V24_RTC; -	if (set & TIOCM_RTS || set & TIOCM_CTS) +	if (set & TIOCM_RTS)  		v24_sig |= RFCOMM_V24_RTR; -	if (set & TIOCM_RI) -		v24_sig |= RFCOMM_V24_IC; -	if (set & TIOCM_CD) -		v24_sig |= RFCOMM_V24_DV; -	if (clear & TIOCM_DSR || clear & TIOCM_DTR) +	if (clear & TIOCM_DTR)  		v24_sig &= ~RFCOMM_V24_RTC; -	if (clear & TIOCM_RTS || clear & TIOCM_CTS) +	if (clear & TIOCM_RTS)  		v24_sig &= ~RFCOMM_V24_RTR; -	if (clear & TIOCM_RI) -		v24_sig &= ~RFCOMM_V24_IC; -	if (clear & TIOCM_CD) -		v24_sig &= ~RFCOMM_V24_DV;  	rfcomm_dlc_set_modem_status(dlc, v24_sig); diff --git a/net/core/devmem.c b/net/core/devmem.c index d9de31a6cc7f..1d04754bc756 100644 --- a/net/core/devmem.c +++ b/net/core/devmem.c @@ -17,6 +17,7 @@  #include <net/page_pool/helpers.h>  #include <net/page_pool/memory_provider.h>  #include <net/sock.h> +#include <net/tcp.h>  #include <trace/events/page_pool.h>  #include "devmem.h" @@ -357,7 +358,8 @@ struct net_devmem_dmabuf_binding *net_devmem_get_binding(struct sock *sk,  							 unsigned int dmabuf_id)  {  	struct net_devmem_dmabuf_binding *binding; -	struct dst_entry *dst = __sk_dst_get(sk); +	struct net_device *dst_dev; +	struct dst_entry *dst;  	int err = 0;  	binding = net_devmem_lookup_dmabuf(dmabuf_id); @@ -366,16 +368,35 @@ struct net_devmem_dmabuf_binding *net_devmem_get_binding(struct sock *sk,  		goto out_err;  	} +	rcu_read_lock(); +	dst = __sk_dst_get(sk); +	/* If dst is NULL (route expired), attempt to rebuild it. 
*/ +	if (unlikely(!dst)) { +		if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk)) { +			err = -EHOSTUNREACH; +			goto out_unlock; +		} +		dst = __sk_dst_get(sk); +		if (unlikely(!dst)) { +			err = -ENODEV; +			goto out_unlock; +		} +	} +  	/* The dma-addrs in this binding are only reachable to the corresponding  	 * net_device.  	 */ -	if (!dst || !dst->dev || dst->dev->ifindex != binding->dev->ifindex) { +	dst_dev = dst_dev_rcu(dst); +	if (unlikely(!dst_dev) || unlikely(dst_dev != binding->dev)) {  		err = -ENODEV; -		goto out_err; +		goto out_unlock;  	} +	rcu_read_unlock();  	return binding; +out_unlock: +	rcu_read_unlock();  out_err:  	if (binding)  		net_devmem_dmabuf_binding_put(binding); diff --git a/net/core/filter.c b/net/core/filter.c index 76628df1fc82..fa06c5a08e22 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -3877,7 +3877,8 @@ static inline int __bpf_skb_change_head(struct sk_buff *skb, u32 head_room,  	u32 new_len = skb->len + head_room;  	int ret; -	if (unlikely(flags || (!skb_is_gso(skb) && new_len > max_len) || +	if (unlikely(flags || (int)head_room < 0 || +		     (!skb_is_gso(skb) && new_len > max_len) ||  		     new_len < skb->len))  		return -EINVAL; diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 31ea5af49f2d..e4a979b75cc6 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -891,18 +891,27 @@ static inline void tcp_rcv_rtt_measure_ts(struct sock *sk,  	}  } -void tcp_rcvbuf_grow(struct sock *sk) +void tcp_rcvbuf_grow(struct sock *sk, u32 newval)  {  	const struct net *net = sock_net(sk);  	struct tcp_sock *tp = tcp_sk(sk); -	int rcvwin, rcvbuf, cap; +	u32 rcvwin, rcvbuf, cap, oldval; +	u64 grow; + +	oldval = tp->rcvq_space.space; +	tp->rcvq_space.space = newval;  	if (!READ_ONCE(net->ipv4.sysctl_tcp_moderate_rcvbuf) ||  	    (sk->sk_userlocks & SOCK_RCVBUF_LOCK))  		return; +	/* DRS is always one RTT late. */ +	rcvwin = newval << 1; +  	/* slow start: allow the sender to double its rate. */ -	rcvwin = tp->rcvq_space.space << 1; +	grow = (u64)rcvwin * (newval - oldval); +	do_div(grow, oldval); +	rcvwin += grow << 1;  	if (!RB_EMPTY_ROOT(&tp->out_of_order_queue))  		rcvwin += TCP_SKB_CB(tp->ooo_last_skb)->end_seq - tp->rcv_nxt; @@ -943,9 +952,7 @@ void tcp_rcv_space_adjust(struct sock *sk)  	trace_tcp_rcvbuf_grow(sk, time); -	tp->rcvq_space.space = copied; - -	tcp_rcvbuf_grow(sk); +	tcp_rcvbuf_grow(sk, copied);  new_measure:  	tp->rcvq_space.seq = tp->copied_seq; @@ -5270,7 +5277,7 @@ end:  	}  	/* do not grow rcvbuf for not-yet-accepted or orphaned sockets. 
*/  	if (sk->sk_socket) -		tcp_rcvbuf_grow(sk); +		tcp_rcvbuf_grow(sk, tp->rcvq_space.space);  }  static int __must_check tcp_queue_rcv(struct sock *sk, struct sk_buff *skb, diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index d9aca1c3c097..c52b0456039d 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -1876,6 +1876,9 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev,  	link_conf->nontransmitted = false;  	link_conf->ema_ap = false;  	link_conf->bssid_indicator = 0; +	link_conf->fils_discovery.min_interval = 0; +	link_conf->fils_discovery.max_interval = 0; +	link_conf->unsol_bcast_probe_resp_interval = 0;  	__sta_info_flush(sdata, true, link_id, NULL); diff --git a/net/mac80211/key.c b/net/mac80211/key.c index b14e9cd9713f..d5da7ccea66e 100644 --- a/net/mac80211/key.c +++ b/net/mac80211/key.c @@ -508,11 +508,16 @@ static int ieee80211_key_replace(struct ieee80211_sub_if_data *sdata,  				ret = ieee80211_key_enable_hw_accel(new);  		}  	} else { -		if (!new->local->wowlan) +		if (!new->local->wowlan) {  			ret = ieee80211_key_enable_hw_accel(new); -		else if (link_id < 0 || !sdata->vif.active_links || -			 BIT(link_id) & sdata->vif.active_links) +		} else if (link_id < 0 || !sdata->vif.active_links || +			 BIT(link_id) & sdata->vif.active_links) {  			new->flags |= KEY_FLAG_UPLOADED_TO_HARDWARE; +			if (!(new->conf.flags & (IEEE80211_KEY_FLAG_GENERATE_MMIC | +						 IEEE80211_KEY_FLAG_PUT_MIC_SPACE | +						 IEEE80211_KEY_FLAG_RESERVE_TAILROOM))) +				decrease_tailroom_need_count(sdata, 1); +		}  	}  	if (ret) diff --git a/net/mptcp/mib.c b/net/mptcp/mib.c index 6003e47c770a..171643815076 100644 --- a/net/mptcp/mib.c +++ b/net/mptcp/mib.c @@ -85,6 +85,7 @@ static const struct snmp_mib mptcp_snmp_list[] = {  	SNMP_MIB_ITEM("DssFallback", MPTCP_MIB_DSSFALLBACK),  	SNMP_MIB_ITEM("SimultConnectFallback", MPTCP_MIB_SIMULTCONNFALLBACK),  	SNMP_MIB_ITEM("FallbackFailed", MPTCP_MIB_FALLBACKFAILED), +	SNMP_MIB_ITEM("WinProbe", MPTCP_MIB_WINPROBE),  };  /* mptcp_mib_alloc - allocate percpu mib counters diff --git a/net/mptcp/mib.h b/net/mptcp/mib.h index 309bac6fea32..a1d3e9369fbb 100644 --- a/net/mptcp/mib.h +++ b/net/mptcp/mib.h @@ -88,6 +88,7 @@ enum linux_mptcp_mib_field {  	MPTCP_MIB_DSSFALLBACK,		/* Bad or missing DSS */  	MPTCP_MIB_SIMULTCONNFALLBACK,	/* Simultaneous connect */  	MPTCP_MIB_FALLBACKFAILED,	/* Can't fallback due to msk status */ +	MPTCP_MIB_WINPROBE,		/* MPTCP-level zero window probe */  	__MPTCP_MIB_MAX  }; diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c index 0292162a14ee..2d6b8de35c44 100644 --- a/net/mptcp/protocol.c +++ b/net/mptcp/protocol.c @@ -194,17 +194,26 @@ static bool mptcp_ooo_try_coalesce(struct mptcp_sock *msk, struct sk_buff *to,   * - mptcp does not maintain a msk-level window clamp   * - returns true when  the receive buffer is actually updated   */ -static bool mptcp_rcvbuf_grow(struct sock *sk) +static bool mptcp_rcvbuf_grow(struct sock *sk, u32 newval)  {  	struct mptcp_sock *msk = mptcp_sk(sk);  	const struct net *net = sock_net(sk); -	int rcvwin, rcvbuf, cap; +	u32 rcvwin, rcvbuf, cap, oldval; +	u64 grow; +	oldval = msk->rcvq_space.space; +	msk->rcvq_space.space = newval;  	if (!READ_ONCE(net->ipv4.sysctl_tcp_moderate_rcvbuf) ||  	    (sk->sk_userlocks & SOCK_RCVBUF_LOCK))  		return false; -	rcvwin = msk->rcvq_space.space << 1; +	/* DRS is always one RTT late. */ +	rcvwin = newval << 1; + +	/* slow start: allow the sender to double its rate. 
*/ +	grow = (u64)rcvwin * (newval - oldval); +	do_div(grow, oldval); +	rcvwin += grow << 1;  	if (!RB_EMPTY_ROOT(&msk->out_of_order_queue))  		rcvwin += MPTCP_SKB_CB(msk->ooo_last_skb)->end_seq - msk->ack_seq; @@ -334,7 +343,7 @@ end:  	skb_set_owner_r(skb, sk);  	/* do not grow rcvbuf for not-yet-accepted or orphaned sockets. */  	if (sk->sk_socket) -		mptcp_rcvbuf_grow(sk); +		mptcp_rcvbuf_grow(sk, msk->rcvq_space.space);  }  static void mptcp_init_skb(struct sock *ssk, struct sk_buff *skb, int offset, @@ -998,7 +1007,7 @@ static void __mptcp_clean_una(struct sock *sk)  			if (WARN_ON_ONCE(!msk->recovery))  				break; -			WRITE_ONCE(msk->first_pending, mptcp_send_next(sk)); +			msk->first_pending = mptcp_send_next(sk);  		}  		dfrag_clear(sk, dfrag); @@ -1290,7 +1299,12 @@ alloc_skb:  	if (copy == 0) {  		u64 snd_una = READ_ONCE(msk->snd_una); -		if (snd_una != msk->snd_nxt || tcp_write_queue_tail(ssk)) { +		/* No need for zero probe if there are any data pending +		 * either at the msk or ssk level; skb is the current write +		 * queue tail and can be empty at this point. +		 */ +		if (snd_una != msk->snd_nxt || skb->len || +		    skb != tcp_send_head(ssk)) {  			tcp_remove_empty_skb(ssk);  			return 0;  		} @@ -1341,6 +1355,7 @@ alloc_skb:  		 mpext->dsn64);  	if (zero_window_probe) { +		MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_WINPROBE);  		mptcp_subflow_ctx(ssk)->rel_write_seq += copy;  		mpext->frozen = 1;  		if (READ_ONCE(msk->csum_enabled)) @@ -1543,7 +1558,7 @@ static int __subflow_push_pending(struct sock *sk, struct sock *ssk,  			mptcp_update_post_push(msk, dfrag, ret);  		} -		WRITE_ONCE(msk->first_pending, mptcp_send_next(sk)); +		msk->first_pending = mptcp_send_next(sk);  		if (msk->snd_burst <= 0 ||  		    !sk_stream_memory_free(ssk) || @@ -1903,7 +1918,7 @@ static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)  			get_page(dfrag->page);  			list_add_tail(&dfrag->list, &msk->rtx_queue);  			if (!msk->first_pending) -				WRITE_ONCE(msk->first_pending, dfrag); +				msk->first_pending = dfrag;  		}  		pr_debug("msk=%p dfrag at seq=%llu len=%u sent=%u new=%d\n", msk,  			 dfrag->data_seq, dfrag->data_len, dfrag->already_sent, @@ -1936,22 +1951,36 @@ do_error:  static void mptcp_rcv_space_adjust(struct mptcp_sock *msk, int copied); -static int __mptcp_recvmsg_mskq(struct sock *sk, -				struct msghdr *msg, -				size_t len, int flags, +static int __mptcp_recvmsg_mskq(struct sock *sk, struct msghdr *msg, +				size_t len, int flags, int copied_total,  				struct scm_timestamping_internal *tss,  				int *cmsg_flags)  {  	struct mptcp_sock *msk = mptcp_sk(sk);  	struct sk_buff *skb, *tmp; +	int total_data_len = 0;  	int copied = 0;  	skb_queue_walk_safe(&sk->sk_receive_queue, skb, tmp) { -		u32 offset = MPTCP_SKB_CB(skb)->offset; +		u32 delta, offset = MPTCP_SKB_CB(skb)->offset;  		u32 data_len = skb->len - offset; -		u32 count = min_t(size_t, len - copied, data_len); +		u32 count;  		int err; +		if (flags & MSG_PEEK) { +			/* skip already peeked skbs */ +			if (total_data_len + data_len <= copied_total) { +				total_data_len += data_len; +				continue; +			} + +			/* skip the already peeked data in the current skb */ +			delta = copied_total - total_data_len; +			offset += delta; +			data_len -= delta; +		} + +		count = min_t(size_t, len - copied, data_len);  		if (!(flags & MSG_TRUNC)) {  			err = skb_copy_datagram_msg(skb, offset, msg, count);  			if (unlikely(err < 0)) { @@ -1968,16 +1997,14 @@ static int __mptcp_recvmsg_mskq(struct sock *sk,  		copied += 
count; -		if (count < data_len) { -			if (!(flags & MSG_PEEK)) { +		if (!(flags & MSG_PEEK)) { +			msk->bytes_consumed += count; +			if (count < data_len) {  				MPTCP_SKB_CB(skb)->offset += count;  				MPTCP_SKB_CB(skb)->map_seq += count; -				msk->bytes_consumed += count; +				break;  			} -			break; -		} -		if (!(flags & MSG_PEEK)) {  			/* avoid the indirect call, we know the destructor is sock_rfree */  			skb->destructor = NULL;  			skb->sk = NULL; @@ -1985,7 +2012,6 @@ static int __mptcp_recvmsg_mskq(struct sock *sk,  			sk_mem_uncharge(sk, skb->truesize);  			__skb_unlink(skb, &sk->sk_receive_queue);  			skb_attempt_defer_free(skb); -			msk->bytes_consumed += count;  		}  		if (copied >= len) @@ -2049,9 +2075,7 @@ static void mptcp_rcv_space_adjust(struct mptcp_sock *msk, int copied)  	if (msk->rcvq_space.copied <= msk->rcvq_space.space)  		goto new_measure; -	msk->rcvq_space.space = msk->rcvq_space.copied; -	if (mptcp_rcvbuf_grow(sk)) { - +	if (mptcp_rcvbuf_grow(sk, msk->rcvq_space.copied)) {  		/* Make subflows follow along.  If we do not do this, we  		 * get drops at subflow level if skbs can't be moved to  		 * the mptcp rx queue fast enough (announced rcv_win can @@ -2063,8 +2087,9 @@ static void mptcp_rcv_space_adjust(struct mptcp_sock *msk, int copied)  			ssk = mptcp_subflow_tcp_sock(subflow);  			slow = lock_sock_fast(ssk); -			tcp_sk(ssk)->rcvq_space.space = msk->rcvq_space.copied; -			tcp_rcvbuf_grow(ssk); +			/* subflows can be added before tcp_init_transfer() */ +			if (tcp_sk(ssk)->rcvq_space.space) +				tcp_rcvbuf_grow(ssk, msk->rcvq_space.copied);  			unlock_sock_fast(ssk, slow);  		}  	} @@ -2183,7 +2208,8 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,  	while (copied < len) {  		int err, bytes_read; -		bytes_read = __mptcp_recvmsg_mskq(sk, msg, len - copied, flags, &tss, &cmsg_flags); +		bytes_read = __mptcp_recvmsg_mskq(sk, msg, len - copied, flags, +						  copied, &tss, &cmsg_flags);  		if (unlikely(bytes_read < 0)) {  			if (!copied)  				copied = bytes_read; @@ -2874,7 +2900,7 @@ static void __mptcp_clear_xmit(struct sock *sk)  	struct mptcp_sock *msk = mptcp_sk(sk);  	struct mptcp_data_frag *dtmp, *dfrag; -	WRITE_ONCE(msk->first_pending, NULL); +	msk->first_pending = NULL;  	list_for_each_entry_safe(dfrag, dtmp, &msk->rtx_queue, list)  		dfrag_clear(sk, dfrag);  } @@ -3414,9 +3440,6 @@ void __mptcp_data_acked(struct sock *sk)  void __mptcp_check_push(struct sock *sk, struct sock *ssk)  { -	if (!mptcp_send_head(sk)) -		return; -  	if (!sock_owned_by_user(sk))  		__mptcp_subflow_push_pending(sk, ssk, false);  	else diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h index 52f9cfa4ce95..379a88e14e8d 100644 --- a/net/mptcp/protocol.h +++ b/net/mptcp/protocol.h @@ -414,7 +414,7 @@ static inline struct mptcp_data_frag *mptcp_send_head(const struct sock *sk)  {  	const struct mptcp_sock *msk = mptcp_sk(sk); -	return READ_ONCE(msk->first_pending); +	return msk->first_pending;  }  static inline struct mptcp_data_frag *mptcp_send_next(struct sock *sk) diff --git a/net/netfilter/nft_connlimit.c b/net/netfilter/nft_connlimit.c index 92b984fa8175..fc35a11cdca2 100644 --- a/net/netfilter/nft_connlimit.c +++ b/net/netfilter/nft_connlimit.c @@ -48,7 +48,7 @@ static inline void nft_connlimit_do_eval(struct nft_connlimit *priv,  		return;  	} -	count = priv->list->count; +	count = READ_ONCE(priv->list->count);  	if ((count > priv->limit) ^ priv->invert) {  		regs->verdict.code = NFT_BREAK; diff --git a/net/netfilter/nft_ct.c 
b/net/netfilter/nft_ct.c index d526e69a2a2b..6f2ae7cad731 100644 --- a/net/netfilter/nft_ct.c +++ b/net/netfilter/nft_ct.c @@ -22,6 +22,7 @@  #include <net/netfilter/nf_conntrack_timeout.h>  #include <net/netfilter/nf_conntrack_l4proto.h>  #include <net/netfilter/nf_conntrack_expect.h> +#include <net/netfilter/nf_conntrack_seqadj.h>  struct nft_ct_helper_obj  {  	struct nf_conntrack_helper *helper4; @@ -379,6 +380,14 @@ static bool nft_ct_tmpl_alloc_pcpu(void)  }  #endif +static void __nft_ct_get_destroy(const struct nft_ctx *ctx, struct nft_ct *priv) +{ +#ifdef CONFIG_NF_CONNTRACK_LABELS +	if (priv->key == NFT_CT_LABELS) +		nf_connlabels_put(ctx->net); +#endif +} +  static int nft_ct_get_init(const struct nft_ctx *ctx,  			   const struct nft_expr *expr,  			   const struct nlattr * const tb[]) @@ -413,6 +422,10 @@ static int nft_ct_get_init(const struct nft_ctx *ctx,  		if (tb[NFTA_CT_DIRECTION] != NULL)  			return -EINVAL;  		len = NF_CT_LABELS_MAX_SIZE; + +		err = nf_connlabels_get(ctx->net, (len * BITS_PER_BYTE) - 1); +		if (err) +			return err;  		break;  #endif  	case NFT_CT_HELPER: @@ -494,7 +507,8 @@ static int nft_ct_get_init(const struct nft_ctx *ctx,  		case IP_CT_DIR_REPLY:  			break;  		default: -			return -EINVAL; +			err = -EINVAL; +			goto err;  		}  	} @@ -502,11 +516,11 @@ static int nft_ct_get_init(const struct nft_ctx *ctx,  	err = nft_parse_register_store(ctx, tb[NFTA_CT_DREG], &priv->dreg, NULL,  				       NFT_DATA_VALUE, len);  	if (err < 0) -		return err; +		goto err;  	err = nf_ct_netns_get(ctx->net, ctx->family);  	if (err < 0) -		return err; +		goto err;  	if (priv->key == NFT_CT_BYTES ||  	    priv->key == NFT_CT_PKTS  || @@ -514,6 +528,9 @@ static int nft_ct_get_init(const struct nft_ctx *ctx,  		nf_ct_set_acct(ctx->net, true);  	return 0; +err: +	__nft_ct_get_destroy(ctx, priv); +	return err;  }  static void __nft_ct_set_destroy(const struct nft_ctx *ctx, struct nft_ct *priv) @@ -626,6 +643,9 @@ err1:  static void nft_ct_get_destroy(const struct nft_ctx *ctx,  			       const struct nft_expr *expr)  { +	struct nft_ct *priv = nft_expr_priv(expr); + +	__nft_ct_get_destroy(ctx, priv);  	nf_ct_netns_put(ctx->net, ctx->family);  } @@ -1173,6 +1193,10 @@ static void nft_ct_helper_obj_eval(struct nft_object *obj,  	if (help) {  		rcu_assign_pointer(help->helper, to_assign);  		set_bit(IPS_HELPER_BIT, &ct->status); + +		if ((ct->status & IPS_NAT_MASK) && !nfct_seqadj(ct)) +			if (!nfct_seqadj_ext_add(ct)) +				regs->verdict.code = NF_DROP;  	}  } diff --git a/net/sctp/input.c b/net/sctp/input.c index 7e99894778d4..e119e460ccde 100644 --- a/net/sctp/input.c +++ b/net/sctp/input.c @@ -190,7 +190,7 @@ int sctp_rcv(struct sk_buff *skb)  		goto discard_release;  	nf_reset_ct(skb); -	if (sk_filter(sk, skb)) +	if (sk_filter(sk, skb) || skb->len < sizeof(struct sctp_chunkhdr))  		goto discard_release;  	/* Create an SCTP packet structure. 
*/ diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c index a64ae15b1a60..71734411ff4c 100644 --- a/net/tls/tls_device.c +++ b/net/tls/tls_device.c @@ -723,8 +723,10 @@ tls_device_rx_resync_async(struct tls_offload_resync_async *resync_async,  		/* shouldn't get to wraparound:  		 * too long in async stage, something bad happened  		 */ -		if (WARN_ON_ONCE(resync_async->rcd_delta == USHRT_MAX)) +		if (WARN_ON_ONCE(resync_async->rcd_delta == USHRT_MAX)) { +			tls_offload_rx_resync_async_request_cancel(resync_async);  			return false; +		}  		/* asynchronous stage: log all headers seq such that  		 * req_seq <= seq <= end_seq, and wait for real resync request diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 346dfd2bd987..03d07b54359a 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -4136,8 +4136,7 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)  			rdev->wiphy.txq_quantum = old_txq_quantum;  		} -		if (old_rts_threshold) -			kfree(old_radio_rts_threshold); +		kfree(old_radio_rts_threshold);  		return result;  	} diff --git a/scripts/kconfig/mconf.c b/scripts/kconfig/mconf.c index 84ea9215c0a7..b8b7bba84a65 100644 --- a/scripts/kconfig/mconf.c +++ b/scripts/kconfig/mconf.c @@ -12,6 +12,7 @@  #include <errno.h>  #include <fcntl.h>  #include <limits.h> +#include <locale.h>  #include <stdarg.h>  #include <stdlib.h>  #include <string.h> @@ -931,6 +932,8 @@ int main(int ac, char **av)  	signal(SIGINT, sig_handler); +	setlocale(LC_ALL, ""); +  	if (ac > 1 && strcmp(av[1], "-s") == 0) {  		silent = 1;  		/* Silence conf_read() until the real callback is set up */ diff --git a/scripts/kconfig/nconf.c b/scripts/kconfig/nconf.c index ae1fe5f60327..521700ed7152 100644 --- a/scripts/kconfig/nconf.c +++ b/scripts/kconfig/nconf.c @@ -7,6 +7,7 @@  #ifndef _GNU_SOURCE  #define _GNU_SOURCE  #endif +#include <locale.h>  #include <string.h>  #include <strings.h>  #include <stdlib.h> @@ -1478,6 +1479,8 @@ int main(int ac, char **av)  	int lines, columns;  	char *mode; +	setlocale(LC_ALL, ""); +  	if (ac > 1 && strcmp(av[1], "-s") == 0) {  		/* Silence conf_read() until the real callback is set up */  		conf_set_message_callback(NULL); diff --git a/scripts/package/install-extmod-build b/scripts/package/install-extmod-build index b96538787f3d..054fdf45cc37 100755 --- a/scripts/package/install-extmod-build +++ b/scripts/package/install-extmod-build @@ -63,7 +63,7 @@ if [ "${CC}" != "${HOSTCC}" ]; then  	# Clear VPATH and srcroot because the source files reside in the output  	# directory.  	# shellcheck disable=SC2016 # $(MAKE) and $(build) will be expanded by Make -	"${MAKE}" run-command KBUILD_RUN_COMMAND='+$(MAKE) HOSTCC='"${CC}"' VPATH= srcroot=. $(build)='"$(realpath --relative-base=. "${destdir}")"/scripts +	"${MAKE}" run-command KBUILD_RUN_COMMAND='+$(MAKE) HOSTCC='"${CC}"' VPATH= srcroot=. $(build)='"$(realpath --relative-to=. 
"${destdir}")"/scripts  	rm -f "${destdir}/scripts/Kbuild"  fi diff --git a/sound/hda/codecs/realtek/alc269.c b/sound/hda/codecs/realtek/alc269.c index 8ad5febd822a..4aec5067c59d 100644 --- a/sound/hda/codecs/realtek/alc269.c +++ b/sound/hda/codecs/realtek/alc269.c @@ -3736,6 +3736,7 @@ enum {  	ALC285_FIXUP_ASUS_GA605K_I2C_SPEAKER2_TO_DAC1,  	ALC269_FIXUP_POSITIVO_P15X_HEADSET_MIC,  	ALC289_FIXUP_ASUS_ZEPHYRUS_DUAL_SPK, +	ALC256_FIXUP_VAIO_RPL_MIC_NO_PRESENCE,  };  /* A special fixup for Lenovo C940 and Yoga Duet 7; @@ -6172,6 +6173,16 @@ static const struct hda_fixup alc269_fixups[] = {  			{ 0x1e, 0x90170150 }, /* Internal Speaker */  			{ }  		}, +	}, +	[ALC256_FIXUP_VAIO_RPL_MIC_NO_PRESENCE] = { +		.type = HDA_FIXUP_PINS, +		.v.pins = (const struct hda_pintbl[]) { +			{ 0x19, 0x03a1113c }, /* use as headset mic, without its own jack detect */ +			{ 0x1a, 0x22a190a0 }, /* dock mic */ +			{ } +		}, +		.chained = true, +		.chain_id = ALC269_FIXUP_LIMIT_INT_MIC_BOOST  	}  }; @@ -6578,6 +6589,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = {  	SND_PCI_QUIRK(0x103c, 0x8c16, "HP Spectre x360 2-in-1 Laptop 16-aa0xxx", ALC245_FIXUP_HP_SPECTRE_X360_16_AA0XXX),  	SND_PCI_QUIRK(0x103c, 0x8c17, "HP Spectre 16", ALC287_FIXUP_CS35L41_I2C_2),  	SND_PCI_QUIRK(0x103c, 0x8c21, "HP Pavilion Plus Laptop 14-ey0XXX", ALC245_FIXUP_HP_X360_MUTE_LEDS), +	SND_PCI_QUIRK(0x103c, 0x8c2d, "HP Victus 15-fa1xxx (MB 8C2D)", ALC245_FIXUP_HP_MUTE_LED_COEFBIT),  	SND_PCI_QUIRK(0x103c, 0x8c30, "HP Victus 15-fb1xxx", ALC245_FIXUP_HP_MUTE_LED_COEFBIT),  	SND_PCI_QUIRK(0x103c, 0x8c46, "HP EliteBook 830 G11", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),  	SND_PCI_QUIRK(0x103c, 0x8c47, "HP EliteBook 840 G11", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED), @@ -6959,6 +6971,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = {  	SND_PCI_QUIRK(0x1558, 0x971d, "Clevo N970T[CDF]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),  	SND_PCI_QUIRK(0x1558, 0xa500, "Clevo NL5[03]RU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),  	SND_PCI_QUIRK(0x1558, 0xa554, "VAIO VJFH52", ALC269_FIXUP_VAIO_VJFH52_MIC_NO_PRESENCE), +	SND_PCI_QUIRK(0x1558, 0xa559, "VAIO RPL", ALC256_FIXUP_VAIO_RPL_MIC_NO_PRESENCE),  	SND_PCI_QUIRK(0x1558, 0xa600, "Clevo NL50NU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),  	SND_PCI_QUIRK(0x1558, 0xa650, "Clevo NP[567]0SN[CD]", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE),  	SND_PCI_QUIRK(0x1558, 0xa671, "Clevo NP70SN[CDE]", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE), @@ -7080,6 +7093,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = {  	SND_PCI_QUIRK(0x17aa, 0x38a9, "Thinkbook 16P", ALC287_FIXUP_MG_RTKC_CSAMP_CS35L41_I2C_THINKPAD),  	SND_PCI_QUIRK(0x17aa, 0x38ab, "Thinkbook 16P", ALC287_FIXUP_MG_RTKC_CSAMP_CS35L41_I2C_THINKPAD),  	SND_PCI_QUIRK(0x17aa, 0x38b4, "Legion Slim 7 16IRH8", ALC287_FIXUP_CS35L41_I2C_2), +	HDA_CODEC_QUIRK(0x17aa, 0x391c, "Lenovo Yoga 7 2-in-1 14AKP10", ALC287_FIXUP_YOGA9_14IAP7_BASS_SPK_PIN),  	SND_PCI_QUIRK(0x17aa, 0x38b5, "Legion Slim 7 16IRH8", ALC287_FIXUP_CS35L41_I2C_2),  	SND_PCI_QUIRK(0x17aa, 0x38b6, "Legion Slim 7 16APH8", ALC287_FIXUP_CS35L41_I2C_2),  	SND_PCI_QUIRK(0x17aa, 0x38b7, "Legion Slim 7 16APH8", ALC287_FIXUP_CS35L41_I2C_2), diff --git a/sound/soc/amd/acp/amd-acp70-acpi-match.c b/sound/soc/amd/acp/amd-acp70-acpi-match.c index dcecac792e6d..871b4f054a84 100644 --- a/sound/soc/amd/acp/amd-acp70-acpi-match.c +++ b/sound/soc/amd/acp/amd-acp70-acpi-match.c @@ -30,6 +30,20 @@ static const struct snd_soc_acpi_endpoint spk_r_endpoint = {  	.group_id = 1  }; +static const struct snd_soc_acpi_endpoint 
spk_2_endpoint = { +	.num = 0, +	.aggregated = 1, +	.group_position = 2, +	.group_id = 1 +}; + +static const struct snd_soc_acpi_endpoint spk_3_endpoint = { +	.num = 0, +	.aggregated = 1, +	.group_position = 3, +	.group_id = 1 +}; +  static const struct snd_soc_acpi_adr_device rt711_rt1316_group_adr[] = {  	{  		.adr = 0x000030025D071101ull, @@ -112,6 +126,134 @@ static const struct snd_soc_acpi_adr_device rt1320_1_single_adr[] = {  	}  }; +static const struct snd_soc_acpi_endpoint cs42l43_endpoints[] = { +	{ /* Jack Playback Endpoint */ +		.num = 0, +		.aggregated = 0, +		.group_position = 0, +		.group_id = 0, +	}, +	{ /* DMIC Capture Endpoint */ +		.num = 1, +		.aggregated = 0, +		.group_position = 0, +		.group_id = 0, +	}, +	{ /* Jack Capture Endpoint */ +		.num = 2, +		.aggregated = 0, +		.group_position = 0, +		.group_id = 0, +	}, +	{ /* Speaker Playback Endpoint */ +		.num = 3, +		.aggregated = 0, +		.group_position = 0, +		.group_id = 0, +	}, +}; + +static const struct snd_soc_acpi_adr_device cs42l43_0_adr[] = { +	{ +		.adr = 0x00003001FA424301ull, +		.num_endpoints = ARRAY_SIZE(cs42l43_endpoints), +		.endpoints = cs42l43_endpoints, +		.name_prefix = "cs42l43" +	} +}; + +static const struct snd_soc_acpi_adr_device cs42l43_1_cs35l56x4_1_adr[] = { +	{ +		.adr = 0x00013001FA424301ull, +		.num_endpoints = ARRAY_SIZE(cs42l43_endpoints), +		.endpoints = cs42l43_endpoints, +		.name_prefix = "cs42l43" +	}, +	{ +		.adr = 0x00013001FA355601ull, +		.num_endpoints = 1, +		.endpoints = &spk_l_endpoint, +		.name_prefix = "AMP1" +	}, +	{ +		.adr = 0x00013101FA355601ull, +		.num_endpoints = 1, +		.endpoints = &spk_r_endpoint, +		.name_prefix = "AMP2" +	}, +	{ +		.adr = 0x00013201FA355601ull, +		.num_endpoints = 1, +		.endpoints = &spk_2_endpoint, +		.name_prefix = "AMP3" +	}, +	{ +		.adr = 0x00013301FA355601ull, +		.num_endpoints = 1, +		.endpoints = &spk_3_endpoint, +		.name_prefix = "AMP4" +	}, +}; + +static const struct snd_soc_acpi_adr_device cs35l56x4_1_adr[] = { +	{ +		.adr = 0x00013301FA355601ull, +		.num_endpoints = 1, +		.endpoints = &spk_l_endpoint, +		.name_prefix = "AMP1" +	}, +	{ +		.adr = 0x00013201FA355601ull, +		.num_endpoints = 1, +		.endpoints = &spk_r_endpoint, +		.name_prefix = "AMP2" +	}, +	{ +		.adr = 0x00013101FA355601ull, +		.num_endpoints = 1, +		.endpoints = &spk_2_endpoint, +		.name_prefix = "AMP3" +	}, +	{ +		.adr = 0x00013001FA355601ull, +		.num_endpoints = 1, +		.endpoints = &spk_3_endpoint, +		.name_prefix = "AMP4" +	}, +}; + +static const struct snd_soc_acpi_link_adr acp70_cs42l43_l1_cs35l56x4_l1[] = { +	{ +		.mask = BIT(1), +		.num_adr = ARRAY_SIZE(cs42l43_1_cs35l56x4_1_adr), +		.adr_d = cs42l43_1_cs35l56x4_1_adr, +	}, +	{} +}; + +static const struct snd_soc_acpi_link_adr acp70_cs42l43_l0_cs35l56x4_l1[] = { +	{ +		.mask = BIT(0), +		.num_adr = ARRAY_SIZE(cs42l43_0_adr), +		.adr_d = cs42l43_0_adr, +	}, +	{ +		.mask = BIT(1), +		.num_adr = ARRAY_SIZE(cs35l56x4_1_adr), +		.adr_d = cs35l56x4_1_adr, +	}, +	{} +}; + +static const struct snd_soc_acpi_link_adr acp70_cs35l56x4_l1[] = { +	{ +		.mask = BIT(1), +		.num_adr = ARRAY_SIZE(cs35l56x4_1_adr), +		.adr_d = cs35l56x4_1_adr, +	}, +	{} +}; +  static const struct snd_soc_acpi_link_adr acp70_rt722_only[] = {  	{  		.mask = BIT(0), @@ -151,6 +293,21 @@ struct snd_soc_acpi_mach snd_soc_acpi_amd_acp70_sdw_machines[] = {  		.links = acp70_4_in_1_sdca,  		.drv_name = "amd_sdw",  	}, +	{ +		.link_mask = BIT(0) | BIT(1), +		.links = acp70_cs42l43_l0_cs35l56x4_l1, +		.drv_name = "amd_sdw", +	}, +	{ +		.link_mask = BIT(1), +		.links 
= acp70_cs42l43_l1_cs35l56x4_l1, +		.drv_name = "amd_sdw", +	}, +	{ +		.link_mask = BIT(1), +		.links = acp70_cs35l56x4_l1, +		.drv_name = "amd_sdw", +	},  	{},  };  EXPORT_SYMBOL(snd_soc_acpi_amd_acp70_sdw_machines); diff --git a/sound/soc/codecs/cs-amp-lib-test.c b/sound/soc/codecs/cs-amp-lib-test.c index 2fde84309338..3406887cdfa2 100644 --- a/sound/soc/codecs/cs-amp-lib-test.c +++ b/sound/soc/codecs/cs-amp-lib-test.c @@ -7,6 +7,7 @@  #include <kunit/resource.h>  #include <kunit/test.h> +#include <kunit/test-bug.h>  #include <kunit/static_stub.h>  #include <linux/device/faux.h>  #include <linux/firmware/cirrus/cs_dsp.h> diff --git a/sound/soc/codecs/cs530x.c b/sound/soc/codecs/cs530x.c index b9eff240b929..535387cd7aa3 100644 --- a/sound/soc/codecs/cs530x.c +++ b/sound/soc/codecs/cs530x.c @@ -793,7 +793,7 @@ static int cs530x_set_sysclk(struct snd_soc_component *component, int clk_id,  	case CS530X_SYSCLK_SRC_PLL:  		break;  	default: -		dev_err(component->dev, "Invalid clock id %d\n", clk_id); +		dev_err(component->dev, "Invalid sysclk source: %d\n", source);  		return -EINVAL;  	} diff --git a/sound/soc/codecs/max98090.c b/sound/soc/codecs/max98090.c index cb1508fc99f8..5aff5a459a43 100644 --- a/sound/soc/codecs/max98090.c +++ b/sound/soc/codecs/max98090.c @@ -1239,6 +1239,8 @@ static const struct snd_soc_dapm_widget max98091_dapm_widgets[] = {  	SND_SOC_DAPM_SUPPLY("DMIC4_ENA", M98090_REG_DIGITAL_MIC_ENABLE,  		 M98090_DIGMIC4_SHIFT, 0, max98090_shdn_event,  			 SND_SOC_DAPM_POST_PMU), +	SND_SOC_DAPM_SUPPLY("DMIC34_HPF", M98090_REG_FILTER_CONFIG, +		M98090_FLT_DMIC34HPF_SHIFT, 0, NULL, 0),  };  static const struct snd_soc_dapm_route max98090_dapm_routes[] = { @@ -1427,8 +1429,8 @@ static const struct snd_soc_dapm_route max98091_dapm_routes[] = {  	/* DMIC inputs */  	{"DMIC3", NULL, "DMIC3_ENA"},  	{"DMIC4", NULL, "DMIC4_ENA"}, -	{"DMIC3", NULL, "AHPF"}, -	{"DMIC4", NULL, "AHPF"}, +	{"DMIC3", NULL, "DMIC34_HPF"}, +	{"DMIC4", NULL, "DMIC34_HPF"},  };  static int max98090_add_widgets(struct snd_soc_component *component) diff --git a/sound/soc/codecs/rt721-sdca.c b/sound/soc/codecs/rt721-sdca.c index a4bd29d7220b..5f7b505d5414 100644 --- a/sound/soc/codecs/rt721-sdca.c +++ b/sound/soc/codecs/rt721-sdca.c @@ -281,6 +281,10 @@ static void rt721_sdca_jack_preset(struct rt721_sdca_priv *rt721)  	rt_sdca_index_write(rt721->mbq_regmap, RT721_BOOST_CTRL,  		RT721_BST_4CH_TOP_GATING_CTRL1, 0x002a);  	regmap_write(rt721->regmap, 0x2f58, 0x07); + +	regmap_write(rt721->regmap, 0x2f51, 0x00); +	rt_sdca_index_write(rt721->mbq_regmap, RT721_HDA_SDCA_FLOAT, +		RT721_MISC_CTL, 0x0004);  }  static void rt721_sdca_jack_init(struct rt721_sdca_priv *rt721) diff --git a/sound/soc/codecs/rt721-sdca.h b/sound/soc/codecs/rt721-sdca.h index 71fac9cd8739..24ce188562ba 100644 --- a/sound/soc/codecs/rt721-sdca.h +++ b/sound/soc/codecs/rt721-sdca.h @@ -137,6 +137,7 @@ struct rt721_sdca_dmic_kctrl_priv {  #define RT721_HDA_LEGACY_UAJ_CTL		0x02  #define RT721_HDA_LEGACY_CTL1			0x05  #define RT721_HDA_LEGACY_RESET_CTL		0x06 +#define RT721_MISC_CTL				0x07  #define RT721_XU_REL_CTRL			0x0c  #define RT721_GE_REL_CTRL1			0x0d  #define RT721_HDA_LEGACY_GPIO_WAKE_EN_CTL	0x0e diff --git a/sound/soc/fsl/fsl_micfil.c b/sound/soc/fsl/fsl_micfil.c index aabd90a8b3ec..cac26ba0aa4b 100644 --- a/sound/soc/fsl/fsl_micfil.c +++ b/sound/soc/fsl/fsl_micfil.c @@ -131,7 +131,7 @@ static struct fsl_micfil_soc_data fsl_micfil_imx943 = {  	.fifos = 8,  	.fifo_depth = 32,  	.dataline =  0xf, -	.formats = SNDRV_PCM_FMTBIT_S32_LE | 
SNDRV_PCM_FMTBIT_DSD_U32_BE, +	.formats = SNDRV_PCM_FMTBIT_S32_LE | SNDRV_PCM_FMTBIT_DSD_U32_LE,  	.use_edma = true,  	.use_verid = true,  	.volume_sx = false, @@ -823,7 +823,7 @@ static int fsl_micfil_hw_params(struct snd_pcm_substream *substream,  		break;  	} -	if (format == SNDRV_PCM_FORMAT_DSD_U32_BE) { +	if (format == SNDRV_PCM_FORMAT_DSD_U32_LE) {  		micfil->dec_bypass = true;  		/*  		 * According to equation 29 in RM: diff --git a/sound/soc/fsl/fsl_sai.c b/sound/soc/fsl/fsl_sai.c index 757e7868e322..72bfc91e21b9 100644 --- a/sound/soc/fsl/fsl_sai.c +++ b/sound/soc/fsl/fsl_sai.c @@ -353,7 +353,6 @@ static int fsl_sai_set_dai_fmt_tr(struct snd_soc_dai *cpu_dai,  		break;  	case SND_SOC_DAIFMT_PDM:  		val_cr2 |= FSL_SAI_CR2_BCP; -		val_cr4 &= ~FSL_SAI_CR4_MF;  		sai->is_pdm_mode = true;  		break;  	case SND_SOC_DAIFMT_RIGHT_J: @@ -638,7 +637,7 @@ static int fsl_sai_hw_params(struct snd_pcm_substream *substream,  	val_cr5 |= FSL_SAI_CR5_WNW(slot_width);  	val_cr5 |= FSL_SAI_CR5_W0W(slot_width); -	if (sai->is_lsb_first || sai->is_pdm_mode) +	if (sai->is_lsb_first)  		val_cr5 |= FSL_SAI_CR5_FBT(0);  	else  		val_cr5 |= FSL_SAI_CR5_FBT(word_width - 1); @@ -653,12 +652,12 @@ static int fsl_sai_hw_params(struct snd_pcm_substream *substream,  		val_cr4 |= FSL_SAI_CR4_CHMOD;  	/* -	 * For SAI provider mode, when Tx(Rx) sync with Rx(Tx) clock, Rx(Tx) will -	 * generate bclk and frame clock for Tx(Rx), we should set RCR4(TCR4), -	 * RCR5(TCR5) for playback(capture), or there will be sync error. +	 * When Tx(Rx) sync with Rx(Tx) clock, Rx(Tx) will provide bclk and +	 * frame clock for Tx(Rx). We should set RCR4(TCR4), RCR5(TCR5) +	 * for playback(capture), or there will be sync error.  	 */ -	if (!sai->is_consumer_mode[tx] && fsl_sai_dir_is_synced(sai, adir)) { +	if (fsl_sai_dir_is_synced(sai, adir)) {  		regmap_update_bits(sai->regmap, FSL_SAI_xCR4(!tx, ofs),  				   FSL_SAI_CR4_SYWD_MASK | FSL_SAI_CR4_FRSZ_MASK |  				   FSL_SAI_CR4_CHMOD_MASK, diff --git a/sound/soc/intel/avs/pcm.c b/sound/soc/intel/avs/pcm.c index d31058e2de5b..80c001120cdd 100644 --- a/sound/soc/intel/avs/pcm.c +++ b/sound/soc/intel/avs/pcm.c @@ -651,6 +651,7 @@ static void avs_dai_fe_shutdown(struct snd_pcm_substream *substream, struct snd_  	data = snd_soc_dai_get_dma_data(dai, substream); +	disable_work_sync(&data->period_elapsed_work);  	snd_hdac_ext_stream_release(data->host_stream, HDAC_EXT_STREAM_TYPE_HOST);  	avs_dai_shutdown(substream, dai);  } @@ -754,6 +755,8 @@ static int avs_dai_fe_prepare(struct snd_pcm_substream *substream, struct snd_so  	data = snd_soc_dai_get_dma_data(dai, substream);  	host_stream = data->host_stream; +	if (runtime->state == SNDRV_PCM_STATE_XRUN) +		hdac_stream(host_stream)->prepared = false;  	if (hdac_stream(host_stream)->prepared)  		return 0; diff --git a/sound/soc/intel/avs/probes.c b/sound/soc/intel/avs/probes.c index 693ecfe68fd0..74096236984a 100644 --- a/sound/soc/intel/avs/probes.c +++ b/sound/soc/intel/avs/probes.c @@ -14,8 +14,8 @@  #include "debug.h"  #include "messages.h" -static int avs_dsp_init_probe(struct avs_dev *adev, union avs_connector_node_id node_id, -			      size_t buffer_size) +static int avs_dsp_init_probe(struct avs_dev *adev, struct snd_compr_params *params, int bps, +			      union avs_connector_node_id node_id, size_t buffer_size)  {  	struct avs_probe_cfg cfg = {{0}};  	struct avs_module_entry mentry; @@ -27,12 +27,16 @@ static int avs_dsp_init_probe(struct avs_dev *adev, union avs_connector_node_id  		return ret;  	/* -	 * Probe module uses no cycles, 
audio data format and input and output -	 * frame sizes are unused. It is also not owned by any pipeline. +	 * Probe module uses no cycles, input and output frame sizes are unused. +	 * It is also not owned by any pipeline.  	 */  	cfg.base.ibs = 1;  	/* BSS module descriptor is always segment of index=2. */  	cfg.base.is_pages = mentry.segments[2].flags.length; +	cfg.base.audio_fmt.sampling_freq = params->codec.sample_rate; +	cfg.base.audio_fmt.bit_depth = bps; +	cfg.base.audio_fmt.num_channels = params->codec.ch_out; +	cfg.base.audio_fmt.valid_bit_depth = bps;  	cfg.gtw_cfg.node_id = node_id;  	cfg.gtw_cfg.dma_buffer_size = buffer_size; @@ -128,8 +132,6 @@ static int avs_probe_compr_set_params(struct snd_compr_stream *cstream,  	struct hdac_ext_stream *host_stream = avs_compr_get_host_stream(cstream);  	struct snd_compr_runtime *rtd = cstream->runtime;  	struct avs_dev *adev = to_avs_dev(dai->dev); -	/* compr params do not store bit depth, default to S32_LE. */ -	snd_pcm_format_t format = SNDRV_PCM_FORMAT_S32_LE;  	unsigned int format_val;  	int bps, ret; @@ -142,7 +144,7 @@ static int avs_probe_compr_set_params(struct snd_compr_stream *cstream,  	ret = snd_compr_malloc_pages(cstream, rtd->buffer_size);  	if (ret < 0)  		return ret; -	bps = snd_pcm_format_physical_width(format); +	bps = snd_pcm_format_physical_width(params->codec.format);  	if (bps < 0)  		return bps;  	format_val = snd_hdac_stream_format(params->codec.ch_out, bps, params->codec.sample_rate); @@ -166,7 +168,7 @@ static int avs_probe_compr_set_params(struct snd_compr_stream *cstream,  		node_id.vindex = hdac_stream(host_stream)->stream_tag - 1;  		node_id.dma_type = AVS_DMA_HDA_HOST_INPUT; -		ret = avs_dsp_init_probe(adev, node_id, rtd->dma_bytes); +		ret = avs_dsp_init_probe(adev, params, bps, node_id, rtd->dma_bytes);  		if (ret < 0) {  			dev_err(dai->dev, "probe init failed: %d\n", ret);  			avs_dsp_enable_d0ix(adev); diff --git a/sound/soc/intel/common/soc-acpi-intel-ptl-match.c b/sound/soc/intel/common/soc-acpi-intel-ptl-match.c index 3c8b10e21ceb..4853f4f31786 100644 --- a/sound/soc/intel/common/soc-acpi-intel-ptl-match.c +++ b/sound/soc/intel/common/soc-acpi-intel-ptl-match.c @@ -227,33 +227,6 @@ static const struct snd_soc_acpi_endpoint cs42l43_amp_spkagg_endpoints[] = {  	},  }; -static const struct snd_soc_acpi_endpoint cs42l43_endpoints[] = { -	{ /* Jack Playback Endpoint */ -		.num = 0, -		.aggregated = 0, -		.group_position = 0, -		.group_id = 0, -	}, -	{ /* DMIC Capture Endpoint */ -		.num = 1, -		.aggregated = 0, -		.group_position = 0, -		.group_id = 0, -	}, -	{ /* Jack Capture Endpoint */ -		.num = 2, -		.aggregated = 0, -		.group_position = 0, -		.group_id = 0, -	}, -	{ /* Speaker Playback Endpoint */ -		.num = 3, -		.aggregated = 0, -		.group_position = 0, -		.group_id = 0, -	}, -}; -  static const struct snd_soc_acpi_adr_device cs42l43_2_adr[] = {  	{  		.adr = 0x00023001fa424301ull, @@ -305,15 +278,6 @@ static const struct snd_soc_acpi_adr_device cs35l56_3_3amp_adr[] = {  	}  }; -static const struct snd_soc_acpi_adr_device cs42l43_3_adr[] = { -	{ -		.adr = 0x00033001FA424301ull, -		.num_endpoints = ARRAY_SIZE(cs42l43_endpoints), -		.endpoints = cs42l43_endpoints, -		.name_prefix = "cs42l43" -	} -}; -  static const struct snd_soc_acpi_adr_device rt711_sdca_0_adr[] = {  	{  		.adr = 0x000030025D071101ull, @@ -486,15 +450,6 @@ static const struct snd_soc_acpi_link_adr ptl_cs42l43_l2_cs35l56x6_l13[] = {  	{}  }; -static const struct snd_soc_acpi_link_adr ptl_cs42l43_l3[] = { -	{ -		.mask = BIT(3), -		
.num_adr = ARRAY_SIZE(cs42l43_3_adr), -		.adr_d = cs42l43_3_adr, -	}, -	{} -}; -  static const struct snd_soc_acpi_link_adr ptl_rt721_l0[] = {  	{  		.mask = BIT(0), @@ -714,13 +669,6 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_ptl_sdw_machines[] = {  	},  	{  		.link_mask = BIT(3), -		.links = ptl_cs42l43_l3, -		.drv_name = "sof_sdw", -		.sof_tplg_filename = "sof-ptl-cs42l43-l3.tplg", -		.get_function_tplg_files = sof_sdw_get_tplg_files, -	}, -	{ -		.link_mask = BIT(3),  		.links = ptl_sdw_rt712_vb_l3_rt1320_l3,  		.drv_name = "sof_sdw",  		.machine_check = snd_soc_acpi_intel_sdca_is_device_rt712_vb, diff --git a/sound/soc/mediatek/mt8195/mt8195-afe-pcm.c b/sound/soc/mediatek/mt8195/mt8195-afe-pcm.c index 5d025ad72263..c63b3444bc17 100644 --- a/sound/soc/mediatek/mt8195/mt8195-afe-pcm.c +++ b/sound/soc/mediatek/mt8195/mt8195-afe-pcm.c @@ -3176,7 +3176,6 @@ err_pm_put:  static void mt8195_afe_pcm_dev_remove(struct platform_device *pdev)  { -	pm_runtime_disable(&pdev->dev);  	if (!pm_runtime_status_suspended(&pdev->dev))  		mt8195_afe_runtime_suspend(&pdev->dev);  } diff --git a/sound/soc/mediatek/mt8365/mt8365-afe-pcm.c b/sound/soc/mediatek/mt8365/mt8365-afe-pcm.c index 10793bbe9275..d48252cd96ac 100644 --- a/sound/soc/mediatek/mt8365/mt8365-afe-pcm.c +++ b/sound/soc/mediatek/mt8365/mt8365-afe-pcm.c @@ -2238,7 +2238,6 @@ static void mt8365_afe_pcm_dev_remove(struct platform_device *pdev)  	mt8365_afe_disable_top_cg(afe, MT8365_TOP_CG_AFE); -	pm_runtime_disable(&pdev->dev);  	if (!pm_runtime_status_suspended(&pdev->dev))  		mt8365_afe_runtime_suspend(&pdev->dev);  } diff --git a/sound/soc/qcom/qdsp6/q6asm.c b/sound/soc/qcom/qdsp6/q6asm.c index 06a802f9dba5..67e9ca18883c 100644 --- a/sound/soc/qcom/qdsp6/q6asm.c +++ b/sound/soc/qcom/qdsp6/q6asm.c @@ -377,9 +377,9 @@ static void q6asm_audio_client_free_buf(struct audio_client *ac,  	spin_lock_irqsave(&ac->lock, flags);  	port->num_periods = 0; +	spin_unlock_irqrestore(&ac->lock, flags);  	kfree(port->buf);  	port->buf = NULL; -	spin_unlock_irqrestore(&ac->lock, flags);  }  /** diff --git a/sound/soc/renesas/rz-ssi.c b/sound/soc/renesas/rz-ssi.c index e00940814157..81b883e8ac92 100644 --- a/sound/soc/renesas/rz-ssi.c +++ b/sound/soc/renesas/rz-ssi.c @@ -85,6 +85,7 @@ struct rz_ssi_stream {  	struct snd_pcm_substream *substream;  	int fifo_sample_size;	/* sample capacity of SSI FIFO */  	int dma_buffer_pos;	/* The address for the next DMA descriptor */ +	int completed_dma_buf_pos; /* The address of the last completed DMA descriptor. 
*/  	int period_counter;	/* for keeping track of periods transferred */  	int sample_width;  	int buffer_pos;		/* current frame position in the buffer */ @@ -215,6 +216,7 @@ static void rz_ssi_stream_init(struct rz_ssi_stream *strm,  	rz_ssi_set_substream(strm, substream);  	strm->sample_width = samples_to_bytes(runtime, 1);  	strm->dma_buffer_pos = 0; +	strm->completed_dma_buf_pos = 0;  	strm->period_counter = 0;  	strm->buffer_pos = 0; @@ -437,6 +439,10 @@ static void rz_ssi_pointer_update(struct rz_ssi_stream *strm, int frames)  		snd_pcm_period_elapsed(strm->substream);  		strm->period_counter = current_period;  	} + +	strm->completed_dma_buf_pos += runtime->period_size; +	if (strm->completed_dma_buf_pos >= runtime->buffer_size) +		strm->completed_dma_buf_pos = 0;  }  static int rz_ssi_pio_recv(struct rz_ssi_priv *ssi, struct rz_ssi_stream *strm) @@ -778,10 +784,14 @@ no_dma:  	return -ENODEV;  } -static int rz_ssi_trigger_resume(struct rz_ssi_priv *ssi) +static int rz_ssi_trigger_resume(struct rz_ssi_priv *ssi, struct rz_ssi_stream *strm)  { +	struct snd_pcm_substream *substream = strm->substream; +	struct snd_pcm_runtime *runtime = substream->runtime;  	int ret; +	strm->dma_buffer_pos = strm->completed_dma_buf_pos + runtime->period_size; +  	if (rz_ssi_is_stream_running(&ssi->playback) ||  	    rz_ssi_is_stream_running(&ssi->capture))  		return 0; @@ -794,16 +804,6 @@ static int rz_ssi_trigger_resume(struct rz_ssi_priv *ssi)  				ssi->hw_params_cache.channels);  } -static void rz_ssi_streams_suspend(struct rz_ssi_priv *ssi) -{ -	if (rz_ssi_is_stream_running(&ssi->playback) || -	    rz_ssi_is_stream_running(&ssi->capture)) -		return; - -	ssi->playback.dma_buffer_pos = 0; -	ssi->capture.dma_buffer_pos = 0; -} -  static int rz_ssi_dai_trigger(struct snd_pcm_substream *substream, int cmd,  			      struct snd_soc_dai *dai)  { @@ -813,7 +813,7 @@ static int rz_ssi_dai_trigger(struct snd_pcm_substream *substream, int cmd,  	switch (cmd) {  	case SNDRV_PCM_TRIGGER_RESUME: -		ret = rz_ssi_trigger_resume(ssi); +		ret = rz_ssi_trigger_resume(ssi, strm);  		if (ret)  			return ret; @@ -852,7 +852,6 @@ static int rz_ssi_dai_trigger(struct snd_pcm_substream *substream, int cmd,  	case SNDRV_PCM_TRIGGER_SUSPEND:  		rz_ssi_stop(ssi, strm); -		rz_ssi_streams_suspend(ssi);  		break;  	case SNDRV_PCM_TRIGGER_STOP: diff --git a/sound/soc/sdw_utils/soc_sdw_utils.c b/sound/soc/sdw_utils/soc_sdw_utils.c index 270c66b90228..f7c8c16308de 100644 --- a/sound/soc/sdw_utils/soc_sdw_utils.c +++ b/sound/soc/sdw_utils/soc_sdw_utils.c @@ -638,7 +638,6 @@ struct asoc_sdw_codec_info codec_info_list[] = {  			{  				.direction = {true, false},  				.dai_name = "cs42l43-dp6", -				.component_name = "cs42l43",  				.dai_type = SOC_SDW_DAI_TYPE_AMP,  				.dailink = {SOC_SDW_AMP_OUT_DAI_ID, SOC_SDW_UNUSED_DAI_ID},  				.init = asoc_sdw_cs42l43_spk_init, diff --git a/sound/usb/mixer_s1810c.c b/sound/usb/mixer_s1810c.c index 15960d25e748..6e09e074c0e7 100644 --- a/sound/usb/mixer_s1810c.c +++ b/sound/usb/mixer_s1810c.c @@ -178,7 +178,7 @@ snd_sc1810c_get_status_field(struct usb_device *dev,  	pkt_out.fields[SC1810C_STATE_F1_IDX] = SC1810C_SET_STATE_F1;  	pkt_out.fields[SC1810C_STATE_F2_IDX] = SC1810C_SET_STATE_F2; -	ret = snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0), +	ret = snd_usb_ctl_msg(dev, usb_sndctrlpipe(dev, 0),  			      SC1810C_SET_STATE_REQ,  			      SC1810C_SET_STATE_REQTYPE,  			      (*seqnum), 0, &pkt_out, sizeof(pkt_out)); @@ -597,15 +597,6 @@ int snd_sc1810_init_mixer(struct usb_mixer_interface *mixer)  	
if (!list_empty(&chip->mixer_list))  		return 0; -	dev_info(&dev->dev, -		 "Presonus Studio 1810c, device_setup: %u\n", chip->setup); -	if (chip->setup == 1) -		dev_info(&dev->dev, "(8out/18in @ 48kHz)\n"); -	else if (chip->setup == 2) -		dev_info(&dev->dev, "(6out/8in @ 192kHz)\n"); -	else -		dev_info(&dev->dev, "(8out/14in @ 96kHz)\n"); -  	ret = snd_s1810c_init_mixer_maps(chip);  	if (ret < 0)  		return ret; @@ -634,16 +625,28 @@ int snd_sc1810_init_mixer(struct usb_mixer_interface *mixer)  	if (ret < 0)  		return ret; -	// The 1824c has a Mono Main switch instead of a -	// A/B select switch. -	if (mixer->chip->usb_id == USB_ID(0x194f, 0x010d)) { -		ret = snd_s1810c_switch_init(mixer, &snd_s1824c_mono_sw); +	switch (chip->usb_id) { +	case USB_ID(0x194f, 0x010c): /* Presonus Studio 1810c */ +		dev_info(&dev->dev, +			 "Presonus Studio 1810c, device_setup: %u\n", chip->setup); +		if (chip->setup == 1) +			dev_info(&dev->dev, "(8out/18in @ 48kHz)\n"); +		else if (chip->setup == 2) +			dev_info(&dev->dev, "(6out/8in @ 192kHz)\n"); +		else +			dev_info(&dev->dev, "(8out/14in @ 96kHz)\n"); + +		ret = snd_s1810c_switch_init(mixer, &snd_s1810c_ab_sw);  		if (ret < 0)  			return ret; -	} else if (mixer->chip->usb_id == USB_ID(0x194f, 0x010c)) { -		ret = snd_s1810c_switch_init(mixer, &snd_s1810c_ab_sw); + +		break; +	case USB_ID(0x194f, 0x010d): /* Presonus Studio 1824c */ +		ret = snd_s1810c_switch_init(mixer, &snd_s1824c_mono_sw);  		if (ret < 0)  			return ret; + +		break;  	}  	return ret; diff --git a/tools/lib/bpf/bpf_tracing.h b/tools/lib/bpf/bpf_tracing.h index a8f6cd4841b0..dbe32a5d02cd 100644 --- a/tools/lib/bpf/bpf_tracing.h +++ b/tools/lib/bpf/bpf_tracing.h @@ -311,7 +311,7 @@ struct pt_regs___arm64 {  #define __PT_RET_REG regs[31]  #define __PT_FP_REG __unsupported__  #define __PT_RC_REG gpr[3] -#define __PT_SP_REG sp +#define __PT_SP_REG gpr[1]  #define __PT_IP_REG nip  #elif defined(bpf_target_sparc) diff --git a/tools/net/ynl/lib/ynl-priv.h b/tools/net/ynl/lib/ynl-priv.h index 29481989ea76..ced7dce44efb 100644 --- a/tools/net/ynl/lib/ynl-priv.h +++ b/tools/net/ynl/lib/ynl-priv.h @@ -313,7 +313,7 @@ ynl_attr_put_str(struct nlmsghdr *nlh, unsigned int attr_type, const char *str)  	struct nlattr *attr;  	size_t len; -	len = strlen(str); +	len = strlen(str) + 1;  	if (__ynl_attr_put_overflow(nlh, len))  		return; @@ -321,7 +321,7 @@ ynl_attr_put_str(struct nlmsghdr *nlh, unsigned int attr_type, const char *str)  	attr->nla_type = attr_type;  	strcpy((char *)ynl_attr_data(attr), str); -	attr->nla_len = NLA_HDRLEN + NLA_ALIGN(len); +	attr->nla_len = NLA_HDRLEN + len;  	nlh->nlmsg_len += NLMSG_ALIGN(attr->nla_len);  } diff --git a/tools/net/ynl/pyynl/ethtool.py b/tools/net/ynl/pyynl/ethtool.py index 9b523cbb3568..fd0f6b8d54d1 100755 --- a/tools/net/ynl/pyynl/ethtool.py +++ b/tools/net/ynl/pyynl/ethtool.py @@ -44,6 +44,9 @@ def print_field(reply, *desc):      Pretty-print a set of fields from the reply. desc specifies the      fields and the optional type (bool/yn).      """ +    if not reply: +        return +      if len(desc) == 0:          return print_field(reply, *zip(reply.keys(), reply.keys())) diff --git a/tools/objtool/check.c b/tools/objtool/check.c index 620854fdaaf6..9004fbc06769 100644 --- a/tools/objtool/check.c +++ b/tools/objtool/check.c @@ -3516,8 +3516,11 @@ static bool skip_alt_group(struct instruction *insn)  {  	struct instruction *alt_insn = insn->alts ? 
insn->alts->insn : NULL; +	if (!insn->alt_group) +		return false; +  	/* ANNOTATE_IGNORE_ALTERNATIVE */ -	if (insn->alt_group && insn->alt_group->ignore) +	if (insn->alt_group->ignore)  		return true;  	/* diff --git a/tools/testing/selftests/cachestat/.gitignore b/tools/testing/selftests/cachestat/.gitignore index d6c30b43a4bb..abbb13b6e96b 100644 --- a/tools/testing/selftests/cachestat/.gitignore +++ b/tools/testing/selftests/cachestat/.gitignore @@ -1,2 +1,3 @@  # SPDX-License-Identifier: GPL-2.0-only  test_cachestat +tmpshmcstat diff --git a/tools/testing/selftests/cachestat/test_cachestat.c b/tools/testing/selftests/cachestat/test_cachestat.c index c952640f163b..ab838bcb9ec5 100644 --- a/tools/testing/selftests/cachestat/test_cachestat.c +++ b/tools/testing/selftests/cachestat/test_cachestat.c @@ -226,7 +226,7 @@ bool run_cachestat_test(enum file_type type)  	int syscall_ret;  	size_t compute_len = PS * 512;  	struct cachestat_range cs_range = { PS, compute_len }; -	char *filename = "tmpshmcstat"; +	char *filename = "tmpshmcstat", *map;  	struct cachestat cs;  	bool ret = true;  	int fd; @@ -257,7 +257,7 @@ bool run_cachestat_test(enum file_type type)  		}  		break;  	case FILE_MMAP: -		char *map = mmap(NULL, filesize, PROT_READ | PROT_WRITE, +		map = mmap(NULL, filesize, PROT_READ | PROT_WRITE,  				 MAP_SHARED, fd, 0);  		if (map == MAP_FAILED) { diff --git a/tools/testing/selftests/net/bareudp.sh b/tools/testing/selftests/net/bareudp.sh index 4046131e7888..d9e5b967f815 100755 --- a/tools/testing/selftests/net/bareudp.sh +++ b/tools/testing/selftests/net/bareudp.sh @@ -1,4 +1,4 @@ -#!/bin/sh +#!/bin/bash  # SPDX-License-Identifier: GPL-2.0  # Test various bareudp tunnel configurations. diff --git a/tools/testing/selftests/vfio/lib/include/vfio_util.h b/tools/testing/selftests/vfio/lib/include/vfio_util.h index ed31606e01b7..240409bf5f8a 100644 --- a/tools/testing/selftests/vfio/lib/include/vfio_util.h +++ b/tools/testing/selftests/vfio/lib/include/vfio_util.h @@ -206,10 +206,29 @@ struct vfio_pci_device *vfio_pci_device_init(const char *bdf, const char *iommu_  void vfio_pci_device_cleanup(struct vfio_pci_device *device);  void vfio_pci_device_reset(struct vfio_pci_device *device); -void vfio_pci_dma_map(struct vfio_pci_device *device, -		      struct vfio_dma_region *region); -void vfio_pci_dma_unmap(struct vfio_pci_device *device, -			struct vfio_dma_region *region); +int __vfio_pci_dma_map(struct vfio_pci_device *device, +		       struct vfio_dma_region *region); +int __vfio_pci_dma_unmap(struct vfio_pci_device *device, +			 struct vfio_dma_region *region, +			 u64 *unmapped); +int __vfio_pci_dma_unmap_all(struct vfio_pci_device *device, u64 *unmapped); + +static inline void vfio_pci_dma_map(struct vfio_pci_device *device, +				    struct vfio_dma_region *region) +{ +	VFIO_ASSERT_EQ(__vfio_pci_dma_map(device, region), 0); +} + +static inline void vfio_pci_dma_unmap(struct vfio_pci_device *device, +				      struct vfio_dma_region *region) +{ +	VFIO_ASSERT_EQ(__vfio_pci_dma_unmap(device, region, NULL), 0); +} + +static inline void vfio_pci_dma_unmap_all(struct vfio_pci_device *device) +{ +	VFIO_ASSERT_EQ(__vfio_pci_dma_unmap_all(device, NULL), 0); +}  void vfio_pci_config_access(struct vfio_pci_device *device, bool write,  			    size_t config, size_t size, void *data); diff --git a/tools/testing/selftests/vfio/lib/vfio_pci_device.c b/tools/testing/selftests/vfio/lib/vfio_pci_device.c index 0921b2451ba5..a381fd253aa7 100644 --- a/tools/testing/selftests/vfio/lib/vfio_pci_device.c 
+++ b/tools/testing/selftests/vfio/lib/vfio_pci_device.c @@ -2,6 +2,7 @@  #include <dirent.h>  #include <fcntl.h>  #include <libgen.h> +#include <stdint.h>  #include <stdlib.h>  #include <string.h>  #include <unistd.h> @@ -141,7 +142,7 @@ static void vfio_pci_irq_get(struct vfio_pci_device *device, u32 index,  	ioctl_assert(device->fd, VFIO_DEVICE_GET_IRQ_INFO, irq_info);  } -static void vfio_iommu_dma_map(struct vfio_pci_device *device, +static int vfio_iommu_dma_map(struct vfio_pci_device *device,  			       struct vfio_dma_region *region)  {  	struct vfio_iommu_type1_dma_map args = { @@ -152,10 +153,13 @@ static void vfio_iommu_dma_map(struct vfio_pci_device *device,  		.size = region->size,  	}; -	ioctl_assert(device->container_fd, VFIO_IOMMU_MAP_DMA, &args); +	if (ioctl(device->container_fd, VFIO_IOMMU_MAP_DMA, &args)) +		return -errno; + +	return 0;  } -static void iommufd_dma_map(struct vfio_pci_device *device, +static int iommufd_dma_map(struct vfio_pci_device *device,  			    struct vfio_dma_region *region)  {  	struct iommu_ioas_map args = { @@ -169,54 +173,108 @@ static void iommufd_dma_map(struct vfio_pci_device *device,  		.ioas_id = device->ioas_id,  	}; -	ioctl_assert(device->iommufd, IOMMU_IOAS_MAP, &args); +	if (ioctl(device->iommufd, IOMMU_IOAS_MAP, &args)) +		return -errno; + +	return 0;  } -void vfio_pci_dma_map(struct vfio_pci_device *device, +int __vfio_pci_dma_map(struct vfio_pci_device *device,  		      struct vfio_dma_region *region)  { +	int ret; +  	if (device->iommufd) -		iommufd_dma_map(device, region); +		ret = iommufd_dma_map(device, region);  	else -		vfio_iommu_dma_map(device, region); +		ret = vfio_iommu_dma_map(device, region); + +	if (ret) +		return ret;  	list_add(®ion->link, &device->dma_regions); + +	return 0;  } -static void vfio_iommu_dma_unmap(struct vfio_pci_device *device, -				 struct vfio_dma_region *region) +static int vfio_iommu_dma_unmap(int fd, u64 iova, u64 size, u32 flags, +				u64 *unmapped)  {  	struct vfio_iommu_type1_dma_unmap args = {  		.argsz = sizeof(args), -		.iova = region->iova, -		.size = region->size, +		.iova = iova, +		.size = size, +		.flags = flags,  	}; -	ioctl_assert(device->container_fd, VFIO_IOMMU_UNMAP_DMA, &args); +	if (ioctl(fd, VFIO_IOMMU_UNMAP_DMA, &args)) +		return -errno; + +	if (unmapped) +		*unmapped = args.size; + +	return 0;  } -static void iommufd_dma_unmap(struct vfio_pci_device *device, -			      struct vfio_dma_region *region) +static int iommufd_dma_unmap(int fd, u64 iova, u64 length, u32 ioas_id, +			     u64 *unmapped)  {  	struct iommu_ioas_unmap args = {  		.size = sizeof(args), -		.iova = region->iova, -		.length = region->size, -		.ioas_id = device->ioas_id, +		.iova = iova, +		.length = length, +		.ioas_id = ioas_id,  	}; -	ioctl_assert(device->iommufd, IOMMU_IOAS_UNMAP, &args); +	if (ioctl(fd, IOMMU_IOAS_UNMAP, &args)) +		return -errno; + +	if (unmapped) +		*unmapped = args.length; + +	return 0;  } -void vfio_pci_dma_unmap(struct vfio_pci_device *device, -			struct vfio_dma_region *region) +int __vfio_pci_dma_unmap(struct vfio_pci_device *device, +			 struct vfio_dma_region *region, u64 *unmapped)  { +	int ret; + +	if (device->iommufd) +		ret = iommufd_dma_unmap(device->iommufd, region->iova, +					region->size, device->ioas_id, +					unmapped); +	else +		ret = vfio_iommu_dma_unmap(device->container_fd, region->iova, +					   region->size, 0, unmapped); + +	if (ret) +		return ret; + +	list_del_init(®ion->link); + +	return 0; +} + +int __vfio_pci_dma_unmap_all(struct vfio_pci_device *device, u64 
*unmapped) +{ +	int ret; +	struct vfio_dma_region *curr, *next; +  	if (device->iommufd) -		iommufd_dma_unmap(device, region); +		ret = iommufd_dma_unmap(device->iommufd, 0, UINT64_MAX, +					device->ioas_id, unmapped);  	else -		vfio_iommu_dma_unmap(device, region); +		ret = vfio_iommu_dma_unmap(device->container_fd, 0, 0, +					   VFIO_DMA_UNMAP_FLAG_ALL, unmapped); + +	if (ret) +		return ret; + +	list_for_each_entry_safe(curr, next, &device->dma_regions, link) +		list_del_init(&curr->link); -	list_del(®ion->link); +	return 0;  }  static void vfio_pci_region_get(struct vfio_pci_device *device, int index, diff --git a/tools/testing/selftests/vfio/vfio_dma_mapping_test.c b/tools/testing/selftests/vfio/vfio_dma_mapping_test.c index ab19c54a774d..4f1ea79a200c 100644 --- a/tools/testing/selftests/vfio/vfio_dma_mapping_test.c +++ b/tools/testing/selftests/vfio/vfio_dma_mapping_test.c @@ -112,6 +112,8 @@ FIXTURE_VARIANT_ADD_ALL_IOMMU_MODES(anonymous, 0, 0);  FIXTURE_VARIANT_ADD_ALL_IOMMU_MODES(anonymous_hugetlb_2mb, SZ_2M, MAP_HUGETLB | MAP_HUGE_2MB);  FIXTURE_VARIANT_ADD_ALL_IOMMU_MODES(anonymous_hugetlb_1gb, SZ_1G, MAP_HUGETLB | MAP_HUGE_1GB); +#undef FIXTURE_VARIANT_ADD_IOMMU_MODE +  FIXTURE_SETUP(vfio_dma_mapping_test)  {  	self->device = vfio_pci_device_init(device_bdf, variant->iommu_mode); @@ -129,6 +131,7 @@ TEST_F(vfio_dma_mapping_test, dma_map_unmap)  	struct vfio_dma_region region;  	struct iommu_mapping mapping;  	u64 mapping_size = size; +	u64 unmapped;  	int rc;  	region.vaddr = mmap(NULL, size, PROT_READ | PROT_WRITE, flags, -1, 0); @@ -184,7 +187,9 @@ TEST_F(vfio_dma_mapping_test, dma_map_unmap)  	}  unmap: -	vfio_pci_dma_unmap(self->device, ®ion); +	rc = __vfio_pci_dma_unmap(self->device, ®ion, &unmapped); +	ASSERT_EQ(rc, 0); +	ASSERT_EQ(unmapped, region.size);  	printf("Unmapped IOVA 0x%lx\n", region.iova);  	ASSERT_EQ(INVALID_IOVA, __to_iova(self->device, region.vaddr));  	ASSERT_NE(0, iommu_mapping_get(device_bdf, region.iova, &mapping)); @@ -192,6 +197,94 @@ unmap:  	ASSERT_TRUE(!munmap(region.vaddr, size));  } +FIXTURE(vfio_dma_map_limit_test) { +	struct vfio_pci_device *device; +	struct vfio_dma_region region; +	size_t mmap_size; +}; + +FIXTURE_VARIANT(vfio_dma_map_limit_test) { +	const char *iommu_mode; +}; + +#define FIXTURE_VARIANT_ADD_IOMMU_MODE(_iommu_mode)			       \ +FIXTURE_VARIANT_ADD(vfio_dma_map_limit_test, _iommu_mode) {		       \ +	.iommu_mode = #_iommu_mode,					       \ +} + +FIXTURE_VARIANT_ADD_ALL_IOMMU_MODES(); + +#undef FIXTURE_VARIANT_ADD_IOMMU_MODE + +FIXTURE_SETUP(vfio_dma_map_limit_test) +{ +	struct vfio_dma_region *region = &self->region; +	u64 region_size = getpagesize(); + +	/* +	 * Over-allocate mmap by double the size to provide enough backing vaddr +	 * for overflow tests +	 */ +	self->mmap_size = 2 * region_size; + +	self->device = vfio_pci_device_init(device_bdf, variant->iommu_mode); +	region->vaddr = mmap(NULL, self->mmap_size, PROT_READ | PROT_WRITE, +			     MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); +	ASSERT_NE(region->vaddr, MAP_FAILED); + +	/* One page prior to the end of address space */ +	region->iova = ~(iova_t)0 & ~(region_size - 1); +	region->size = region_size; +} + +FIXTURE_TEARDOWN(vfio_dma_map_limit_test) +{ +	vfio_pci_device_cleanup(self->device); +	ASSERT_EQ(munmap(self->region.vaddr, self->mmap_size), 0); +} + +TEST_F(vfio_dma_map_limit_test, unmap_range) +{ +	struct vfio_dma_region *region = &self->region; +	u64 unmapped; +	int rc; + +	vfio_pci_dma_map(self->device, region); +	ASSERT_EQ(region->iova, to_iova(self->device, 
region->vaddr)); + +	rc = __vfio_pci_dma_unmap(self->device, region, &unmapped); +	ASSERT_EQ(rc, 0); +	ASSERT_EQ(unmapped, region->size); +} + +TEST_F(vfio_dma_map_limit_test, unmap_all) +{ +	struct vfio_dma_region *region = &self->region; +	u64 unmapped; +	int rc; + +	vfio_pci_dma_map(self->device, region); +	ASSERT_EQ(region->iova, to_iova(self->device, region->vaddr)); + +	rc = __vfio_pci_dma_unmap_all(self->device, &unmapped); +	ASSERT_EQ(rc, 0); +	ASSERT_EQ(unmapped, region->size); +} + +TEST_F(vfio_dma_map_limit_test, overflow) +{ +	struct vfio_dma_region *region = &self->region; +	int rc; + +	region->size = self->mmap_size; + +	rc = __vfio_pci_dma_map(self->device, region); +	ASSERT_EQ(rc, -EOVERFLOW); + +	rc = __vfio_pci_dma_unmap(self->device, region, NULL); +	ASSERT_EQ(rc, -EOVERFLOW); +} +  int main(int argc, char *argv[])  {  	device_bdf = vfio_selftests_get_bdf(&argc, argv);  | 
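
For reference, a minimal user-space sketch of the receive-window growth arithmetic introduced in the mptcp_rcvbuf_grow() hunk above. This is not the kernel implementation: the standalone helper, main() and the example values are illustrative, the kernel's do_div() is replaced by plain 64-bit division, and it assumes oldval > 0 and newval >= oldval (which holds on the kernel path, where the grow step only runs after a larger space measurement).

	/*
	 * Sketch of the DRS-style growth computation: start from twice the new
	 * measurement (DRS is one RTT late), then add a slow-start style boost
	 * proportional to how much the measurement grew this round.
	 * Illustrative only; assumes oldval > 0 and newval >= oldval.
	 */
	#include <stdint.h>
	#include <stdio.h>

	static uint64_t rcvwin_candidate(uint32_t oldval, uint32_t newval)
	{
		/* DRS is one RTT late: base window is twice the new space. */
		uint64_t rcvwin = (uint64_t)newval << 1;

		/* Allow the sender to double its rate again, scaled by the
		 * relative growth of the measurement (grow/oldval).
		 */
		uint64_t grow = rcvwin * (newval - oldval);

		grow /= oldval;		/* plain division instead of do_div() */
		rcvwin += grow << 1;

		return rcvwin;
	}

	int main(void)
	{
		/* Example: measured space grew from 64 KiB to 96 KiB. */
		printf("candidate rcvwin = %llu bytes\n",
		       (unsigned long long)rcvwin_candidate(64 * 1024, 96 * 1024));
		return 0;
	}
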

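
Similarly, a minimal user-space sketch of the MSG_PEEK resume logic added to __mptcp_recvmsg_mskq() above: the receive queue is modelled as an array of segment lengths, and copied_total is the number of bytes an earlier peek already returned. Fully peeked segments are skipped and the offset into the first partially peeked one is computed. The struct, helper name and example values are invented for illustration; they are not part of the kernel change.

	#include <stddef.h>
	#include <stdio.h>

	struct peek_pos {
		size_t seg;	/* index of the first segment with unseen data */
		size_t offset;	/* offset of the first unseen byte within it */
	};

	static struct peek_pos peek_resume(const size_t *seg_len, size_t nsegs,
					   size_t copied_total)
	{
		struct peek_pos pos = { .seg = nsegs, .offset = 0 };
		size_t total_data_len = 0;
		size_t i;

		for (i = 0; i < nsegs; i++) {
			/* Segment entirely consumed by the previous peek. */
			if (total_data_len + seg_len[i] <= copied_total) {
				total_data_len += seg_len[i];
				continue;
			}
			/* Partially peeked segment: resume past those bytes. */
			pos.seg = i;
			pos.offset = copied_total - total_data_len;
			break;
		}
		return pos;
	}

	int main(void)
	{
		const size_t segs[] = { 1000, 500, 2000 };
		struct peek_pos pos = peek_resume(segs, 3, 1200);

		/* 1200 bytes already peeked: resumes in segment 1, offset 200. */
		printf("resume at segment %zu, offset %zu\n", pos.seg, pos.offset);
		return 0;
	}
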