-rw-r--r--  .mailmap  2
-rw-r--r--  CREDITS  4
-rw-r--r--  Documentation/devicetree/bindings/net/microchip,sparx5-switch.yaml  4
-rw-r--r--  Documentation/devicetree/bindings/phy/fsl,imx8mq-usb-phy.yaml  4
-rw-r--r--  Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-ufs-phy.yaml  4
-rw-r--r--  Documentation/devicetree/bindings/phy/samsung,usb3-drd-phy.yaml  4
-rw-r--r--  Documentation/devicetree/bindings/serial/renesas,scif.yaml  1
-rw-r--r--  Documentation/devicetree/bindings/sound/qcom,pm4125-sdw.yaml  4
-rw-r--r--  Documentation/devicetree/bindings/spi/spi-cadence.yaml  11
-rw-r--r--  Documentation/devicetree/bindings/spi/spi-rockchip.yaml  1
-rw-r--r--  Documentation/devicetree/bindings/ufs/qcom,sm8650-ufshc.yaml  2
-rw-r--r--  Documentation/devicetree/bindings/usb/fcs,fsa4480.yaml  1
-rw-r--r--  Documentation/devicetree/bindings/usb/fsl,imx8mp-dwc3.yaml  10
-rw-r--r--  Documentation/devicetree/bindings/usb/gpio-sbu-mux.yaml  1
-rw-r--r--  Documentation/devicetree/bindings/usb/nxp,ptn36502.yaml  1
-rw-r--r--  Documentation/devicetree/bindings/usb/onnn,nb7vpq904m.yaml  1
-rw-r--r--  Documentation/devicetree/bindings/usb/parade,ps8830.yaml  1
-rw-r--r--  Documentation/devicetree/bindings/usb/qcom,snps-dwc3.yaml  3
-rw-r--r--  Documentation/devicetree/bindings/usb/qcom,wcd939x-usbss.yaml  1
-rw-r--r--  Documentation/devicetree/bindings/usb/ti,tusb1046.yaml  1
-rw-r--r--  Documentation/devicetree/bindings/usb/usb-switch-ports.yaml  68
-rw-r--r--  Documentation/devicetree/bindings/usb/usb-switch.yaml  52
-rw-r--r--  Documentation/netlink/specs/dpll.yaml  2
-rw-r--r--  Documentation/networking/ax25.rst  7
-rw-r--r--  Documentation/networking/device_drivers/cellular/qualcomm/rmnet.rst  12
-rw-r--r--  Documentation/networking/net_failover.rst  6
-rw-r--r--  Documentation/networking/netconsole.rst  3
-rw-r--r--  MAINTAINERS  35
-rw-r--r--  Makefile  2
-rw-r--r--  arch/arm/boot/dts/broadcom/bcm2711-rpi.dtsi  8
-rw-r--r--  arch/arm/boot/dts/broadcom/bcm2835-rpi-common.dtsi  9
-rw-r--r--  arch/arm64/boot/dts/broadcom/bcm2712.dtsi  2
-rw-r--r--  arch/arm64/include/asm/pgtable.h  3
-rw-r--r--  arch/arm64/mm/copypage.c  11
-rw-r--r--  arch/csky/abiv2/cacheflush.c  2
-rw-r--r--  arch/csky/abiv2/inc/abi/cacheflush.h  4
-rw-r--r--  arch/mips/mti-malta/malta-setup.c  4
-rw-r--r--  arch/mips/pci/pci-malta.c  3
-rw-r--r--  arch/riscv/include/asm/asm.h  8
-rw-r--r--  arch/riscv/include/asm/cpufeature.h  2
-rw-r--r--  arch/riscv/include/asm/hwprobe.h  7
-rw-r--r--  arch/riscv/include/asm/pgtable-64.h  2
-rw-r--r--  arch/riscv/include/asm/pgtable.h  2
-rw-r--r--  arch/riscv/include/asm/vdso/arch_data.h  6
-rw-r--r--  arch/riscv/kernel/cpu.c  4
-rw-r--r--  arch/riscv/kernel/cpufeature.c  4
-rw-r--r--  arch/riscv/kernel/smp.c  24
-rw-r--r--  arch/riscv/kernel/sys_hwprobe.c  76
-rw-r--r--  arch/riscv/kernel/unaligned_access_speed.c  9
-rw-r--r--  arch/riscv/kernel/vdso/hwprobe.c  2
-rw-r--r--  arch/s390/Kconfig  1
-rw-r--r--  arch/s390/configs/debug_defconfig  14
-rw-r--r--  arch/s390/configs/defconfig  14
-rw-r--r--  arch/s390/configs/zfcpdump_defconfig  1
-rw-r--r--  arch/s390/crypto/phmac_s390.c  52
-rw-r--r--  arch/s390/include/asm/pci.h  1
-rw-r--r--  arch/s390/mm/dump_pagetables.c  21
-rw-r--r--  arch/s390/pci/pci_event.c  4
-rw-r--r--  arch/s390/pci/pci_irq.c  9
-rw-r--r--  arch/x86/Makefile  2
-rw-r--r--  arch/x86/events/intel/core.c  1
-rw-r--r--  arch/x86/events/intel/ds.c  3
-rw-r--r--  arch/x86/events/intel/uncore.c  1
-rw-r--r--  arch/x86/include/asm/intel-family.h  6
-rw-r--r--  arch/x86/include/asm/page_64.h  3
-rw-r--r--  arch/x86/kernel/cpu/amd.c  12
-rw-r--r--  arch/x86/kernel/cpu/bugs.c  11
-rw-r--r--  arch/x86/kernel/cpu/microcode/amd.c  22
-rw-r--r--  arch/x86/kernel/cpu/resctrl/monitor.c  11
-rw-r--r--  arch/x86/kernel/fpu/core.c  3
-rw-r--r--  block/blk-crypto.c  2
-rw-r--r--  block/blk-settings.c  10
-rw-r--r--  drivers/acpi/acpi_mrrm.c  3
-rw-r--r--  drivers/acpi/acpi_video.c  4
-rw-r--r--  drivers/acpi/acpica/tbprint.c  6
-rw-r--r--  drivers/acpi/button.c  4
-rw-r--r--  drivers/acpi/fan.h  7
-rw-r--r--  drivers/acpi/fan_attr.c  2
-rw-r--r--  drivers/acpi/fan_core.c  36
-rw-r--r--  drivers/acpi/fan_hwmon.c  11
-rw-r--r--  drivers/acpi/property.c  2
-rw-r--r--  drivers/acpi/riscv/rimt.c  122
-rw-r--r--  drivers/acpi/spcr.c  2
-rw-r--r--  drivers/android/binder.c  38
-rw-r--r--  drivers/android/binder/freeze.rs  18
-rw-r--r--  drivers/android/binder/node.rs  2
-rw-r--r--  drivers/android/binder/process.rs  50
-rw-r--r--  drivers/android/binder/transaction.rs  6
-rw-r--r--  drivers/base/arch_topology.c  2
-rw-r--r--  drivers/base/core.c  2
-rw-r--r--  drivers/base/devcoredump.c  136
-rw-r--r--  drivers/base/regmap/regmap-slimbus.c  6
-rw-r--r--  drivers/bcma/main.c  6
-rw-r--r--  drivers/block/nbd.c  15
-rw-r--r--  drivers/block/null_blk/main.c  1
-rw-r--r--  drivers/bluetooth/bpa10x.c  4
-rw-r--r--  drivers/bluetooth/btintel_pcie.c  11
-rw-r--r--  drivers/bluetooth/btmtksdio.c  12
-rw-r--r--  drivers/bluetooth/btmtkuart.c  4
-rw-r--r--  drivers/bluetooth/btnxpuart.c  4
-rw-r--r--  drivers/bluetooth/hci_ag6xx.c  2
-rw-r--r--  drivers/bluetooth/hci_aml.c  2
-rw-r--r--  drivers/bluetooth/hci_ath.c  2
-rw-r--r--  drivers/bluetooth/hci_bcm.c  2
-rw-r--r--  drivers/bluetooth/hci_h4.c  6
-rw-r--r--  drivers/bluetooth/hci_intel.c  2
-rw-r--r--  drivers/bluetooth/hci_ll.c  2
-rw-r--r--  drivers/bluetooth/hci_mrvl.c  6
-rw-r--r--  drivers/bluetooth/hci_nokia.c  4
-rw-r--r--  drivers/bluetooth/hci_qca.c  2
-rw-r--r--  drivers/bluetooth/hci_uart.h  2
-rw-r--r--  drivers/comedi/comedi_buf.c  2
-rw-r--r--  drivers/cpufreq/amd-pstate.c  6
-rw-r--r--  drivers/cpuidle/governors/menu.c  28
-rw-r--r--  drivers/crypto/aspeed/aspeed-acry.c  2
-rw-r--r--  drivers/dma-buf/dma-fence.c  2
-rw-r--r--  drivers/dpll/dpll_netlink.c  36
-rw-r--r--  drivers/dpll/zl3073x/dpll.c  2
-rw-r--r--  drivers/firewire/core-transaction.c  2
-rw-r--r--  drivers/firewire/init_ohci1394_dma.c  10
-rw-r--r--  drivers/firmware/arm_ffa/driver.c  37
-rw-r--r--  drivers/firmware/arm_scmi/common.h  32
-rw-r--r--  drivers/firmware/arm_scmi/driver.c  59
-rw-r--r--  drivers/gpio/gpio-104-idio-16.c  1
-rw-r--r--  drivers/gpio/gpio-idio-16.c  5
-rw-r--r--  drivers/gpio/gpio-ljca.c  14
-rw-r--r--  drivers/gpio/gpio-pci-idio-16.c  1
-rw-r--r--  drivers/gpio/gpio-regmap.c  26
-rw-r--r--  drivers/gpio/gpiolib-acpi-core.c  31
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cper.h  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c  34
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cyan_skillfish_reg_init.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c  4
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c  21
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c  1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c  3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c  3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h  8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c  3
-rw-r--r--  drivers/gpu/drm/amd/include/amd_cper.h  2
-rw-r--r--  drivers/gpu/drm/amd/include/ivsrcid/vcn/irqsrcs_vcn_5_0.h  2
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c  2
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c  2
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c  2
-rw-r--r--  drivers/gpu/drm/ast/ast_drv.h  8
-rw-r--r--  drivers/gpu/drm/ci/gitlab-ci.yml  2
-rw-r--r--  drivers/gpu/drm/drm_gem_atomic_helper.c  8
-rw-r--r--  drivers/gpu/drm/drm_panic.c  60
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_buffer.c  2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dmc.c  55
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fb.c  25
-rw-r--r--  drivers/gpu/drm/imx/ipuv3/parallel-display.c  18
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_drv.c  10
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gmu.c  5
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.c  7
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c  3
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c  4
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c  14
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c  2
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c  3
-rw-r--r--  drivers/gpu/drm/msm/dsi/phy/dsi_phy.h  1
-rw-r--r--  drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c  18
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.c  10
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_submit.c  9
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_vma.c  8
-rw-r--r--  drivers/gpu/drm/msm/msm_gpu.h  11
-rw-r--r--  drivers/gpu/drm/msm/msm_iommu.c  5
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_sched.c  14
-rw-r--r--  drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c  2
-rw-r--r--  drivers/gpu/drm/panel/panel-sitronix-st7789v.c  7
-rw-r--r--  drivers/gpu/drm/panthor/panthor_mmu.c  10
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c  25
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kms.c  1
-rw-r--r--  drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c  2
-rw-r--r--  drivers/gpu/drm/scheduler/sched_entity.c  6
-rw-r--r--  drivers/gpu/drm/xe/xe_ggtt.c  3
-rw-r--r--  drivers/gpu/drm/xe/xe_gt.c  19
-rw-r--r--  drivers/gpu/drm/xe/xe_pt.c  4
-rw-r--r--  drivers/gpu/drm/xe/xe_svm.c  5
-rw-r--r--  drivers/gpu/drm/xe/xe_validation.h  8
-rw-r--r--  drivers/gpu/drm/xe/xe_vm.c  96
-rw-r--r--  drivers/gpu/drm/xe/xe_vm_types.h  10
-rw-r--r--  drivers/hwmon/cgbc-hwmon.c  3
-rw-r--r--  drivers/hwmon/gpd-fan.c  10
-rw-r--r--  drivers/hwmon/pmbus/isl68137.c  3
-rw-r--r--  drivers/hwmon/pmbus/max34440.c  12
-rw-r--r--  drivers/hwmon/sht3x.c  27
-rw-r--r--  drivers/misc/amd-sbi/Kconfig  2
-rw-r--r--  drivers/misc/fastrpc.c  2
-rw-r--r--  drivers/misc/mei/hw-me-regs.h  2
-rw-r--r--  drivers/misc/mei/mei_lb.c  3
-rw-r--r--  drivers/misc/mei/pci-me.c  2
-rw-r--r--  drivers/misc/mei/pci-txe.c  14
-rw-r--r--  drivers/misc/vmw_balloon.c  8
-rw-r--r--  drivers/most/most_usb.c  13
-rw-r--r--  drivers/net/bonding/bond_main.c  47
-rw-r--r--  drivers/net/can/bxcan.c  2
-rw-r--r--  drivers/net/can/dev/netlink.c  6
-rw-r--r--  drivers/net/can/esd/esdacc.c  2
-rw-r--r--  drivers/net/can/rockchip/rockchip_canfd-tx.c  2
-rw-r--r--  drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c  7
-rw-r--r--  drivers/net/ethernet/dlink/dl2k.c  2
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c  3
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc.c  25
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc.h  2
-rw-r--r--  drivers/net/ethernet/hisilicon/Kconfig  1
-rw-r--r--  drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h  1
-rw-r--r--  drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c  10
-rw-r--r--  drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c  3
-rw-r--r--  drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.c  1
-rw-r--r--  drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c  1
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c  3
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c  9
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h  2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_common.c  35
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_flex_pipe.c  2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_sbq_cmd.h  1
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ethtool.c  2
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_ethtool.c  5
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c  2
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c  2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/params.c  2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h  5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c  25
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c  41
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h  4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c  8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rx.c  55
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_stats.c  4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tx.c  7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c  1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c  5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c  7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c  2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c  53
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c  4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c  5
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_common.c  6
-rw-r--r--  drivers/net/ethernet/renesas/ravb_main.c  24
-rw-r--r--  drivers/net/ethernet/sfc/mae.c  4
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c  9
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c  32
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c  4
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.c  2
-rw-r--r--  drivers/net/ethernet/ti/am65-cpts.c  63
-rw-r--r--  drivers/net/mctp/mctp-usb.c  8
-rw-r--r--  drivers/net/netconsole.c  21
-rw-r--r--  drivers/net/ovpn/tcp.c  26
-rw-r--r--  drivers/net/phy/dp83867.c  6
-rw-r--r--  drivers/net/phy/dp83869.c  4
-rw-r--r--  drivers/net/phy/micrel.c  4
-rw-r--r--  drivers/net/phy/realtek/realtek_main.c  16
-rw-r--r--  drivers/net/usb/asix_devices.c  12
-rw-r--r--  drivers/net/usb/rtl8150.c  11
-rw-r--r--  drivers/net/usb/usbnet.c  2
-rw-r--r--  drivers/net/virtio_net.c  11
-rw-r--r--  drivers/net/wireless/ath/ath10k/wmi.c  1
-rw-r--r--  drivers/net/wireless/ath/ath11k/core.c  54
-rw-r--r--  drivers/net/wireless/ath/ath11k/mac.c  10
-rw-r--r--  drivers/net/wireless/ath/ath12k/mac.c  34
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c  3
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c  28
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h  3
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mld/link.c  5
-rw-r--r--  drivers/nvme/host/pci.c  13
-rw-r--r--  drivers/nvme/target/auth.c  5
-rw-r--r--  drivers/nvmem/rcar-efuse.c  1
-rw-r--r--  drivers/of/irq.c  44
-rw-r--r--  drivers/pci/controller/dwc/pcie-designware-host.c  28
-rw-r--r--  drivers/pci/controller/dwc/pcie-qcom.c  100
-rw-r--r--  drivers/pci/pcie/aspm.c  34
-rw-r--r--  drivers/pci/setup-bus.c  2
-rw-r--r--  drivers/platform/mellanox/mlxbf-pmc.c  1
-rw-r--r--  drivers/platform/x86/dell/alienware-wmi-wmax.c  12
-rw-r--r--  drivers/ptp/ptp_ocp.c  2
-rw-r--r--  drivers/regulator/bd718x7-regulator.c  2
-rw-r--r--  drivers/scsi/hosts.c  5
-rw-r--r--  drivers/scsi/libfc/fc_fcp.c  2
-rw-r--r--  drivers/scsi/qla4xxx/ql4_os.c  8
-rw-r--r--  drivers/scsi/scsi_error.c  4
-rw-r--r--  drivers/scsi/storvsc_drv.c  96
-rw-r--r--  drivers/spi/spi-airoha-snfi.c  128
-rw-r--r--  drivers/spi/spi-amlogic-spifc-a4.c  4
-rw-r--r--  drivers/spi/spi-cadence-quadspi.c  5
-rw-r--r--  drivers/spi/spi-dw-mmio.c  4
-rw-r--r--  drivers/spi/spi-intel-pci.c  3
-rw-r--r--  drivers/spi/spi-intel.c  6
-rw-r--r--  drivers/spi/spi-nxp-fspi.c  32
-rw-r--r--  drivers/spi/spi-rockchip-sfc.c  12
-rw-r--r--  drivers/staging/gpib/agilent_82350b/agilent_82350b.c  12
-rw-r--r--  drivers/staging/gpib/fmh_gpib/fmh_gpib.c  5
-rw-r--r--  drivers/staging/gpib/ni_usb/ni_usb_gpib.c  13
-rw-r--r--  drivers/tee/qcomtee/Kconfig  1
-rw-r--r--  drivers/tee/qcomtee/call.c  2
-rw-r--r--  drivers/tee/qcomtee/core.c  2
-rw-r--r--  drivers/tty/serial/8250/8250_dw.c  4
-rw-r--r--  drivers/tty/serial/8250/8250_exar.c  11
-rw-r--r--  drivers/tty/serial/8250/8250_mtk.c  6
-rw-r--r--  drivers/tty/serial/sc16is7xx.c  7
-rw-r--r--  drivers/tty/serial/sh-sci.c  14
-rw-r--r--  drivers/ufs/core/ufshcd.c  28
-rw-r--r--  drivers/usb/core/quirks.c  2
-rw-r--r--  drivers/usb/dwc3/dwc3-generic-plat.c  3
-rw-r--r--  drivers/usb/gadget/legacy/raw_gadget.c  2
-rw-r--r--  drivers/usb/host/xhci-dbgcap.c  15
-rw-r--r--  drivers/usb/host/xhci-pci.c  3
-rw-r--r--  drivers/usb/misc/Kconfig  1
-rw-r--r--  drivers/usb/serial/option.c  10
-rw-r--r--  drivers/usb/typec/tcpm/tcpm.c  4
-rw-r--r--  drivers/vfio/vfio_iommu_type1.c  173
-rw-r--r--  drivers/video/fbdev/aty/atyfb_base.c  8
-rw-r--r--  drivers/video/fbdev/core/bitblit.c  16
-rw-r--r--  drivers/video/fbdev/core/fbcon.c  19
-rw-r--r--  drivers/video/fbdev/core/fbmem.c  1
-rw-r--r--  drivers/video/fbdev/pvr2fb.c  2
-rw-r--r--  drivers/video/fbdev/valkyriefb.c  2
-rw-r--r--  fs/9p/vfs_dentry.c  10
-rw-r--r--  fs/9p/vfs_inode.c  8
-rw-r--r--  fs/9p/vfs_inode_dotl.c  8
-rw-r--r--  fs/btrfs/delayed-inode.c  2
-rw-r--r--  fs/btrfs/delayed-inode.h  7
-rw-r--r--  fs/btrfs/ref-verify.c  2
-rw-r--r--  fs/btrfs/send.c  56
-rw-r--r--  fs/btrfs/super.c  8
-rw-r--r--  fs/erofs/zmap.c  59
-rw-r--r--  fs/hugetlbfs/inode.c  9
-rw-r--r--  fs/nfsd/nfs4proc.c  21
-rw-r--r--  fs/nfsd/nfs4state.c  1
-rw-r--r--  fs/nfsd/nfs4xdr.c  21
-rw-r--r--  fs/nfsd/nfsd.h  3
-rw-r--r--  fs/nfsd/xdr4.h  1
-rw-r--r--  fs/ocfs2/move_extents.c  5
-rw-r--r--  fs/resctrl/monitor.c  16
-rw-r--r--  fs/smb/client/cifsfs.c  2
-rw-r--r--  fs/smb/client/cifsglob.h  4
-rw-r--r--  fs/smb/client/cifsproto.h  3
-rw-r--r--  fs/smb/client/cifssmb.c  8
-rw-r--r--  fs/smb/client/connect.c  46
-rw-r--r--  fs/smb/client/dfs_cache.c  55
-rw-r--r--  fs/smb/client/inode.c  5
-rw-r--r--  fs/smb/client/smb2ops.c  7
-rw-r--r--  fs/smb/client/smb2proto.h  6
-rw-r--r--  fs/smb/client/smb2transport.c  18
-rw-r--r--  fs/smb/client/smbdirect.c  103
-rw-r--r--  fs/smb/client/trace.c  1
-rw-r--r--  fs/smb/common/smbdirect/smbdirect_socket.h  13
-rw-r--r--  fs/smb/server/transport_ipc.c  8
-rw-r--r--  fs/smb/server/transport_rdma.c  391
-rw-r--r--  fs/sysfs/group.c  26
-rw-r--r--  fs/xfs/Kconfig  11
-rw-r--r--  fs/xfs/libxfs/xfs_rtgroup.h  6
-rw-r--r--  fs/xfs/scrub/nlinks.c  34
-rw-r--r--  fs/xfs/xfs_buf.c  2
-rw-r--r--  fs/xfs/xfs_buf.h  1
-rw-r--r--  fs/xfs/xfs_mount.h  1
-rw-r--r--  fs/xfs/xfs_super.c  53
-rw-r--r--  fs/xfs/xfs_zone_alloc.c  156
-rw-r--r--  fs/xfs/xfs_zone_gc.c  108
-rw-r--r--  fs/xfs/xfs_zone_priv.h  2
-rw-r--r--  include/asm-generic/vmlinux.lds.h  2
-rw-r--r--  include/linux/arm_ffa.h  21
-rw-r--r--  include/linux/blk_types.h  11
-rw-r--r--  include/linux/fbcon.h  2
-rw-r--r--  include/linux/gpio/regmap.h  5
-rw-r--r--  include/linux/hung_task.h  8
-rw-r--r--  include/linux/mlx5/mlx5_ifc.h  4
-rw-r--r--  include/linux/pm_runtime.h  8
-rw-r--r--  include/linux/regmap.h  2
-rw-r--r--  include/linux/skbuff.h  3
-rw-r--r--  include/linux/virtio_net.h  4
-rw-r--r--  include/net/bluetooth/hci.h  1
-rw-r--r--  include/net/bluetooth/hci_core.h  1
-rw-r--r--  include/net/bluetooth/l2cap.h  4
-rw-r--r--  include/net/bluetooth/mgmt.h  2
-rw-r--r--  include/net/tcp.h  2
-rw-r--r--  include/net/tls.h  25
-rw-r--r--  include/scsi/scsi_device.h  10
-rw-r--r--  include/trace/events/tcp.h  9
-rw-r--r--  include/uapi/drm/xe_drm.h  15
-rw-r--r--  include/uapi/linux/fb.h  2
-rw-r--r--  io_uring/fdinfo.c  8
-rw-r--r--  io_uring/filetable.c  2
-rw-r--r--  io_uring/io_uring.c  2
-rw-r--r--  io_uring/kbuf.c  33
-rw-r--r--  io_uring/net.c  2
-rw-r--r--  io_uring/sqpoll.c  65
-rw-r--r--  io_uring/sqpoll.h  1
-rw-r--r--  io_uring/waitid.c  2
-rw-r--r--  kernel/dma/debug.c  5
-rw-r--r--  kernel/irq/chip.c  2
-rw-r--r--  kernel/irq/manage.c  4
-rw-r--r--  kernel/power/hibernate.c  4
-rw-r--r--  kernel/power/main.c  22
-rw-r--r--  kernel/power/process.c  1
-rw-r--r--  kernel/power/suspend.c  1
-rw-r--r--  kernel/sched/ext.c  126
-rw-r--r--  kernel/sched/fair.c  12
-rw-r--r--  kernel/sched/sched.h  3
-rw-r--r--  kernel/time/timekeeping.c  2
-rw-r--r--  kernel/trace/rv/monitors/pagefault/Kconfig  1
-rw-r--r--  kernel/trace/rv/rv.c  12
-rw-r--r--  lib/Kconfig.kmsan  2
-rw-r--r--  lib/crypto/Kconfig  2
-rw-r--r--  lib/kunit/kunit-test.c  2
-rw-r--r--  lib/kunit/test.c  3
-rw-r--r--  mm/damon/core.c  7
-rw-r--r--  mm/damon/sysfs.c  7
-rw-r--r--  mm/huge_memory.c  3
-rw-r--r--  mm/hugetlb.c  5
-rw-r--r--  mm/migrate.c  3
-rw-r--r--  mm/mremap.c  15
-rw-r--r--  mm/page_owner.c  3
-rw-r--r--  mm/slub.c  31
-rw-r--r--  net/batman-adv/originator.c  14
-rw-r--r--  net/bluetooth/hci_conn.c  7
-rw-r--r--  net/bluetooth/hci_event.c  11
-rw-r--r--  net/bluetooth/hci_sync.c  21
-rw-r--r--  net/bluetooth/iso.c  10
-rw-r--r--  net/bluetooth/l2cap_core.c  4
-rw-r--r--  net/bluetooth/mgmt.c  26
-rw-r--r--  net/bluetooth/rfcomm/tty.c  26
-rw-r--r--  net/core/datagram.c  44
-rw-r--r--  net/core/devmem.c  27
-rw-r--r--  net/core/gro.c  10
-rw-r--r--  net/core/gro_cells.c  5
-rw-r--r--  net/core/rtnetlink.c  3
-rw-r--r--  net/hsr/hsr_netlink.c  8
-rw-r--r--  net/ipv4/tcp_input.c  21
-rw-r--r--  net/mac80211/cfg.c  3
-rw-r--r--  net/mac80211/key.c  11
-rw-r--r--  net/mptcp/mib.c  1
-rw-r--r--  net/mptcp/mib.h  1
-rw-r--r--  net/mptcp/pm_kernel.c  6
-rw-r--r--  net/mptcp/protocol.c  83
-rw-r--r--  net/mptcp/protocol.h  2
-rw-r--r--  net/netfilter/nft_connlimit.c  2
-rw-r--r--  net/netfilter/nft_ct.c  30
-rw-r--r--  net/sctp/input.c  2
-rw-r--r--  net/sctp/inqueue.c  13
-rw-r--r--  net/smc/smc_inet.c  13
-rw-r--r--  net/tls/tls_device.c  4
-rw-r--r--  net/vmw_vsock/af_vsock.c  38
-rw-r--r--  net/wireless/nl80211.c  3
-rw-r--r--  net/xfrm/espintcp.c  6
-rw-r--r--  rust/kernel/auxiliary.rs  8
-rw-r--r--  rust/kernel/device.rs  4
-rw-r--r--  scripts/kconfig/mconf.c  3
-rw-r--r--  scripts/kconfig/nconf.c  3
-rwxr-xr-x  scripts/package/install-extmod-build  2
-rw-r--r--  sound/hda/codecs/realtek/alc269.c  14
-rw-r--r--  sound/soc/amd/acp/amd-acp70-acpi-match.c  157
-rw-r--r--  sound/soc/codecs/cs-amp-lib-test.c  1
-rw-r--r--  sound/soc/codecs/cs530x.c  2
-rw-r--r--  sound/soc/codecs/max98090.c  6
-rw-r--r--  sound/soc/codecs/rt721-sdca.c  4
-rw-r--r--  sound/soc/codecs/rt721-sdca.h  1
-rw-r--r--  sound/soc/fsl/fsl_micfil.c  4
-rw-r--r--  sound/soc/fsl/fsl_sai.c  11
-rw-r--r--  sound/soc/intel/avs/pcm.c  3
-rw-r--r--  sound/soc/intel/avs/probes.c  18
-rw-r--r--  sound/soc/intel/common/soc-acpi-intel-ptl-match.c  52
-rw-r--r--  sound/soc/mediatek/mt8195/mt8195-afe-pcm.c  1
-rw-r--r--  sound/soc/mediatek/mt8365/mt8365-afe-pcm.c  1
-rw-r--r--  sound/soc/qcom/qdsp6/q6asm.c  2
-rw-r--r--  sound/soc/renesas/rz-ssi.c  25
-rw-r--r--  sound/soc/sdw_utils/soc_sdw_utils.c  1
-rw-r--r--  sound/usb/mixer_s1810c.c  35
-rw-r--r--  tools/net/ynl/lib/ynl-priv.h  4
-rwxr-xr-x  tools/net/ynl/pyynl/ethtool.py  3
-rw-r--r--  tools/objtool/check.c  10
-rw-r--r--  tools/testing/selftests/cachestat/.gitignore  1
-rw-r--r--  tools/testing/selftests/cachestat/test_cachestat.c  4
-rwxr-xr-x  tools/testing/selftests/net/bareudp.sh  2
-rwxr-xr-x  tools/testing/selftests/net/mptcp/mptcp_join.sh  18
-rw-r--r--  tools/testing/selftests/net/sctp_hello.c  17
-rwxr-xr-x  tools/testing/selftests/net/sctp_vrf.sh  73
-rw-r--r--  tools/testing/selftests/vfio/lib/include/vfio_util.h  27
-rw-r--r--  tools/testing/selftests/vfio/lib/vfio_pci_device.c  104
-rw-r--r--  tools/testing/selftests/vfio/vfio_dma_mapping_test.c  95
480 files changed, 4544 insertions, 2378 deletions
diff --git a/.mailmap b/.mailmap
index c371ba3fb845..369cfe467932 100644
--- a/.mailmap
+++ b/.mailmap
@@ -27,6 +27,7 @@ Alan Cox <alan@lxorguk.ukuu.org.uk>
Alan Cox <root@hraefn.swansea.linux.org.uk>
Aleksandar Markovic <aleksandar.markovic@mips.com> <aleksandar.markovic@imgtec.com>
Aleksey Gorelov <aleksey_gorelov@phoenix.com>
+Alex Williamson <alex@shazbot.org> <alex.williamson@redhat.com>
Alexander Lobakin <alobakin@pm.me> <alobakin@dlink.ru>
Alexander Lobakin <alobakin@pm.me> <alobakin@marvell.com>
Alexander Lobakin <alobakin@pm.me> <bloodyreaper@yandex.ru>
@@ -643,6 +644,7 @@ Qais Yousef <qyousef@layalina.io> <qais.yousef@arm.com>
Quentin Monnet <qmo@kernel.org> <quentin.monnet@netronome.com>
Quentin Monnet <qmo@kernel.org> <quentin@isovalent.com>
Quentin Perret <qperret@qperret.net> <quentin.perret@arm.com>
+Rae Moar <raemoar63@gmail.com> <rmoar@google.com>
Rafael J. Wysocki <rjw@rjwysocki.net> <rjw@sisk.pl>
Rajeev Nandan <quic_rajeevny@quicinc.com> <rajeevny@codeaurora.org>
Rajendra Nayak <quic_rjendra@quicinc.com> <rnayak@codeaurora.org>
diff --git a/CREDITS b/CREDITS
index 903ea238e64f..fa5397f4ebcd 100644
--- a/CREDITS
+++ b/CREDITS
@@ -2036,6 +2036,10 @@ S: Botanicka' 68a
S: 602 00 Brno
S: Czech Republic
+N: Karsten Keil
+E: isdn@linux-pingi.de
+D: ISDN subsystem maintainer
+
N: Jakob Kemi
E: jakob.kemi@telia.com
D: V4L W9966 Webcam driver
diff --git a/Documentation/devicetree/bindings/net/microchip,sparx5-switch.yaml b/Documentation/devicetree/bindings/net/microchip,sparx5-switch.yaml
index 5caa3779660d..5491d0775ede 100644
--- a/Documentation/devicetree/bindings/net/microchip,sparx5-switch.yaml
+++ b/Documentation/devicetree/bindings/net/microchip,sparx5-switch.yaml
@@ -180,9 +180,9 @@ allOf:
then:
properties:
reg:
- minItems: 2
+ maxItems: 2
reg-names:
- minItems: 2
+ maxItems: 2
else:
properties:
reg:
diff --git a/Documentation/devicetree/bindings/phy/fsl,imx8mq-usb-phy.yaml b/Documentation/devicetree/bindings/phy/fsl,imx8mq-usb-phy.yaml
index 6a47e08e0e97..f9cffbb2df07 100644
--- a/Documentation/devicetree/bindings/phy/fsl,imx8mq-usb-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/fsl,imx8mq-usb-phy.yaml
@@ -142,7 +142,9 @@ allOf:
required:
- orientation-switch
then:
- $ref: /schemas/usb/usb-switch.yaml#
+ allOf:
+ - $ref: /schemas/usb/usb-switch.yaml#
+ - $ref: /schemas/usb/usb-switch-ports.yaml#
unevaluatedProperties: false
diff --git a/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-ufs-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-ufs-phy.yaml
index a58370a6a5d3..fba7b2549dde 100644
--- a/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-ufs-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-ufs-phy.yaml
@@ -24,6 +24,10 @@ properties:
- enum:
- qcom,qcs8300-qmp-ufs-phy
- const: qcom,sa8775p-qmp-ufs-phy
+ - items:
+ - enum:
+ - qcom,kaanapali-qmp-ufs-phy
+ - const: qcom,sm8750-qmp-ufs-phy
- enum:
- qcom,msm8996-qmp-ufs-phy
- qcom,msm8998-qmp-ufs-phy
diff --git a/Documentation/devicetree/bindings/phy/samsung,usb3-drd-phy.yaml b/Documentation/devicetree/bindings/phy/samsung,usb3-drd-phy.yaml
index e906403208c0..ea1135c91fb7 100644
--- a/Documentation/devicetree/bindings/phy/samsung,usb3-drd-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/samsung,usb3-drd-phy.yaml
@@ -125,7 +125,9 @@ allOf:
contains:
const: google,gs101-usb31drd-phy
then:
- $ref: /schemas/usb/usb-switch.yaml#
+ allOf:
+ - $ref: /schemas/usb/usb-switch.yaml#
+ - $ref: /schemas/usb/usb-switch-ports.yaml#
properties:
clocks:
diff --git a/Documentation/devicetree/bindings/serial/renesas,scif.yaml b/Documentation/devicetree/bindings/serial/renesas,scif.yaml
index e925cd4c3ac8..72483bc3274d 100644
--- a/Documentation/devicetree/bindings/serial/renesas,scif.yaml
+++ b/Documentation/devicetree/bindings/serial/renesas,scif.yaml
@@ -197,6 +197,7 @@ allOf:
- renesas,rcar-gen2-scif
- renesas,rcar-gen3-scif
- renesas,rcar-gen4-scif
+ - renesas,rcar-gen5-scif
then:
properties:
interrupts:
diff --git a/Documentation/devicetree/bindings/sound/qcom,pm4125-sdw.yaml b/Documentation/devicetree/bindings/sound/qcom,pm4125-sdw.yaml
index 23624f32ac30..769e4cb5b99b 100644
--- a/Documentation/devicetree/bindings/sound/qcom,pm4125-sdw.yaml
+++ b/Documentation/devicetree/bindings/sound/qcom,pm4125-sdw.yaml
@@ -32,7 +32,7 @@ properties:
$ref: /schemas/types.yaml#/definitions/uint32-array
minItems: 2
- maxItems: 2
+ maxItems: 4
items:
enum: [1, 2, 3, 4]
@@ -48,7 +48,7 @@ properties:
$ref: /schemas/types.yaml#/definitions/uint32-array
minItems: 2
- maxItems: 2
+ maxItems: 5
items:
enum: [1, 2, 3, 4, 5]
diff --git a/Documentation/devicetree/bindings/spi/spi-cadence.yaml b/Documentation/devicetree/bindings/spi/spi-cadence.yaml
index 8de96abe9da1..27414b78d61d 100644
--- a/Documentation/devicetree/bindings/spi/spi-cadence.yaml
+++ b/Documentation/devicetree/bindings/spi/spi-cadence.yaml
@@ -14,9 +14,14 @@ allOf:
properties:
compatible:
- enum:
- - cdns,spi-r1p6
- - xlnx,zynq-spi-r1p6
+ oneOf:
+ - enum:
+ - xlnx,zynq-spi-r1p6
+ - items:
+ - enum:
+ - xlnx,zynqmp-spi-r1p6
+ - xlnx,versal-net-spi-r1p6
+ - const: cdns,spi-r1p6
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/spi/spi-rockchip.yaml b/Documentation/devicetree/bindings/spi/spi-rockchip.yaml
index 748faf7f7081..ce6762c92fda 100644
--- a/Documentation/devicetree/bindings/spi/spi-rockchip.yaml
+++ b/Documentation/devicetree/bindings/spi/spi-rockchip.yaml
@@ -34,6 +34,7 @@ properties:
- rockchip,rk3328-spi
- rockchip,rk3368-spi
- rockchip,rk3399-spi
+ - rockchip,rk3506-spi
- rockchip,rk3528-spi
- rockchip,rk3562-spi
- rockchip,rk3568-spi
diff --git a/Documentation/devicetree/bindings/ufs/qcom,sm8650-ufshc.yaml b/Documentation/devicetree/bindings/ufs/qcom,sm8650-ufshc.yaml
index aaa0bbb5bfe1..cea84ab2204f 100644
--- a/Documentation/devicetree/bindings/ufs/qcom,sm8650-ufshc.yaml
+++ b/Documentation/devicetree/bindings/ufs/qcom,sm8650-ufshc.yaml
@@ -15,6 +15,7 @@ select:
compatible:
contains:
enum:
+ - qcom,kaanapali-ufshc
- qcom,sm8650-ufshc
- qcom,sm8750-ufshc
required:
@@ -24,6 +25,7 @@ properties:
compatible:
items:
- enum:
+ - qcom,kaanapali-ufshc
- qcom,sm8650-ufshc
- qcom,sm8750-ufshc
- const: qcom,ufshc
diff --git a/Documentation/devicetree/bindings/usb/fcs,fsa4480.yaml b/Documentation/devicetree/bindings/usb/fcs,fsa4480.yaml
index e3a7df91f7f1..89b1fb90aeeb 100644
--- a/Documentation/devicetree/bindings/usb/fcs,fsa4480.yaml
+++ b/Documentation/devicetree/bindings/usb/fcs,fsa4480.yaml
@@ -76,6 +76,7 @@ required:
allOf:
- $ref: usb-switch.yaml#
+ - $ref: usb-switch-ports.yaml#
additionalProperties: false
diff --git a/Documentation/devicetree/bindings/usb/fsl,imx8mp-dwc3.yaml b/Documentation/devicetree/bindings/usb/fsl,imx8mp-dwc3.yaml
index baf130669c38..73e7a60a0060 100644
--- a/Documentation/devicetree/bindings/usb/fsl,imx8mp-dwc3.yaml
+++ b/Documentation/devicetree/bindings/usb/fsl,imx8mp-dwc3.yaml
@@ -89,13 +89,21 @@ required:
- reg
- "#address-cells"
- "#size-cells"
- - dma-ranges
- ranges
- clocks
- clock-names
- interrupts
- power-domains
+allOf:
+ - if:
+ properties:
+ compatible:
+ const: fsl,imx8mp-dwc3
+ then:
+ required:
+ - dma-ranges
+
additionalProperties: false
examples:
diff --git a/Documentation/devicetree/bindings/usb/gpio-sbu-mux.yaml b/Documentation/devicetree/bindings/usb/gpio-sbu-mux.yaml
index e588514fab2d..793662f6f3bf 100644
--- a/Documentation/devicetree/bindings/usb/gpio-sbu-mux.yaml
+++ b/Documentation/devicetree/bindings/usb/gpio-sbu-mux.yaml
@@ -52,6 +52,7 @@ required:
allOf:
- $ref: usb-switch.yaml#
+ - $ref: usb-switch-ports.yaml#
- if:
required:
- mode-switch
diff --git a/Documentation/devicetree/bindings/usb/nxp,ptn36502.yaml b/Documentation/devicetree/bindings/usb/nxp,ptn36502.yaml
index d805dde80796..4d2fcaa71870 100644
--- a/Documentation/devicetree/bindings/usb/nxp,ptn36502.yaml
+++ b/Documentation/devicetree/bindings/usb/nxp,ptn36502.yaml
@@ -46,6 +46,7 @@ required:
allOf:
- $ref: usb-switch.yaml#
+ - $ref: usb-switch-ports.yaml#
additionalProperties: false
diff --git a/Documentation/devicetree/bindings/usb/onnn,nb7vpq904m.yaml b/Documentation/devicetree/bindings/usb/onnn,nb7vpq904m.yaml
index 589914d22bf2..25fab5fdc2cd 100644
--- a/Documentation/devicetree/bindings/usb/onnn,nb7vpq904m.yaml
+++ b/Documentation/devicetree/bindings/usb/onnn,nb7vpq904m.yaml
@@ -91,6 +91,7 @@ required:
allOf:
- $ref: usb-switch.yaml#
+ - $ref: usb-switch-ports.yaml#
additionalProperties: false
diff --git a/Documentation/devicetree/bindings/usb/parade,ps8830.yaml b/Documentation/devicetree/bindings/usb/parade,ps8830.yaml
index aeb33667818e..eaeab1c01a59 100644
--- a/Documentation/devicetree/bindings/usb/parade,ps8830.yaml
+++ b/Documentation/devicetree/bindings/usb/parade,ps8830.yaml
@@ -81,6 +81,7 @@ required:
allOf:
- $ref: usb-switch.yaml#
+ - $ref: usb-switch-ports.yaml#
additionalProperties: false
diff --git a/Documentation/devicetree/bindings/usb/qcom,snps-dwc3.yaml b/Documentation/devicetree/bindings/usb/qcom,snps-dwc3.yaml
index dfd084ed9024..d49a58d5478f 100644
--- a/Documentation/devicetree/bindings/usb/qcom,snps-dwc3.yaml
+++ b/Documentation/devicetree/bindings/usb/qcom,snps-dwc3.yaml
@@ -68,6 +68,7 @@ properties:
- qcom,sm8550-dwc3
- qcom,sm8650-dwc3
- qcom,x1e80100-dwc3
+ - qcom,x1e80100-dwc3-mp
- const: qcom,snps-dwc3
reg:
@@ -460,8 +461,10 @@ allOf:
then:
properties:
interrupts:
+ minItems: 4
maxItems: 5
interrupt-names:
+ minItems: 4
items:
- const: dwc_usb3
- const: pwr_event
diff --git a/Documentation/devicetree/bindings/usb/qcom,wcd939x-usbss.yaml b/Documentation/devicetree/bindings/usb/qcom,wcd939x-usbss.yaml
index 96346723f3e9..96dcec9b7620 100644
--- a/Documentation/devicetree/bindings/usb/qcom,wcd939x-usbss.yaml
+++ b/Documentation/devicetree/bindings/usb/qcom,wcd939x-usbss.yaml
@@ -60,6 +60,7 @@ required:
allOf:
- $ref: usb-switch.yaml#
+ - $ref: usb-switch-ports.yaml#
additionalProperties: false
diff --git a/Documentation/devicetree/bindings/usb/ti,tusb1046.yaml b/Documentation/devicetree/bindings/usb/ti,tusb1046.yaml
index f713cac4a8ac..e1501ea6b50b 100644
--- a/Documentation/devicetree/bindings/usb/ti,tusb1046.yaml
+++ b/Documentation/devicetree/bindings/usb/ti,tusb1046.yaml
@@ -11,6 +11,7 @@ maintainers:
allOf:
- $ref: usb-switch.yaml#
+ - $ref: usb-switch-ports.yaml#
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/usb/usb-switch-ports.yaml b/Documentation/devicetree/bindings/usb/usb-switch-ports.yaml
new file mode 100644
index 000000000000..6bf0c97e30ae
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/usb-switch-ports.yaml
@@ -0,0 +1,68 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/usb/usb-switch-ports.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: USB Orientation and Mode Switches Ports Graph Properties
+
+maintainers:
+ - Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+description:
+ Ports Graph properties for devices handling USB mode and orientation switching.
+
+properties:
+ port:
+ $ref: /schemas/graph.yaml#/$defs/port-base
+ description:
+ A port node to link the device to a TypeC controller for the purpose of
+ handling altmode muxing and orientation switching.
+
+ properties:
+ endpoint:
+ $ref: /schemas/graph.yaml#/$defs/endpoint-base
+ unevaluatedProperties: false
+ properties:
+ data-lanes:
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ minItems: 1
+ maxItems: 8
+ uniqueItems: true
+ items:
+ maximum: 8
+
+ ports:
+ $ref: /schemas/graph.yaml#/properties/ports
+ properties:
+ port@0:
+ $ref: /schemas/graph.yaml#/properties/port
+ description:
+ Super Speed (SS) Output endpoint to the Type-C connector
+
+ port@1:
+ $ref: /schemas/graph.yaml#/$defs/port-base
+ description:
+ Super Speed (SS) Input endpoint from the Super-Speed PHY
+ unevaluatedProperties: false
+
+ properties:
+ endpoint:
+ $ref: /schemas/graph.yaml#/$defs/endpoint-base
+ unevaluatedProperties: false
+ properties:
+ data-lanes:
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ minItems: 1
+ maxItems: 8
+ uniqueItems: true
+ items:
+ maximum: 8
+
+oneOf:
+ - required:
+ - port
+ - required:
+ - ports
+
+additionalProperties: true
diff --git a/Documentation/devicetree/bindings/usb/usb-switch.yaml b/Documentation/devicetree/bindings/usb/usb-switch.yaml
index 896201912630..f77731493dc4 100644
--- a/Documentation/devicetree/bindings/usb/usb-switch.yaml
+++ b/Documentation/devicetree/bindings/usb/usb-switch.yaml
@@ -25,56 +25,4 @@ properties:
description: Possible handler of SuperSpeed signals retiming
type: boolean
- port:
- $ref: /schemas/graph.yaml#/$defs/port-base
- description:
- A port node to link the device to a TypeC controller for the purpose of
- handling altmode muxing and orientation switching.
-
- properties:
- endpoint:
- $ref: /schemas/graph.yaml#/$defs/endpoint-base
- unevaluatedProperties: false
- properties:
- data-lanes:
- $ref: /schemas/types.yaml#/definitions/uint32-array
- minItems: 1
- maxItems: 8
- uniqueItems: true
- items:
- maximum: 8
-
- ports:
- $ref: /schemas/graph.yaml#/properties/ports
- properties:
- port@0:
- $ref: /schemas/graph.yaml#/properties/port
- description:
- Super Speed (SS) Output endpoint to the Type-C connector
-
- port@1:
- $ref: /schemas/graph.yaml#/$defs/port-base
- description:
- Super Speed (SS) Input endpoint from the Super-Speed PHY
- unevaluatedProperties: false
-
- properties:
- endpoint:
- $ref: /schemas/graph.yaml#/$defs/endpoint-base
- unevaluatedProperties: false
- properties:
- data-lanes:
- $ref: /schemas/types.yaml#/definitions/uint32-array
- minItems: 1
- maxItems: 8
- uniqueItems: true
- items:
- maximum: 8
-
-oneOf:
- - required:
- - port
- - required:
- - ports
-
additionalProperties: true
diff --git a/Documentation/netlink/specs/dpll.yaml b/Documentation/netlink/specs/dpll.yaml
index cafb4ec20447..80728f6f9bc8 100644
--- a/Documentation/netlink/specs/dpll.yaml
+++ b/Documentation/netlink/specs/dpll.yaml
@@ -605,6 +605,8 @@ operations:
reply: &pin-attrs
attributes:
- id
+ - module-name
+ - clock-id
- board-label
- panel-label
- package-label
diff --git a/Documentation/networking/ax25.rst b/Documentation/networking/ax25.rst
index 605e72c6c877..89c79dd6c6f9 100644
--- a/Documentation/networking/ax25.rst
+++ b/Documentation/networking/ax25.rst
@@ -11,6 +11,7 @@ found on https://linux-ax25.in-berlin.de.
There is a mailing list for discussing Linux amateur radio matters
called linux-hams@vger.kernel.org. To subscribe to it, send a message to
-majordomo@vger.kernel.org with the words "subscribe linux-hams" in the body
-of the message, the subject field is ignored. You don't need to be
-subscribed to post but of course that means you might miss an answer.
+linux-hams+subscribe@vger.kernel.org or use the web interface at
+https://vger.kernel.org. The subject and body of the message are
+ignored. You don't need to be subscribed to post but of course that
+means you might miss an answer.
diff --git a/Documentation/networking/device_drivers/cellular/qualcomm/rmnet.rst b/Documentation/networking/device_drivers/cellular/qualcomm/rmnet.rst
index 289c146a8291..6877a3260582 100644
--- a/Documentation/networking/device_drivers/cellular/qualcomm/rmnet.rst
+++ b/Documentation/networking/device_drivers/cellular/qualcomm/rmnet.rst
@@ -137,16 +137,20 @@ d. Checksum offload header v5
Checksum offload header fields are in big endian format.
+Packet format::
+
Bit 0 - 6 7 8-15 16-31
Function Header Type Next Header Checksum Valid Reserved
Header Type is to indicate the type of header, this usually is set to CHECKSUM
Header types
-= ==========================================
+
+= ===============
0 Reserved
1 Reserved
2 checksum header
+= ===============
Checksum Valid is to indicate whether the header checksum is valid. Value of 1
implies that checksum is calculated on this packet and is valid, value of 0
@@ -183,9 +187,11 @@ rmnet in a single linear skb. rmnet will process the individual
packets and either ACK the MAP command or deliver the IP packet to the
network stack as needed
-MAP header|IP Packet|Optional padding|MAP header|IP Packet|Optional padding....
+Packet format::
+
+ MAP header|IP Packet|Optional padding|MAP header|IP Packet|Optional padding....
-MAP header|IP Packet|Optional padding|MAP header|Command Packet|Optional pad...
+ MAP header|IP Packet|Optional padding|MAP header|Command Packet|Optional pad...
3. Userspace configuration
==========================
diff --git a/Documentation/networking/net_failover.rst b/Documentation/networking/net_failover.rst
index f4e1b4e07adc..2f776e90d318 100644
--- a/Documentation/networking/net_failover.rst
+++ b/Documentation/networking/net_failover.rst
@@ -96,9 +96,8 @@ needed to these network configuration daemons to make sure that an IP is
received only on the 'failover' device.
Below is the patch snippet used with 'cloud-ifupdown-helper' script found on
-Debian cloud images:
+Debian cloud images::
-::
@@ -27,6 +27,8 @@ do_setup() {
local working="$cfgdir/.$INTERFACE"
local final="$cfgdir/$INTERFACE"
@@ -172,9 +171,8 @@ appropriate FDB entry is added.
The following script is executed on the destination hypervisor once migration
completes, and it reattaches the VF to the VM and brings down the virtio-net
-interface.
+interface::
-::
# reattach-vf.sh
#!/bin/bash
diff --git a/Documentation/networking/netconsole.rst b/Documentation/networking/netconsole.rst
index 59cb9982afe6..2555e75e5cc1 100644
--- a/Documentation/networking/netconsole.rst
+++ b/Documentation/networking/netconsole.rst
@@ -19,9 +19,6 @@ Userdata append support by Matthew Wood <thepacketgeek@gmail.com>, Jan 22 2024
Sysdata append support by Breno Leitao <leitao@debian.org>, Jan 15 2025
-Please send bug reports to Matt Mackall <mpm@selenic.com>
-Satyam Sharma <satyam.sharma@gmail.com>, and Cong Wang <xiyou.wangcong@gmail.com>
-
Introduction:
=============
diff --git a/MAINTAINERS b/MAINTAINERS
index 545a4776795e..46bd8e033042 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1997,6 +1997,10 @@ F: include/uapi/linux/if_arcnet.h
ARM AND ARM64 SoC SUB-ARCHITECTURES (COMMON PARTS)
M: Arnd Bergmann <arnd@arndb.de>
+M: Krzysztof Kozlowski <krzk@kernel.org>
+M: Alexandre Belloni <alexandre.belloni@bootlin.com>
+M: Linus Walleij <linus.walleij@linaro.org>
+R: Drew Fustini <fustini@kernel.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: soc@lists.linux.dev
S: Maintained
@@ -3841,6 +3845,7 @@ F: drivers/hwmon/asus-ec-sensors.c
ASUS NOTEBOOKS AND EEEPC ACPI/WMI EXTRAS DRIVERS
M: Corentin Chary <corentin.chary@gmail.com>
M: Luke D. Jones <luke@ljones.dev>
+M: Denis Benato <benato.denis96@gmail.com>
L: platform-driver-x86@vger.kernel.org
S: Maintained
W: https://asus-linux.org/
@@ -13111,6 +13116,15 @@ F: include/uapi/linux/io_uring.h
F: include/uapi/linux/io_uring/
F: io_uring/
+IO_URING ZCRX
+M: Pavel Begunkov <asml.silence@gmail.com>
+L: io-uring@vger.kernel.org
+L: netdev@vger.kernel.org
+T: git https://github.com/isilence/linux.git zcrx/for-next
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux.git
+S: Maintained
+F: io_uring/zcrx.*
+
IPMI SUBSYSTEM
M: Corey Minyard <corey@minyard.net>
L: openipmi-developer@lists.sourceforge.net (moderated for non-subscribers)
@@ -13246,10 +13260,8 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending.git mast
F: drivers/infiniband/ulp/isert
ISDN/CMTP OVER BLUETOOTH
-M: Karsten Keil <isdn@linux-pingi.de>
-L: isdn4linux@listserv.isdn4linux.de (subscribers-only)
L: netdev@vger.kernel.org
-S: Odd Fixes
+S: Orphan
W: http://www.isdn4linux.de
F: Documentation/isdn/
F: drivers/isdn/capi/
@@ -13258,10 +13270,8 @@ F: include/uapi/linux/isdn/
F: net/bluetooth/cmtp/
ISDN/mISDN SUBSYSTEM
-M: Karsten Keil <isdn@linux-pingi.de>
-L: isdn4linux@listserv.isdn4linux.de (subscribers-only)
L: netdev@vger.kernel.org
-S: Maintained
+S: Orphan
W: http://www.isdn4linux.de
F: drivers/isdn/Kconfig
F: drivers/isdn/Makefile
@@ -13415,9 +13425,12 @@ F: mm/kasan/
F: scripts/Makefile.kasan
KCONFIG
+M: Nathan Chancellor <nathan@kernel.org>
+M: Nicolas Schier <nsc@kernel.org>
L: linux-kbuild@vger.kernel.org
-S: Orphan
+S: Odd Fixes
Q: https://patchwork.kernel.org/project/linux-kbuild/list/
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/kbuild/linux.git
F: Documentation/kbuild/kconfig*
F: scripts/Kconfig.include
F: scripts/kconfig/
@@ -13602,7 +13615,7 @@ F: fs/smb/server/
KERNEL UNIT TESTING FRAMEWORK (KUnit)
M: Brendan Higgins <brendan.higgins@linux.dev>
M: David Gow <davidgow@google.com>
-R: Rae Moar <rmoar@google.com>
+R: Rae Moar <raemoar63@gmail.com>
L: linux-kselftest@vger.kernel.org
L: kunit-dev@googlegroups.com
S: Maintained
@@ -14394,6 +14407,7 @@ F: tools/memory-model/
LINUX-NEXT TREE
M: Stephen Rothwell <sfr@canb.auug.org.au>
+M: Mark Brown <broonie@kernel.org>
L: linux-next@vger.kernel.org
S: Supported
B: mailto:linux-next@vger.kernel.org and the appropriate development tree
@@ -21317,6 +21331,7 @@ F: drivers/media/platform/qcom/venus/
QUALCOMM WCN36XX WIRELESS DRIVER
M: Loic Poulain <loic.poulain@oss.qualcomm.com>
L: wcn36xx@lists.infradead.org
+L: linux-wireless@vger.kernel.org
S: Supported
W: https://wireless.wiki.kernel.org/en/users/Drivers/wcn36xx
F: drivers/net/wireless/ath/wcn36xx/
@@ -26885,7 +26900,7 @@ S: Maintained
F: drivers/vfio/cdx/*
VFIO DRIVER
-M: Alex Williamson <alex.williamson@redhat.com>
+M: Alex Williamson <alex@shazbot.org>
L: kvm@vger.kernel.org
S: Maintained
T: git https://github.com/awilliam/linux-vfio.git
@@ -27048,7 +27063,7 @@ T: git git://linuxtv.org/media.git
F: drivers/media/test-drivers/vimc/*
VIRT LIB
-M: Alex Williamson <alex.williamson@redhat.com>
+M: Alex Williamson <alex@shazbot.org>
M: Paolo Bonzini <pbonzini@redhat.com>
L: kvm@vger.kernel.org
S: Supported
diff --git a/Makefile b/Makefile
index d14824792227..088565edc911 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 18
SUBLEVEL = 0
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc4
NAME = Baby Opossum Posse
# *DOCUMENTATION*
diff --git a/arch/arm/boot/dts/broadcom/bcm2711-rpi.dtsi b/arch/arm/boot/dts/broadcom/bcm2711-rpi.dtsi
index c78ed064d166..1eb6406449d1 100644
--- a/arch/arm/boot/dts/broadcom/bcm2711-rpi.dtsi
+++ b/arch/arm/boot/dts/broadcom/bcm2711-rpi.dtsi
@@ -77,6 +77,14 @@
/delete-property/ pinctrl-0;
};
+&pm {
+ clocks = <&firmware_clocks 5>,
+ <&clocks BCM2835_CLOCK_PERI_IMAGE>,
+ <&clocks BCM2835_CLOCK_H264>,
+ <&clocks BCM2835_CLOCK_ISP>;
+ clock-names = "v3d", "peri_image", "h264", "isp";
+};
+
&rmem {
/*
* RPi4's co-processor will copy the board's bootloader configuration
diff --git a/arch/arm/boot/dts/broadcom/bcm2835-rpi-common.dtsi b/arch/arm/boot/dts/broadcom/bcm2835-rpi-common.dtsi
index 8b3c21d9f333..fa9d784c88b6 100644
--- a/arch/arm/boot/dts/broadcom/bcm2835-rpi-common.dtsi
+++ b/arch/arm/boot/dts/broadcom/bcm2835-rpi-common.dtsi
@@ -13,7 +13,16 @@
clock-names = "pixel", "hdmi";
};
+&pm {
+ clocks = <&firmware_clocks 5>,
+ <&clocks BCM2835_CLOCK_PERI_IMAGE>,
+ <&clocks BCM2835_CLOCK_H264>,
+ <&clocks BCM2835_CLOCK_ISP>;
+ clock-names = "v3d", "peri_image", "h264", "isp";
+};
+
&v3d {
+ clocks = <&firmware_clocks 5>;
power-domains = <&power RPI_POWER_DOMAIN_V3D>;
};
diff --git a/arch/arm64/boot/dts/broadcom/bcm2712.dtsi b/arch/arm64/boot/dts/broadcom/bcm2712.dtsi
index e77a66adc22a..205b87f557d6 100644
--- a/arch/arm64/boot/dts/broadcom/bcm2712.dtsi
+++ b/arch/arm64/boot/dts/broadcom/bcm2712.dtsi
@@ -326,6 +326,8 @@
<0x7fffe000 0x2000>;
interrupt-controller;
#address-cells = <0>;
+ interrupts = <GIC_PPI 9 (GIC_CPU_MASK_SIMPLE(4) |
+ IRQ_TYPE_LEVEL_HIGH)>;
#interrupt-cells = <3>;
};
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index aa89c2e67ebc..0944e296dd4a 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -293,7 +293,8 @@ static inline pmd_t set_pmd_bit(pmd_t pmd, pgprot_t prot)
static inline pte_t pte_mkwrite_novma(pte_t pte)
{
pte = set_pte_bit(pte, __pgprot(PTE_WRITE));
- pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY));
+ if (pte_sw_dirty(pte))
+ pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY));
return pte;
}
diff --git a/arch/arm64/mm/copypage.c b/arch/arm64/mm/copypage.c
index a86c897017df..cd5912ba617b 100644
--- a/arch/arm64/mm/copypage.c
+++ b/arch/arm64/mm/copypage.c
@@ -35,7 +35,7 @@ void copy_highpage(struct page *to, struct page *from)
from != folio_page(src, 0))
return;
- WARN_ON_ONCE(!folio_try_hugetlb_mte_tagging(dst));
+ folio_try_hugetlb_mte_tagging(dst);
/*
* Populate tags for all subpages.
@@ -51,8 +51,13 @@ void copy_highpage(struct page *to, struct page *from)
}
folio_set_hugetlb_mte_tagged(dst);
} else if (page_mte_tagged(from)) {
- /* It's a new page, shouldn't have been tagged yet */
- WARN_ON_ONCE(!try_page_mte_tagging(to));
+ /*
+ * Most of the time it's a new page that shouldn't have been
+ * tagged yet. However, folio migration can end up reusing the
+ * same page without untagging it. Ignore the warning if the
+ * page is already tagged.
+ */
+ try_page_mte_tagging(to);
mte_copy_page_tags(kto, kfrom);
set_page_mte_tagged(to);
diff --git a/arch/csky/abiv2/cacheflush.c b/arch/csky/abiv2/cacheflush.c
index 876028b1083f..064b0f0f95ca 100644
--- a/arch/csky/abiv2/cacheflush.c
+++ b/arch/csky/abiv2/cacheflush.c
@@ -21,7 +21,7 @@ void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
folio = page_folio(pfn_to_page(pfn));
- if (test_and_set_bit(PG_dcache_clean, &folio->flags))
+ if (test_and_set_bit(PG_dcache_clean, &folio->flags.f))
return;
icache_inv_range(address, address + nr*PAGE_SIZE);
diff --git a/arch/csky/abiv2/inc/abi/cacheflush.h b/arch/csky/abiv2/inc/abi/cacheflush.h
index 6513ac5d2578..da51a0f02391 100644
--- a/arch/csky/abiv2/inc/abi/cacheflush.h
+++ b/arch/csky/abiv2/inc/abi/cacheflush.h
@@ -20,8 +20,8 @@
static inline void flush_dcache_folio(struct folio *folio)
{
- if (test_bit(PG_dcache_clean, &folio->flags))
- clear_bit(PG_dcache_clean, &folio->flags);
+ if (test_bit(PG_dcache_clean, &folio->flags.f))
+ clear_bit(PG_dcache_clean, &folio->flags.f);
}
#define flush_dcache_folio flush_dcache_folio
diff --git a/arch/mips/mti-malta/malta-setup.c b/arch/mips/mti-malta/malta-setup.c
index 3a2836e9d856..816570514c37 100644
--- a/arch/mips/mti-malta/malta-setup.c
+++ b/arch/mips/mti-malta/malta-setup.c
@@ -47,7 +47,7 @@ static struct resource standard_io_resources[] = {
.name = "keyboard",
.start = 0x60,
.end = 0x6f,
- .flags = IORESOURCE_IO | IORESOURCE_BUSY
+ .flags = IORESOURCE_IO
},
{
.name = "dma page reg",
@@ -213,7 +213,7 @@ void __init plat_mem_setup(void)
/* Request I/O space for devices used on the Malta board. */
for (i = 0; i < ARRAY_SIZE(standard_io_resources); i++)
- request_resource(&ioport_resource, standard_io_resources+i);
+ insert_resource(&ioport_resource, standard_io_resources + i);
/*
* Enable DMA channel 4 (cascade channel) in the PIIX4 south bridge.
diff --git a/arch/mips/pci/pci-malta.c b/arch/mips/pci/pci-malta.c
index 6aefdf20ca05..2e35aeba45bc 100644
--- a/arch/mips/pci/pci-malta.c
+++ b/arch/mips/pci/pci-malta.c
@@ -230,8 +230,7 @@ void __init mips_pcibios_init(void)
}
/* PIIX4 ACPI starts at 0x1000 */
- if (controller->io_resource->start < 0x00001000UL)
- controller->io_resource->start = 0x00001000UL;
+ PCIBIOS_MIN_IO = 0x1000;
iomem_resource.end &= 0xfffffffffULL; /* 64 GB */
ioport_resource.end = controller->io_resource->end;
diff --git a/arch/riscv/include/asm/asm.h b/arch/riscv/include/asm/asm.h
index 8bd2a11382a3..ac28066bb564 100644
--- a/arch/riscv/include/asm/asm.h
+++ b/arch/riscv/include/asm/asm.h
@@ -84,15 +84,9 @@
.endm
#ifdef CONFIG_SMP
-#ifdef CONFIG_32BIT
-#define PER_CPU_OFFSET_SHIFT 2
-#else
-#define PER_CPU_OFFSET_SHIFT 3
-#endif
-
.macro asm_per_cpu dst sym tmp
lw \tmp, TASK_TI_CPU_NUM(tp)
- slli \tmp, \tmp, PER_CPU_OFFSET_SHIFT
+ slli \tmp, \tmp, RISCV_LGPTR
la \dst, __per_cpu_offset
add \dst, \dst, \tmp
REG_L \tmp, 0(\dst)
diff --git a/arch/riscv/include/asm/cpufeature.h b/arch/riscv/include/asm/cpufeature.h
index fbd0e4306c93..62837fa981e8 100644
--- a/arch/riscv/include/asm/cpufeature.h
+++ b/arch/riscv/include/asm/cpufeature.h
@@ -31,6 +31,8 @@ struct riscv_isainfo {
DECLARE_PER_CPU(struct riscv_cpuinfo, riscv_cpuinfo);
+extern const struct seq_operations cpuinfo_op;
+
/* Per-cpu ISA extensions. */
extern struct riscv_isainfo hart_isa[NR_CPUS];
diff --git a/arch/riscv/include/asm/hwprobe.h b/arch/riscv/include/asm/hwprobe.h
index 948d2b34e94e..58f8dda73259 100644
--- a/arch/riscv/include/asm/hwprobe.h
+++ b/arch/riscv/include/asm/hwprobe.h
@@ -42,4 +42,11 @@ static inline bool riscv_hwprobe_pair_cmp(struct riscv_hwprobe *pair,
return pair->value == other_pair->value;
}
+#ifdef CONFIG_MMU
+void riscv_hwprobe_register_async_probe(void);
+void riscv_hwprobe_complete_async_probe(void);
+#else
+static inline void riscv_hwprobe_register_async_probe(void) {}
+static inline void riscv_hwprobe_complete_async_probe(void) {}
+#endif
#endif
diff --git a/arch/riscv/include/asm/pgtable-64.h b/arch/riscv/include/asm/pgtable-64.h
index 1018d2216901..6e789fa58514 100644
--- a/arch/riscv/include/asm/pgtable-64.h
+++ b/arch/riscv/include/asm/pgtable-64.h
@@ -69,6 +69,8 @@ typedef struct {
#define PTRS_PER_PMD (PAGE_SIZE / sizeof(pmd_t))
+#define MAX_POSSIBLE_PHYSMEM_BITS 56
+
/*
* rv64 PTE format:
* | 63 | 62 61 | 60 54 | 53 10 | 9 8 | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index 29e994a9afb6..5a08eb5fe99f 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -654,6 +654,8 @@ static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
return __pgprot(prot);
}
+#define pgprot_dmacoherent pgprot_writecombine
+
/*
* Both Svade and Svadu control the hardware behavior when the PTE A/D bits need to be set. By
* default the M-mode firmware enables the hardware updating scheme when only Svadu is present in
diff --git a/arch/riscv/include/asm/vdso/arch_data.h b/arch/riscv/include/asm/vdso/arch_data.h
index da57a3786f7a..88b37af55175 100644
--- a/arch/riscv/include/asm/vdso/arch_data.h
+++ b/arch/riscv/include/asm/vdso/arch_data.h
@@ -12,6 +12,12 @@ struct vdso_arch_data {
/* Boolean indicating all CPUs have the same static hwprobe values. */
__u8 homogeneous_cpus;
+
+ /*
+ * A gate to check and see if the hwprobe data is actually ready, as
+ * probing is deferred to avoid boot slowdowns.
+ */
+ __u8 ready;
};
#endif /* __RISCV_ASM_VDSO_ARCH_DATA_H */
diff --git a/arch/riscv/kernel/cpu.c b/arch/riscv/kernel/cpu.c
index f6b13e9f5e6c..3dbc8cc557dd 100644
--- a/arch/riscv/kernel/cpu.c
+++ b/arch/riscv/kernel/cpu.c
@@ -62,10 +62,8 @@ int __init riscv_early_of_processor_hartid(struct device_node *node, unsigned lo
return -ENODEV;
}
- if (!of_device_is_available(node)) {
- pr_info("CPU with hartid=%lu is not available\n", *hart);
+ if (!of_device_is_available(node))
return -ENODEV;
- }
if (of_property_read_string(node, "riscv,isa-base", &isa))
goto old_interface;
diff --git a/arch/riscv/kernel/cpufeature.c b/arch/riscv/kernel/cpufeature.c
index 67b59699357d..72ca768f4e91 100644
--- a/arch/riscv/kernel/cpufeature.c
+++ b/arch/riscv/kernel/cpufeature.c
@@ -932,9 +932,9 @@ static int has_thead_homogeneous_vlenb(void)
{
int cpu;
u32 prev_vlenb = 0;
- u32 vlenb;
+ u32 vlenb = 0;
- /* Ignore thead,vlenb property if xtheavector is not enabled in the kernel */
+ /* Ignore thead,vlenb property if xtheadvector is not enabled in the kernel */
if (!IS_ENABLED(CONFIG_RISCV_ISA_XTHEADVECTOR))
return 0;
diff --git a/arch/riscv/kernel/smp.c b/arch/riscv/kernel/smp.c
index e650dec44817..5ed5095320e6 100644
--- a/arch/riscv/kernel/smp.c
+++ b/arch/riscv/kernel/smp.c
@@ -40,6 +40,17 @@ enum ipi_message_type {
IPI_MAX
};
+static const char * const ipi_names[] = {
+ [IPI_RESCHEDULE] = "Rescheduling interrupts",
+ [IPI_CALL_FUNC] = "Function call interrupts",
+ [IPI_CPU_STOP] = "CPU stop interrupts",
+ [IPI_CPU_CRASH_STOP] = "CPU stop (for crash dump) interrupts",
+ [IPI_IRQ_WORK] = "IRQ work interrupts",
+ [IPI_TIMER] = "Timer broadcast interrupts",
+ [IPI_CPU_BACKTRACE] = "CPU backtrace interrupts",
+ [IPI_KGDB_ROUNDUP] = "KGDB roundup interrupts",
+};
+
unsigned long __cpuid_to_hartid_map[NR_CPUS] __ro_after_init = {
[0 ... NR_CPUS-1] = INVALID_HARTID
};
@@ -199,7 +210,7 @@ void riscv_ipi_set_virq_range(int virq, int nr)
/* Request IPIs */
for (i = 0; i < nr_ipi; i++) {
err = request_percpu_irq(ipi_virq_base + i, handle_IPI,
- "IPI", &ipi_dummy_dev);
+ ipi_names[i], &ipi_dummy_dev);
WARN_ON(err);
ipi_desc[i] = irq_to_desc(ipi_virq_base + i);
@@ -210,17 +221,6 @@ void riscv_ipi_set_virq_range(int virq, int nr)
riscv_ipi_enable();
}
-static const char * const ipi_names[] = {
- [IPI_RESCHEDULE] = "Rescheduling interrupts",
- [IPI_CALL_FUNC] = "Function call interrupts",
- [IPI_CPU_STOP] = "CPU stop interrupts",
- [IPI_CPU_CRASH_STOP] = "CPU stop (for crash dump) interrupts",
- [IPI_IRQ_WORK] = "IRQ work interrupts",
- [IPI_TIMER] = "Timer broadcast interrupts",
- [IPI_CPU_BACKTRACE] = "CPU backtrace interrupts",
- [IPI_KGDB_ROUNDUP] = "KGDB roundup interrupts",
-};
-
void show_ipi_stats(struct seq_file *p, int prec)
{
unsigned int cpu, i;
diff --git a/arch/riscv/kernel/sys_hwprobe.c b/arch/riscv/kernel/sys_hwprobe.c
index 000f4451a9d8..199d13f86f31 100644
--- a/arch/riscv/kernel/sys_hwprobe.c
+++ b/arch/riscv/kernel/sys_hwprobe.c
@@ -5,6 +5,9 @@
* more details.
*/
#include <linux/syscalls.h>
+#include <linux/completion.h>
+#include <linux/atomic.h>
+#include <linux/once.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/hwprobe.h>
@@ -28,6 +31,11 @@ static void hwprobe_arch_id(struct riscv_hwprobe *pair,
bool first = true;
int cpu;
+ if (pair->key != RISCV_HWPROBE_KEY_MVENDORID &&
+ pair->key != RISCV_HWPROBE_KEY_MIMPID &&
+ pair->key != RISCV_HWPROBE_KEY_MARCHID)
+ goto out;
+
for_each_cpu(cpu, cpus) {
u64 cpu_id;
@@ -58,6 +66,7 @@ static void hwprobe_arch_id(struct riscv_hwprobe *pair,
}
}
+out:
pair->value = id;
}
@@ -454,28 +463,32 @@ static int hwprobe_get_cpus(struct riscv_hwprobe __user *pairs,
return 0;
}
-static int do_riscv_hwprobe(struct riscv_hwprobe __user *pairs,
- size_t pair_count, size_t cpusetsize,
- unsigned long __user *cpus_user,
- unsigned int flags)
-{
- if (flags & RISCV_HWPROBE_WHICH_CPUS)
- return hwprobe_get_cpus(pairs, pair_count, cpusetsize,
- cpus_user, flags);
+#ifdef CONFIG_MMU
- return hwprobe_get_values(pairs, pair_count, cpusetsize,
- cpus_user, flags);
+static DECLARE_COMPLETION(boot_probes_done);
+static atomic_t pending_boot_probes = ATOMIC_INIT(1);
+
+void riscv_hwprobe_register_async_probe(void)
+{
+ atomic_inc(&pending_boot_probes);
}
-#ifdef CONFIG_MMU
+void riscv_hwprobe_complete_async_probe(void)
+{
+ if (atomic_dec_and_test(&pending_boot_probes))
+ complete(&boot_probes_done);
+}
-static int __init init_hwprobe_vdso_data(void)
+static int complete_hwprobe_vdso_data(void)
{
struct vdso_arch_data *avd = vdso_k_arch_data;
u64 id_bitsmash = 0;
struct riscv_hwprobe pair;
int key;
+ if (unlikely(!atomic_dec_and_test(&pending_boot_probes)))
+ wait_for_completion(&boot_probes_done);
+
/*
* Initialize vDSO data with the answers for the "all CPUs" case, to
* save a syscall in the common case.
@@ -503,13 +516,52 @@ static int __init init_hwprobe_vdso_data(void)
* vDSO should defer to the kernel for exotic cpu masks.
*/
avd->homogeneous_cpus = id_bitsmash != 0 && id_bitsmash != -1;
+
+ /*
+ * Make sure all the VDSO values are visible before we look at them.
+ * This pairs with the implicit "no speculativly visible accesses"
+ * barrier in the VDSO hwprobe code.
+ */
+ smp_wmb();
+ avd->ready = true;
+ return 0;
+}
+
+static int __init init_hwprobe_vdso_data(void)
+{
+ struct vdso_arch_data *avd = vdso_k_arch_data;
+
+ /*
+ * Prevent the vDSO cached values from being used, as they're not ready
+ * yet.
+ */
+ avd->ready = false;
return 0;
}
arch_initcall_sync(init_hwprobe_vdso_data);
+#else
+
+static int complete_hwprobe_vdso_data(void) { return 0; }
+
#endif /* CONFIG_MMU */
+static int do_riscv_hwprobe(struct riscv_hwprobe __user *pairs,
+ size_t pair_count, size_t cpusetsize,
+ unsigned long __user *cpus_user,
+ unsigned int flags)
+{
+ DO_ONCE_SLEEPABLE(complete_hwprobe_vdso_data);
+
+ if (flags & RISCV_HWPROBE_WHICH_CPUS)
+ return hwprobe_get_cpus(pairs, pair_count, cpusetsize,
+ cpus_user, flags);
+
+ return hwprobe_get_values(pairs, pair_count, cpusetsize,
+ cpus_user, flags);
+}
+
SYSCALL_DEFINE5(riscv_hwprobe, struct riscv_hwprobe __user *, pairs,
size_t, pair_count, size_t, cpusetsize, unsigned long __user *,
cpus, unsigned int, flags)
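
The riscv_hwprobe() gating above is a reusable boot-time handshake: a counter biased to 1 plus a completion, where each asynchronous probe takes a reference and the last decrement, whether from a probe or from the one-time waiter, releases the completion. A minimal standalone sketch of that handshake follows; the async_probe_*()/wait_for_async_probes() names are hypothetical and only the standard atomic and completion APIs are assumed:

/* Sketch of the biased-counter + completion handshake used above. */
#include <linux/atomic.h>
#include <linux/completion.h>

static DECLARE_COMPLETION(probes_done);
/* Biased to 1 so the waiter itself holds the final reference. */
static atomic_t pending_probes = ATOMIC_INIT(1);

static void async_probe_register(void)
{
	atomic_inc(&pending_probes);		/* one per in-flight probe */
}

static void async_probe_complete(void)
{
	if (atomic_dec_and_test(&pending_probes))
		complete(&probes_done);		/* last probe wakes the waiter */
}

static void wait_for_async_probes(void)
{
	/* Drop the initial bias; sleep only if probes are still outstanding. */
	if (!atomic_dec_and_test(&pending_probes))
		wait_for_completion(&probes_done);
}
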
diff --git a/arch/riscv/kernel/unaligned_access_speed.c b/arch/riscv/kernel/unaligned_access_speed.c
index ae2068425fbc..70b5e6927620 100644
--- a/arch/riscv/kernel/unaligned_access_speed.c
+++ b/arch/riscv/kernel/unaligned_access_speed.c
@@ -379,6 +379,7 @@ free:
static int __init vec_check_unaligned_access_speed_all_cpus(void *unused __always_unused)
{
schedule_on_each_cpu(check_vector_unaligned_access);
+ riscv_hwprobe_complete_async_probe();
return 0;
}
@@ -473,8 +474,12 @@ static int __init check_unaligned_access_all_cpus(void)
per_cpu(vector_misaligned_access, cpu) = unaligned_vector_speed_param;
} else if (!check_vector_unaligned_access_emulated_all_cpus() &&
IS_ENABLED(CONFIG_RISCV_PROBE_VECTOR_UNALIGNED_ACCESS)) {
- kthread_run(vec_check_unaligned_access_speed_all_cpus,
- NULL, "vec_check_unaligned_access_speed_all_cpus");
+ riscv_hwprobe_register_async_probe();
+ if (IS_ERR(kthread_run(vec_check_unaligned_access_speed_all_cpus,
+ NULL, "vec_check_unaligned_access_speed_all_cpus"))) {
+ pr_warn("Failed to create vec_unalign_check kthread\n");
+ riscv_hwprobe_complete_async_probe();
+ }
}
/*
diff --git a/arch/riscv/kernel/vdso/hwprobe.c b/arch/riscv/kernel/vdso/hwprobe.c
index 2ddeba6c68dd..8f45500d0a6e 100644
--- a/arch/riscv/kernel/vdso/hwprobe.c
+++ b/arch/riscv/kernel/vdso/hwprobe.c
@@ -27,7 +27,7 @@ static int riscv_vdso_get_values(struct riscv_hwprobe *pairs, size_t pair_count,
* homogeneous, then this function can handle requests for arbitrary
* masks.
*/
- if ((flags != 0) || (!all_cpus && !avd->homogeneous_cpus))
+ if (flags != 0 || (!all_cpus && !avd->homogeneous_cpus) || unlikely(!avd->ready))
return riscv_hwprobe(pairs, pair_count, cpusetsize, cpus, flags);
/* This is something we can handle, fill out the pairs. */
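
The avd->ready check above completes the publish pattern set up in sys_hwprobe.c: the writer fills in the payload, issues smp_wmb(), then sets the flag, and a reader that sees the flag clear simply falls back to the syscall. A generic sketch with a hypothetical 'struct shared' and an explicit smp_rmb() on the consumer side (the vDSO reader instead relies on the syscall fallback plus the implicit barrier described in the sys_hwprobe.c comment):

/* Generic publish/consume sketch; 'struct shared' is hypothetical. */
#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/barrier.h>

struct shared {
	u64  value;
	bool ready;
};

static void publish(struct shared *s, u64 v)
{
	s->value = v;
	smp_wmb();			/* order the payload before the flag */
	WRITE_ONCE(s->ready, true);
}

static bool consume(struct shared *s, u64 *out)
{
	if (!READ_ONCE(s->ready))
		return false;		/* caller takes the slow path */
	smp_rmb();			/* order the flag before the payload */
	*out = s->value;
	return true;
}
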
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index c4145672ca34..df22b10d9141 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -158,7 +158,6 @@ config S390
select ARCH_WANT_IRQS_OFF_ACTIVATE_MM
select ARCH_WANT_KERNEL_PMD_MKWRITE
select ARCH_WANT_LD_ORPHAN_WARN
- select ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP
select ARCH_WANTS_THP_SWAP
select BUILDTIME_TABLE_SORT
select CLONE_BACKWARDS2
diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig
index b31c1df90257..8433f769f7e1 100644
--- a/arch/s390/configs/debug_defconfig
+++ b/arch/s390/configs/debug_defconfig
@@ -101,6 +101,7 @@ CONFIG_SLUB_STATS=y
CONFIG_MEMORY_HOTPLUG=y
CONFIG_MEMORY_HOTREMOVE=y
CONFIG_KSM=y
+CONFIG_PERSISTENT_HUGE_ZERO_FOLIO=y
CONFIG_TRANSPARENT_HUGEPAGE=y
CONFIG_CMA_DEBUGFS=y
CONFIG_CMA_SYSFS=y
@@ -123,12 +124,12 @@ CONFIG_TLS_DEVICE=y
CONFIG_TLS_TOE=y
CONFIG_XFRM_USER=m
CONFIG_NET_KEY=m
-CONFIG_XDP_SOCKETS=y
-CONFIG_XDP_SOCKETS_DIAG=m
-CONFIG_DIBS=y
-CONFIG_DIBS_LO=y
CONFIG_SMC=m
CONFIG_SMC_DIAG=m
+CONFIG_DIBS=y
+CONFIG_DIBS_LO=y
+CONFIG_XDP_SOCKETS=y
+CONFIG_XDP_SOCKETS_DIAG=m
CONFIG_INET=y
CONFIG_IP_MULTICAST=y
CONFIG_IP_ADVANCED_ROUTER=y
@@ -472,6 +473,7 @@ CONFIG_SCSI_DH_EMC=m
CONFIG_SCSI_DH_ALUA=m
CONFIG_MD=y
CONFIG_BLK_DEV_MD=y
+CONFIG_MD_LLBITMAP=y
# CONFIG_MD_BITMAP_FILE is not set
CONFIG_MD_LINEAR=m
CONFIG_MD_CLUSTER=m
@@ -654,9 +656,12 @@ CONFIG_JFS_POSIX_ACL=y
CONFIG_JFS_SECURITY=y
CONFIG_JFS_STATISTICS=y
CONFIG_XFS_FS=y
+CONFIG_XFS_SUPPORT_V4=y
+CONFIG_XFS_SUPPORT_ASCII_CI=y
CONFIG_XFS_QUOTA=y
CONFIG_XFS_POSIX_ACL=y
CONFIG_XFS_RT=y
+# CONFIG_XFS_ONLINE_SCRUB is not set
CONFIG_XFS_DEBUG=y
CONFIG_GFS2_FS=m
CONFIG_GFS2_FS_LOCKING_DLM=y
@@ -666,7 +671,6 @@ CONFIG_BTRFS_FS_POSIX_ACL=y
CONFIG_BTRFS_DEBUG=y
CONFIG_BTRFS_ASSERT=y
CONFIG_NILFS2_FS=m
-CONFIG_FS_DAX=y
CONFIG_EXPORTFS_BLOCK_OPS=y
CONFIG_FS_ENCRYPTION=y
CONFIG_FS_VERITY=y
diff --git a/arch/s390/configs/defconfig b/arch/s390/configs/defconfig
index 161dad7ef211..4414dabd04a6 100644
--- a/arch/s390/configs/defconfig
+++ b/arch/s390/configs/defconfig
@@ -94,6 +94,7 @@ CONFIG_SLAB_BUCKETS=y
CONFIG_MEMORY_HOTPLUG=y
CONFIG_MEMORY_HOTREMOVE=y
CONFIG_KSM=y
+CONFIG_PERSISTENT_HUGE_ZERO_FOLIO=y
CONFIG_TRANSPARENT_HUGEPAGE=y
CONFIG_CMA_SYSFS=y
CONFIG_CMA_AREAS=7
@@ -114,12 +115,12 @@ CONFIG_TLS_DEVICE=y
CONFIG_TLS_TOE=y
CONFIG_XFRM_USER=m
CONFIG_NET_KEY=m
-CONFIG_XDP_SOCKETS=y
-CONFIG_XDP_SOCKETS_DIAG=m
-CONFIG_DIBS=y
-CONFIG_DIBS_LO=y
CONFIG_SMC=m
CONFIG_SMC_DIAG=m
+CONFIG_DIBS=y
+CONFIG_DIBS_LO=y
+CONFIG_XDP_SOCKETS=y
+CONFIG_XDP_SOCKETS_DIAG=m
CONFIG_INET=y
CONFIG_IP_MULTICAST=y
CONFIG_IP_ADVANCED_ROUTER=y
@@ -462,6 +463,7 @@ CONFIG_SCSI_DH_EMC=m
CONFIG_SCSI_DH_ALUA=m
CONFIG_MD=y
CONFIG_BLK_DEV_MD=y
+CONFIG_MD_LLBITMAP=y
# CONFIG_MD_BITMAP_FILE is not set
CONFIG_MD_LINEAR=m
CONFIG_MD_CLUSTER=m
@@ -644,16 +646,18 @@ CONFIG_JFS_POSIX_ACL=y
CONFIG_JFS_SECURITY=y
CONFIG_JFS_STATISTICS=y
CONFIG_XFS_FS=y
+CONFIG_XFS_SUPPORT_V4=y
+CONFIG_XFS_SUPPORT_ASCII_CI=y
CONFIG_XFS_QUOTA=y
CONFIG_XFS_POSIX_ACL=y
CONFIG_XFS_RT=y
+# CONFIG_XFS_ONLINE_SCRUB is not set
CONFIG_GFS2_FS=m
CONFIG_GFS2_FS_LOCKING_DLM=y
CONFIG_OCFS2_FS=m
CONFIG_BTRFS_FS=y
CONFIG_BTRFS_FS_POSIX_ACL=y
CONFIG_NILFS2_FS=m
-CONFIG_FS_DAX=y
CONFIG_EXPORTFS_BLOCK_OPS=y
CONFIG_FS_ENCRYPTION=y
CONFIG_FS_VERITY=y
diff --git a/arch/s390/configs/zfcpdump_defconfig b/arch/s390/configs/zfcpdump_defconfig
index ed0b137353ad..b5478267d6a7 100644
--- a/arch/s390/configs/zfcpdump_defconfig
+++ b/arch/s390/configs/zfcpdump_defconfig
@@ -33,7 +33,6 @@ CONFIG_NET=y
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_SAFE=y
CONFIG_BLK_DEV_RAM=y
-# CONFIG_DCSSBLK is not set
# CONFIG_DASD is not set
CONFIG_ENCLOSURE_SERVICES=y
CONFIG_SCSI=y
diff --git a/arch/s390/crypto/phmac_s390.c b/arch/s390/crypto/phmac_s390.c
index 7ecfdc4fba2d..89f3e6d8fd89 100644
--- a/arch/s390/crypto/phmac_s390.c
+++ b/arch/s390/crypto/phmac_s390.c
@@ -169,11 +169,18 @@ struct kmac_sha2_ctx {
u64 buflen[2];
};
+enum async_op {
+ OP_NOP = 0,
+ OP_UPDATE,
+ OP_FINAL,
+ OP_FINUP,
+};
+
/* phmac request context */
struct phmac_req_ctx {
struct hash_walk_helper hwh;
struct kmac_sha2_ctx kmac_ctx;
- bool final;
+ enum async_op async_op;
};
/*
@@ -610,6 +617,7 @@ static int phmac_update(struct ahash_request *req)
* using engine to serialize requests.
*/
if (rc == 0 || rc == -EKEYEXPIRED) {
+ req_ctx->async_op = OP_UPDATE;
atomic_inc(&tfm_ctx->via_engine_ctr);
rc = crypto_transfer_hash_request_to_engine(phmac_crypto_engine, req);
if (rc != -EINPROGRESS)
@@ -647,8 +655,7 @@ static int phmac_final(struct ahash_request *req)
* using engine to serialize requests.
*/
if (rc == 0 || rc == -EKEYEXPIRED) {
- req->nbytes = 0;
- req_ctx->final = true;
+ req_ctx->async_op = OP_FINAL;
atomic_inc(&tfm_ctx->via_engine_ctr);
rc = crypto_transfer_hash_request_to_engine(phmac_crypto_engine, req);
if (rc != -EINPROGRESS)
@@ -676,13 +683,16 @@ static int phmac_finup(struct ahash_request *req)
if (rc)
goto out;
+ req_ctx->async_op = OP_FINUP;
+
/* Try synchronous operations if no active engine usage */
if (!atomic_read(&tfm_ctx->via_engine_ctr)) {
rc = phmac_kmac_update(req, false);
if (rc == 0)
- req->nbytes = 0;
+ req_ctx->async_op = OP_FINAL;
}
- if (!rc && !req->nbytes && !atomic_read(&tfm_ctx->via_engine_ctr)) {
+ if (!rc && req_ctx->async_op == OP_FINAL &&
+ !atomic_read(&tfm_ctx->via_engine_ctr)) {
rc = phmac_kmac_final(req, false);
if (rc == 0)
goto out;
@@ -694,7 +704,7 @@ static int phmac_finup(struct ahash_request *req)
* using engine to serialize requests.
*/
if (rc == 0 || rc == -EKEYEXPIRED) {
- req_ctx->final = true;
+		/* req_ctx->async_op has been set to either OP_FINUP or OP_FINAL */
atomic_inc(&tfm_ctx->via_engine_ctr);
rc = crypto_transfer_hash_request_to_engine(phmac_crypto_engine, req);
if (rc != -EINPROGRESS)
@@ -855,15 +865,16 @@ static int phmac_do_one_request(struct crypto_engine *engine, void *areq)
/*
* Three kinds of requests come in here:
- * update when req->nbytes > 0 and req_ctx->final is false
- * final when req->nbytes = 0 and req_ctx->final is true
- * finup when req->nbytes > 0 and req_ctx->final is true
- * For update and finup the hwh walk needs to be prepared and
- * up to date but the actual nr of bytes in req->nbytes may be
- * any non zero number. For final there is no hwh walk needed.
+	 * 1. req_ctx->async_op == OP_UPDATE with req->nbytes > 0
+	 * 2. req_ctx->async_op == OP_FINUP with req->nbytes > 0
+	 * 3. req_ctx->async_op == OP_FINAL
+ * For update and finup the hwh walk has already been prepared
+ * by the caller. For final there is no hwh walk needed.
*/
- if (req->nbytes) {
+ switch (req_ctx->async_op) {
+ case OP_UPDATE:
+ case OP_FINUP:
rc = phmac_kmac_update(req, true);
if (rc == -EKEYEXPIRED) {
/*
@@ -880,10 +891,11 @@ static int phmac_do_one_request(struct crypto_engine *engine, void *areq)
hwh_advance(hwh, rc);
goto out;
}
- req->nbytes = 0;
- }
-
- if (req_ctx->final) {
+ if (req_ctx->async_op == OP_UPDATE)
+ break;
+ req_ctx->async_op = OP_FINAL;
+ fallthrough;
+ case OP_FINAL:
rc = phmac_kmac_final(req, true);
if (rc == -EKEYEXPIRED) {
/*
@@ -897,10 +909,14 @@ static int phmac_do_one_request(struct crypto_engine *engine, void *areq)
cond_resched();
return -ENOSPC;
}
+ break;
+ default:
+		/* unknown/unsupported/unimplemented async op */
+ return -EOPNOTSUPP;
}
out:
- if (rc || req_ctx->final)
+ if (rc || req_ctx->async_op == OP_FINAL)
memzero_explicit(kmac_ctx, sizeof(*kmac_ctx));
pr_debug("request complete with rc=%d\n", rc);
local_bh_disable();
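
The switch in phmac_do_one_request() folds finup into update-then-final via fallthrough. Stripped of the crypto specifics, the control flow reduces to the sketch below; do_update() and do_final() are stand-in stubs, not phmac functions:

/* Sketch of the async_op dispatch; stubs stand in for the kmac helpers. */
#include <linux/compiler.h>
#include <linux/errno.h>

enum async_op { OP_NOP = 0, OP_UPDATE, OP_FINAL, OP_FINUP };

static int do_update(void) { return 0; }	/* stand-in for the update step */
static int do_final(void) { return 0; }		/* stand-in for the final step */

static int dispatch(enum async_op *op)
{
	int rc;

	switch (*op) {
	case OP_UPDATE:
	case OP_FINUP:
		rc = do_update();
		if (rc)
			return rc;
		if (*op == OP_UPDATE)
			break;			/* plain update: stop here */
		*op = OP_FINAL;			/* finup: run the final step too */
		fallthrough;
	case OP_FINAL:
		rc = do_final();
		if (rc)
			return rc;
		break;
	default:
		return -EOPNOTSUPP;		/* unknown/unsupported async op */
	}
	return 0;
}
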
diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h
index 6890925d5587..a32f465ecf73 100644
--- a/arch/s390/include/asm/pci.h
+++ b/arch/s390/include/asm/pci.h
@@ -145,7 +145,6 @@ struct zpci_dev {
u8 has_resources : 1;
u8 is_physfn : 1;
u8 util_str_avail : 1;
- u8 irqs_registered : 1;
u8 tid_avail : 1;
u8 rtr_avail : 1; /* Relaxed translation allowed */
unsigned int devfn; /* DEVFN part of the RID*/
diff --git a/arch/s390/mm/dump_pagetables.c b/arch/s390/mm/dump_pagetables.c
index 9af2aae0a515..528d7c70979f 100644
--- a/arch/s390/mm/dump_pagetables.c
+++ b/arch/s390/mm/dump_pagetables.c
@@ -291,16 +291,14 @@ static int ptdump_cmp(const void *a, const void *b)
static int add_marker(unsigned long start, unsigned long end, const char *name)
{
- size_t oldsize, newsize;
-
- oldsize = markers_cnt * sizeof(*markers);
- newsize = oldsize + 2 * sizeof(*markers);
- if (!oldsize)
- markers = kvmalloc(newsize, GFP_KERNEL);
- else
- markers = kvrealloc(markers, newsize, GFP_KERNEL);
- if (!markers)
- goto error;
+ struct addr_marker *new;
+ size_t newsize;
+
+ newsize = (markers_cnt + 2) * sizeof(*markers);
+ new = kvrealloc(markers, newsize, GFP_KERNEL);
+ if (!new)
+ return -ENOMEM;
+ markers = new;
markers[markers_cnt].is_start = 1;
markers[markers_cnt].start_address = start;
markers[markers_cnt].size = end - start;
@@ -312,9 +310,6 @@ static int add_marker(unsigned long start, unsigned long end, const char *name)
markers[markers_cnt].name = name;
markers_cnt++;
return 0;
-error:
- markers_cnt = 0;
- return -ENOMEM;
}
static int pt_dump_init(void)
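
The add_marker() fix above is the standard realloc idiom: keep the old pointer until the new allocation is known to have succeeded, so nothing is leaked or lost on failure. A generic sketch (grow_array() is a hypothetical helper):

/* Never overwrite the only pointer to a buffer with a call that can fail. */
#include <linux/errno.h>
#include <linux/slab.h>

static int grow_array(int **arr, size_t *nr, size_t extra)
{
	int *new;

	new = kvrealloc(*arr, (*nr + extra) * sizeof(**arr), GFP_KERNEL);
	if (!new)
		return -ENOMEM;		/* *arr is untouched and still valid */
	*arr = new;
	*nr += extra;
	return 0;
}
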
diff --git a/arch/s390/pci/pci_event.c b/arch/s390/pci/pci_event.c
index b95376041501..27db1e72c623 100644
--- a/arch/s390/pci/pci_event.c
+++ b/arch/s390/pci/pci_event.c
@@ -188,7 +188,7 @@ static pci_ers_result_t zpci_event_attempt_error_recovery(struct pci_dev *pdev)
* is unbound or probed and that userspace can't access its
* configuration space while we perform recovery.
*/
- pci_dev_lock(pdev);
+ device_lock(&pdev->dev);
if (pdev->error_state == pci_channel_io_perm_failure) {
ers_res = PCI_ERS_RESULT_DISCONNECT;
goto out_unlock;
@@ -257,7 +257,7 @@ static pci_ers_result_t zpci_event_attempt_error_recovery(struct pci_dev *pdev)
driver->err_handler->resume(pdev);
pci_uevent_ers(pdev, PCI_ERS_RESULT_RECOVERED);
out_unlock:
- pci_dev_unlock(pdev);
+ device_unlock(&pdev->dev);
zpci_report_status(zdev, "recovery", status_str);
return ers_res;
diff --git a/arch/s390/pci/pci_irq.c b/arch/s390/pci/pci_irq.c
index 84482a921332..e73be96ce5fe 100644
--- a/arch/s390/pci/pci_irq.c
+++ b/arch/s390/pci/pci_irq.c
@@ -107,9 +107,6 @@ static int zpci_set_irq(struct zpci_dev *zdev)
else
rc = zpci_set_airq(zdev);
- if (!rc)
- zdev->irqs_registered = 1;
-
return rc;
}
@@ -123,9 +120,6 @@ static int zpci_clear_irq(struct zpci_dev *zdev)
else
rc = zpci_clear_airq(zdev);
- if (!rc)
- zdev->irqs_registered = 0;
-
return rc;
}
@@ -427,8 +421,7 @@ bool arch_restore_msi_irqs(struct pci_dev *pdev)
{
struct zpci_dev *zdev = to_zpci(pdev);
- if (!zdev->irqs_registered)
- zpci_set_irq(zdev);
+ zpci_set_irq(zdev);
return true;
}
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 4db7e4bf69f5..8fbff3106c56 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -75,7 +75,7 @@ export BITS
#
# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=53383
#
-KBUILD_CFLAGS += -mno-sse -mno-mmx -mno-sse2 -mno-3dnow -mno-avx
+KBUILD_CFLAGS += -mno-sse -mno-mmx -mno-sse2 -mno-3dnow -mno-avx -mno-sse4a
KBUILD_RUSTFLAGS += --target=$(objtree)/scripts/target.json
KBUILD_RUSTFLAGS += -Ctarget-feature=-sse,-sse2,-sse3,-ssse3,-sse4.1,-sse4.2,-avx,-avx2
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 28f5468a6ea3..fe65be0b9d9c 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -7596,6 +7596,7 @@ __init int intel_pmu_init(void)
break;
case INTEL_PANTHERLAKE_L:
+ case INTEL_WILDCATLAKE_L:
pr_cont("Pantherlake Hybrid events, ");
name = "pantherlake_hybrid";
goto lnl_common;
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index c0b7ac1c7594..01bc59e9286c 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -317,7 +317,8 @@ static u64 __grt_latency_data(struct perf_event *event, u64 status,
{
u64 val;
- WARN_ON_ONCE(hybrid_pmu(event->pmu)->pmu_type == hybrid_big);
+ WARN_ON_ONCE(is_hybrid() &&
+ hybrid_pmu(event->pmu)->pmu_type == hybrid_big);
dse &= PERF_PEBS_DATA_SOURCE_GRT_MASK;
val = hybrid_var(event->pmu, pebs_data_source)[dse];
diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c
index a762f7f5b161..d6c945cc5d07 100644
--- a/arch/x86/events/intel/uncore.c
+++ b/arch/x86/events/intel/uncore.c
@@ -1895,6 +1895,7 @@ static const struct x86_cpu_id intel_uncore_match[] __initconst = {
X86_MATCH_VFM(INTEL_ARROWLAKE_H, &mtl_uncore_init),
X86_MATCH_VFM(INTEL_LUNARLAKE_M, &lnl_uncore_init),
X86_MATCH_VFM(INTEL_PANTHERLAKE_L, &ptl_uncore_init),
+ X86_MATCH_VFM(INTEL_WILDCATLAKE_L, &ptl_uncore_init),
X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X, &spr_uncore_init),
X86_MATCH_VFM(INTEL_EMERALDRAPIDS_X, &spr_uncore_init),
X86_MATCH_VFM(INTEL_GRANITERAPIDS_X, &gnr_uncore_init),
diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h
index f32a0eca2ae5..950bfd006905 100644
--- a/arch/x86/include/asm/intel-family.h
+++ b/arch/x86/include/asm/intel-family.h
@@ -150,12 +150,12 @@
#define INTEL_LUNARLAKE_M IFM(6, 0xBD) /* Lion Cove / Skymont */
-#define INTEL_PANTHERLAKE_L IFM(6, 0xCC) /* Cougar Cove / Crestmont */
+#define INTEL_PANTHERLAKE_L IFM(6, 0xCC) /* Cougar Cove / Darkmont */
#define INTEL_WILDCATLAKE_L IFM(6, 0xD5)
-#define INTEL_NOVALAKE IFM(18, 0x01)
-#define INTEL_NOVALAKE_L IFM(18, 0x03)
+#define INTEL_NOVALAKE IFM(18, 0x01) /* Coyote Cove / Arctic Wolf */
+#define INTEL_NOVALAKE_L IFM(18, 0x03) /* Coyote Cove / Arctic Wolf */
/* "Small Core" Processors (Atom/E-Core) */
diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h
index 015d23f3e01f..53f4089333f2 100644
--- a/arch/x86/include/asm/page_64.h
+++ b/arch/x86/include/asm/page_64.h
@@ -43,6 +43,9 @@ extern unsigned long __phys_addr_symbol(unsigned long);
void clear_page_orig(void *page);
void clear_page_rep(void *page);
void clear_page_erms(void *page);
+KCFI_REFERENCE(clear_page_orig);
+KCFI_REFERENCE(clear_page_rep);
+KCFI_REFERENCE(clear_page_erms);
static inline void clear_page(void *page)
{
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index ccaa51ce63f6..8e36964a7721 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -516,7 +516,7 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
setup_force_cpu_cap(X86_FEATURE_ZEN5);
break;
case 0x50 ... 0x5f:
- case 0x90 ... 0xaf:
+ case 0x80 ... 0xaf:
case 0xc0 ... 0xcf:
setup_force_cpu_cap(X86_FEATURE_ZEN6);
break;
@@ -1035,8 +1035,18 @@ static void init_amd_zen4(struct cpuinfo_x86 *c)
}
}
+static const struct x86_cpu_id zen5_rdseed_microcode[] = {
+ ZEN_MODEL_STEP_UCODE(0x1a, 0x02, 0x1, 0x0b00215a),
+ ZEN_MODEL_STEP_UCODE(0x1a, 0x11, 0x0, 0x0b101054),
+};
+
static void init_amd_zen5(struct cpuinfo_x86 *c)
{
+ if (!x86_match_min_microcode_rev(zen5_rdseed_microcode)) {
+ clear_cpu_cap(c, X86_FEATURE_RDSEED);
+ msr_clear_bit(MSR_AMD64_CPUID_FN_7, 18);
+ pr_emerg_once("RDSEED32 is broken. Disabling the corresponding CPUID bit.\n");
+ }
}
static void init_amd(struct cpuinfo_x86 *c)
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 6a526ae1fe99..d7fa03bf51b4 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -1463,7 +1463,9 @@ static void __init retbleed_update_mitigation(void)
break;
default:
if (retbleed_mitigation != RETBLEED_MITIGATION_STUFF) {
- pr_err(RETBLEED_INTEL_MSG);
+ if (retbleed_mitigation != RETBLEED_MITIGATION_NONE)
+ pr_err(RETBLEED_INTEL_MSG);
+
retbleed_mitigation = RETBLEED_MITIGATION_NONE;
}
}
@@ -1825,13 +1827,6 @@ void unpriv_ebpf_notify(int new_state)
}
#endif
-static inline bool match_option(const char *arg, int arglen, const char *opt)
-{
- int len = strlen(opt);
-
- return len == arglen && !strncmp(arg, opt, len);
-}
-
/* The kernel command line selection for spectre v2 */
enum spectre_v2_mitigation_cmd {
SPECTRE_V2_CMD_NONE,
diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
index cdce885e2fd5..b7c797dc94f4 100644
--- a/arch/x86/kernel/cpu/microcode/amd.c
+++ b/arch/x86/kernel/cpu/microcode/amd.c
@@ -194,7 +194,7 @@ static bool need_sha_check(u32 cur_rev)
}
switch (cur_rev >> 8) {
- case 0x80012: return cur_rev <= 0x800126f; break;
+ case 0x80012: return cur_rev <= 0x8001277; break;
case 0x80082: return cur_rev <= 0x800820f; break;
case 0x83010: return cur_rev <= 0x830107c; break;
case 0x86001: return cur_rev <= 0x860010e; break;
@@ -233,13 +233,31 @@ static bool need_sha_check(u32 cur_rev)
return true;
}
+static bool cpu_has_entrysign(void)
+{
+ unsigned int fam = x86_family(bsp_cpuid_1_eax);
+ unsigned int model = x86_model(bsp_cpuid_1_eax);
+
+ if (fam == 0x17 || fam == 0x19)
+ return true;
+
+ if (fam == 0x1a) {
+ if (model <= 0x2f ||
+ (0x40 <= model && model <= 0x4f) ||
+ (0x60 <= model && model <= 0x6f))
+ return true;
+ }
+
+ return false;
+}
+
static bool verify_sha256_digest(u32 patch_id, u32 cur_rev, const u8 *data, unsigned int len)
{
struct patch_digest *pd = NULL;
u8 digest[SHA256_DIGEST_SIZE];
int i;
- if (x86_family(bsp_cpuid_1_eax) < 0x17)
+ if (!cpu_has_entrysign())
return true;
if (!need_sha_check(cur_rev))
diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c
index 2cd25a0d4637..fe1a2aa53c16 100644
--- a/arch/x86/kernel/cpu/resctrl/monitor.c
+++ b/arch/x86/kernel/cpu/resctrl/monitor.c
@@ -458,7 +458,16 @@ int __init rdt_get_mon_l3_config(struct rdt_resource *r)
r->mon.mbm_cfg_mask = ecx & MAX_EVT_CONFIG_BITS;
}
- if (rdt_cpu_has(X86_FEATURE_ABMC)) {
+ /*
+ * resctrl assumes a system that supports assignable counters can
+ * switch to "default" mode. Ensure that there is a "default" mode
+ * to switch to. This enforces a dependency between the independent
+ * X86_FEATURE_ABMC and X86_FEATURE_CQM_MBM_TOTAL/X86_FEATURE_CQM_MBM_LOCAL
+ * hardware features.
+ */
+ if (rdt_cpu_has(X86_FEATURE_ABMC) &&
+ (rdt_cpu_has(X86_FEATURE_CQM_MBM_TOTAL) ||
+ rdt_cpu_has(X86_FEATURE_CQM_MBM_LOCAL))) {
r->mon.mbm_cntr_assignable = true;
cpuid_count(0x80000020, 5, &eax, &ebx, &ecx, &edx);
r->mon.num_mbm_cntrs = (ebx & GENMASK(15, 0)) + 1;
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index 1f71cc135e9a..e88eacb1b5bb 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -825,6 +825,9 @@ void fpu__clear_user_states(struct fpu *fpu)
!fpregs_state_valid(fpu, smp_processor_id()))
os_xrstor_supervisor(fpu->fpstate);
+ /* Ensure XFD state is in sync before reloading XSTATE */
+ xfd_update_state(fpu->fpstate);
+
/* Reset user states in registers. */
restore_fpregs_from_init_fpstate(XFEATURE_MASK_USER_RESTORE);
diff --git a/block/blk-crypto.c b/block/blk-crypto.c
index 4b1ad84d1b5a..3e7bf1974cbd 100644
--- a/block/blk-crypto.c
+++ b/block/blk-crypto.c
@@ -292,7 +292,7 @@ bool __blk_crypto_bio_prep(struct bio **bio_ptr)
}
if (!bio_crypt_check_alignment(bio)) {
- bio->bi_status = BLK_STS_IOERR;
+ bio->bi_status = BLK_STS_INVAL;
goto fail;
}
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 54cffaae4df4..d74b13ec8e54 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -184,6 +184,16 @@ static int blk_validate_integrity_limits(struct queue_limits *lim)
if (!bi->interval_exp)
bi->interval_exp = ilog2(lim->logical_block_size);
+ /*
+	 * The PI generation / validation helpers do not expect intervals to
+	 * straddle multiple bio_vecs. Enforce alignment so that such split
+	 * intervals are never generated and each buffer is aligned as expected.
+ */
+ if (bi->csum_type) {
+ lim->dma_alignment = max(lim->dma_alignment,
+ (1U << bi->interval_exp) - 1);
+ }
+
return 0;
}
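
dma_alignment is a mask, so the limit set above is the protection interval size minus one; a 4096-byte interval (interval_exp = 12) yields 0xfff. A small sketch of the arithmetic, with hypothetical helper names:

/* The queue's dma_alignment is (alignment - 1), i.e. a low-bits mask. */
#include <linux/types.h>

static inline u32 pi_interval_mask(u8 interval_exp)
{
	return (1U << interval_exp) - 1;	/* 12 -> 0xfff */
}

static inline bool pi_offset_aligned(unsigned long addr, u8 interval_exp)
{
	return (addr & pi_interval_mask(interval_exp)) == 0;
}
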
diff --git a/drivers/acpi/acpi_mrrm.c b/drivers/acpi/acpi_mrrm.c
index 47ea3ccc2142..a6dbf623e557 100644
--- a/drivers/acpi/acpi_mrrm.c
+++ b/drivers/acpi/acpi_mrrm.c
@@ -63,6 +63,9 @@ static __init int acpi_parse_mrrm(struct acpi_table_header *table)
if (!mrrm)
return -ENODEV;
+ if (mrrm->header.revision != 1)
+ return -EINVAL;
+
if (mrrm->flags & ACPI_MRRM_FLAGS_REGION_ASSIGNMENT_OS)
return -EOPNOTSUPP;
diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
index 103f29661576..be8e7e18abca 100644
--- a/drivers/acpi/acpi_video.c
+++ b/drivers/acpi/acpi_video.c
@@ -1959,8 +1959,10 @@ static void acpi_video_bus_remove_notify_handler(struct acpi_video_bus *video)
struct acpi_video_device *dev;
mutex_lock(&video->device_list_lock);
- list_for_each_entry(dev, &video->video_device_list, entry)
+ list_for_each_entry(dev, &video->video_device_list, entry) {
acpi_video_dev_remove_notify_handler(dev);
+ cancel_delayed_work_sync(&dev->switch_brightness_work);
+ }
mutex_unlock(&video->device_list_lock);
acpi_video_bus_stop_devices(video);
diff --git a/drivers/acpi/acpica/tbprint.c b/drivers/acpi/acpica/tbprint.c
index 049f6c2f1e32..e5631027f7f1 100644
--- a/drivers/acpi/acpica/tbprint.c
+++ b/drivers/acpi/acpica/tbprint.c
@@ -95,6 +95,11 @@ acpi_tb_print_table_header(acpi_physical_address address,
{
struct acpi_table_header local_header;
+#pragma GCC diagnostic push
+#if defined(__GNUC__) && __GNUC__ >= 11
+#pragma GCC diagnostic ignored "-Wstringop-overread"
+#endif
+
if (ACPI_COMPARE_NAMESEG(header->signature, ACPI_SIG_FACS)) {
/* FACS only has signature and length fields */
@@ -143,4 +148,5 @@ acpi_tb_print_table_header(acpi_physical_address address,
local_header.asl_compiler_id,
local_header.asl_compiler_revision));
}
+#pragma GCC diagnostic pop
}
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index 0a7026040188..3c6dd9b4ba0a 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -619,8 +619,10 @@ static int acpi_button_add(struct acpi_device *device)
input_set_drvdata(input, device);
error = input_register_device(input);
- if (error)
+ if (error) {
+ input_free_device(input);
goto err_remove_fs;
+ }
switch (device->device_type) {
case ACPI_BUS_TYPE_POWER_BUTTON:
diff --git a/drivers/acpi/fan.h b/drivers/acpi/fan.h
index 8a28a72a7c6a..bedbab0e8e4e 100644
--- a/drivers/acpi/fan.h
+++ b/drivers/acpi/fan.h
@@ -49,6 +49,7 @@ struct acpi_fan_fst {
};
struct acpi_fan {
+ acpi_handle handle;
bool acpi4;
bool has_fst;
struct acpi_fan_fif fif;
@@ -59,14 +60,14 @@ struct acpi_fan {
struct device_attribute fine_grain_control;
};
-int acpi_fan_get_fst(struct acpi_device *device, struct acpi_fan_fst *fst);
+int acpi_fan_get_fst(acpi_handle handle, struct acpi_fan_fst *fst);
int acpi_fan_create_attributes(struct acpi_device *device);
void acpi_fan_delete_attributes(struct acpi_device *device);
#if IS_REACHABLE(CONFIG_HWMON)
-int devm_acpi_fan_create_hwmon(struct acpi_device *device);
+int devm_acpi_fan_create_hwmon(struct device *dev);
#else
-static inline int devm_acpi_fan_create_hwmon(struct acpi_device *device) { return 0; };
+static inline int devm_acpi_fan_create_hwmon(struct device *dev) { return 0; };
#endif
#endif
diff --git a/drivers/acpi/fan_attr.c b/drivers/acpi/fan_attr.c
index c1afb7b5ed3d..9b7fa52f3c2a 100644
--- a/drivers/acpi/fan_attr.c
+++ b/drivers/acpi/fan_attr.c
@@ -55,7 +55,7 @@ static ssize_t show_fan_speed(struct device *dev, struct device_attribute *attr,
struct acpi_fan_fst fst;
int status;
- status = acpi_fan_get_fst(acpi_dev, &fst);
+ status = acpi_fan_get_fst(acpi_dev->handle, &fst);
if (status)
return status;
diff --git a/drivers/acpi/fan_core.c b/drivers/acpi/fan_core.c
index 04ff608f2ff0..46e7fe7a506d 100644
--- a/drivers/acpi/fan_core.c
+++ b/drivers/acpi/fan_core.c
@@ -44,25 +44,30 @@ static int fan_get_max_state(struct thermal_cooling_device *cdev, unsigned long
return 0;
}
-int acpi_fan_get_fst(struct acpi_device *device, struct acpi_fan_fst *fst)
+int acpi_fan_get_fst(acpi_handle handle, struct acpi_fan_fst *fst)
{
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *obj;
acpi_status status;
int ret = 0;
- status = acpi_evaluate_object(device->handle, "_FST", NULL, &buffer);
- if (ACPI_FAILURE(status)) {
- dev_err(&device->dev, "Get fan state failed\n");
- return -ENODEV;
- }
+ status = acpi_evaluate_object(handle, "_FST", NULL, &buffer);
+ if (ACPI_FAILURE(status))
+ return -EIO;
obj = buffer.pointer;
- if (!obj || obj->type != ACPI_TYPE_PACKAGE ||
- obj->package.count != 3 ||
- obj->package.elements[1].type != ACPI_TYPE_INTEGER) {
- dev_err(&device->dev, "Invalid _FST data\n");
- ret = -EINVAL;
+ if (!obj)
+ return -ENODATA;
+
+ if (obj->type != ACPI_TYPE_PACKAGE || obj->package.count != 3) {
+ ret = -EPROTO;
+ goto err;
+ }
+
+ if (obj->package.elements[0].type != ACPI_TYPE_INTEGER ||
+ obj->package.elements[1].type != ACPI_TYPE_INTEGER ||
+ obj->package.elements[2].type != ACPI_TYPE_INTEGER) {
+ ret = -EPROTO;
goto err;
}
@@ -81,7 +86,7 @@ static int fan_get_state_acpi4(struct acpi_device *device, unsigned long *state)
struct acpi_fan_fst fst;
int status, i;
- status = acpi_fan_get_fst(device, &fst);
+ status = acpi_fan_get_fst(device->handle, &fst);
if (status)
return status;
@@ -311,11 +316,16 @@ static int acpi_fan_probe(struct platform_device *pdev)
struct acpi_device *device = ACPI_COMPANION(&pdev->dev);
char *name;
+ if (!device)
+ return -ENODEV;
+
fan = devm_kzalloc(&pdev->dev, sizeof(*fan), GFP_KERNEL);
if (!fan) {
dev_err(&device->dev, "No memory for fan\n");
return -ENOMEM;
}
+
+ fan->handle = device->handle;
device->driver_data = fan;
platform_set_drvdata(pdev, fan);
@@ -337,7 +347,7 @@ static int acpi_fan_probe(struct platform_device *pdev)
}
if (fan->has_fst) {
- result = devm_acpi_fan_create_hwmon(device);
+ result = devm_acpi_fan_create_hwmon(&pdev->dev);
if (result)
return result;
diff --git a/drivers/acpi/fan_hwmon.c b/drivers/acpi/fan_hwmon.c
index e8d90605106e..4b2c2007f2d7 100644
--- a/drivers/acpi/fan_hwmon.c
+++ b/drivers/acpi/fan_hwmon.c
@@ -93,13 +93,12 @@ static umode_t acpi_fan_hwmon_is_visible(const void *drvdata, enum hwmon_sensor_
static int acpi_fan_hwmon_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
int channel, long *val)
{
- struct acpi_device *adev = to_acpi_device(dev->parent);
struct acpi_fan *fan = dev_get_drvdata(dev);
struct acpi_fan_fps *fps;
struct acpi_fan_fst fst;
int ret;
- ret = acpi_fan_get_fst(adev, &fst);
+ ret = acpi_fan_get_fst(fan->handle, &fst);
if (ret < 0)
return ret;
@@ -167,12 +166,12 @@ static const struct hwmon_chip_info acpi_fan_hwmon_chip_info = {
.info = acpi_fan_hwmon_info,
};
-int devm_acpi_fan_create_hwmon(struct acpi_device *device)
+int devm_acpi_fan_create_hwmon(struct device *dev)
{
- struct acpi_fan *fan = acpi_driver_data(device);
+ struct acpi_fan *fan = dev_get_drvdata(dev);
struct device *hdev;
- hdev = devm_hwmon_device_register_with_info(&device->dev, "acpi_fan", fan,
- &acpi_fan_hwmon_chip_info, NULL);
+ hdev = devm_hwmon_device_register_with_info(dev, "acpi_fan", fan, &acpi_fan_hwmon_chip_info,
+ NULL);
return PTR_ERR_OR_ZERO(hdev);
}
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
index 1b997a5497e7..43d5e457814e 100644
--- a/drivers/acpi/property.c
+++ b/drivers/acpi/property.c
@@ -1107,7 +1107,7 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
size_t num_args,
struct fwnode_reference_args *args)
{
- return acpi_fwnode_get_reference_args(fwnode, propname, NULL, index, num_args, args);
+ return acpi_fwnode_get_reference_args(fwnode, propname, NULL, num_args, index, args);
}
EXPORT_SYMBOL_GPL(__acpi_node_get_property_reference);
diff --git a/drivers/acpi/riscv/rimt.c b/drivers/acpi/riscv/rimt.c
index 683fcfe35c31..7f423405e5ef 100644
--- a/drivers/acpi/riscv/rimt.c
+++ b/drivers/acpi/riscv/rimt.c
@@ -61,30 +61,6 @@ static int rimt_set_fwnode(struct acpi_rimt_node *rimt_node,
return 0;
}
-/**
- * rimt_get_fwnode() - Retrieve fwnode associated with an RIMT node
- *
- * @node: RIMT table node to be looked-up
- *
- * Returns: fwnode_handle pointer on success, NULL on failure
- */
-static struct fwnode_handle *rimt_get_fwnode(struct acpi_rimt_node *node)
-{
- struct fwnode_handle *fwnode = NULL;
- struct rimt_fwnode *curr;
-
- spin_lock(&rimt_fwnode_lock);
- list_for_each_entry(curr, &rimt_fwnode_list, list) {
- if (curr->rimt_node == node) {
- fwnode = curr->fwnode;
- break;
- }
- }
- spin_unlock(&rimt_fwnode_lock);
-
- return fwnode;
-}
-
static acpi_status rimt_match_node_callback(struct acpi_rimt_node *node,
void *context)
{
@@ -202,6 +178,67 @@ static struct acpi_rimt_node *rimt_scan_node(enum acpi_rimt_node_type type,
return NULL;
}
+/*
+ * RISC-V supports IOMMU as a PCI device or a platform device.
+ * When it is a platform device, there should be a namespace device as
+ * well along with RIMT. To create the link between RIMT information and
+ * the platform device, the IOMMU driver should register itself with the
+ * RIMT module. This is true for PCI based IOMMU as well.
+ */
+int rimt_iommu_register(struct device *dev)
+{
+ struct fwnode_handle *rimt_fwnode;
+ struct acpi_rimt_node *node;
+
+ node = rimt_scan_node(ACPI_RIMT_NODE_TYPE_IOMMU, dev);
+ if (!node) {
+ pr_err("Could not find IOMMU node in RIMT\n");
+ return -ENODEV;
+ }
+
+ if (dev_is_pci(dev)) {
+ rimt_fwnode = acpi_alloc_fwnode_static();
+ if (!rimt_fwnode)
+ return -ENOMEM;
+
+ rimt_fwnode->dev = dev;
+ if (!dev->fwnode)
+ dev->fwnode = rimt_fwnode;
+
+ rimt_set_fwnode(node, rimt_fwnode);
+ } else {
+ rimt_set_fwnode(node, dev->fwnode);
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_IOMMU_API
+
+/**
+ * rimt_get_fwnode() - Retrieve fwnode associated with an RIMT node
+ *
+ * @node: RIMT table node to be looked-up
+ *
+ * Returns: fwnode_handle pointer on success, NULL on failure
+ */
+static struct fwnode_handle *rimt_get_fwnode(struct acpi_rimt_node *node)
+{
+ struct fwnode_handle *fwnode = NULL;
+ struct rimt_fwnode *curr;
+
+ spin_lock(&rimt_fwnode_lock);
+ list_for_each_entry(curr, &rimt_fwnode_list, list) {
+ if (curr->rimt_node == node) {
+ fwnode = curr->fwnode;
+ break;
+ }
+ }
+ spin_unlock(&rimt_fwnode_lock);
+
+ return fwnode;
+}
+
static bool rimt_pcie_rc_supports_ats(struct acpi_rimt_node *node)
{
struct acpi_rimt_pcie_rc *pci_rc;
@@ -290,43 +327,6 @@ static struct acpi_rimt_node *rimt_node_get_id(struct acpi_rimt_node *node,
return NULL;
}
-/*
- * RISC-V supports IOMMU as a PCI device or a platform device.
- * When it is a platform device, there should be a namespace device as
- * well along with RIMT. To create the link between RIMT information and
- * the platform device, the IOMMU driver should register itself with the
- * RIMT module. This is true for PCI based IOMMU as well.
- */
-int rimt_iommu_register(struct device *dev)
-{
- struct fwnode_handle *rimt_fwnode;
- struct acpi_rimt_node *node;
-
- node = rimt_scan_node(ACPI_RIMT_NODE_TYPE_IOMMU, dev);
- if (!node) {
- pr_err("Could not find IOMMU node in RIMT\n");
- return -ENODEV;
- }
-
- if (dev_is_pci(dev)) {
- rimt_fwnode = acpi_alloc_fwnode_static();
- if (!rimt_fwnode)
- return -ENOMEM;
-
- rimt_fwnode->dev = dev;
- if (!dev->fwnode)
- dev->fwnode = rimt_fwnode;
-
- rimt_set_fwnode(node, rimt_fwnode);
- } else {
- rimt_set_fwnode(node, dev->fwnode);
- }
-
- return 0;
-}
-
-#ifdef CONFIG_IOMMU_API
-
static struct acpi_rimt_node *rimt_node_map_id(struct acpi_rimt_node *node,
u32 id_in, u32 *id_out,
u8 type_mask)
diff --git a/drivers/acpi/spcr.c b/drivers/acpi/spcr.c
index d4d52d5e9016..73cb933fdc89 100644
--- a/drivers/acpi/spcr.c
+++ b/drivers/acpi/spcr.c
@@ -155,7 +155,7 @@ int __init acpi_parse_spcr(bool enable_earlycon, bool enable_console)
* Baud Rate field. If this field is zero or not present, Configured
* Baud Rate is used.
*/
- if (table->precise_baudrate)
+ if (table->header.revision >= 4 && table->precise_baudrate)
baud_rate = table->precise_baudrate;
else switch (table->baud_rate) {
case 0:
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 8c99ceaa303b..a3a1b5c33ba3 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -851,17 +851,8 @@ static int binder_inc_node_nilocked(struct binder_node *node, int strong,
} else {
if (!internal)
node->local_weak_refs++;
- if (!node->has_weak_ref && list_empty(&node->work.entry)) {
- if (target_list == NULL) {
- pr_err("invalid inc weak node for %d\n",
- node->debug_id);
- return -EINVAL;
- }
- /*
- * See comment above
- */
+ if (!node->has_weak_ref && target_list && list_empty(&node->work.entry))
binder_enqueue_work_ilocked(&node->work, target_list);
- }
}
return 0;
}
@@ -2418,10 +2409,10 @@ err_fd_not_accepted:
/**
* struct binder_ptr_fixup - data to be fixed-up in target buffer
- * @offset offset in target buffer to fixup
- * @skip_size bytes to skip in copy (fixup will be written later)
- * @fixup_data data to write at fixup offset
- * @node list node
+ * @offset: offset in target buffer to fixup
+ * @skip_size: bytes to skip in copy (fixup will be written later)
+ * @fixup_data: data to write at fixup offset
+ * @node: list node
*
* This is used for the pointer fixup list (pf) which is created and consumed
* during binder_transaction() and is only accessed locally. No
@@ -2438,10 +2429,10 @@ struct binder_ptr_fixup {
/**
* struct binder_sg_copy - scatter-gather data to be copied
- * @offset offset in target buffer
- * @sender_uaddr user address in source buffer
- * @length bytes to copy
- * @node list node
+ * @offset: offset in target buffer
+ * @sender_uaddr: user address in source buffer
+ * @length: bytes to copy
+ * @node: list node
*
* This is used for the sg copy list (sgc) which is created and consumed
* during binder_transaction() and is only accessed locally. No
@@ -4063,14 +4054,15 @@ binder_freeze_notification_done(struct binder_proc *proc,
/**
* binder_free_buf() - free the specified buffer
- * @proc: binder proc that owns buffer
- * @buffer: buffer to be freed
- * @is_failure: failed to send transaction
+ * @proc: binder proc that owns buffer
+ * @thread: binder thread performing the buffer release
+ * @buffer: buffer to be freed
+ * @is_failure: failed to send transaction
*
- * If buffer for an async transaction, enqueue the next async
+ * If the buffer is for an async transaction, enqueue the next async
* transaction from the node.
*
- * Cleanup buffer and free it.
+ * Cleanup the buffer and free it.
*/
static void
binder_free_buf(struct binder_proc *proc,
diff --git a/drivers/android/binder/freeze.rs b/drivers/android/binder/freeze.rs
index e68c3c8bc55a..220de35ae85a 100644
--- a/drivers/android/binder/freeze.rs
+++ b/drivers/android/binder/freeze.rs
@@ -106,13 +106,22 @@ impl DeliverToRead for FreezeMessage {
return Ok(true);
}
if freeze.is_clearing {
- _removed_listener = freeze_entry.remove_node();
+ kernel::warn_on!(freeze.num_cleared_duplicates != 0);
+ if freeze.num_pending_duplicates > 0 {
+ // The primary freeze listener was deleted, so convert a pending duplicate back
+ // into the primary one.
+ freeze.num_pending_duplicates -= 1;
+ freeze.is_pending = true;
+ freeze.is_clearing = true;
+ } else {
+ _removed_listener = freeze_entry.remove_node();
+ }
drop(node_refs);
writer.write_code(BR_CLEAR_FREEZE_NOTIFICATION_DONE)?;
writer.write_payload(&self.cookie.0)?;
Ok(true)
} else {
- let is_frozen = freeze.node.owner.inner.lock().is_frozen;
+ let is_frozen = freeze.node.owner.inner.lock().is_frozen.is_fully_frozen();
if freeze.last_is_frozen == Some(is_frozen) {
return Ok(true);
}
@@ -245,8 +254,9 @@ impl Process {
);
return Err(EINVAL);
}
- if freeze.is_clearing {
- // Immediately send another FreezeMessage for BR_CLEAR_FREEZE_NOTIFICATION_DONE.
+ let is_frozen = freeze.node.owner.inner.lock().is_frozen.is_fully_frozen();
+ if freeze.is_clearing || freeze.last_is_frozen != Some(is_frozen) {
+ // Immediately send another FreezeMessage.
clear_msg = Some(FreezeMessage::init(alloc, cookie));
}
freeze.is_pending = false;
diff --git a/drivers/android/binder/node.rs b/drivers/android/binder/node.rs
index ade895ef791e..08d362deaf61 100644
--- a/drivers/android/binder/node.rs
+++ b/drivers/android/binder/node.rs
@@ -687,7 +687,7 @@ impl Node {
);
}
if inner.freeze_list.is_empty() {
- _unused_capacity = mem::replace(&mut inner.freeze_list, KVVec::new());
+ _unused_capacity = mem::take(&mut inner.freeze_list);
}
}
diff --git a/drivers/android/binder/process.rs b/drivers/android/binder/process.rs
index f13a747e784c..7607353a5e92 100644
--- a/drivers/android/binder/process.rs
+++ b/drivers/android/binder/process.rs
@@ -72,6 +72,33 @@ impl Mapping {
const PROC_DEFER_FLUSH: u8 = 1;
const PROC_DEFER_RELEASE: u8 = 2;
+#[derive(Copy, Clone)]
+pub(crate) enum IsFrozen {
+ Yes,
+ No,
+ InProgress,
+}
+
+impl IsFrozen {
+ /// Whether incoming transactions should be rejected due to freeze.
+ pub(crate) fn is_frozen(self) -> bool {
+ match self {
+ IsFrozen::Yes => true,
+ IsFrozen::No => false,
+ IsFrozen::InProgress => true,
+ }
+ }
+
+ /// Whether freeze notifications consider this process frozen.
+ pub(crate) fn is_fully_frozen(self) -> bool {
+ match self {
+ IsFrozen::Yes => true,
+ IsFrozen::No => false,
+ IsFrozen::InProgress => false,
+ }
+ }
+}
+
/// The fields of `Process` protected by the spinlock.
pub(crate) struct ProcessInner {
is_manager: bool,
@@ -98,7 +125,7 @@ pub(crate) struct ProcessInner {
/// are woken up.
outstanding_txns: u32,
/// Process is frozen and unable to service binder transactions.
- pub(crate) is_frozen: bool,
+ pub(crate) is_frozen: IsFrozen,
/// Process received sync transactions since last frozen.
pub(crate) sync_recv: bool,
/// Process received async transactions since last frozen.
@@ -124,7 +151,7 @@ impl ProcessInner {
started_thread_count: 0,
defer_work: 0,
outstanding_txns: 0,
- is_frozen: false,
+ is_frozen: IsFrozen::No,
sync_recv: false,
async_recv: false,
binderfs_file: None,
@@ -1260,7 +1287,7 @@ impl Process {
let is_manager = {
let mut inner = self.inner.lock();
inner.is_dead = true;
- inner.is_frozen = false;
+ inner.is_frozen = IsFrozen::No;
inner.sync_recv = false;
inner.async_recv = false;
inner.is_manager
@@ -1346,10 +1373,6 @@ impl Process {
.alloc
.take_for_each(|offset, size, debug_id, odata| {
let ptr = offset + address;
- pr_warn!(
- "{}: removing orphan mapping {offset}:{size}\n",
- self.pid_in_current_ns()
- );
let mut alloc =
Allocation::new(self.clone(), debug_id, offset, size, ptr, false);
if let Some(data) = odata {
@@ -1371,7 +1394,7 @@ impl Process {
return;
}
inner.outstanding_txns -= 1;
- inner.is_frozen && inner.outstanding_txns == 0
+ inner.is_frozen.is_frozen() && inner.outstanding_txns == 0
};
if wake {
@@ -1385,7 +1408,7 @@ impl Process {
let mut inner = self.inner.lock();
inner.sync_recv = false;
inner.async_recv = false;
- inner.is_frozen = false;
+ inner.is_frozen = IsFrozen::No;
drop(inner);
msgs.send_messages();
return Ok(());
@@ -1394,7 +1417,7 @@ impl Process {
let mut inner = self.inner.lock();
inner.sync_recv = false;
inner.async_recv = false;
- inner.is_frozen = true;
+ inner.is_frozen = IsFrozen::InProgress;
if info.timeout_ms > 0 {
let mut jiffies = kernel::time::msecs_to_jiffies(info.timeout_ms);
@@ -1408,7 +1431,7 @@ impl Process {
.wait_interruptible_timeout(&mut inner, jiffies)
{
CondVarTimeoutResult::Signal { .. } => {
- inner.is_frozen = false;
+ inner.is_frozen = IsFrozen::No;
return Err(ERESTARTSYS);
}
CondVarTimeoutResult::Woken { jiffies: remaining } => {
@@ -1422,17 +1445,18 @@ impl Process {
}
if inner.txns_pending_locked() {
- inner.is_frozen = false;
+ inner.is_frozen = IsFrozen::No;
Err(EAGAIN)
} else {
drop(inner);
match self.prepare_freeze_messages() {
Ok(batch) => {
+ self.inner.lock().is_frozen = IsFrozen::Yes;
batch.send_messages();
Ok(())
}
Err(kernel::alloc::AllocError) => {
- self.inner.lock().is_frozen = false;
+ self.inner.lock().is_frozen = IsFrozen::No;
Err(ENOMEM)
}
}
diff --git a/drivers/android/binder/transaction.rs b/drivers/android/binder/transaction.rs
index 02512175d622..4bd3c0e417eb 100644
--- a/drivers/android/binder/transaction.rs
+++ b/drivers/android/binder/transaction.rs
@@ -249,7 +249,7 @@ impl Transaction {
if oneway {
if let Some(target_node) = self.target_node.clone() {
- if process_inner.is_frozen {
+ if process_inner.is_frozen.is_frozen() {
process_inner.async_recv = true;
if self.flags & TF_UPDATE_TXN != 0 {
if let Some(t_outdated) =
@@ -270,7 +270,7 @@ impl Transaction {
}
}
- if process_inner.is_frozen {
+ if process_inner.is_frozen.is_frozen() {
return Err(BinderError::new_frozen_oneway());
} else {
return Ok(());
@@ -280,7 +280,7 @@ impl Transaction {
}
}
- if process_inner.is_frozen {
+ if process_inner.is_frozen.is_frozen() {
process_inner.sync_recv = true;
return Err(BinderError::new_frozen());
}
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index 1037169abb45..e1eff05bea4a 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -292,7 +292,7 @@ bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu)
* frequency (by keeping the initial capacity_freq_ref value).
*/
cpu_clk = of_clk_get(cpu_node, 0);
- if (!PTR_ERR_OR_ZERO(cpu_clk)) {
+ if (!IS_ERR_OR_NULL(cpu_clk)) {
per_cpu(capacity_freq_ref, cpu) =
clk_get_rate(cpu_clk) / HZ_PER_KHZ;
clk_put(cpu_clk);
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 3c533dab8fa5..f69dc9c85954 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -1784,7 +1784,7 @@ static int fw_devlink_dev_sync_state(struct device *dev, void *data)
return 0;
if (fw_devlink_sync_state == FW_DEVLINK_SYNC_STATE_STRICT) {
- dev_warn(sup, "sync_state() pending due to %s\n",
+ dev_info(sup, "sync_state() pending due to %s\n",
dev_name(link->consumer));
return 0;
}
diff --git a/drivers/base/devcoredump.c b/drivers/base/devcoredump.c
index 37faf6156d7c..55bdc7f5e59d 100644
--- a/drivers/base/devcoredump.c
+++ b/drivers/base/devcoredump.c
@@ -23,50 +23,46 @@ struct devcd_entry {
void *data;
size_t datalen;
/*
- * Here, mutex is required to serialize the calls to del_wk work between
- * user/kernel space which happens when devcd is added with device_add()
- * and that sends uevent to user space. User space reads the uevents,
- * and calls to devcd_data_write() which try to modify the work which is
- * not even initialized/queued from devcoredump.
+	 * There are two races for which the mutex is required.
*
+	 * The first race is between device creation and userspace writing to
+	 * the coredump to schedule immediate destruction.
*
+ * This race is handled by arming the timer before device creation, but
+ * when device creation fails the timer still exists.
*
- * cpu0(X) cpu1(Y)
+ * To solve this, hold the mutex during device_add(), and set
+ * init_completed on success before releasing the mutex.
*
- * dev_coredump() uevent sent to user space
- * device_add() ======================> user space process Y reads the
- * uevents writes to devcd fd
- * which results into writes to
+	 * That way the timer will never fire until device_add() is called;
+	 * it will do nothing if init_completed is not set, and it is also
+	 * cancelled in that case.
*
- * devcd_data_write()
- * mod_delayed_work()
- * try_to_grab_pending()
- * timer_delete()
- * debug_assert_init()
- * INIT_DELAYED_WORK()
- * schedule_delayed_work()
- *
- *
- * Also, mutex alone would not be enough to avoid scheduling of
- * del_wk work after it get flush from a call to devcd_free()
- * mentioned as below.
- *
- * disabled_store()
- * devcd_free()
- * mutex_lock() devcd_data_write()
- * flush_delayed_work()
- * mutex_unlock()
- * mutex_lock()
- * mod_delayed_work()
- * mutex_unlock()
- * So, delete_work flag is required.
+	 * The second race involves multiple parallel invocations of devcd_free();
+	 * a deleted flag ensures that only one of them runs the destructor.
*/
struct mutex mutex;
- bool delete_work;
+ bool init_completed, deleted;
struct module *owner;
ssize_t (*read)(char *buffer, loff_t offset, size_t count,
void *data, size_t datalen);
void (*free)(void *data);
+ /*
+	 * If nothing interferes and device_add() returns success,
+ * del_wk will destroy the device after the timer fires.
+ *
+	 * Multiple userspace processes can interfere with the timer:
+ * - Writing to the coredump will reschedule the timer to run immediately,
+ * if still armed.
+ *
+	 * This is handled by using "if (cancel_delayed_work()) {
+	 * schedule_delayed_work() }", which prevents re-arming the work
+	 * after it has already fired.
+ * - Writing to /sys/class/devcoredump/disabled will destroy the
+ * coredump synchronously.
+	 * This is handled by using disable_delayed_work_sync(), and then
+	 * checking whether the deleted flag is set with devcd->mutex held.
+ */
struct delayed_work del_wk;
struct device *failing_dev;
};
@@ -95,14 +91,27 @@ static void devcd_dev_release(struct device *dev)
kfree(devcd);
}
+static void __devcd_del(struct devcd_entry *devcd)
+{
+ devcd->deleted = true;
+ device_del(&devcd->devcd_dev);
+ put_device(&devcd->devcd_dev);
+}
+
static void devcd_del(struct work_struct *wk)
{
struct devcd_entry *devcd;
+ bool init_completed;
devcd = container_of(wk, struct devcd_entry, del_wk.work);
- device_del(&devcd->devcd_dev);
- put_device(&devcd->devcd_dev);
+ /* devcd->mutex serializes against dev_coredumpm_timeout */
+ mutex_lock(&devcd->mutex);
+ init_completed = devcd->init_completed;
+ mutex_unlock(&devcd->mutex);
+
+ if (init_completed)
+ __devcd_del(devcd);
}
static ssize_t devcd_data_read(struct file *filp, struct kobject *kobj,
@@ -122,12 +131,12 @@ static ssize_t devcd_data_write(struct file *filp, struct kobject *kobj,
struct device *dev = kobj_to_dev(kobj);
struct devcd_entry *devcd = dev_to_devcd(dev);
- mutex_lock(&devcd->mutex);
- if (!devcd->delete_work) {
- devcd->delete_work = true;
- mod_delayed_work(system_wq, &devcd->del_wk, 0);
- }
- mutex_unlock(&devcd->mutex);
+ /*
+	 * Although it's tempting to use mod_delayed_work() here,
+ * that will cause a reschedule if the timer already fired.
+ */
+ if (cancel_delayed_work(&devcd->del_wk))
+ schedule_delayed_work(&devcd->del_wk, 0);
return count;
}
@@ -151,11 +160,21 @@ static int devcd_free(struct device *dev, void *data)
{
struct devcd_entry *devcd = dev_to_devcd(dev);
+ /*
+	 * To prevent a race with devcd_data_write(), disable the work and
+	 * perform the removal manually instead.
+ *
+ * We cannot rely on the return value of
+ * disable_delayed_work_sync() here, because it might be in the
+ * middle of a cancel_delayed_work + schedule_delayed_work pair.
+ *
+ * devcd->mutex here guards against multiple parallel invocations
+ * of devcd_free().
+ */
+ disable_delayed_work_sync(&devcd->del_wk);
mutex_lock(&devcd->mutex);
- if (!devcd->delete_work)
- devcd->delete_work = true;
-
- flush_delayed_work(&devcd->del_wk);
+ if (!devcd->deleted)
+ __devcd_del(devcd);
mutex_unlock(&devcd->mutex);
return 0;
}
@@ -179,12 +198,10 @@ static ssize_t disabled_show(const struct class *class, const struct class_attri
* put_device() <- last reference
* error = fn(dev, data) devcd_dev_release()
* devcd_free(dev, data) kfree(devcd)
- * mutex_lock(&devcd->mutex);
*
*
 * In the above diagram, it looks like disabled_store() would be racing with a concurrently
- * running devcd_del() and result in memory abort while acquiring devcd->mutex which
- * is called after kfree of devcd memory after dropping its last reference with
+ * running devcd_del() and result in memory abort after dropping its last reference with
 * put_device(). However, this will not happen, as fn(dev, data) runs
 * with its own reference to the device via klist_node, so that is not the
 * last reference and the situation above cannot occur.
@@ -374,7 +391,7 @@ void dev_coredumpm_timeout(struct device *dev, struct module *owner,
devcd->read = read;
devcd->free = free;
devcd->failing_dev = get_device(dev);
- devcd->delete_work = false;
+ devcd->deleted = false;
mutex_init(&devcd->mutex);
device_initialize(&devcd->devcd_dev);
@@ -383,8 +400,14 @@ void dev_coredumpm_timeout(struct device *dev, struct module *owner,
atomic_inc_return(&devcd_count));
devcd->devcd_dev.class = &devcd_class;
- mutex_lock(&devcd->mutex);
dev_set_uevent_suppress(&devcd->devcd_dev, true);
+
+ /* devcd->mutex prevents devcd_del() completing until init finishes */
+ mutex_lock(&devcd->mutex);
+ devcd->init_completed = false;
+ INIT_DELAYED_WORK(&devcd->del_wk, devcd_del);
+ schedule_delayed_work(&devcd->del_wk, timeout);
+
if (device_add(&devcd->devcd_dev))
goto put_device;
@@ -401,13 +424,20 @@ void dev_coredumpm_timeout(struct device *dev, struct module *owner,
dev_set_uevent_suppress(&devcd->devcd_dev, false);
kobject_uevent(&devcd->devcd_dev.kobj, KOBJ_ADD);
- INIT_DELAYED_WORK(&devcd->del_wk, devcd_del);
- schedule_delayed_work(&devcd->del_wk, timeout);
+
+ /*
+ * Safe to run devcd_del() now that we are done with devcd_dev.
+ * Alternatively we could have taken a ref on devcd_dev before
+ * dropping the lock.
+ */
+ devcd->init_completed = true;
mutex_unlock(&devcd->mutex);
return;
put_device:
- put_device(&devcd->devcd_dev);
mutex_unlock(&devcd->mutex);
+ cancel_delayed_work_sync(&devcd->del_wk);
+ put_device(&devcd->devcd_dev);
+
put_module:
module_put(owner);
free:
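
The devcoredump rework leans on two delayed-work idioms: cancel_delayed_work() followed by schedule_delayed_work() to expedite a still-armed timer without re-arming one that already fired, and disable_delayed_work_sync() plus a flag under a mutex so that exactly one path runs the destructor. A condensed sketch, assuming the disable_delayed_work_sync() helper of recent kernels; 'struct entry' and the function names are hypothetical:

/* Condensed sketch of the two work-item idioms described above. */
#include <linux/mutex.h>
#include <linux/workqueue.h>

struct entry {
	struct mutex mutex;
	bool deleted;
	struct delayed_work del_wk;
};

/* Expedite destruction without re-arming a work item that already fired. */
static void expedite(struct entry *e)
{
	if (cancel_delayed_work(&e->del_wk))
		schedule_delayed_work(&e->del_wk, 0);
}

/* Tear down synchronously; 'deleted' lets only one caller run the destructor. */
static void teardown(struct entry *e)
{
	disable_delayed_work_sync(&e->del_wk);
	mutex_lock(&e->mutex);
	if (!e->deleted) {
		e->deleted = true;
		/* ... the device_del()/put_device() equivalent goes here ... */
	}
	mutex_unlock(&e->mutex);
}
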
diff --git a/drivers/base/regmap/regmap-slimbus.c b/drivers/base/regmap/regmap-slimbus.c
index 54eb7d227cf4..e523fae73004 100644
--- a/drivers/base/regmap/regmap-slimbus.c
+++ b/drivers/base/regmap/regmap-slimbus.c
@@ -48,8 +48,7 @@ struct regmap *__regmap_init_slimbus(struct slim_device *slimbus,
if (IS_ERR(bus))
return ERR_CAST(bus);
- return __regmap_init(&slimbus->dev, bus, &slimbus->dev, config,
- lock_key, lock_name);
+ return __regmap_init(&slimbus->dev, bus, slimbus, config, lock_key, lock_name);
}
EXPORT_SYMBOL_GPL(__regmap_init_slimbus);
@@ -63,8 +62,7 @@ struct regmap *__devm_regmap_init_slimbus(struct slim_device *slimbus,
if (IS_ERR(bus))
return ERR_CAST(bus);
- return __devm_regmap_init(&slimbus->dev, bus, &slimbus, config,
- lock_key, lock_name);
+ return __devm_regmap_init(&slimbus->dev, bus, slimbus, config, lock_key, lock_name);
}
EXPORT_SYMBOL_GPL(__devm_regmap_init_slimbus);
diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c
index 6ecfc821cf83..72f045e6ed51 100644
--- a/drivers/bcma/main.c
+++ b/drivers/bcma/main.c
@@ -294,6 +294,8 @@ static int bcma_register_devices(struct bcma_bus *bus)
int err;
list_for_each_entry(core, &bus->cores, list) {
+ struct device_node *np;
+
/* We support that core ourselves */
switch (core->id.id) {
case BCMA_CORE_4706_CHIPCOMMON:
@@ -311,6 +313,10 @@ static int bcma_register_devices(struct bcma_bus *bus)
if (bcma_is_core_needed_early(core->id.id))
continue;
+ np = core->dev.of_node;
+ if (np && !of_device_is_available(np))
+ continue;
+
/* Only first GMAC core on BCM4706 is connected and working */
if (core->id.id == BCMA_CORE_4706_MAC_GBIT &&
core->core_unit > 0)
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 1188f32a5e5e..a853c65ac65d 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -52,6 +52,7 @@
static DEFINE_IDR(nbd_index_idr);
static DEFINE_MUTEX(nbd_index_mutex);
static struct workqueue_struct *nbd_del_wq;
+static struct cred *nbd_cred;
static int nbd_total_devices = 0;
struct nbd_sock {
@@ -554,6 +555,7 @@ static int __sock_xmit(struct nbd_device *nbd, struct socket *sock, int send,
int result;
struct msghdr msg = {} ;
unsigned int noreclaim_flag;
+ const struct cred *old_cred;
if (unlikely(!sock)) {
dev_err_ratelimited(disk_to_dev(nbd->disk),
@@ -562,6 +564,8 @@ static int __sock_xmit(struct nbd_device *nbd, struct socket *sock, int send,
return -EINVAL;
}
+ old_cred = override_creds(nbd_cred);
+
msg.msg_iter = *iter;
noreclaim_flag = memalloc_noreclaim_save();
@@ -586,6 +590,8 @@ static int __sock_xmit(struct nbd_device *nbd, struct socket *sock, int send,
memalloc_noreclaim_restore(noreclaim_flag);
+ revert_creds(old_cred);
+
return result;
}
@@ -2677,7 +2683,15 @@ static int __init nbd_init(void)
return -ENOMEM;
}
+ nbd_cred = prepare_kernel_cred(&init_task);
+ if (!nbd_cred) {
+ destroy_workqueue(nbd_del_wq);
+ unregister_blkdev(NBD_MAJOR, "nbd");
+ return -ENOMEM;
+ }
+
if (genl_register_family(&nbd_genl_family)) {
+ put_cred(nbd_cred);
destroy_workqueue(nbd_del_wq);
unregister_blkdev(NBD_MAJOR, "nbd");
return -EINVAL;
@@ -2732,6 +2746,7 @@ static void __exit nbd_cleanup(void)
/* Also wait for nbd_dev_remove_work() completes */
destroy_workqueue(nbd_del_wq);
+ put_cred(nbd_cred);
idr_destroy(&nbd_index_idr);
unregister_blkdev(NBD_MAJOR, "nbd");
}
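
The nbd change brackets every socket transfer with a kernel credential override so the I/O never runs with the credentials of whichever task happened to trigger it. Reduced to its essentials (io_cred and the function name are hypothetical):

/* Minimal cred-override bracket around kernel-initiated I/O. */
#include <linux/cred.h>
#include <linux/sched.h>

static struct cred *io_cred;	/* prepare_kernel_cred(&init_task) at init */

static int xmit_with_kernel_creds(void)
{
	const struct cred *old;
	int ret = 0;

	old = override_creds(io_cred);	/* run as the kernel, not the caller */
	/* ... the actual sock_sendmsg()/sock_recvmsg() would go here ... */
	revert_creds(old);
	return ret;
}

As in the nbd hunks, the credential is created once with prepare_kernel_cred(&init_task) at module init and released with put_cred() on exit.
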
diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
index f982027e8c85..0ee55f889cfd 100644
--- a/drivers/block/null_blk/main.c
+++ b/drivers/block/null_blk/main.c
@@ -1949,6 +1949,7 @@ static int null_add_dev(struct nullb_device *dev)
.logical_block_size = dev->blocksize,
.physical_block_size = dev->blocksize,
.max_hw_sectors = dev->max_sectors,
+ .dma_alignment = dev->blocksize - 1,
};
struct nullb *nullb;
diff --git a/drivers/bluetooth/bpa10x.c b/drivers/bluetooth/bpa10x.c
index b7ba667a3d09..e305d04aac9d 100644
--- a/drivers/bluetooth/bpa10x.c
+++ b/drivers/bluetooth/bpa10x.c
@@ -41,6 +41,7 @@ struct bpa10x_data {
struct usb_anchor rx_anchor;
struct sk_buff *rx_skb[2];
+ struct hci_uart hu;
};
static void bpa10x_tx_complete(struct urb *urb)
@@ -96,7 +97,7 @@ static void bpa10x_rx_complete(struct urb *urb)
if (urb->status == 0) {
bool idx = usb_pipebulk(urb->pipe);
- data->rx_skb[idx] = h4_recv_buf(hdev, data->rx_skb[idx],
+ data->rx_skb[idx] = h4_recv_buf(&data->hu, data->rx_skb[idx],
urb->transfer_buffer,
urb->actual_length,
bpa10x_recv_pkts,
@@ -388,6 +389,7 @@ static int bpa10x_probe(struct usb_interface *intf,
hci_set_drvdata(hdev, data);
data->hdev = hdev;
+ data->hu.hdev = hdev;
SET_HCIDEV_DEV(hdev, &intf->dev);
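
bpa10x is a USB driver, so to keep calling h4_recv_buf() after its first argument changed from hci_dev to hci_uart it embeds a shim hci_uart with only .hdev populated; btmtkuart and btnxpuart below do the same. A sketch of the shim under those assumptions (struct my_dev, my_pkts and my_recv are hypothetical; the driver-local hci_uart.h is assumed to provide h4_recv_buf() and the H4_RECV_* macros):

/* Shim pattern: embed a minimal hci_uart so h4_recv_buf() can be reused. */
#include <linux/err.h>
#include <linux/kernel.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#include "hci_uart.h"

struct my_dev {
	struct hci_dev *hdev;
	struct sk_buff *rx_skb;
	struct hci_uart hu;		/* shim: probe sets only hu.hdev */
};

static const struct h4_recv_pkt my_pkts[] = {
	{ H4_RECV_ACL,   .recv = hci_recv_frame },
	{ H4_RECV_EVENT, .recv = hci_recv_frame },
};

static void my_recv(struct my_dev *d, const u8 *buf, size_t len)
{
	d->rx_skb = h4_recv_buf(&d->hu, d->rx_skb, buf, len,
				my_pkts, ARRAY_SIZE(my_pkts));
	if (IS_ERR(d->rx_skb))
		d->rx_skb = NULL;	/* drop partial data on framing error */
}
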
diff --git a/drivers/bluetooth/btintel_pcie.c b/drivers/bluetooth/btintel_pcie.c
index 6d3963bd56a9..a075d8ec4677 100644
--- a/drivers/bluetooth/btintel_pcie.c
+++ b/drivers/bluetooth/btintel_pcie.c
@@ -1467,11 +1467,6 @@ static irqreturn_t btintel_pcie_irq_msix_handler(int irq, void *dev_id)
if (intr_hw & BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP1)
btintel_pcie_msix_gp1_handler(data);
- /* This interrupt is triggered by the firmware after updating
- * boot_stage register and image_response register
- */
- if (intr_hw & BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP0)
- btintel_pcie_msix_gp0_handler(data);
/* For TX */
if (intr_fh & BTINTEL_PCIE_MSIX_FH_INT_CAUSES_0) {
@@ -1487,6 +1482,12 @@ static irqreturn_t btintel_pcie_irq_msix_handler(int irq, void *dev_id)
btintel_pcie_msix_tx_handle(data);
}
+ /* This interrupt is triggered by the firmware after updating
+ * boot_stage register and image_response register
+ */
+ if (intr_hw & BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP0)
+ btintel_pcie_msix_gp0_handler(data);
+
/*
* Before sending the interrupt the HW disables it to prevent a nested
* interrupt. This is done by writing 1 to the corresponding bit in
diff --git a/drivers/bluetooth/btmtksdio.c b/drivers/bluetooth/btmtksdio.c
index 50abefba6d04..62db31bd6592 100644
--- a/drivers/bluetooth/btmtksdio.c
+++ b/drivers/bluetooth/btmtksdio.c
@@ -1270,6 +1270,12 @@ static void btmtksdio_reset(struct hci_dev *hdev)
sdio_claim_host(bdev->func);
+ /* set drv_pmctrl if BT is closed before doing reset */
+ if (!test_bit(BTMTKSDIO_FUNC_ENABLED, &bdev->tx_state)) {
+ sdio_enable_func(bdev->func);
+ btmtksdio_drv_pmctrl(bdev);
+ }
+
sdio_writel(bdev->func, C_INT_EN_CLR, MTK_REG_CHLPCR, NULL);
skb_queue_purge(&bdev->txq);
cancel_work_sync(&bdev->txrx_work);
@@ -1285,6 +1291,12 @@ static void btmtksdio_reset(struct hci_dev *hdev)
goto err;
}
+ /* set fw_pmctrl back if BT is closed after doing reset */
+ if (!test_bit(BTMTKSDIO_FUNC_ENABLED, &bdev->tx_state)) {
+ btmtksdio_fw_pmctrl(bdev);
+ sdio_disable_func(bdev->func);
+ }
+
clear_bit(BTMTKSDIO_PATCH_ENABLED, &bdev->tx_state);
err:
sdio_release_host(bdev->func);
diff --git a/drivers/bluetooth/btmtkuart.c b/drivers/bluetooth/btmtkuart.c
index d9b90ea2ad38..27aa48ff3ac2 100644
--- a/drivers/bluetooth/btmtkuart.c
+++ b/drivers/bluetooth/btmtkuart.c
@@ -79,6 +79,7 @@ struct btmtkuart_dev {
u16 stp_dlen;
const struct btmtkuart_data *data;
+ struct hci_uart hu;
};
#define btmtkuart_is_standalone(bdev) \
@@ -368,7 +369,7 @@ static void btmtkuart_recv(struct hci_dev *hdev, const u8 *data, size_t count)
sz_left -= adv;
p_left += adv;
- bdev->rx_skb = h4_recv_buf(bdev->hdev, bdev->rx_skb, p_h4,
+ bdev->rx_skb = h4_recv_buf(&bdev->hu, bdev->rx_skb, p_h4,
sz_h4, mtk_recv_pkts,
ARRAY_SIZE(mtk_recv_pkts));
if (IS_ERR(bdev->rx_skb)) {
@@ -858,6 +859,7 @@ static int btmtkuart_probe(struct serdev_device *serdev)
}
bdev->hdev = hdev;
+ bdev->hu.hdev = hdev;
hdev->bus = HCI_UART;
hci_set_drvdata(hdev, bdev);
diff --git a/drivers/bluetooth/btnxpuart.c b/drivers/bluetooth/btnxpuart.c
index d5153fed0518..3b1e9224e965 100644
--- a/drivers/bluetooth/btnxpuart.c
+++ b/drivers/bluetooth/btnxpuart.c
@@ -212,6 +212,7 @@ struct btnxpuart_dev {
struct ps_data psdata;
struct btnxpuart_data *nxp_data;
struct reset_control *pdn;
+ struct hci_uart hu;
};
#define NXP_V1_FW_REQ_PKT 0xa5
@@ -1756,7 +1757,7 @@ static size_t btnxpuart_receive_buf(struct serdev_device *serdev,
ps_start_timer(nxpdev);
- nxpdev->rx_skb = h4_recv_buf(nxpdev->hdev, nxpdev->rx_skb, data, count,
+ nxpdev->rx_skb = h4_recv_buf(&nxpdev->hu, nxpdev->rx_skb, data, count,
nxp_recv_pkts, ARRAY_SIZE(nxp_recv_pkts));
if (IS_ERR(nxpdev->rx_skb)) {
int err = PTR_ERR(nxpdev->rx_skb);
@@ -1875,6 +1876,7 @@ static int nxp_serdev_probe(struct serdev_device *serdev)
reset_control_deassert(nxpdev->pdn);
nxpdev->hdev = hdev;
+ nxpdev->hu.hdev = hdev;
hdev->bus = HCI_UART;
hci_set_drvdata(hdev, nxpdev);
diff --git a/drivers/bluetooth/hci_ag6xx.c b/drivers/bluetooth/hci_ag6xx.c
index 2d40302409ff..94588676510f 100644
--- a/drivers/bluetooth/hci_ag6xx.c
+++ b/drivers/bluetooth/hci_ag6xx.c
@@ -105,7 +105,7 @@ static int ag6xx_recv(struct hci_uart *hu, const void *data, int count)
if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
return -EUNATCH;
- ag6xx->rx_skb = h4_recv_buf(hu->hdev, ag6xx->rx_skb, data, count,
+ ag6xx->rx_skb = h4_recv_buf(hu, ag6xx->rx_skb, data, count,
ag6xx_recv_pkts,
ARRAY_SIZE(ag6xx_recv_pkts));
if (IS_ERR(ag6xx->rx_skb)) {
diff --git a/drivers/bluetooth/hci_aml.c b/drivers/bluetooth/hci_aml.c
index 707e90f80130..b1f32c5a8a3f 100644
--- a/drivers/bluetooth/hci_aml.c
+++ b/drivers/bluetooth/hci_aml.c
@@ -650,7 +650,7 @@ static int aml_recv(struct hci_uart *hu, const void *data, int count)
struct aml_data *aml_data = hu->priv;
int err;
- aml_data->rx_skb = h4_recv_buf(hu->hdev, aml_data->rx_skb, data, count,
+ aml_data->rx_skb = h4_recv_buf(hu, aml_data->rx_skb, data, count,
aml_recv_pkts,
ARRAY_SIZE(aml_recv_pkts));
if (IS_ERR(aml_data->rx_skb)) {
diff --git a/drivers/bluetooth/hci_ath.c b/drivers/bluetooth/hci_ath.c
index dbfe34664633..8d2b5e7f0d6a 100644
--- a/drivers/bluetooth/hci_ath.c
+++ b/drivers/bluetooth/hci_ath.c
@@ -191,7 +191,7 @@ static int ath_recv(struct hci_uart *hu, const void *data, int count)
{
struct ath_struct *ath = hu->priv;
- ath->rx_skb = h4_recv_buf(hu->hdev, ath->rx_skb, data, count,
+ ath->rx_skb = h4_recv_buf(hu, ath->rx_skb, data, count,
ath_recv_pkts, ARRAY_SIZE(ath_recv_pkts));
if (IS_ERR(ath->rx_skb)) {
int err = PTR_ERR(ath->rx_skb);
diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
index f96617b85d87..fff845ed44e3 100644
--- a/drivers/bluetooth/hci_bcm.c
+++ b/drivers/bluetooth/hci_bcm.c
@@ -698,7 +698,7 @@ static int bcm_recv(struct hci_uart *hu, const void *data, int count)
if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
return -EUNATCH;
- bcm->rx_skb = h4_recv_buf(hu->hdev, bcm->rx_skb, data, count,
+ bcm->rx_skb = h4_recv_buf(hu, bcm->rx_skb, data, count,
bcm_recv_pkts, ARRAY_SIZE(bcm_recv_pkts));
if (IS_ERR(bcm->rx_skb)) {
int err = PTR_ERR(bcm->rx_skb);
diff --git a/drivers/bluetooth/hci_h4.c b/drivers/bluetooth/hci_h4.c
index 9070e31a68bf..ec017df8572c 100644
--- a/drivers/bluetooth/hci_h4.c
+++ b/drivers/bluetooth/hci_h4.c
@@ -112,7 +112,7 @@ static int h4_recv(struct hci_uart *hu, const void *data, int count)
if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
return -EUNATCH;
- h4->rx_skb = h4_recv_buf(hu->hdev, h4->rx_skb, data, count,
+ h4->rx_skb = h4_recv_buf(hu, h4->rx_skb, data, count,
h4_recv_pkts, ARRAY_SIZE(h4_recv_pkts));
if (IS_ERR(h4->rx_skb)) {
int err = PTR_ERR(h4->rx_skb);
@@ -151,12 +151,12 @@ int __exit h4_deinit(void)
return hci_uart_unregister_proto(&h4p);
}
-struct sk_buff *h4_recv_buf(struct hci_dev *hdev, struct sk_buff *skb,
+struct sk_buff *h4_recv_buf(struct hci_uart *hu, struct sk_buff *skb,
const unsigned char *buffer, int count,
const struct h4_recv_pkt *pkts, int pkts_count)
{
- struct hci_uart *hu = hci_get_drvdata(hdev);
u8 alignment = hu->alignment ? hu->alignment : 1;
+ struct hci_dev *hdev = hu->hdev;
/* Check for error from previous call */
if (IS_ERR(skb))
diff --git a/drivers/bluetooth/hci_intel.c b/drivers/bluetooth/hci_intel.c
index 9b353c3d6442..1d6e09508f1f 100644
--- a/drivers/bluetooth/hci_intel.c
+++ b/drivers/bluetooth/hci_intel.c
@@ -972,7 +972,7 @@ static int intel_recv(struct hci_uart *hu, const void *data, int count)
if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
return -EUNATCH;
- intel->rx_skb = h4_recv_buf(hu->hdev, intel->rx_skb, data, count,
+ intel->rx_skb = h4_recv_buf(hu, intel->rx_skb, data, count,
intel_recv_pkts,
ARRAY_SIZE(intel_recv_pkts));
if (IS_ERR(intel->rx_skb)) {
diff --git a/drivers/bluetooth/hci_ll.c b/drivers/bluetooth/hci_ll.c
index 7044c86325ce..6f4e25917b86 100644
--- a/drivers/bluetooth/hci_ll.c
+++ b/drivers/bluetooth/hci_ll.c
@@ -429,7 +429,7 @@ static int ll_recv(struct hci_uart *hu, const void *data, int count)
if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
return -EUNATCH;
- ll->rx_skb = h4_recv_buf(hu->hdev, ll->rx_skb, data, count,
+ ll->rx_skb = h4_recv_buf(hu, ll->rx_skb, data, count,
ll_recv_pkts, ARRAY_SIZE(ll_recv_pkts));
if (IS_ERR(ll->rx_skb)) {
int err = PTR_ERR(ll->rx_skb);
diff --git a/drivers/bluetooth/hci_mrvl.c b/drivers/bluetooth/hci_mrvl.c
index e08222395772..8767522ec4c6 100644
--- a/drivers/bluetooth/hci_mrvl.c
+++ b/drivers/bluetooth/hci_mrvl.c
@@ -264,9 +264,9 @@ static int mrvl_recv(struct hci_uart *hu, const void *data, int count)
!test_bit(STATE_FW_LOADED, &mrvl->flags))
return count;
- mrvl->rx_skb = h4_recv_buf(hu->hdev, mrvl->rx_skb, data, count,
- mrvl_recv_pkts,
- ARRAY_SIZE(mrvl_recv_pkts));
+ mrvl->rx_skb = h4_recv_buf(hu, mrvl->rx_skb, data, count,
+ mrvl_recv_pkts,
+ ARRAY_SIZE(mrvl_recv_pkts));
if (IS_ERR(mrvl->rx_skb)) {
int err = PTR_ERR(mrvl->rx_skb);
bt_dev_err(hu->hdev, "Frame reassembly failed (%d)", err);
diff --git a/drivers/bluetooth/hci_nokia.c b/drivers/bluetooth/hci_nokia.c
index cd7575c20f65..1e65b541f8ad 100644
--- a/drivers/bluetooth/hci_nokia.c
+++ b/drivers/bluetooth/hci_nokia.c
@@ -624,8 +624,8 @@ static int nokia_recv(struct hci_uart *hu, const void *data, int count)
if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
return -EUNATCH;
- btdev->rx_skb = h4_recv_buf(hu->hdev, btdev->rx_skb, data, count,
- nokia_recv_pkts, ARRAY_SIZE(nokia_recv_pkts));
+ btdev->rx_skb = h4_recv_buf(hu, btdev->rx_skb, data, count,
+ nokia_recv_pkts, ARRAY_SIZE(nokia_recv_pkts));
if (IS_ERR(btdev->rx_skb)) {
err = PTR_ERR(btdev->rx_skb);
dev_err(dev, "Frame reassembly failed (%d)", err);
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index 4cff4d9be313..888176b0faa9 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -1277,7 +1277,7 @@ static int qca_recv(struct hci_uart *hu, const void *data, int count)
if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
return -EUNATCH;
- qca->rx_skb = h4_recv_buf(hu->hdev, qca->rx_skb, data, count,
+ qca->rx_skb = h4_recv_buf(hu, qca->rx_skb, data, count,
qca_recv_pkts, ARRAY_SIZE(qca_recv_pkts));
if (IS_ERR(qca->rx_skb)) {
int err = PTR_ERR(qca->rx_skb);
diff --git a/drivers/bluetooth/hci_uart.h b/drivers/bluetooth/hci_uart.h
index cbbe79b241ce..48ac7ca9334e 100644
--- a/drivers/bluetooth/hci_uart.h
+++ b/drivers/bluetooth/hci_uart.h
@@ -162,7 +162,7 @@ struct h4_recv_pkt {
int h4_init(void);
int h4_deinit(void);
-struct sk_buff *h4_recv_buf(struct hci_dev *hdev, struct sk_buff *skb,
+struct sk_buff *h4_recv_buf(struct hci_uart *hu, struct sk_buff *skb,
const unsigned char *buffer, int count,
const struct h4_recv_pkt *pkts, int pkts_count);
#endif
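
Every `h4_recv_buf()` caller above moves from `hu->hdev` to `hu` itself, which lets the helper read `hu->alignment` directly instead of round-tripping through `hci_get_drvdata()`; drivers that are not real UART line disciplines (bpa10x, btmtkuart, btnxpuart) satisfy the new contract by embedding a `struct hci_uart` and wiring up its `hdev`. A sketch of the resulting receive path, with the `example_*` names invented:

  static int example_recv(struct hci_uart *hu, const void *data, int count)
  {
          struct example_dev *dev = hu->priv;

          dev->rx_skb = h4_recv_buf(hu, dev->rx_skb, data, count,
                                    example_pkts, ARRAY_SIZE(example_pkts));
          if (IS_ERR(dev->rx_skb)) {
                  int err = PTR_ERR(dev->rx_skb);

                  bt_dev_err(hu->hdev, "Frame reassembly failed (%d)", err);
                  dev->rx_skb = NULL;
                  return err;
          }

          return count;
  }
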
diff --git a/drivers/comedi/comedi_buf.c b/drivers/comedi/comedi_buf.c
index 002c0e76baff..c7c262a2d8ca 100644
--- a/drivers/comedi/comedi_buf.c
+++ b/drivers/comedi/comedi_buf.c
@@ -317,7 +317,7 @@ static unsigned int comedi_buf_munge(struct comedi_subdevice *s,
unsigned int count = 0;
const unsigned int num_sample_bytes = comedi_bytes_per_sample(s);
- if (!s->munge || (async->cmd.flags & CMDF_RAWDATA)) {
+ if (!s->munge || (async->cmd.flags & CMDF_RAWDATA) || async->cmd.chanlist_len == 0) {
async->munge_count += num_bytes;
return num_bytes;
}
diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
index 298e92d8cc03..b44f0f7a5ba1 100644
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -1614,7 +1614,11 @@ static int amd_pstate_cpu_offline(struct cpufreq_policy *policy)
* min_perf value across kexec reboots. If this CPU is just onlined normally after this, the
* limits, epp and desired perf will get reset to the cached values in cpudata struct
*/
- return amd_pstate_update_perf(policy, perf.bios_min_perf, 0U, 0U, 0U, false);
+ return amd_pstate_update_perf(policy, perf.bios_min_perf,
+ FIELD_GET(AMD_CPPC_DES_PERF_MASK, cpudata->cppc_req_cached),
+ FIELD_GET(AMD_CPPC_MAX_PERF_MASK, cpudata->cppc_req_cached),
+ FIELD_GET(AMD_CPPC_EPP_PERF_MASK, cpudata->cppc_req_cached),
+ false);
}
static int amd_pstate_suspend(struct cpufreq_policy *policy)
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index 4d9aa5ce31f0..23239b0c04f9 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -188,20 +188,17 @@ again:
*
* This can deal with workloads that have long pauses interspersed
* with sporadic activity with a bunch of short pauses.
+ *
+ * However, if the number of remaining samples is too small to exclude
+ * any more outliers, allow the deepest available idle state to be
+ * selected because there are systems where the time spent by CPUs in
+ * deep idle states is correlated to the maximum frequency the CPUs
+ * can get to. On those systems, shallow idle states should be avoided
+ * unless there is a clear indication that the given CPU is most likely
+ * going to be woken up shortly.
*/
- if (divisor * 4 <= INTERVALS * 3) {
- /*
- * If there are sufficiently many data points still under
- * consideration after the outliers have been eliminated,
- * returning without a prediction would be a mistake because it
- * is likely that the next interval will not exceed the current
- * maximum, so return the latter in that case.
- */
- if (divisor >= INTERVALS / 2)
- return max;
-
+ if (divisor * 4 <= INTERVALS * 3)
return UINT_MAX;
- }
/* Update the thresholds for the next round. */
if (avg - min > max - avg)
@@ -321,10 +318,13 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
/*
* Use a physical idle state, not busy polling, unless a timer
- * is going to trigger soon enough.
+ * is going to trigger soon enough or the exit latency of the
+ * idle state in question is greater than the predicted idle
+ * duration.
*/
if ((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) &&
- s->target_residency_ns <= data->next_timer_ns) {
+ s->target_residency_ns <= data->next_timer_ns &&
+ s->exit_latency_ns <= predicted_ns) {
predicted_ns = s->target_residency_ns;
idx = i;
break;
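
INTERVALS is 8 in this governor, so the surviving check `divisor * 4 <= INTERVALS * 3` reduces to `divisor <= 6`: once two or more of the eight recent intervals have been discarded as outliers, the function now gives up and returns UINT_MAX (letting the deepest available state win) instead of ever returning `max`. The arithmetic, as a sketch:

  #define INTERVALS 8	/* recent idle intervals the governor keeps */

  /* true once at least 2 of the 8 samples were dropped as outliers */
  static bool give_up_prediction(unsigned int divisor)
  {
          return divisor * 4 <= INTERVALS * 3;	/* divisor <= 6 */
  }
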
diff --git a/drivers/crypto/aspeed/aspeed-acry.c b/drivers/crypto/aspeed/aspeed-acry.c
index 8d1c79aaca07..5993bcba9716 100644
--- a/drivers/crypto/aspeed/aspeed-acry.c
+++ b/drivers/crypto/aspeed/aspeed-acry.c
@@ -787,7 +787,6 @@ static int aspeed_acry_probe(struct platform_device *pdev)
err_engine_rsa_start:
crypto_engine_exit(acry_dev->crypt_engine_rsa);
clk_exit:
- clk_disable_unprepare(acry_dev->clk);
return rc;
}
@@ -799,7 +798,6 @@ static void aspeed_acry_remove(struct platform_device *pdev)
aspeed_acry_unregister(acry_dev);
crypto_engine_exit(acry_dev->crypt_engine_rsa);
tasklet_kill(&acry_dev->done_task);
- clk_disable_unprepare(acry_dev->clk);
}
MODULE_DEVICE_TABLE(of, aspeed_acry_of_matches);
diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c
index 3f78c56b58dc..39e6f93dc310 100644
--- a/drivers/dma-buf/dma-fence.c
+++ b/drivers/dma-buf/dma-fence.c
@@ -1141,7 +1141,7 @@ const char __rcu *dma_fence_timeline_name(struct dma_fence *fence)
"RCU protection is required for safe access to returned string");
if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
- return fence->ops->get_driver_name(fence);
+ return fence->ops->get_timeline_name(fence);
else
return "signaled-timeline";
}
diff --git a/drivers/dpll/dpll_netlink.c b/drivers/dpll/dpll_netlink.c
index 74c1f0ca95f2..a4153bcb6dcf 100644
--- a/drivers/dpll/dpll_netlink.c
+++ b/drivers/dpll/dpll_netlink.c
@@ -1559,16 +1559,18 @@ int dpll_nl_pin_id_get_doit(struct sk_buff *skb, struct genl_info *info)
return -EMSGSIZE;
}
pin = dpll_pin_find_from_nlattr(info);
- if (!IS_ERR(pin)) {
- if (!dpll_pin_available(pin)) {
- nlmsg_free(msg);
- return -ENODEV;
- }
- ret = dpll_msg_add_pin_handle(msg, pin);
- if (ret) {
- nlmsg_free(msg);
- return ret;
- }
+ if (IS_ERR(pin)) {
+ nlmsg_free(msg);
+ return PTR_ERR(pin);
+ }
+ if (!dpll_pin_available(pin)) {
+ nlmsg_free(msg);
+ return -ENODEV;
+ }
+ ret = dpll_msg_add_pin_handle(msg, pin);
+ if (ret) {
+ nlmsg_free(msg);
+ return ret;
}
genlmsg_end(msg, hdr);
@@ -1735,12 +1737,14 @@ int dpll_nl_device_id_get_doit(struct sk_buff *skb, struct genl_info *info)
}
dpll = dpll_device_find_from_nlattr(info);
- if (!IS_ERR(dpll)) {
- ret = dpll_msg_add_dev_handle(msg, dpll);
- if (ret) {
- nlmsg_free(msg);
- return ret;
- }
+ if (IS_ERR(dpll)) {
+ nlmsg_free(msg);
+ return PTR_ERR(dpll);
+ }
+ ret = dpll_msg_add_dev_handle(msg, dpll);
+ if (ret) {
+ nlmsg_free(msg);
+ return ret;
}
genlmsg_end(msg, hdr);
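
Both doit handlers are flattened from a nested success path into early returns; the behavioural fix is that a failed lookup now frees the pre-allocated reply and propagates `PTR_ERR()` instead of falling through and ending a message with no handle in it. The shape, as a sketch (the wrapper signature is illustrative):

  static int example_pin_id_get(struct sk_buff *msg, void *hdr,
                                struct genl_info *info)
  {
          struct dpll_pin *pin;
          int ret;

          pin = dpll_pin_find_from_nlattr(info);
          if (IS_ERR(pin)) {
                  nlmsg_free(msg);	/* old code fell through to genlmsg_end() */
                  return PTR_ERR(pin);
          }

          ret = dpll_msg_add_pin_handle(msg, pin);
          if (ret) {
                  nlmsg_free(msg);
                  return ret;
          }

          genlmsg_end(msg, hdr);
          return 0;
  }
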
diff --git a/drivers/dpll/zl3073x/dpll.c b/drivers/dpll/zl3073x/dpll.c
index 93dc93eec79e..f93f9a458324 100644
--- a/drivers/dpll/zl3073x/dpll.c
+++ b/drivers/dpll/zl3073x/dpll.c
@@ -1904,7 +1904,7 @@ zl3073x_dpll_pin_is_registrable(struct zl3073x_dpll *zldpll,
}
is_diff = zl3073x_out_is_diff(zldev, out);
- is_enabled = zl3073x_out_is_enabled(zldev, out);
+ is_enabled = zl3073x_output_pin_is_enabled(zldev, index);
}
/* Skip N-pin if the corresponding input/output is differential */
diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
index dd3656a0c1ff..c65f491c54d0 100644
--- a/drivers/firewire/core-transaction.c
+++ b/drivers/firewire/core-transaction.c
@@ -269,7 +269,7 @@ static void fw_fill_request(struct fw_packet *packet, int tcode, int tlabel,
}
static int allocate_tlabel(struct fw_card *card)
-__must_hold(&card->transactions_lock)
+__must_hold(&card->transactions.lock)
{
int tlabel;
diff --git a/drivers/firewire/init_ohci1394_dma.c b/drivers/firewire/init_ohci1394_dma.c
index 48b879e9e831..121f0c2f6401 100644
--- a/drivers/firewire/init_ohci1394_dma.c
+++ b/drivers/firewire/init_ohci1394_dma.c
@@ -167,6 +167,7 @@ static inline void __init init_ohci1394_initialize(struct ohci *ohci)
/**
* init_ohci1394_wait_for_busresets - wait until bus resets are completed
+ * @ohci: Pointer to the OHCI-1394 controller structure
*
* OHCI1394 initialization itself and any device going on- or offline
* and any cable issue cause an IEEE1394 bus reset. The OHCI1394 spec
@@ -189,6 +190,8 @@ static inline void __init init_ohci1394_wait_for_busresets(struct ohci *ohci)
/**
* init_ohci1394_enable_physical_dma - Enable physical DMA for remote debugging
+ * @ohci: Pointer to the OHCI-1394 controller structure
+ *
* This enables remote DMA access over IEEE1394 from every host for the low
* 4GB of address space. DMA accesses above 4GB are not available currently.
*/
@@ -201,6 +204,8 @@ static inline void __init init_ohci1394_enable_physical_dma(struct ohci *ohci)
/**
* init_ohci1394_reset_and_init_dma - init controller and enable DMA
+ * @ohci: Pointer to the OHCI-1394 controller structure
+ *
* This initializes the given controller and enables physical DMA engine in it.
*/
static inline void __init init_ohci1394_reset_and_init_dma(struct ohci *ohci)
@@ -230,6 +235,10 @@ static inline void __init init_ohci1394_reset_and_init_dma(struct ohci *ohci)
/**
* init_ohci1394_controller - Map the registers of the controller and init DMA
+ * @num: PCI bus number
+ * @slot: PCI device number
+ * @func: PCI function number
+ *
* This maps the registers of the specified controller and initializes it
*/
static inline void __init init_ohci1394_controller(int num, int slot, int func)
@@ -284,6 +293,7 @@ void __init init_ohci1394_dma_on_all_controllers(void)
/**
* setup_ohci1394_dma - enables early OHCI1394 DMA initialization
+ * @opt: Kernel command line parameter string
*/
static int __init setup_ohci1394_dma(char *opt)
{
diff --git a/drivers/firmware/arm_ffa/driver.c b/drivers/firmware/arm_ffa/driver.c
index 65bf1685350a..c72ee4756585 100644
--- a/drivers/firmware/arm_ffa/driver.c
+++ b/drivers/firmware/arm_ffa/driver.c
@@ -649,6 +649,26 @@ static u16 ffa_memory_attributes_get(u32 func_id)
return FFA_MEM_NORMAL | FFA_MEM_WRITE_BACK | FFA_MEM_INNER_SHAREABLE;
}
+static void ffa_emad_impdef_value_init(u32 version, void *dst, void *src)
+{
+ struct ffa_mem_region_attributes *ep_mem_access;
+
+ if (FFA_EMAD_HAS_IMPDEF_FIELD(version))
+ memcpy(dst, src, sizeof(ep_mem_access->impdef_val));
+}
+
+static void
+ffa_mem_region_additional_setup(u32 version, struct ffa_mem_region *mem_region)
+{
+ if (!FFA_MEM_REGION_HAS_EP_MEM_OFFSET(version)) {
+ mem_region->ep_mem_size = 0;
+ } else {
+ mem_region->ep_mem_size = ffa_emad_size_get(version);
+ mem_region->ep_mem_offset = sizeof(*mem_region);
+ memset(mem_region->reserved, 0, 12);
+ }
+}
+
static int
ffa_setup_and_transmit(u32 func_id, void *buffer, u32 max_fragsize,
struct ffa_mem_ops_args *args)
@@ -667,27 +687,24 @@ ffa_setup_and_transmit(u32 func_id, void *buffer, u32 max_fragsize,
mem_region->flags = args->flags;
mem_region->sender_id = drv_info->vm_id;
mem_region->attributes = ffa_memory_attributes_get(func_id);
- ep_mem_access = buffer +
- ffa_mem_desc_offset(buffer, 0, drv_info->version);
composite_offset = ffa_mem_desc_offset(buffer, args->nattrs,
drv_info->version);
- for (idx = 0; idx < args->nattrs; idx++, ep_mem_access++) {
+ for (idx = 0; idx < args->nattrs; idx++) {
+ ep_mem_access = buffer +
+ ffa_mem_desc_offset(buffer, idx, drv_info->version);
ep_mem_access->receiver = args->attrs[idx].receiver;
ep_mem_access->attrs = args->attrs[idx].attrs;
ep_mem_access->composite_off = composite_offset;
ep_mem_access->flag = 0;
ep_mem_access->reserved = 0;
+ ffa_emad_impdef_value_init(drv_info->version,
+ ep_mem_access->impdef_val,
+ args->attrs[idx].impdef_val);
}
mem_region->handle = 0;
mem_region->ep_count = args->nattrs;
- if (drv_info->version <= FFA_VERSION_1_0) {
- mem_region->ep_mem_size = 0;
- } else {
- mem_region->ep_mem_size = sizeof(*ep_mem_access);
- mem_region->ep_mem_offset = sizeof(*mem_region);
- memset(mem_region->reserved, 0, 12);
- }
+ ffa_mem_region_additional_setup(drv_info->version, mem_region);
composite = buffer + composite_offset;
composite->total_pg_cnt = ffa_get_num_pages_sg(args->sg);
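
Recomputing `ep_mem_access` through `ffa_mem_desc_offset()` on each iteration matters because the endpoint descriptor stride depends on the negotiated version: only versions with the implementation-defined field carry `impdef_val`, so a flat `ep_mem_access++` walks with the wrong stride on the other ABI. A sketch of the version-sensitive stride; the `offsetof()` branch assumes `impdef_val` is the trailing, version-gated member:

  static size_t emad_stride(u32 version)
  {
          if (FFA_EMAD_HAS_IMPDEF_FIELD(version))
                  return sizeof(struct ffa_mem_region_attributes);

          /* assumed layout: everything up to the impdef field */
          return offsetof(struct ffa_mem_region_attributes, impdef_val);
  }
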
diff --git a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h
index 07b9e629276d..7c35c95fddba 100644
--- a/drivers/firmware/arm_scmi/common.h
+++ b/drivers/firmware/arm_scmi/common.h
@@ -309,16 +309,36 @@ enum debug_counters {
SCMI_DEBUG_COUNTERS_LAST
};
-static inline void scmi_inc_count(atomic_t *arr, int stat)
+/**
+ * struct scmi_debug_info - Debug common info
+ * @top_dentry: A reference to the top debugfs dentry
+ * @name: Name of this SCMI instance
+ * @type: Type of this SCMI instance
+ * @is_atomic: Flag to state if the transport of this instance is atomic
+ * @counters: An array of atomic_t's used for tracking statistics (if enabled)
+ */
+struct scmi_debug_info {
+ struct dentry *top_dentry;
+ const char *name;
+ const char *type;
+ bool is_atomic;
+ atomic_t counters[SCMI_DEBUG_COUNTERS_LAST];
+};
+
+static inline void scmi_inc_count(struct scmi_debug_info *dbg, int stat)
{
- if (IS_ENABLED(CONFIG_ARM_SCMI_DEBUG_COUNTERS))
- atomic_inc(&arr[stat]);
+ if (IS_ENABLED(CONFIG_ARM_SCMI_DEBUG_COUNTERS)) {
+ if (dbg)
+ atomic_inc(&dbg->counters[stat]);
+ }
}
-static inline void scmi_dec_count(atomic_t *arr, int stat)
+static inline void scmi_dec_count(struct scmi_debug_info *dbg, int stat)
{
- if (IS_ENABLED(CONFIG_ARM_SCMI_DEBUG_COUNTERS))
- atomic_dec(&arr[stat]);
+ if (IS_ENABLED(CONFIG_ARM_SCMI_DEBUG_COUNTERS)) {
+ if (dbg)
+ atomic_dec(&dbg->counters[stat]);
+ }
}
enum scmi_bad_msg {
diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
index bd56a877fdfc..5caa9191a8d1 100644
--- a/drivers/firmware/arm_scmi/driver.c
+++ b/drivers/firmware/arm_scmi/driver.c
@@ -116,22 +116,6 @@ struct scmi_protocol_instance {
#define ph_to_pi(h) container_of(h, struct scmi_protocol_instance, ph)
/**
- * struct scmi_debug_info - Debug common info
- * @top_dentry: A reference to the top debugfs dentry
- * @name: Name of this SCMI instance
- * @type: Type of this SCMI instance
- * @is_atomic: Flag to state if the transport of this instance is atomic
- * @counters: An array of atomic_c's used for tracking statistics (if enabled)
- */
-struct scmi_debug_info {
- struct dentry *top_dentry;
- const char *name;
- const char *type;
- bool is_atomic;
- atomic_t counters[SCMI_DEBUG_COUNTERS_LAST];
-};
-
-/**
* struct scmi_info - Structure representing a SCMI instance
*
* @id: A sequence number starting from zero identifying this instance
@@ -610,7 +594,7 @@ scmi_xfer_inflight_register_unlocked(struct scmi_xfer *xfer,
/* Set in-flight */
set_bit(xfer->hdr.seq, minfo->xfer_alloc_table);
hash_add(minfo->pending_xfers, &xfer->node, xfer->hdr.seq);
- scmi_inc_count(info->dbg->counters, XFERS_INFLIGHT);
+ scmi_inc_count(info->dbg, XFERS_INFLIGHT);
xfer->pending = true;
}
@@ -819,8 +803,9 @@ __scmi_xfer_put(struct scmi_xfers_info *minfo, struct scmi_xfer *xfer)
hash_del(&xfer->node);
xfer->pending = false;
- scmi_dec_count(info->dbg->counters, XFERS_INFLIGHT);
+ scmi_dec_count(info->dbg, XFERS_INFLIGHT);
}
+ xfer->flags = 0;
hlist_add_head(&xfer->node, &minfo->free_xfers);
}
spin_unlock_irqrestore(&minfo->xfer_lock, flags);
@@ -839,8 +824,6 @@ void scmi_xfer_raw_put(const struct scmi_handle *handle, struct scmi_xfer *xfer)
{
struct scmi_info *info = handle_to_scmi_info(handle);
- xfer->flags &= ~SCMI_XFER_FLAG_IS_RAW;
- xfer->flags &= ~SCMI_XFER_FLAG_CHAN_SET;
return __scmi_xfer_put(&info->tx_minfo, xfer);
}
@@ -1034,7 +1017,7 @@ scmi_xfer_command_acquire(struct scmi_chan_info *cinfo, u32 msg_hdr)
spin_unlock_irqrestore(&minfo->xfer_lock, flags);
scmi_bad_message_trace(cinfo, msg_hdr, MSG_UNEXPECTED);
- scmi_inc_count(info->dbg->counters, ERR_MSG_UNEXPECTED);
+ scmi_inc_count(info->dbg, ERR_MSG_UNEXPECTED);
return xfer;
}
@@ -1062,7 +1045,7 @@ scmi_xfer_command_acquire(struct scmi_chan_info *cinfo, u32 msg_hdr)
msg_type, xfer_id, msg_hdr, xfer->state);
scmi_bad_message_trace(cinfo, msg_hdr, MSG_INVALID);
- scmi_inc_count(info->dbg->counters, ERR_MSG_INVALID);
+ scmi_inc_count(info->dbg, ERR_MSG_INVALID);
/* On error the refcount incremented above has to be dropped */
__scmi_xfer_put(minfo, xfer);
@@ -1107,7 +1090,7 @@ static void scmi_handle_notification(struct scmi_chan_info *cinfo,
PTR_ERR(xfer));
scmi_bad_message_trace(cinfo, msg_hdr, MSG_NOMEM);
- scmi_inc_count(info->dbg->counters, ERR_MSG_NOMEM);
+ scmi_inc_count(info->dbg, ERR_MSG_NOMEM);
scmi_clear_channel(info, cinfo);
return;
@@ -1123,7 +1106,7 @@ static void scmi_handle_notification(struct scmi_chan_info *cinfo,
trace_scmi_msg_dump(info->id, cinfo->id, xfer->hdr.protocol_id,
xfer->hdr.id, "NOTI", xfer->hdr.seq,
xfer->hdr.status, xfer->rx.buf, xfer->rx.len);
- scmi_inc_count(info->dbg->counters, NOTIFICATION_OK);
+ scmi_inc_count(info->dbg, NOTIFICATION_OK);
scmi_notify(cinfo->handle, xfer->hdr.protocol_id,
xfer->hdr.id, xfer->rx.buf, xfer->rx.len, ts);
@@ -1183,10 +1166,10 @@ static void scmi_handle_response(struct scmi_chan_info *cinfo,
if (xfer->hdr.type == MSG_TYPE_DELAYED_RESP) {
scmi_clear_channel(info, cinfo);
complete(xfer->async_done);
- scmi_inc_count(info->dbg->counters, DELAYED_RESPONSE_OK);
+ scmi_inc_count(info->dbg, DELAYED_RESPONSE_OK);
} else {
complete(&xfer->done);
- scmi_inc_count(info->dbg->counters, RESPONSE_OK);
+ scmi_inc_count(info->dbg, RESPONSE_OK);
}
if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) {
@@ -1296,7 +1279,7 @@ static int scmi_wait_for_reply(struct device *dev, const struct scmi_desc *desc,
"timed out in resp(caller: %pS) - polling\n",
(void *)_RET_IP_);
ret = -ETIMEDOUT;
- scmi_inc_count(info->dbg->counters, XFERS_RESPONSE_POLLED_TIMEOUT);
+ scmi_inc_count(info->dbg, XFERS_RESPONSE_POLLED_TIMEOUT);
}
}
@@ -1321,7 +1304,7 @@ static int scmi_wait_for_reply(struct device *dev, const struct scmi_desc *desc,
"RESP" : "resp",
xfer->hdr.seq, xfer->hdr.status,
xfer->rx.buf, xfer->rx.len);
- scmi_inc_count(info->dbg->counters, RESPONSE_POLLED_OK);
+ scmi_inc_count(info->dbg, RESPONSE_POLLED_OK);
if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) {
scmi_raw_message_report(info->raw, xfer,
@@ -1336,7 +1319,7 @@ static int scmi_wait_for_reply(struct device *dev, const struct scmi_desc *desc,
dev_err(dev, "timed out in resp(caller: %pS)\n",
(void *)_RET_IP_);
ret = -ETIMEDOUT;
- scmi_inc_count(info->dbg->counters, XFERS_RESPONSE_TIMEOUT);
+ scmi_inc_count(info->dbg, XFERS_RESPONSE_TIMEOUT);
}
}
@@ -1420,13 +1403,13 @@ static int do_xfer(const struct scmi_protocol_handle *ph,
!is_transport_polling_capable(info->desc)) {
dev_warn_once(dev,
"Polling mode is not supported by transport.\n");
- scmi_inc_count(info->dbg->counters, SENT_FAIL_POLLING_UNSUPPORTED);
+ scmi_inc_count(info->dbg, SENT_FAIL_POLLING_UNSUPPORTED);
return -EINVAL;
}
cinfo = idr_find(&info->tx_idr, pi->proto->id);
if (unlikely(!cinfo)) {
- scmi_inc_count(info->dbg->counters, SENT_FAIL_CHANNEL_NOT_FOUND);
+ scmi_inc_count(info->dbg, SENT_FAIL_CHANNEL_NOT_FOUND);
return -EINVAL;
}
/* True ONLY if also supported by transport. */
@@ -1461,19 +1444,19 @@ static int do_xfer(const struct scmi_protocol_handle *ph,
ret = info->desc->ops->send_message(cinfo, xfer);
if (ret < 0) {
dev_dbg(dev, "Failed to send message %d\n", ret);
- scmi_inc_count(info->dbg->counters, SENT_FAIL);
+ scmi_inc_count(info->dbg, SENT_FAIL);
return ret;
}
trace_scmi_msg_dump(info->id, cinfo->id, xfer->hdr.protocol_id,
xfer->hdr.id, "CMND", xfer->hdr.seq,
xfer->hdr.status, xfer->tx.buf, xfer->tx.len);
- scmi_inc_count(info->dbg->counters, SENT_OK);
+ scmi_inc_count(info->dbg, SENT_OK);
ret = scmi_wait_for_message_response(cinfo, xfer);
if (!ret && xfer->hdr.status) {
ret = scmi_to_linux_errno(xfer->hdr.status);
- scmi_inc_count(info->dbg->counters, ERR_PROTOCOL);
+ scmi_inc_count(info->dbg, ERR_PROTOCOL);
}
if (info->desc->ops->mark_txdone)
@@ -3044,9 +3027,6 @@ static int scmi_debugfs_raw_mode_setup(struct scmi_info *info)
u8 channels[SCMI_MAX_CHANNELS] = {};
DECLARE_BITMAP(protos, SCMI_MAX_CHANNELS) = {};
- if (!info->dbg)
- return -EINVAL;
-
/* Enumerate all channels to collect their ids */
idr_for_each_entry(&info->tx_idr, cinfo, id) {
/*
@@ -3218,7 +3198,7 @@ static int scmi_probe(struct platform_device *pdev)
if (!info->dbg)
dev_warn(dev, "Failed to setup SCMI debugfs.\n");
- if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) {
+ if (info->dbg && IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) {
ret = scmi_debugfs_raw_mode_setup(info);
if (!coex) {
if (ret)
@@ -3423,6 +3403,9 @@ int scmi_inflight_count(const struct scmi_handle *handle)
if (IS_ENABLED(CONFIG_ARM_SCMI_DEBUG_COUNTERS)) {
struct scmi_info *info = handle_to_scmi_info(handle);
+ if (!info->dbg)
+ return 0;
+
return atomic_read(&info->dbg->counters[XFERS_INFLIGHT]);
} else {
return 0;
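
Passing the whole `scmi_debug_info` pointer (which is NULL whenever debugfs setup failed) into the counter helpers moves the NULL check into one place, so the many call sites shrink to bare one-liners and the remaining direct use of `info->dbg->counters` grows an explicit guard. Typical usage after the change:

  /* safe even when debugfs setup failed and info->dbg == NULL */
  scmi_inc_count(info->dbg, SENT_OK);
  scmi_dec_count(info->dbg, XFERS_INFLIGHT);
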
diff --git a/drivers/gpio/gpio-104-idio-16.c b/drivers/gpio/gpio-104-idio-16.c
index ffe7e1cb6b23..fe5c10cd5c32 100644
--- a/drivers/gpio/gpio-104-idio-16.c
+++ b/drivers/gpio/gpio-104-idio-16.c
@@ -59,6 +59,7 @@ static const struct regmap_config idio_16_regmap_config = {
.reg_stride = 1,
.val_bits = 8,
.io_port = true,
+ .max_register = 0x5,
.wr_table = &idio_16_wr_table,
.rd_table = &idio_16_rd_table,
.volatile_table = &idio_16_rd_table,
diff --git a/drivers/gpio/gpio-idio-16.c b/drivers/gpio/gpio-idio-16.c
index 0103be977c66..4fbae6f6a497 100644
--- a/drivers/gpio/gpio-idio-16.c
+++ b/drivers/gpio/gpio-idio-16.c
@@ -6,6 +6,7 @@
#define DEFAULT_SYMBOL_NAMESPACE "GPIO_IDIO_16"
+#include <linux/bitmap.h>
#include <linux/bits.h>
#include <linux/device.h>
#include <linux/err.h>
@@ -107,6 +108,7 @@ int devm_idio_16_regmap_register(struct device *const dev,
struct idio_16_data *data;
struct regmap_irq_chip *chip;
struct regmap_irq_chip_data *chip_data;
+ DECLARE_BITMAP(fixed_direction_output, IDIO_16_NGPIO);
if (!config->parent)
return -EINVAL;
@@ -164,6 +166,9 @@ int devm_idio_16_regmap_register(struct device *const dev,
gpio_config.irq_domain = regmap_irq_get_domain(chip_data);
gpio_config.reg_mask_xlate = idio_16_reg_mask_xlate;
+ bitmap_from_u64(fixed_direction_output, GENMASK_U64(15, 0));
+ gpio_config.fixed_direction_output = fixed_direction_output;
+
return PTR_ERR_OR_ZERO(devm_gpio_regmap_register(dev, &gpio_config));
}
EXPORT_SYMBOL_GPL(devm_idio_16_regmap_register);
diff --git a/drivers/gpio/gpio-ljca.c b/drivers/gpio/gpio-ljca.c
index 3b4f8830c741..f32d1d237795 100644
--- a/drivers/gpio/gpio-ljca.c
+++ b/drivers/gpio/gpio-ljca.c
@@ -286,22 +286,14 @@ static void ljca_gpio_event_cb(void *context, u8 cmd, const void *evt_data,
{
const struct ljca_gpio_packet *packet = evt_data;
struct ljca_gpio_dev *ljca_gpio = context;
- int i, irq;
+ int i;
if (cmd != LJCA_GPIO_INT_EVENT)
return;
for (i = 0; i < packet->num; i++) {
- irq = irq_find_mapping(ljca_gpio->gc.irq.domain,
- packet->item[i].index);
- if (!irq) {
- dev_err(ljca_gpio->gc.parent,
- "gpio_id %u does not mapped to IRQ yet\n",
- packet->item[i].index);
- return;
- }
-
- generic_handle_domain_irq(ljca_gpio->gc.irq.domain, irq);
+ generic_handle_domain_irq(ljca_gpio->gc.irq.domain,
+ packet->item[i].index);
set_bit(packet->item[i].index, ljca_gpio->reenable_irqs);
}
diff --git a/drivers/gpio/gpio-pci-idio-16.c b/drivers/gpio/gpio-pci-idio-16.c
index 476cea1b5ed7..9d28ca8e1d6f 100644
--- a/drivers/gpio/gpio-pci-idio-16.c
+++ b/drivers/gpio/gpio-pci-idio-16.c
@@ -41,6 +41,7 @@ static const struct regmap_config idio_16_regmap_config = {
.reg_stride = 1,
.val_bits = 8,
.io_port = true,
+ .max_register = 0x7,
.wr_table = &idio_16_wr_table,
.rd_table = &idio_16_rd_table,
.volatile_table = &idio_16_rd_table,
diff --git a/drivers/gpio/gpio-regmap.c b/drivers/gpio/gpio-regmap.c
index ab9e4077fa60..f4267af00027 100644
--- a/drivers/gpio/gpio-regmap.c
+++ b/drivers/gpio/gpio-regmap.c
@@ -31,6 +31,7 @@ struct gpio_regmap {
unsigned int reg_clr_base;
unsigned int reg_dir_in_base;
unsigned int reg_dir_out_base;
+ unsigned long *fixed_direction_output;
#ifdef CONFIG_REGMAP_IRQ
int regmap_irq_line;
@@ -134,6 +135,13 @@ static int gpio_regmap_get_direction(struct gpio_chip *chip,
unsigned int base, val, reg, mask;
int invert, ret;
+ if (gpio->fixed_direction_output) {
+ if (test_bit(offset, gpio->fixed_direction_output))
+ return GPIO_LINE_DIRECTION_OUT;
+ else
+ return GPIO_LINE_DIRECTION_IN;
+ }
+
if (gpio->reg_dat_base && !gpio->reg_set_base)
return GPIO_LINE_DIRECTION_IN;
if (gpio->reg_set_base && !gpio->reg_dat_base)
@@ -284,6 +292,17 @@ struct gpio_regmap *gpio_regmap_register(const struct gpio_regmap_config *config
goto err_free_gpio;
}
+ if (config->fixed_direction_output) {
+ gpio->fixed_direction_output = bitmap_alloc(chip->ngpio,
+ GFP_KERNEL);
+ if (!gpio->fixed_direction_output) {
+ ret = -ENOMEM;
+ goto err_free_gpio;
+ }
+ bitmap_copy(gpio->fixed_direction_output,
+ config->fixed_direction_output, chip->ngpio);
+ }
+
/* if not set, assume there is only one register */
gpio->ngpio_per_reg = config->ngpio_per_reg;
if (!gpio->ngpio_per_reg)
@@ -300,7 +319,7 @@ struct gpio_regmap *gpio_regmap_register(const struct gpio_regmap_config *config
ret = gpiochip_add_data(chip, gpio);
if (ret < 0)
- goto err_free_gpio;
+ goto err_free_bitmap;
#ifdef CONFIG_REGMAP_IRQ
if (config->regmap_irq_chip) {
@@ -309,7 +328,7 @@ struct gpio_regmap *gpio_regmap_register(const struct gpio_regmap_config *config
config->regmap_irq_line, config->regmap_irq_flags,
0, config->regmap_irq_chip, &gpio->irq_chip_data);
if (ret)
- goto err_free_gpio;
+ goto err_free_bitmap;
irq_domain = regmap_irq_get_domain(gpio->irq_chip_data);
} else
@@ -326,6 +345,8 @@ struct gpio_regmap *gpio_regmap_register(const struct gpio_regmap_config *config
err_remove_gpiochip:
gpiochip_remove(chip);
+err_free_bitmap:
+ bitmap_free(gpio->fixed_direction_output);
err_free_gpio:
kfree(gpio);
return ERR_PTR(ret);
@@ -344,6 +365,7 @@ void gpio_regmap_unregister(struct gpio_regmap *gpio)
#endif
gpiochip_remove(&gpio->gpio_chip);
+ bitmap_free(gpio->fixed_direction_output);
kfree(gpio);
}
EXPORT_SYMBOL_GPL(gpio_regmap_unregister);
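
Since `gpio_regmap_register()` copies the caller's bitmap into its own allocation, a driver can build `fixed_direction_output` on the stack, exactly as the idio-16 conversions above do. A minimal sketch for a chip whose low 16 lines are output-only:

  static int example_register(struct device *dev,
                              struct gpio_regmap_config *cfg)
  {
          DECLARE_BITMAP(fixed_out, 16);

          bitmap_from_u64(fixed_out, GENMASK_U64(15, 0));
          cfg->fixed_direction_output = fixed_out;	/* copied by the core */

          return PTR_ERR_OR_ZERO(devm_gpio_regmap_register(dev, cfg));
  }
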
diff --git a/drivers/gpio/gpiolib-acpi-core.c b/drivers/gpio/gpiolib-acpi-core.c
index 284e762d92c4..d441c1236d8c 100644
--- a/drivers/gpio/gpiolib-acpi-core.c
+++ b/drivers/gpio/gpiolib-acpi-core.c
@@ -291,6 +291,19 @@ acpi_gpio_to_gpiod_flags(const struct acpi_resource_gpio *agpio, int polarity)
return GPIOD_ASIS;
}
+static void acpi_gpio_set_debounce_timeout(struct gpio_desc *desc,
+ unsigned int acpi_debounce)
+{
+ int ret;
+
+ /* ACPI uses hundredths of milliseconds units */
+ acpi_debounce *= 10;
+ ret = gpio_set_debounce_timeout(desc, acpi_debounce);
+ if (ret)
+ gpiod_warn(desc, "Failed to set debounce-timeout %u: %d\n",
+ acpi_debounce, ret);
+}
+
static struct gpio_desc *acpi_request_own_gpiod(struct gpio_chip *chip,
struct acpi_resource_gpio *agpio,
unsigned int index,
@@ -300,18 +313,12 @@ static struct gpio_desc *acpi_request_own_gpiod(struct gpio_chip *chip,
enum gpiod_flags flags = acpi_gpio_to_gpiod_flags(agpio, polarity);
unsigned int pin = agpio->pin_table[index];
struct gpio_desc *desc;
- int ret;
desc = gpiochip_request_own_desc(chip, pin, label, polarity, flags);
if (IS_ERR(desc))
return desc;
- /* ACPI uses hundredths of milliseconds units */
- ret = gpio_set_debounce_timeout(desc, agpio->debounce_timeout * 10);
- if (ret)
- dev_warn(chip->parent,
- "Failed to set debounce-timeout for pin 0x%04X, err %d\n",
- pin, ret);
+ acpi_gpio_set_debounce_timeout(desc, agpio->debounce_timeout);
return desc;
}
@@ -375,8 +382,8 @@ static acpi_status acpi_gpiochip_alloc_event(struct acpi_resource *ares,
desc = acpi_request_own_gpiod(chip, agpio, 0, "ACPI:Event");
if (IS_ERR(desc)) {
dev_err(chip->parent,
- "Failed to request GPIO for pin 0x%04X, err %ld\n",
- pin, PTR_ERR(desc));
+ "Failed to request GPIO for pin 0x%04X, err %pe\n",
+ pin, desc);
return AE_OK;
}
@@ -944,7 +951,6 @@ struct gpio_desc *acpi_find_gpio(struct fwnode_handle *fwnode,
bool can_fallback = acpi_can_fallback_to_crs(adev, con_id);
struct acpi_gpio_info info = {};
struct gpio_desc *desc;
- int ret;
desc = __acpi_find_gpio(fwnode, con_id, idx, can_fallback, &info);
if (IS_ERR(desc))
@@ -959,10 +965,7 @@ struct gpio_desc *acpi_find_gpio(struct fwnode_handle *fwnode,
acpi_gpio_update_gpiod_flags(dflags, &info);
acpi_gpio_update_gpiod_lookup_flags(lookupflags, &info);
- /* ACPI uses hundredths of milliseconds units */
- ret = gpio_set_debounce_timeout(desc, info.debounce * 10);
- if (ret)
- return ERR_PTR(ret);
+ acpi_gpio_set_debounce_timeout(desc, info.debounce);
return desc;
}
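
The helper also pins the unit conversion in one place: ACPI describes debounce in hundredths of a millisecond (10 µs units) while `gpio_set_debounce_timeout()` takes microseconds, hence the ×10. For instance:

  /* ACPI value 123 = 123 * 10 us = 1230 us = 1.23 ms */
  unsigned int usecs = acpi_debounce * 10;
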
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c
index ef996493115f..425a3e564360 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: MIT
/*
* Copyright 2025 Advanced Micro Devices, Inc.
*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.h
index bcb97d245673..353421807387 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
/*
* Copyright 2025 Advanced Micro Devices, Inc.
*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c
index 474bfe36c0c2..aa78c2ee9e21 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c
@@ -322,6 +322,26 @@ static int vpe_early_init(struct amdgpu_ip_block *ip_block)
return 0;
}
+static bool vpe_need_dpm0_at_power_down(struct amdgpu_device *adev)
+{
+ switch (amdgpu_ip_version(adev, VPE_HWIP, 0)) {
+ case IP_VERSION(6, 1, 1):
+ return adev->pm.fw_version < 0x0a640500;
+ default:
+ return false;
+ }
+}
+
+static int vpe_get_dpm_level(struct amdgpu_device *adev)
+{
+ struct amdgpu_vpe *vpe = &adev->vpe;
+
+ if (!adev->pm.dpm_enabled)
+ return 0;
+
+ return RREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_request_lv));
+}
+
static void vpe_idle_work_handler(struct work_struct *work)
{
struct amdgpu_device *adev =
@@ -329,11 +349,17 @@ static void vpe_idle_work_handler(struct work_struct *work)
unsigned int fences = 0;
fences += amdgpu_fence_count_emitted(&adev->vpe.ring);
+ if (fences)
+ goto reschedule;
- if (fences == 0)
- amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VPE, AMD_PG_STATE_GATE);
- else
- schedule_delayed_work(&adev->vpe.idle_work, VPE_IDLE_TIMEOUT);
+ if (vpe_need_dpm0_at_power_down(adev) && vpe_get_dpm_level(adev) != 0)
+ goto reschedule;
+
+ amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VPE, AMD_PG_STATE_GATE);
+ return;
+
+reschedule:
+ schedule_delayed_work(&adev->vpe.idle_work, VPE_IDLE_TIMEOUT);
}
static int vpe_common_init(struct amdgpu_vpe *vpe)
diff --git a/drivers/gpu/drm/amd/amdgpu/cyan_skillfish_reg_init.c b/drivers/gpu/drm/amd/amdgpu/cyan_skillfish_reg_init.c
index 96616a865aac..ed1e25661706 100644
--- a/drivers/gpu/drm/amd/amdgpu/cyan_skillfish_reg_init.c
+++ b/drivers/gpu/drm/amd/amdgpu/cyan_skillfish_reg_init.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: MIT
/*
* Copyright 2018 Advanced Micro Devices, Inc.
*
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 6597475e245d..bfa3199591b6 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -551,13 +551,13 @@ static void schedule_dc_vmin_vmax(struct amdgpu_device *adev,
struct dc_stream_state *stream,
struct dc_crtc_timing_adjust *adjust)
{
- struct vupdate_offload_work *offload_work = kzalloc(sizeof(*offload_work), GFP_KERNEL);
+ struct vupdate_offload_work *offload_work = kzalloc(sizeof(*offload_work), GFP_NOWAIT);
if (!offload_work) {
drm_dbg_driver(adev_to_drm(adev), "Failed to allocate vupdate_offload_work\n");
return;
}
- struct dc_crtc_timing_adjust *adjust_copy = kzalloc(sizeof(*adjust_copy), GFP_KERNEL);
+ struct dc_crtc_timing_adjust *adjust_copy = kzalloc(sizeof(*adjust_copy), GFP_NOWAIT);
if (!adjust_copy) {
drm_dbg_driver(adev_to_drm(adev), "Failed to allocate adjust_copy\n");
kfree(offload_work);
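
The flag swap is the substance of this hunk: this path can be reached from vblank/interrupt handling, where GFP_KERNEL's direct reclaim could sleep. GFP_NOWAIT never sleeps, at the cost that allocation failure becomes likely under memory pressure and must be tolerated, which both call sites already do:

  struct vupdate_offload_work *w;

  w = kzalloc(sizeof(*w), GFP_NOWAIT);	/* atomic-safe, may fail */
  if (!w)
          return;		/* degrade gracefully rather than sleep */
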
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
index 1ec9d03ad747..38f9ea313dcb 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
@@ -248,6 +248,8 @@ static void amdgpu_dm_crtc_vblank_control_worker(struct work_struct *work)
struct vblank_control_work *vblank_work =
container_of(work, struct vblank_control_work, work);
struct amdgpu_display_manager *dm = vblank_work->dm;
+ struct amdgpu_device *adev = drm_to_adev(dm->ddev);
+ int r;
mutex_lock(&dm->dc_lock);
@@ -277,7 +279,16 @@ static void amdgpu_dm_crtc_vblank_control_worker(struct work_struct *work)
if (dm->active_vblank_irq_count == 0) {
dc_post_update_surfaces_to_stream(dm->dc);
+
+ r = amdgpu_dpm_pause_power_profile(adev, true);
+ if (r)
+ dev_warn(adev->dev, "failed to set default power profile mode\n");
+
dc_allow_idle_optimizations(dm->dc, true);
+
+ r = amdgpu_dpm_pause_power_profile(adev, false);
+ if (r)
+ dev_warn(adev->dev, "failed to restore the power profile mode\n");
}
mutex_unlock(&dm->dc_lock);
@@ -297,8 +308,12 @@ static inline int amdgpu_dm_crtc_set_vblank(struct drm_crtc *crtc, bool enable)
int irq_type;
int rc = 0;
- if (acrtc->otg_inst == -1)
- goto skip;
+ if (enable && !acrtc->base.enabled) {
+ drm_dbg_vbl(crtc->dev,
+ "Reject vblank enable on unconfigured CRTC %d (enabled=%d)\n",
+ acrtc->crtc_id, acrtc->base.enabled);
+ return -EINVAL;
+ }
irq_type = amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
@@ -383,7 +398,7 @@ static inline int amdgpu_dm_crtc_set_vblank(struct drm_crtc *crtc, bool enable)
return rc;
}
#endif
-skip:
+
if (amdgpu_in_reset(adev))
return 0;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index fe100e4c9801..cc21337a182f 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -83,6 +83,7 @@ static void apply_edid_quirks(struct drm_device *dev, struct edid *edid, struct
edid_caps->panel_patch.remove_sink_ext_caps = true;
break;
case drm_edid_encode_panel_id('S', 'D', 'C', 0x4154):
+ case drm_edid_encode_panel_id('S', 'D', 'C', 0x4171):
drm_dbg_driver(dev, "Disabling VSC on monitor with panel id %X\n", panel_id);
edid_caps->panel_patch.disable_colorimetry = true;
break;
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c
index 09be2a90cc79..4f569cd8a5d6 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c
@@ -578,9 +578,6 @@ static void dpp3_power_on_blnd_lut(
dpp_base->ctx->dc->optimized_required = true;
dpp_base->deferred_reg_writes.bits.disable_blnd_lut = true;
}
- } else {
- REG_SET(CM_MEM_PWR_CTRL, 0,
- BLNDGAM_MEM_PWR_FORCE, power_on == true ? 0 : 1);
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
index 7c276c319086..ce3d0b45fb4c 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
@@ -200,6 +200,9 @@ void dcn401_init_hw(struct dc *dc)
*/
struct dc_link *link = dc->links[i];
+ if (link->ep_type != DISPLAY_ENDPOINT_PHY)
+ continue;
+
link->link_enc->funcs->hw_init(link->link_enc);
/* Check for enabled DIG to identify enabled display */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
index 41c76ba9ba56..62a39204fe0b 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
@@ -44,7 +44,13 @@
*/
#define MAX_PIPES 6
#define MAX_PHANTOM_PIPES (MAX_PIPES / 2)
-#define MAX_LINKS (MAX_PIPES * 2 +2)
+
+#define MAX_DPIA 6
+#define MAX_CONNECTOR 6
+#define MAX_VIRTUAL_LINKS 4
+
+#define MAX_LINKS (MAX_DPIA + MAX_CONNECTOR + MAX_VIRTUAL_LINKS)
+
#define MAX_DIG_LINK_ENCODERS 7
#define MAX_DWB_PIPES 1
#define MAX_HPO_DP2_ENCODERS 4
diff --git a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c
index 9e33bf937a69..2676ae9f6fe8 100644
--- a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c
+++ b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c
@@ -78,6 +78,7 @@ static void dp_retrain_link_dp_test(struct dc_link *link,
struct audio_output audio_output[MAX_PIPES];
struct dc_stream_state *streams_on_link[MAX_PIPES];
int num_streams_on_link = 0;
+ struct dc *dc = (struct dc *)link->dc;
needs_divider_update = (link->dc->link_srv->dp_get_encoding_format(link_setting) !=
link->dc->link_srv->dp_get_encoding_format((const struct dc_link_settings *) &link->cur_link_settings));
@@ -150,7 +151,7 @@ static void dp_retrain_link_dp_test(struct dc_link *link,
if (streams_on_link[i] && streams_on_link[i]->link && streams_on_link[i]->link == link) {
stream_update.stream = streams_on_link[i];
stream_update.dpms_off = &dpms_off;
- dc_update_planes_and_stream(state->clk_mgr->ctx->dc, NULL, 0, streams_on_link[i], &stream_update);
+ dc_update_planes_and_stream(dc, NULL, 0, streams_on_link[i], &stream_update);
}
}
}
diff --git a/drivers/gpu/drm/amd/include/amd_cper.h b/drivers/gpu/drm/amd/include/amd_cper.h
index 086869264425..a252ee4c7874 100644
--- a/drivers/gpu/drm/amd/include/amd_cper.h
+++ b/drivers/gpu/drm/amd/include/amd_cper.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
/*
* Copyright 2025 Advanced Micro Devices, Inc.
*
diff --git a/drivers/gpu/drm/amd/include/ivsrcid/vcn/irqsrcs_vcn_5_0.h b/drivers/gpu/drm/amd/include/ivsrcid/vcn/irqsrcs_vcn_5_0.h
index 64b553e7de1a..e7fdcee22a71 100644
--- a/drivers/gpu/drm/amd/include/ivsrcid/vcn/irqsrcs_vcn_5_0.h
+++ b/drivers/gpu/drm/amd/include/ivsrcid/vcn/irqsrcs_vcn_5_0.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
/*
* Copyright 2024 Advanced Micro Devices, Inc. All rights reserved.
diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
index d2dbd90bb427..0a876c840c79 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
@@ -2024,7 +2024,7 @@ static int fiji_init_smc_table(struct pp_hwmgr *hwmgr)
table->VoltageResponseTime = 0;
table->PhaseResponseTime = 0;
table->MemoryThermThrottleEnable = 1;
- table->PCIeBootLinkLevel = 0; /* 0:Gen1 1:Gen2 2:Gen3*/
+ table->PCIeBootLinkLevel = (uint8_t) (data->dpm_table.pcie_speed_table.count);
table->PCIeGenInterval = 1;
table->VRConfig = 0;
diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
index 1f50f1e74c48..aa3ae9b115c4 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
@@ -2028,7 +2028,7 @@ static int iceland_init_smc_table(struct pp_hwmgr *hwmgr)
table->VoltageResponseTime = 0;
table->PhaseResponseTime = 0;
table->MemoryThermThrottleEnable = 1;
- table->PCIeBootLinkLevel = 0;
+ table->PCIeBootLinkLevel = (uint8_t) (data->dpm_table.pcie_speed_table.count);
table->PCIeGenInterval = 1;
result = iceland_populate_smc_svi2_config(hwmgr, table);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
index f532f7c69259..a8961a8f5c42 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
@@ -969,7 +969,7 @@ int smu_cmn_update_table(struct smu_context *smu,
table_index);
uint32_t table_size;
int ret = 0;
- if (!table_data || table_id >= SMU_TABLE_COUNT || table_id < 0)
+ if (!table_data || table_index >= SMU_TABLE_COUNT || table_id < 0)
return -EINVAL;
table_size = smu_table->tables[table_index].size;
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index c15aef014f69..d41bd876167c 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -282,13 +282,13 @@ static inline void __ast_write8_i(void __iomem *addr, u32 reg, u8 index, u8 val)
__ast_write8(addr, reg + 1, val);
}
-static inline void __ast_write8_i_masked(void __iomem *addr, u32 reg, u8 index, u8 read_mask,
+static inline void __ast_write8_i_masked(void __iomem *addr, u32 reg, u8 index, u8 preserve_mask,
u8 val)
{
- u8 tmp = __ast_read8_i_masked(addr, reg, index, read_mask);
+ u8 tmp = __ast_read8_i_masked(addr, reg, index, preserve_mask);
- tmp |= val;
- __ast_write8_i(addr, reg, index, tmp);
+ val &= ~preserve_mask;
+ __ast_write8_i(addr, reg, index, tmp | val);
}
static inline u32 ast_read32(struct ast_device *ast, u32 reg)
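
Renaming `read_mask` to `preserve_mask` matches what the helper now enforces: bits set in the mask are carried over from the current register value, and the same bits are stripped from `val`, so a caller passing a full byte can no longer clobber the preserved field. Worked example with illustrative values:

  /* preserve_mask = 0x0f: keep the low nibble, replace the high one */
  u8 cur = 0xa5;		/* current value behind the index register */
  u8 tmp = cur & 0x0f;	/* 0x05 survives */
  u8 val = 0xff;		/* careless caller sets every bit */

  val &= ~0x0f;		/* clamped to 0xf0 */
  /* written back: tmp | val == 0xf5; the old code would write 0xff */
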
diff --git a/drivers/gpu/drm/ci/gitlab-ci.yml b/drivers/gpu/drm/ci/gitlab-ci.yml
index d502d146b177..56638814bb28 100644
--- a/drivers/gpu/drm/ci/gitlab-ci.yml
+++ b/drivers/gpu/drm/ci/gitlab-ci.yml
@@ -280,7 +280,7 @@ sanity:
GIT_STRATEGY: none
script:
# ci-fairy check-commits --junit-xml=check-commits.xml
- - ci-fairy check-merge-request --require-allow-collaboration --junit-xml=check-merge-request.xml
+ # - ci-fairy check-merge-request --require-allow-collaboration --junit-xml=check-merge-request.xml
- |
set -eu
image_tags=(
diff --git a/drivers/gpu/drm/drm_gem_atomic_helper.c b/drivers/gpu/drm/drm_gem_atomic_helper.c
index ebf305fb24f0..6fb55601252f 100644
--- a/drivers/gpu/drm/drm_gem_atomic_helper.c
+++ b/drivers/gpu/drm/drm_gem_atomic_helper.c
@@ -310,8 +310,12 @@ EXPORT_SYMBOL(drm_gem_destroy_shadow_plane_state);
void __drm_gem_reset_shadow_plane(struct drm_plane *plane,
struct drm_shadow_plane_state *shadow_plane_state)
{
- __drm_atomic_helper_plane_reset(plane, &shadow_plane_state->base);
- drm_format_conv_state_init(&shadow_plane_state->fmtcnv_state);
+ if (shadow_plane_state) {
+ __drm_atomic_helper_plane_reset(plane, &shadow_plane_state->base);
+ drm_format_conv_state_init(&shadow_plane_state->fmtcnv_state);
+ } else {
+ __drm_atomic_helper_plane_reset(plane, NULL);
+ }
}
EXPORT_SYMBOL(__drm_gem_reset_shadow_plane);
diff --git a/drivers/gpu/drm/drm_panic.c b/drivers/gpu/drm/drm_panic.c
index 1d6312fa1429..d4b6ea42db0f 100644
--- a/drivers/gpu/drm/drm_panic.c
+++ b/drivers/gpu/drm/drm_panic.c
@@ -174,6 +174,33 @@ static void drm_panic_write_pixel24(void *vaddr, unsigned int offset, u32 color)
*p = color & 0xff;
}
+/*
+ * Special case if the pixel crosses page boundaries
+ */
+static void drm_panic_write_pixel24_xpage(void *vaddr, struct page *next_page,
+ unsigned int offset, u32 color)
+{
+ u8 *vaddr2;
+ u8 *p = vaddr + offset;
+
+ vaddr2 = kmap_local_page_try_from_panic(next_page);
+
+ *p++ = color & 0xff;
+ color >>= 8;
+
+ if (offset == PAGE_SIZE - 1)
+ p = vaddr2;
+
+ *p++ = color & 0xff;
+ color >>= 8;
+
+ if (offset == PAGE_SIZE - 2)
+ p = vaddr2;
+
+ *p = color & 0xff;
+ kunmap_local(vaddr2);
+}
+
static void drm_panic_write_pixel32(void *vaddr, unsigned int offset, u32 color)
{
u32 *p = vaddr + offset;
@@ -231,7 +258,14 @@ static void drm_panic_blit_page(struct page **pages, unsigned int dpitch,
page = new_page;
vaddr = kmap_local_page_try_from_panic(pages[page]);
}
- if (vaddr)
+ if (!vaddr)
+ continue;
+
+ // Special case for 24bit, as a pixel might cross page boundaries
+ if (cpp == 3 && offset + 3 > PAGE_SIZE)
+ drm_panic_write_pixel24_xpage(vaddr, pages[page + 1],
+ offset, fg32);
+ else
drm_panic_write_pixel(vaddr, offset, fg32, cpp);
}
}
@@ -321,7 +355,15 @@ static void drm_panic_fill_page(struct page **pages, unsigned int dpitch,
page = new_page;
vaddr = kmap_local_page_try_from_panic(pages[page]);
}
- drm_panic_write_pixel(vaddr, offset, color, cpp);
+ if (!vaddr)
+ continue;
+
+ // Special case for 24bit, as a pixel might cross page boundaries
+ if (cpp == 3 && offset + 3 > PAGE_SIZE)
+ drm_panic_write_pixel24_xpage(vaddr, pages[page + 1],
+ offset, color);
+ else
+ drm_panic_write_pixel(vaddr, offset, color, cpp);
}
}
if (vaddr)
@@ -429,6 +471,9 @@ static void drm_panic_logo_rect(struct drm_rect *rect, const struct font_desc *f
static void drm_panic_logo_draw(struct drm_scanout_buffer *sb, struct drm_rect *rect,
const struct font_desc *font, u32 fg_color)
{
+ if (rect->x2 > sb->width || rect->y2 > sb->height)
+ return;
+
if (logo_mono)
drm_panic_blit(sb, rect, logo_mono->data,
DIV_ROUND_UP(drm_rect_width(rect), 8), 1, fg_color);
@@ -477,7 +522,7 @@ static int draw_line_with_wrap(struct drm_scanout_buffer *sb, const struct font_
struct drm_panic_line *line, int yoffset, u32 fg_color)
{
int chars_per_row = sb->width / font->width;
- struct drm_rect r_txt = DRM_RECT_INIT(0, yoffset, sb->width, sb->height);
+ struct drm_rect r_txt = DRM_RECT_INIT(0, yoffset, sb->width, font->height);
struct drm_panic_line line_wrap;
if (line->len > chars_per_row) {
@@ -520,7 +565,7 @@ static void draw_panic_static_kmsg(struct drm_scanout_buffer *sb)
struct drm_panic_line line;
int yoffset;
- if (!font)
+ if (!font || font->width > sb->width)
return;
yoffset = sb->height - font->height - (sb->height % font->height) / 2;
@@ -733,7 +778,10 @@ static int _draw_panic_static_qr_code(struct drm_scanout_buffer *sb)
pr_debug("QR width %d and scale %d\n", qr_width, scale);
r_qr_canvas = DRM_RECT_INIT(0, 0, qr_canvas_width * scale, qr_canvas_width * scale);
- v_margin = (sb->height - drm_rect_height(&r_qr_canvas) - drm_rect_height(&r_msg)) / 5;
+ v_margin = sb->height - drm_rect_height(&r_qr_canvas) - drm_rect_height(&r_msg);
+ if (v_margin < 0)
+ return -ENOSPC;
+ v_margin /= 5;
drm_rect_translate(&r_qr_canvas, (sb->width - r_qr_canvas.x2) / 2, 2 * v_margin);
r_qr = DRM_RECT_INIT(r_qr_canvas.x1 + QR_MARGIN * scale, r_qr_canvas.y1 + QR_MARGIN * scale,
@@ -746,7 +794,7 @@ static int _draw_panic_static_qr_code(struct drm_scanout_buffer *sb)
/* Fill with the background color, and draw text on top */
drm_panic_fill(sb, &r_screen, bg_color);
- if (!drm_rect_overlap(&r_logo, &r_msg) && !drm_rect_overlap(&r_logo, &r_qr))
+ if (!drm_rect_overlap(&r_logo, &r_msg) && !drm_rect_overlap(&r_logo, &r_qr_canvas))
drm_panic_logo_draw(sb, &r_logo, font, fg_color);
draw_txt_rectangle(sb, font, panic_msg, panic_msg_lines, true, &r_msg, fg_color);
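
The cross-page case exists only for 24-bit formats: with cpp == 3 a pixel may start at byte PAGE_SIZE - 1 or PAGE_SIZE - 2 and spill one or two bytes onto the following page, which is why the helper maps the next page up front and swaps the write pointer mid-pixel. Worked example assuming 4 KiB pages, with `p` and `q` standing for the mappings of page N and page N + 1:

  /* offset = PAGE_SIZE - 2 = 4094, cpp = 3 */
  p[4094] = color & 0xff;		/* byte 0: next-to-last byte of page N */
  p[4095] = (color >> 8) & 0xff;	/* byte 1: last byte of page N */
  q[0]    = (color >> 16) & 0xff;	/* byte 2: first byte of page N + 1 */
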
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
index b13a17276d07..88385dc3b30d 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
@@ -347,7 +347,7 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
u32 link_target, link_dwords;
bool switch_context = gpu->exec_state != exec_state;
bool switch_mmu_context = gpu->mmu_context != mmu_context;
- unsigned int new_flush_seq = READ_ONCE(gpu->mmu_context->flush_seq);
+ unsigned int new_flush_seq = READ_ONCE(mmu_context->flush_seq);
bool need_flush = switch_mmu_context || gpu->flush_seq != new_flush_seq;
bool has_blt = !!(gpu->identity.minor_features5 &
chipMinorFeatures5_BLT_ENGINE);
diff --git a/drivers/gpu/drm/i915/display/intel_dmc.c b/drivers/gpu/drm/i915/display/intel_dmc.c
index 77a0199f9ea5..4a4cace1f879 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc.c
+++ b/drivers/gpu/drm/i915/display/intel_dmc.c
@@ -546,6 +546,36 @@ static bool is_event_handler(struct intel_display *display,
REG_FIELD_GET(DMC_EVT_CTL_EVENT_ID_MASK, data) == event_id;
}
+static bool fixup_dmc_evt(struct intel_display *display,
+ enum intel_dmc_id dmc_id,
+ i915_reg_t reg_ctl, u32 *data_ctl,
+ i915_reg_t reg_htp, u32 *data_htp)
+{
+ if (!is_dmc_evt_ctl_reg(display, dmc_id, reg_ctl))
+ return false;
+
+ if (!is_dmc_evt_htp_reg(display, dmc_id, reg_htp))
+ return false;
+
+ /* make sure reg_ctl and reg_htp are for the same event */
+ if (i915_mmio_reg_offset(reg_ctl) - i915_mmio_reg_offset(DMC_EVT_CTL(display, dmc_id, 0)) !=
+ i915_mmio_reg_offset(reg_htp) - i915_mmio_reg_offset(DMC_EVT_HTP(display, dmc_id, 0)))
+ return false;
+
+ /*
+ * On ADL-S the HRR event handler is not restored after DC6.
+ * Clear it to zero from the beginning to avoid mismatches later.
+ */
+ if (display->platform.alderlake_s && dmc_id == DMC_FW_MAIN &&
+ is_event_handler(display, dmc_id, MAINDMC_EVENT_VBLANK_A, reg_ctl, *data_ctl)) {
+ *data_ctl = 0;
+ *data_htp = 0;
+ return true;
+ }
+
+ return false;
+}
+
static bool disable_dmc_evt(struct intel_display *display,
enum intel_dmc_id dmc_id,
i915_reg_t reg, u32 data)
@@ -1064,9 +1094,32 @@ static u32 parse_dmc_fw_header(struct intel_dmc *dmc,
for (i = 0; i < mmio_count; i++) {
dmc_info->mmioaddr[i] = _MMIO(mmioaddr[i]);
dmc_info->mmiodata[i] = mmiodata[i];
+ }
+
+ for (i = 0; i < mmio_count - 1; i++) {
+ u32 orig_mmiodata[2] = {
+ dmc_info->mmiodata[i],
+ dmc_info->mmiodata[i+1],
+ };
+
+ if (!fixup_dmc_evt(display, dmc_id,
+ dmc_info->mmioaddr[i], &dmc_info->mmiodata[i],
+ dmc_info->mmioaddr[i+1], &dmc_info->mmiodata[i+1]))
+ continue;
+
+ drm_dbg_kms(display->drm,
+ " mmio[%d]: 0x%x = 0x%x->0x%x (EVT_CTL)\n",
+ i, i915_mmio_reg_offset(dmc_info->mmioaddr[i]),
+ orig_mmiodata[0], dmc_info->mmiodata[i]);
+ drm_dbg_kms(display->drm,
+ " mmio[%d]: 0x%x = 0x%x->0x%x (EVT_HTP)\n",
+ i+1, i915_mmio_reg_offset(dmc_info->mmioaddr[i+1]),
+ orig_mmiodata[1], dmc_info->mmiodata[i+1]);
+ }
+ for (i = 0; i < mmio_count; i++) {
drm_dbg_kms(display->drm, " mmio[%d]: 0x%x = 0x%x%s%s\n",
- i, mmioaddr[i], mmiodata[i],
+ i, i915_mmio_reg_offset(dmc_info->mmioaddr[i]), dmc_info->mmiodata[i],
is_dmc_evt_ctl_reg(display, dmc_id, dmc_info->mmioaddr[i]) ? " (EVT_CTL)" :
is_dmc_evt_htp_reg(display, dmc_id, dmc_info->mmioaddr[i]) ? " (EVT_HTP)" : "",
disable_dmc_evt(display, dmc_id, dmc_info->mmioaddr[i],
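The new fixup loop walks the mmio table pairwise, treating entries i and i+1 as a candidate (EVT_CTL, EVT_HTP) pair and rewriting both in place when the fixup applies. A hedged sketch of that scan shape; the table layout and the bad-pair predicate are invented, not the DMC format:

#include <stdbool.h>
#include <stdio.h>

struct mmio_entry { unsigned int addr; unsigned int data; };

/* Invented rule standing in for fixup_dmc_evt(): patch a pair when the
 * first entry has its top bit set. */
static bool fixup_pair(struct mmio_entry *ctl, struct mmio_entry *htp)
{
	if (!(ctl->data & 0x80000000u))
		return false;
	ctl->data = 0;
	htp->data = 0;
	return true;
}

int main(void)
{
	struct mmio_entry tbl[] = {
		{ 0x8f00, 0x80000001u }, { 0x8f40, 0x1234 }, { 0x8f04, 0x2 },
	};
	int n = sizeof(tbl) / sizeof(tbl[0]);
	int i;

	for (i = 0; i < n - 1; i++)
		if (fixup_pair(&tbl[i], &tbl[i + 1]))
			printf("patched entries %d and %d\n", i, i + 1);
	return 0;
}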
diff --git a/drivers/gpu/drm/i915/display/intel_fb.c b/drivers/gpu/drm/i915/display/intel_fb.c
index b817ff44c043..c48384e58ea1 100644
--- a/drivers/gpu/drm/i915/display/intel_fb.c
+++ b/drivers/gpu/drm/i915/display/intel_fb.c
@@ -2117,6 +2117,7 @@ static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
intel_frontbuffer_put(intel_fb->frontbuffer);
+ kfree(intel_fb->panic);
kfree(intel_fb);
}
@@ -2215,16 +2216,22 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
struct intel_display *display = to_intel_display(obj->dev);
struct drm_framebuffer *fb = &intel_fb->base;
u32 max_stride;
- int ret = -EINVAL;
+ int ret;
int i;
+ intel_fb->panic = intel_panic_alloc();
+ if (!intel_fb->panic)
+ return -ENOMEM;
+
/*
* intel_frontbuffer_get() must be done before
* intel_fb_bo_framebuffer_init() to avoid set_tiling vs. addfb race.
*/
intel_fb->frontbuffer = intel_frontbuffer_get(obj);
- if (!intel_fb->frontbuffer)
- return -ENOMEM;
+ if (!intel_fb->frontbuffer) {
+ ret = -ENOMEM;
+ goto err_free_panic;
+ }
ret = intel_fb_bo_framebuffer_init(fb, obj, mode_cmd);
if (ret)
@@ -2323,6 +2330,9 @@ err_bo_framebuffer_fini:
intel_fb_bo_framebuffer_fini(obj);
err_frontbuffer_put:
intel_frontbuffer_put(intel_fb->frontbuffer);
+err_free_panic:
+ kfree(intel_fb->panic);
+
return ret;
}
@@ -2349,20 +2359,11 @@ intel_user_framebuffer_create(struct drm_device *dev,
struct intel_framebuffer *intel_framebuffer_alloc(void)
{
struct intel_framebuffer *intel_fb;
- struct intel_panic *panic;
intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
if (!intel_fb)
return NULL;
- panic = intel_panic_alloc();
- if (!panic) {
- kfree(intel_fb);
- return NULL;
- }
-
- intel_fb->panic = panic;
-
return intel_fb;
}
diff --git a/drivers/gpu/drm/imx/ipuv3/parallel-display.c b/drivers/gpu/drm/imx/ipuv3/parallel-display.c
index 6d8325c76697..7fc6af703307 100644
--- a/drivers/gpu/drm/imx/ipuv3/parallel-display.c
+++ b/drivers/gpu/drm/imx/ipuv3/parallel-display.c
@@ -25,19 +25,18 @@
struct imx_parallel_display_encoder {
struct drm_encoder encoder;
- struct drm_bridge bridge;
- struct imx_parallel_display *pd;
};
struct imx_parallel_display {
struct device *dev;
u32 bus_format;
struct drm_bridge *next_bridge;
+ struct drm_bridge bridge;
};
static inline struct imx_parallel_display *bridge_to_imxpd(struct drm_bridge *b)
{
- return container_of(b, struct imx_parallel_display_encoder, bridge)->pd;
+ return container_of(b, struct imx_parallel_display, bridge);
}
static const u32 imx_pd_bus_fmts[] = {
@@ -195,15 +194,13 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data)
if (IS_ERR(imxpd_encoder))
return PTR_ERR(imxpd_encoder);
- imxpd_encoder->pd = imxpd;
encoder = &imxpd_encoder->encoder;
- bridge = &imxpd_encoder->bridge;
+ bridge = &imxpd->bridge;
ret = imx_drm_encoder_parse_of(drm, encoder, imxpd->dev->of_node);
if (ret)
return ret;
- bridge->funcs = &imx_pd_bridge_funcs;
drm_bridge_attach(encoder, bridge, NULL, DRM_BRIDGE_ATTACH_NO_CONNECTOR);
connector = drm_bridge_connector_init(drm, encoder);
@@ -228,9 +225,10 @@ static int imx_pd_probe(struct platform_device *pdev)
u32 bus_format = 0;
const char *fmt;
- imxpd = devm_kzalloc(dev, sizeof(*imxpd), GFP_KERNEL);
- if (!imxpd)
- return -ENOMEM;
+ imxpd = devm_drm_bridge_alloc(dev, struct imx_parallel_display, bridge,
+ &imx_pd_bridge_funcs);
+ if (IS_ERR(imxpd))
+ return PTR_ERR(imxpd);
/* port@1 is the output port */
imxpd->next_bridge = devm_drm_of_get_bridge(dev, np, 1, 0);
@@ -258,6 +256,8 @@ static int imx_pd_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, imxpd);
+ devm_drm_bridge_add(dev, &imxpd->bridge);
+
return component_add(dev, &imx_pd_ops);
}
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index eb5537f0ac90..31ff2922758a 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -686,10 +686,6 @@ err_free:
for (i = 0; i < private->data->mmsys_dev_num; i++)
private->all_drm_private[i]->drm = NULL;
err_put_dev:
- for (i = 0; i < private->data->mmsys_dev_num; i++) {
- /* For device_find_child in mtk_drm_get_all_priv() */
- put_device(private->all_drm_private[i]->dev);
- }
put_device(private->mutex_dev);
return ret;
}
@@ -697,18 +693,12 @@ err_put_dev:
static void mtk_drm_unbind(struct device *dev)
{
struct mtk_drm_private *private = dev_get_drvdata(dev);
- int i;
/* for multi mmsys dev, unregister drm dev in mmsys master */
if (private->drm_master) {
drm_dev_unregister(private->drm);
mtk_drm_kms_deinit(private->drm);
drm_dev_put(private->drm);
-
- for (i = 0; i < private->data->mmsys_dev_num; i++) {
- /* For device_find_child in mtk_drm_get_all_priv() */
- put_device(private->all_drm_private[i]->dev);
- }
put_device(private->mutex_dev);
}
private->mtk_drm_bound = false;
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index fc62fef2fed8..4e6dc16e4a4c 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -780,6 +780,9 @@ static bool fw_block_mem(struct a6xx_gmu_bo *bo, const struct block_header *blk)
return true;
}
+#define NEXT_BLK(blk) \
+ ((const struct block_header *)((const char *)(blk) + sizeof(*(blk)) + (blk)->size))
+
static int a6xx_gmu_fw_load(struct a6xx_gmu *gmu)
{
struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
@@ -811,7 +814,7 @@ static int a6xx_gmu_fw_load(struct a6xx_gmu *gmu)
for (blk = (const struct block_header *) fw_image->data;
(const u8*) blk < fw_image->data + fw_image->size;
- blk = (const struct block_header *) &blk->data[blk->size >> 2]) {
+ blk = NEXT_BLK(blk)) {
if (blk->size == 0)
continue;
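The old advance, &blk->data[blk->size >> 2], stepped through data[] in whole 32-bit words, so a payload size that is not a multiple of four lands the cursor short of the next header; NEXT_BLK() advances by the exact byte count past the header. A sketch of that walk over variable-length [header|payload] records; the record layout is illustrative, not the GMU image format:

#include <stdint.h>
#include <stdio.h>

struct block_header {
	uint32_t size;		/* payload bytes following this header */
	uint8_t data[];
};

#define NEXT_BLK(blk) \
	((const struct block_header *)((const char *)(blk) + sizeof(*(blk)) + (blk)->size))

int main(void)
{
	uint8_t image[32] = { 0 };
	size_t used = 2 * sizeof(struct block_header) + 8 + 4;
	const struct block_header *blk;

	((struct block_header *)image)->size = 8;
	((struct block_header *)(image + sizeof(struct block_header) + 8))->size = 4;

	for (blk = (const struct block_header *)image;
	     (const uint8_t *)blk < image + used;
	     blk = NEXT_BLK(blk))
		printf("block, %u payload bytes\n", blk->size);
	return 0;
}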
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index afaa3cfefd35..4b5a4edd0702 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -348,13 +348,6 @@ int adreno_fault_handler(struct msm_gpu *gpu, unsigned long iova, int flags,
return 0;
}
-static bool
-adreno_smmu_has_prr(struct msm_gpu *gpu)
-{
- struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(&gpu->pdev->dev);
- return adreno_smmu && adreno_smmu->set_prr_addr;
-}
-
int adreno_get_param(struct msm_gpu *gpu, struct msm_context *ctx,
uint32_t param, uint64_t *value, uint32_t *len)
{
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
index 4b970a59deaf..2f8156051d9b 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
@@ -1545,6 +1545,9 @@ static enum drm_mode_status dpu_crtc_mode_valid(struct drm_crtc *crtc,
adjusted_mode_clk = dpu_core_perf_adjusted_mode_clk(mode->clock,
dpu_kms->perf.perf_cfg);
+ if (dpu_kms->catalog->caps->has_3d_merge)
+ adjusted_mode_clk /= 2;
+
/*
* The given mode, adjusted for the perf clock factor, should not exceed
* the max core clock rate
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
index 6641455c4ec6..9f8d1bba9139 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
@@ -267,8 +267,8 @@ static const u32 wb2_formats_rgb_yuv[] = {
.base = 0x200, .len = 0xa0,}, \
.csc_blk = {.name = "csc", \
.base = 0x320, .len = 0x100,}, \
- .format_list = plane_formats_yuv, \
- .num_formats = ARRAY_SIZE(plane_formats_yuv), \
+ .format_list = plane_formats, \
+ .num_formats = ARRAY_SIZE(plane_formats), \
.rotation_cfg = NULL, \
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
index f54cf0faa1c7..905524ceeb1f 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
@@ -500,13 +500,15 @@ static void _dpu_plane_setup_pixel_ext(struct dpu_hw_scaler3_cfg *scale_cfg,
int i;
for (i = 0; i < DPU_MAX_PLANES; i++) {
+ uint32_t w = src_w, h = src_h;
+
if (i == DPU_SSPP_COMP_1_2 || i == DPU_SSPP_COMP_2) {
- src_w /= chroma_subsmpl_h;
- src_h /= chroma_subsmpl_v;
+ w /= chroma_subsmpl_h;
+ h /= chroma_subsmpl_v;
}
- pixel_ext->num_ext_pxls_top[i] = src_h;
- pixel_ext->num_ext_pxls_left[i] = src_w;
+ pixel_ext->num_ext_pxls_top[i] = h;
+ pixel_ext->num_ext_pxls_left[i] = w;
}
}
@@ -740,7 +742,7 @@ static int dpu_plane_atomic_check_pipe(struct dpu_plane *pdpu,
* We already have verified scaling against platform limitations.
* Now check if the SSPP supports scaling at all.
*/
- if (!sblk->scaler_blk.len &&
+ if (!(sblk->scaler_blk.len && pipe->sspp->ops.setup_scaler) &&
((drm_rect_width(&new_plane_state->src) >> 16 !=
drm_rect_width(&new_plane_state->dst)) ||
(drm_rect_height(&new_plane_state->src) >> 16 !=
@@ -1278,7 +1280,7 @@ int dpu_assign_plane_resources(struct dpu_global_state *global_state,
state, plane_state,
prev_adjacent_plane_state);
if (ret)
- break;
+ return ret;
prev_adjacent_plane_state = plane_state;
}
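The first hunk's bug pattern is worth isolating: dividing the shared loop inputs in place means every later iteration sees an already-reduced value. A minimal demonstration of per-iteration copies versus in-place division; the plane indices are illustrative:

#include <stdio.h>

int main(void)
{
	unsigned int src_w = 1920;
	int i;

	for (i = 0; i < 4; i++) {
		unsigned int w = src_w;	/* fresh copy every pass */

		if (i == 1 || i == 2)	/* chroma components only */
			w /= 2;
		printf("plane %d: %u\n", i, w);	/* 1920 960 960 1920 */
	}
	return 0;
}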
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
index 2c77c74fac0f..d9c3b0a1d091 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
@@ -842,7 +842,7 @@ struct dpu_hw_sspp *dpu_rm_reserve_sspp(struct dpu_rm *rm,
if (!reqs->scale && !reqs->yuv)
hw_sspp = dpu_rm_try_sspp(rm, global_state, crtc, reqs, SSPP_TYPE_DMA);
- if (!hw_sspp && reqs->scale)
+ if (!hw_sspp && !reqs->yuv)
hw_sspp = dpu_rm_try_sspp(rm, global_state, crtc, reqs, SSPP_TYPE_RGB);
if (!hw_sspp)
hw_sspp = dpu_rm_try_sspp(rm, global_state, crtc, reqs, SSPP_TYPE_VIG);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c
index cd73468e369a..7545c0293efb 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c
@@ -72,6 +72,9 @@ static int dpu_wb_conn_atomic_check(struct drm_connector *connector,
DPU_ERROR("invalid fb w=%d, maxlinewidth=%u\n",
fb->width, dpu_wb_conn->maxlinewidth);
return -EINVAL;
+ } else if (fb->modifier != DRM_FORMAT_MOD_LINEAR) {
+ DPU_ERROR("unsupported fb modifier:%#llx\n", fb->modifier);
+ return -EINVAL;
}
return drm_atomic_helper_check_wb_connector_state(conn_state->connector, conn_state->state);
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
index e391505fdaf0..3cbf08231492 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
@@ -109,7 +109,6 @@ struct msm_dsi_phy {
struct msm_dsi_dphy_timing timing;
const struct msm_dsi_phy_cfg *cfg;
void *tuning_cfg;
- void *pll_data;
enum msm_dsi_phy_usecase usecase;
bool regulator_ldo_mode;
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
index 32f06edd21a9..c5e1d2016bcc 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
@@ -426,11 +426,8 @@ static void dsi_pll_enable_pll_bias(struct dsi_pll_7nm *pll)
u32 data;
spin_lock_irqsave(&pll->pll_enable_lock, flags);
- if (pll->pll_enable_cnt++) {
- spin_unlock_irqrestore(&pll->pll_enable_lock, flags);
- WARN_ON(pll->pll_enable_cnt == INT_MAX);
- return;
- }
+ pll->pll_enable_cnt++;
+ WARN_ON(pll->pll_enable_cnt == INT_MAX);
data = readl(pll->phy->base + REG_DSI_7nm_PHY_CMN_CTRL_0);
data |= DSI_7nm_PHY_CMN_CTRL_0_PLL_SHUTDOWNB;
@@ -876,7 +873,6 @@ static int dsi_pll_7nm_init(struct msm_dsi_phy *phy)
spin_lock_init(&pll_7nm->pll_enable_lock);
pll_7nm->phy = phy;
- phy->pll_data = pll_7nm;
ret = pll_7nm_register(pll_7nm, phy->provided_clocks->hws);
if (ret) {
@@ -965,10 +961,8 @@ static int dsi_7nm_phy_enable(struct msm_dsi_phy *phy,
u32 const delay_us = 5;
u32 const timeout_us = 1000;
struct msm_dsi_dphy_timing *timing = &phy->timing;
- struct dsi_pll_7nm *pll = phy->pll_data;
void __iomem *base = phy->base;
bool less_than_1500_mhz;
- unsigned long flags;
u32 vreg_ctrl_0, vreg_ctrl_1, lane_ctrl0;
u32 glbl_pemph_ctrl_0;
u32 glbl_str_swi_cal_sel_ctrl, glbl_hstx_str_ctrl_0;
@@ -1090,13 +1084,10 @@ static int dsi_7nm_phy_enable(struct msm_dsi_phy *phy,
glbl_rescode_bot_ctrl = 0x3c;
}
- spin_lock_irqsave(&pll->pll_enable_lock, flags);
- pll->pll_enable_cnt = 1;
/* de-assert digital and pll power down */
data = DSI_7nm_PHY_CMN_CTRL_0_DIGTOP_PWRDN_B |
DSI_7nm_PHY_CMN_CTRL_0_PLL_SHUTDOWNB;
writel(data, base + REG_DSI_7nm_PHY_CMN_CTRL_0);
- spin_unlock_irqrestore(&pll->pll_enable_lock, flags);
/* Assert PLL core reset */
writel(0x00, base + REG_DSI_7nm_PHY_CMN_PLL_CNTRL);
@@ -1209,9 +1200,7 @@ static bool dsi_7nm_set_continuous_clock(struct msm_dsi_phy *phy, bool enable)
static void dsi_7nm_phy_disable(struct msm_dsi_phy *phy)
{
- struct dsi_pll_7nm *pll = phy->pll_data;
void __iomem *base = phy->base;
- unsigned long flags;
u32 data;
DBG("");
@@ -1238,11 +1227,8 @@ static void dsi_7nm_phy_disable(struct msm_dsi_phy *phy)
writel(data, base + REG_DSI_7nm_PHY_CMN_CTRL_0);
writel(0, base + REG_DSI_7nm_PHY_CMN_LANE_CTRL0);
- spin_lock_irqsave(&pll->pll_enable_lock, flags);
- pll->pll_enable_cnt = 0;
/* Turn off all PHY blocks */
writel(0x00, base + REG_DSI_7nm_PHY_CMN_CTRL_0);
- spin_unlock_irqrestore(&pll->pll_enable_lock, flags);
/* make sure phy is turned off */
wmb();
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 07d8cdd6bb2e..9f7fbe577abb 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -1120,12 +1120,16 @@ static void msm_gem_free_object(struct drm_gem_object *obj)
put_pages(obj);
}
- if (obj->resv != &obj->_resv) {
+ /*
+ * In error paths, we could end up here before msm_gem_new_handle()
+ * has changed obj->resv to point to the shared resv. In this case,
+ * we don't want to drop a ref to the shared r_obj that we haven't
+ * taken yet.
+ */
+ if ((msm_obj->flags & MSM_BO_NO_SHARE) && (obj->resv != &obj->_resv)) {
struct drm_gem_object *r_obj =
container_of(obj->resv, struct drm_gem_object, _resv);
- WARN_ON(!(msm_obj->flags & MSM_BO_NO_SHARE));
-
/* Drop reference we hold to shared resv obj: */
drm_gem_object_put(r_obj);
}
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 3ab3b27134f9..75d9f3574370 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -414,6 +414,11 @@ static void submit_attach_object_fences(struct msm_gem_submit *submit)
submit->user_fence,
DMA_RESV_USAGE_BOOKKEEP,
DMA_RESV_USAGE_BOOKKEEP);
+
+ last_fence = vm->last_fence;
+ vm->last_fence = dma_fence_unwrap_merge(submit->user_fence, last_fence);
+ dma_fence_put(last_fence);
+
return;
}
@@ -427,10 +432,6 @@ static void submit_attach_object_fences(struct msm_gem_submit *submit)
dma_resv_add_fence(obj->resv, submit->user_fence,
DMA_RESV_USAGE_READ);
}
-
- last_fence = vm->last_fence;
- vm->last_fence = dma_fence_unwrap_merge(submit->user_fence, last_fence);
- dma_fence_put(last_fence);
}
static int submit_bo(struct msm_gem_submit *submit, uint32_t idx,
diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c
index 8316af1723c2..89a95977f41e 100644
--- a/drivers/gpu/drm/msm/msm_gem_vma.c
+++ b/drivers/gpu/drm/msm/msm_gem_vma.c
@@ -971,6 +971,7 @@ static int
lookup_op(struct msm_vm_bind_job *job, const struct drm_msm_vm_bind_op *op)
{
struct drm_device *dev = job->vm->drm;
+ struct msm_drm_private *priv = dev->dev_private;
int i = job->nr_ops++;
int ret = 0;
@@ -1017,6 +1018,11 @@ lookup_op(struct msm_vm_bind_job *job, const struct drm_msm_vm_bind_op *op)
break;
}
+ if ((op->op == MSM_VM_BIND_OP_MAP_NULL) &&
+ !adreno_smmu_has_prr(priv->gpu)) {
+ ret = UERR(EINVAL, dev, "PRR not supported\n");
+ }
+
return ret;
}
@@ -1421,7 +1427,7 @@ msm_ioctl_vm_bind(struct drm_device *dev, void *data, struct drm_file *file)
* Maybe we could allow just UNMAP ops? OTOH userspace should just
* immediately close the device file and all will be torn down.
*/
- if (to_msm_vm(ctx->vm)->unusable)
+ if (to_msm_vm(msm_context_vm(dev, ctx))->unusable)
return UERR(EPIPE, dev, "context is unusable");
/*
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index a597f2bee30b..2894fc118485 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -299,6 +299,17 @@ static inline struct msm_gpu *dev_to_gpu(struct device *dev)
return container_of(adreno_smmu, struct msm_gpu, adreno_smmu);
}
+static inline bool
+adreno_smmu_has_prr(struct msm_gpu *gpu)
+{
+ struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(&gpu->pdev->dev);
+
+ if (!adreno_smmu)
+ return false;
+
+ return adreno_smmu->set_prr_addr;
+}
+
/* It turns out that all targets use the same ringbuffer size */
#define MSM_GPU_RINGBUFFER_SZ SZ_32K
#define MSM_GPU_RINGBUFFER_BLKSIZE 32
diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
index 0e18619f96cb..a188617653e8 100644
--- a/drivers/gpu/drm/msm/msm_iommu.c
+++ b/drivers/gpu/drm/msm/msm_iommu.c
@@ -338,6 +338,8 @@ msm_iommu_pagetable_prealloc_allocate(struct msm_mmu *mmu, struct msm_mmu_preall
ret = kmem_cache_alloc_bulk(pt_cache, GFP_KERNEL, p->count, p->pages);
if (ret != p->count) {
+ kfree(p->pages);
+ p->pages = NULL;
p->count = ret;
return -ENOMEM;
}
@@ -351,6 +353,9 @@ msm_iommu_pagetable_prealloc_cleanup(struct msm_mmu *mmu, struct msm_mmu_preallo
struct kmem_cache *pt_cache = get_pt_cache(mmu);
uint32_t remaining_pt_count = p->count - p->ptr;
+ if (!p->pages)
+ return;
+
if (p->count > 0)
trace_msm_mmu_prealloc_cleanup(p->count, remaining_pt_count);
diff --git a/drivers/gpu/drm/nouveau/nouveau_sched.c b/drivers/gpu/drm/nouveau/nouveau_sched.c
index e60f7892f5ce..a7bf539e5d86 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sched.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sched.c
@@ -482,6 +482,17 @@ nouveau_sched_create(struct nouveau_sched **psched, struct nouveau_drm *drm,
return 0;
}
+static bool
+nouveau_sched_job_list_empty(struct nouveau_sched *sched)
+{
+ bool empty;
+
+ spin_lock(&sched->job.list.lock);
+ empty = list_empty(&sched->job.list.head);
+ spin_unlock(&sched->job.list.lock);
+
+ return empty;
+}
static void
nouveau_sched_fini(struct nouveau_sched *sched)
@@ -489,8 +500,7 @@ nouveau_sched_fini(struct nouveau_sched *sched)
struct drm_gpu_scheduler *drm_sched = &sched->base;
struct drm_sched_entity *entity = &sched->entity;
- rmb(); /* for list_empty to work without lock */
- wait_event(sched->job.wq, list_empty(&sched->job.list.head));
+ wait_event(sched->job.wq, nouveau_sched_job_list_empty(sched));
drm_sched_entity_fini(entity);
drm_sched_fini(drm_sched);
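The helper samples emptiness with the job list lock held, so the wait_event() condition no longer leans on a bare rmb() paired with an unlocked list_empty(). A userspace analogue of the locked predicate, with a pthread mutex and a counter standing in for the spinlock and the list:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct job_list {
	pthread_mutex_t lock;
	int count;			/* stands in for the kernel list */
};

static bool job_list_empty(struct job_list *l)
{
	bool empty;

	pthread_mutex_lock(&l->lock);
	empty = (l->count == 0);
	pthread_mutex_unlock(&l->lock);

	return empty;
}

int main(void)
{
	struct job_list l = { PTHREAD_MUTEX_INITIALIZER, 0 };

	printf("empty: %d\n", job_list_empty(&l));	/* 1 */
	return 0;
}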
diff --git a/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c b/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c
index 2fc7b0779b37..893af9b16756 100644
--- a/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c
+++ b/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c
@@ -359,7 +359,7 @@ static int kingdisplay_panel_probe(struct mipi_dsi_device *dsi)
dsi->lanes = 4;
dsi->format = MIPI_DSI_FMT_RGB888;
dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
- MIPI_DSI_MODE_LPM;
+ MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_NO_EOT_PACKET;
kingdisplay = devm_drm_panel_alloc(&dsi->dev, __typeof(*kingdisplay), base,
&kingdisplay_panel_funcs,
diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7789v.c b/drivers/gpu/drm/panel/panel-sitronix-st7789v.c
index 04d91929eedd..d5f821d6b23c 100644
--- a/drivers/gpu/drm/panel/panel-sitronix-st7789v.c
+++ b/drivers/gpu/drm/panel/panel-sitronix-st7789v.c
@@ -249,6 +249,11 @@ static const struct drm_display_mode default_mode = {
.flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
};
+/*
+ * The mode data for this panel has been reverse engineered without access
+ * to the panel datasheet / manual. Using DRM_MODE_FLAG_PHSYNC like all
+ * other panels results in garbage data on the display.
+ */
static const struct drm_display_mode t28cp45tn89_mode = {
.clock = 6008,
.hdisplay = 240,
@@ -261,7 +266,7 @@ static const struct drm_display_mode t28cp45tn89_mode = {
.vtotal = 320 + 8 + 4 + 4,
.width_mm = 43,
.height_mm = 57,
- .flags = DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC,
+ .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC,
};
static const struct drm_display_mode et028013dma_mode = {
diff --git a/drivers/gpu/drm/panthor/panthor_mmu.c b/drivers/gpu/drm/panthor/panthor_mmu.c
index 6dec4354e378..7870e7dbaa5d 100644
--- a/drivers/gpu/drm/panthor/panthor_mmu.c
+++ b/drivers/gpu/drm/panthor/panthor_mmu.c
@@ -1175,10 +1175,14 @@ panthor_vm_op_ctx_prealloc_vmas(struct panthor_vm_op_ctx *op_ctx)
break;
case DRM_PANTHOR_VM_BIND_OP_TYPE_UNMAP:
- /* Partial unmaps might trigger a remap with either a prev or a next VA,
- * but not both.
+ /* Two VMAs can be needed for an unmap, as an unmap can happen
+ * in the middle of a drm_gpuva, requiring a remap with both
+ * prev & next VA. Or an unmap can span more than one drm_gpuva
+ * where the first and last ones are covered partially, requiring
+ * a remap for the first with a prev VA and remap for the last
+ * with a next VA.
*/
- vma_count = 1;
+ vma_count = 2;
break;
default:
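The corrected comment covers two shapes of unmap, and both bounds can be counted directly: a remnant survives below the unmap when it starts above the VA, and above the unmap when it ends below it. A small sketch of that count; the address values are made up:

#include <stdio.h>

static int remnant_vmas(unsigned long va_start, unsigned long va_end,
			unsigned long um_start, unsigned long um_end)
{
	int n = 0;

	if (um_start > va_start)
		n++;	/* "prev" remnant keeps [va_start, um_start) */
	if (um_end < va_end)
		n++;	/* "next" remnant keeps [um_end, va_end) */
	return n;
}

int main(void)
{
	/* hole in the middle of one VA: prev and next both survive */
	printf("%d\n", remnant_vmas(0x1000, 0x9000, 0x3000, 0x5000));	/* 2 */
	/* unmap covering the tail: only a prev remnant */
	printf("%d\n", remnant_vmas(0x1000, 0x9000, 0x5000, 0x9000));	/* 1 */
	return 0;
}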
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 88e821d67af7..9c8907bc61d9 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -314,17 +314,17 @@ static int radeon_pci_probe(struct pci_dev *pdev,
ret = pci_enable_device(pdev);
if (ret)
- goto err_free;
+ return ret;
pci_set_drvdata(pdev, ddev);
ret = radeon_driver_load_kms(ddev, flags);
if (ret)
- goto err_agp;
+ goto err;
ret = drm_dev_register(ddev, flags);
if (ret)
- goto err_agp;
+ goto err;
if (rdev->mc.real_vram_size <= (8 * 1024 * 1024))
format = drm_format_info(DRM_FORMAT_C8);
@@ -337,30 +337,14 @@ static int radeon_pci_probe(struct pci_dev *pdev,
return 0;
-err_agp:
+err:
pci_disable_device(pdev);
-err_free:
- drm_dev_put(ddev);
return ret;
}
static void
-radeon_pci_remove(struct pci_dev *pdev)
-{
- struct drm_device *dev = pci_get_drvdata(pdev);
-
- drm_put_dev(dev);
-}
-
-static void
radeon_pci_shutdown(struct pci_dev *pdev)
{
- /* if we are running in a VM, make sure the device
- * torn down properly on reboot/shutdown
- */
- if (radeon_device_is_virtual())
- radeon_pci_remove(pdev);
-
#if defined(CONFIG_PPC64) || defined(CONFIG_MACH_LOONGSON64)
/*
* Some adapters need to be suspended before a
@@ -613,7 +597,6 @@ static struct pci_driver radeon_kms_pci_driver = {
.name = DRIVER_NAME,
.id_table = pciidlist,
.probe = radeon_pci_probe,
- .remove = radeon_pci_remove,
.shutdown = radeon_pci_shutdown,
.driver.pm = &radeon_pm_ops,
};
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 645e33bf7947..ba1446acd703 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -84,7 +84,6 @@ void radeon_driver_unload_kms(struct drm_device *dev)
rdev->agp = NULL;
done_free:
- kfree(rdev);
dev->dev_private = NULL;
}
diff --git a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
index 7b613997bb50..727cdf768161 100644
--- a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
@@ -361,7 +361,7 @@ static void dw_hdmi_rk3228_setup_hpd(struct dw_hdmi *dw_hdmi, void *data)
regmap_write(hdmi->regmap, RK3228_GRF_SOC_CON2,
FIELD_PREP_WM16(RK3228_HDMI_SDAIN_MSK, 1) |
- FIELD_PREP_WM16(RK3328_HDMI_SCLIN_MSK, 1));
+ FIELD_PREP_WM16(RK3228_HDMI_SCLIN_MSK, 1));
}
static enum drm_connector_status
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index 5a4697f636f2..c8e949f4a568 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -70,6 +70,7 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
entity->guilty = guilty;
entity->num_sched_list = num_sched_list;
entity->priority = priority;
+ entity->last_user = current->group_leader;
/*
* It's perfectly valid to initialize an entity without having a valid
* scheduler attached. It's just not valid to use the scheduler before it
@@ -302,7 +303,7 @@ long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
/* For a killed process disallow further enqueueing of jobs. */
last_user = cmpxchg(&entity->last_user, current->group_leader, NULL);
- if ((!last_user || last_user == current->group_leader) &&
+ if (last_user == current->group_leader &&
(current->flags & PF_EXITING) && (current->exit_code == SIGKILL))
drm_sched_entity_kill(entity);
@@ -552,10 +553,11 @@ void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
drm_sched_rq_remove_entity(entity->rq, entity);
entity->rq = rq;
}
- spin_unlock(&entity->lock);
if (entity->num_sched_list == 1)
entity->sched_list = NULL;
+
+ spin_unlock(&entity->lock);
}
/**
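Seeding last_user at init only works because the flush path now demands an exact cmpxchg() match: the kill runs solely when the exiting task is still the recorded last submitter. A userspace analogue using the GCC/Clang __sync compare-and-swap builtin in place of the kernel's cmpxchg():

#include <stdio.h>

static long last_user;

/* Returns nonzero when the caller owned the entity and may tear it down. */
static int flush(long me)
{
	long prev = __sync_val_compare_and_swap(&last_user, me, 0);

	return prev == me;
}

int main(void)
{
	last_user = 42;			/* set at init time, as in the hunk */
	printf("%d\n", flush(7));	/* 0: someone else submitted last */
	printf("%d\n", flush(42));	/* 1: we were the last user */
	return 0;
}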
diff --git a/drivers/gpu/drm/xe/xe_ggtt.c b/drivers/gpu/drm/xe/xe_ggtt.c
index 7fdd0a97a628..5edc0cad47e2 100644
--- a/drivers/gpu/drm/xe/xe_ggtt.c
+++ b/drivers/gpu/drm/xe/xe_ggtt.c
@@ -292,6 +292,9 @@ int xe_ggtt_init_early(struct xe_ggtt *ggtt)
ggtt->pt_ops = &xelp_pt_ops;
ggtt->wq = alloc_workqueue("xe-ggtt-wq", 0, WQ_MEM_RECLAIM);
+ if (!ggtt->wq)
+ return -ENOMEM;
+
__xe_ggtt_init_early(ggtt, xe_wopcm_size(xe));
err = drmm_add_action_or_reset(&xe->drm, ggtt_fini_early, ggtt);
diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c
index 3e0ad7e5b5df..6d3db5e55d98 100644
--- a/drivers/gpu/drm/xe/xe_gt.c
+++ b/drivers/gpu/drm/xe/xe_gt.c
@@ -813,12 +813,16 @@ static int gt_reset(struct xe_gt *gt)
unsigned int fw_ref;
int err;
- if (xe_device_wedged(gt_to_xe(gt)))
- return -ECANCELED;
+ if (xe_device_wedged(gt_to_xe(gt))) {
+ err = -ECANCELED;
+ goto err_pm_put;
+ }
/* We only support GT resets with GuC submission */
- if (!xe_device_uc_enabled(gt_to_xe(gt)))
- return -ENODEV;
+ if (!xe_device_uc_enabled(gt_to_xe(gt))) {
+ err = -ENODEV;
+ goto err_pm_put;
+ }
xe_gt_info(gt, "reset started\n");
@@ -826,8 +830,6 @@ static int gt_reset(struct xe_gt *gt)
if (!err)
xe_gt_warn(gt, "reset block failed to get lifted");
- xe_pm_runtime_get(gt_to_xe(gt));
-
if (xe_fault_inject_gt_reset()) {
err = -ECANCELED;
goto err_fail;
@@ -874,6 +876,7 @@ err_fail:
xe_gt_err(gt, "reset failed (%pe)\n", ERR_PTR(err));
xe_device_declare_wedged(gt_to_xe(gt));
+err_pm_put:
xe_pm_runtime_put(gt_to_xe(gt));
return err;
@@ -895,7 +898,9 @@ void xe_gt_reset_async(struct xe_gt *gt)
return;
xe_gt_info(gt, "reset queued\n");
- queue_work(gt->ordered_wq, &gt->reset.worker);
+ xe_pm_runtime_get_noresume(gt_to_xe(gt));
+ if (!queue_work(gt->ordered_wq, &gt->reset.worker))
+ xe_pm_runtime_put(gt_to_xe(gt));
}
void xe_gt_suspend_prepare(struct xe_gt *gt)
diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c
index a1c88f9a6c76..07f96bda638a 100644
--- a/drivers/gpu/drm/xe/xe_pt.c
+++ b/drivers/gpu/drm/xe/xe_pt.c
@@ -2022,7 +2022,7 @@ static int op_prepare(struct xe_vm *vm,
case DRM_GPUVA_OP_MAP:
if ((!op->map.immediate && xe_vm_in_fault_mode(vm) &&
!op->map.invalidate_on_bind) ||
- op->map.is_cpu_addr_mirror)
+ (op->map.vma_flags & XE_VMA_SYSTEM_ALLOCATOR))
break;
err = bind_op_prepare(vm, tile, pt_update_ops, op->map.vma,
@@ -2252,7 +2252,7 @@ static void op_commit(struct xe_vm *vm,
switch (op->base.op) {
case DRM_GPUVA_OP_MAP:
if ((!op->map.immediate && xe_vm_in_fault_mode(vm)) ||
- op->map.is_cpu_addr_mirror)
+ (op->map.vma_flags & XE_VMA_SYSTEM_ALLOCATOR))
break;
bind_op_commit(vm, tile, pt_update_ops, op->map.vma, fence,
diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
index da2a412f80c0..129e7818565c 100644
--- a/drivers/gpu/drm/xe/xe_svm.c
+++ b/drivers/gpu/drm/xe/xe_svm.c
@@ -302,6 +302,11 @@ static int xe_svm_range_set_default_attr(struct xe_vm *vm, u64 range_start, u64
if (!vma)
return -EINVAL;
+ if (!(vma->gpuva.flags & XE_VMA_MADV_AUTORESET)) {
+ drm_dbg(&vm->xe->drm, "Skipping madvise reset for vma.\n");
+ return 0;
+ }
+
if (xe_vma_has_default_mem_attrs(vma))
return 0;
diff --git a/drivers/gpu/drm/xe/xe_validation.h b/drivers/gpu/drm/xe/xe_validation.h
index fec331d791e7..b2d09c596714 100644
--- a/drivers/gpu/drm/xe/xe_validation.h
+++ b/drivers/gpu/drm/xe/xe_validation.h
@@ -166,10 +166,10 @@ xe_validation_device_init(struct xe_validation_device *val)
*/
DEFINE_CLASS(xe_validation, struct xe_validation_ctx *,
if (_T) xe_validation_ctx_fini(_T);,
- ({_ret = xe_validation_ctx_init(_ctx, _val, _exec, _flags);
- _ret ? NULL : _ctx; }),
+ ({*_ret = xe_validation_ctx_init(_ctx, _val, _exec, _flags);
+ *_ret ? NULL : _ctx; }),
struct xe_validation_ctx *_ctx, struct xe_validation_device *_val,
- struct drm_exec *_exec, const struct xe_val_flags _flags, int _ret);
+ struct drm_exec *_exec, const struct xe_val_flags _flags, int *_ret);
static inline void *class_xe_validation_lock_ptr(class_xe_validation_t *_T)
{return *_T; }
#define class_xe_validation_is_conditional true
@@ -186,7 +186,7 @@ static inline void *class_xe_validation_lock_ptr(class_xe_validation_t *_T)
* exhaustive eviction.
*/
#define xe_validation_guard(_ctx, _val, _exec, _flags, _ret) \
- scoped_guard(xe_validation, _ctx, _val, _exec, _flags, _ret) \
+ scoped_guard(xe_validation, _ctx, _val, _exec, _flags, &_ret) \
drm_exec_until_all_locked(_exec)
#endif
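The switch from int _ret to int *_ret matters because the constructor expression runs with its own copy of a by-value parameter, so the error code it computes never reaches the variable the xe_validation_guard() caller tests. A minimal illustration of that out-parameter pitfall in plain C, outside the cleanup.h machinery:

#include <stdio.h>

static void ctor_by_value(int ret) { ret = -22; (void)ret; }
static void ctor_by_pointer(int *ret) { *ret = -22; }

int main(void)
{
	int ret = 0;

	ctor_by_value(ret);
	printf("by value:   %d\n", ret);	/* still 0: error lost */
	ctor_by_pointer(&ret);
	printf("by pointer: %d\n", ret);	/* -22 */
	return 0;
}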
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index f602b874e054..63c65e3d207b 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -616,6 +616,13 @@ static void xe_vma_ops_incr_pt_update_ops(struct xe_vma_ops *vops, u8 tile_mask,
vops->pt_update_ops[i].num_ops += inc_val;
}
+#define XE_VMA_CREATE_MASK ( \
+ XE_VMA_READ_ONLY | \
+ XE_VMA_DUMPABLE | \
+ XE_VMA_SYSTEM_ALLOCATOR | \
+ DRM_GPUVA_SPARSE | \
+ XE_VMA_MADV_AUTORESET)
+
static void xe_vm_populate_rebind(struct xe_vma_op *op, struct xe_vma *vma,
u8 tile_mask)
{
@@ -628,8 +635,7 @@ static void xe_vm_populate_rebind(struct xe_vma_op *op, struct xe_vma *vma,
op->base.map.gem.offset = vma->gpuva.gem.offset;
op->map.vma = vma;
op->map.immediate = true;
- op->map.dumpable = vma->gpuva.flags & XE_VMA_DUMPABLE;
- op->map.is_null = xe_vma_is_null(vma);
+ op->map.vma_flags = vma->gpuva.flags & XE_VMA_CREATE_MASK;
}
static int xe_vm_ops_add_rebind(struct xe_vma_ops *vops, struct xe_vma *vma,
@@ -932,11 +938,6 @@ static void xe_vma_free(struct xe_vma *vma)
kfree(vma);
}
-#define VMA_CREATE_FLAG_READ_ONLY BIT(0)
-#define VMA_CREATE_FLAG_IS_NULL BIT(1)
-#define VMA_CREATE_FLAG_DUMPABLE BIT(2)
-#define VMA_CREATE_FLAG_IS_SYSTEM_ALLOCATOR BIT(3)
-
static struct xe_vma *xe_vma_create(struct xe_vm *vm,
struct xe_bo *bo,
u64 bo_offset_or_userptr,
@@ -947,11 +948,8 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
struct xe_vma *vma;
struct xe_tile *tile;
u8 id;
- bool read_only = (flags & VMA_CREATE_FLAG_READ_ONLY);
- bool is_null = (flags & VMA_CREATE_FLAG_IS_NULL);
- bool dumpable = (flags & VMA_CREATE_FLAG_DUMPABLE);
- bool is_cpu_addr_mirror =
- (flags & VMA_CREATE_FLAG_IS_SYSTEM_ALLOCATOR);
+ bool is_null = (flags & DRM_GPUVA_SPARSE);
+ bool is_cpu_addr_mirror = (flags & XE_VMA_SYSTEM_ALLOCATOR);
xe_assert(vm->xe, start < end);
xe_assert(vm->xe, end < vm->size);
@@ -972,10 +970,6 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
if (!vma)
return ERR_PTR(-ENOMEM);
- if (is_cpu_addr_mirror)
- vma->gpuva.flags |= XE_VMA_SYSTEM_ALLOCATOR;
- if (is_null)
- vma->gpuva.flags |= DRM_GPUVA_SPARSE;
if (bo)
vma->gpuva.gem.obj = &bo->ttm.base;
}
@@ -986,10 +980,7 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
vma->gpuva.vm = &vm->gpuvm;
vma->gpuva.va.addr = start;
vma->gpuva.va.range = end - start + 1;
- if (read_only)
- vma->gpuva.flags |= XE_VMA_READ_ONLY;
- if (dumpable)
- vma->gpuva.flags |= XE_VMA_DUMPABLE;
+ vma->gpuva.flags = flags;
for_each_tile(tile, vm->xe, id)
vma->tile_mask |= 0x1 << id;
@@ -2272,12 +2263,16 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_vma_ops *vops,
if (__op->op == DRM_GPUVA_OP_MAP) {
op->map.immediate =
flags & DRM_XE_VM_BIND_FLAG_IMMEDIATE;
- op->map.read_only =
- flags & DRM_XE_VM_BIND_FLAG_READONLY;
- op->map.is_null = flags & DRM_XE_VM_BIND_FLAG_NULL;
- op->map.is_cpu_addr_mirror = flags &
- DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR;
- op->map.dumpable = flags & DRM_XE_VM_BIND_FLAG_DUMPABLE;
+ if (flags & DRM_XE_VM_BIND_FLAG_READONLY)
+ op->map.vma_flags |= XE_VMA_READ_ONLY;
+ if (flags & DRM_XE_VM_BIND_FLAG_NULL)
+ op->map.vma_flags |= DRM_GPUVA_SPARSE;
+ if (flags & DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR)
+ op->map.vma_flags |= XE_VMA_SYSTEM_ALLOCATOR;
+ if (flags & DRM_XE_VM_BIND_FLAG_DUMPABLE)
+ op->map.vma_flags |= XE_VMA_DUMPABLE;
+ if (flags & DRM_XE_VM_BIND_FLAG_MADVISE_AUTORESET)
+ op->map.vma_flags |= XE_VMA_MADV_AUTORESET;
op->map.pat_index = pat_index;
op->map.invalidate_on_bind =
__xe_vm_needs_clear_scratch_pages(vm, flags);
@@ -2590,14 +2585,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
.pat_index = op->map.pat_index,
};
- flags |= op->map.read_only ?
- VMA_CREATE_FLAG_READ_ONLY : 0;
- flags |= op->map.is_null ?
- VMA_CREATE_FLAG_IS_NULL : 0;
- flags |= op->map.dumpable ?
- VMA_CREATE_FLAG_DUMPABLE : 0;
- flags |= op->map.is_cpu_addr_mirror ?
- VMA_CREATE_FLAG_IS_SYSTEM_ALLOCATOR : 0;
+ flags |= op->map.vma_flags & XE_VMA_CREATE_MASK;
vma = new_vma(vm, &op->base.map, &default_attr,
flags);
@@ -2606,7 +2594,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
op->map.vma = vma;
if (((op->map.immediate || !xe_vm_in_fault_mode(vm)) &&
- !op->map.is_cpu_addr_mirror) ||
+ !(op->map.vma_flags & XE_VMA_SYSTEM_ALLOCATOR)) ||
op->map.invalidate_on_bind)
xe_vma_ops_incr_pt_update_ops(vops,
op->tile_mask, 1);
@@ -2637,18 +2625,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
op->remap.start = xe_vma_start(old);
op->remap.range = xe_vma_size(old);
- flags |= op->base.remap.unmap->va->flags &
- XE_VMA_READ_ONLY ?
- VMA_CREATE_FLAG_READ_ONLY : 0;
- flags |= op->base.remap.unmap->va->flags &
- DRM_GPUVA_SPARSE ?
- VMA_CREATE_FLAG_IS_NULL : 0;
- flags |= op->base.remap.unmap->va->flags &
- XE_VMA_DUMPABLE ?
- VMA_CREATE_FLAG_DUMPABLE : 0;
- flags |= xe_vma_is_cpu_addr_mirror(old) ?
- VMA_CREATE_FLAG_IS_SYSTEM_ALLOCATOR : 0;
-
+ flags |= op->base.remap.unmap->va->flags & XE_VMA_CREATE_MASK;
if (op->base.remap.prev) {
vma = new_vma(vm, op->base.remap.prev,
&old->attr, flags);
@@ -3279,7 +3256,8 @@ ALLOW_ERROR_INJECTION(vm_bind_ioctl_ops_execute, ERRNO);
DRM_XE_VM_BIND_FLAG_NULL | \
DRM_XE_VM_BIND_FLAG_DUMPABLE | \
DRM_XE_VM_BIND_FLAG_CHECK_PXP | \
- DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR)
+ DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR | \
+ DRM_XE_VM_BIND_FLAG_MADVISE_AUTORESET)
#ifdef TEST_VM_OPS_ERROR
#define SUPPORTED_FLAGS (SUPPORTED_FLAGS_STUB | FORCE_OP_ERROR)
@@ -3394,7 +3372,9 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe, struct xe_vm *vm,
XE_IOCTL_DBG(xe, (prefetch_region != DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC &&
!(BIT(prefetch_region) & xe->info.mem_region_mask))) ||
XE_IOCTL_DBG(xe, obj &&
- op == DRM_XE_VM_BIND_OP_UNMAP)) {
+ op == DRM_XE_VM_BIND_OP_UNMAP) ||
+ XE_IOCTL_DBG(xe, (flags & DRM_XE_VM_BIND_FLAG_MADVISE_AUTORESET) &&
+ (!is_cpu_addr_mirror || op != DRM_XE_VM_BIND_OP_MAP))) {
err = -EINVAL;
goto free_bind_ops;
}
@@ -4212,7 +4192,7 @@ static int xe_vm_alloc_vma(struct xe_vm *vm,
struct xe_vma_ops vops;
struct drm_gpuva_ops *ops = NULL;
struct drm_gpuva_op *__op;
- bool is_cpu_addr_mirror = false;
+ unsigned int vma_flags = 0;
bool remap_op = false;
struct xe_vma_mem_attr tmp_attr;
u16 default_pat;
@@ -4242,15 +4222,17 @@ static int xe_vm_alloc_vma(struct xe_vm *vm,
vma = gpuva_to_vma(op->base.unmap.va);
XE_WARN_ON(!xe_vma_has_default_mem_attrs(vma));
default_pat = vma->attr.default_pat_index;
+ vma_flags = vma->gpuva.flags;
}
if (__op->op == DRM_GPUVA_OP_REMAP) {
vma = gpuva_to_vma(op->base.remap.unmap->va);
default_pat = vma->attr.default_pat_index;
+ vma_flags = vma->gpuva.flags;
}
if (__op->op == DRM_GPUVA_OP_MAP) {
- op->map.is_cpu_addr_mirror = true;
+ op->map.vma_flags |= vma_flags & XE_VMA_CREATE_MASK;
op->map.pat_index = default_pat;
}
} else {
@@ -4259,11 +4241,7 @@ static int xe_vm_alloc_vma(struct xe_vm *vm,
xe_assert(vm->xe, !remap_op);
xe_assert(vm->xe, xe_vma_has_no_bo(vma));
remap_op = true;
-
- if (xe_vma_is_cpu_addr_mirror(vma))
- is_cpu_addr_mirror = true;
- else
- is_cpu_addr_mirror = false;
+ vma_flags = vma->gpuva.flags;
}
if (__op->op == DRM_GPUVA_OP_MAP) {
@@ -4272,10 +4250,10 @@ static int xe_vm_alloc_vma(struct xe_vm *vm,
/*
* In case of madvise ops DRM_GPUVA_OP_MAP is
* always after DRM_GPUVA_OP_REMAP, so ensure
- * we assign op->map.is_cpu_addr_mirror true
- * if REMAP is for xe_vma_is_cpu_addr_mirror vma
+ * we propagate the flags from the vma being
+ * unmapped.
*/
- op->map.is_cpu_addr_mirror = is_cpu_addr_mirror;
+ op->map.vma_flags |= vma_flags & XE_VMA_CREATE_MASK;
}
}
print_op(vm->xe, __op);
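Folding the separate booleans into one vma_flags word turns the uapi translation into plain bit ORs and makes propagation a single mask with XE_VMA_CREATE_MASK, as the hunks above do. A hedged sketch of that pattern; the bit values here are invented, not the driver's:

#include <stdio.h>

#define UAPI_READONLY	(1u << 0)
#define UAPI_DUMPABLE	(1u << 1)

#define VMA_READ_ONLY	(1u << 4)
#define VMA_DUMPABLE	(1u << 5)
#define VMA_INTERNAL	(1u << 6)	/* must never leak onto a new VMA */
#define VMA_CREATE_MASK	(VMA_READ_ONLY | VMA_DUMPABLE)

static unsigned int translate(unsigned int uapi)
{
	unsigned int f = 0;

	if (uapi & UAPI_READONLY)
		f |= VMA_READ_ONLY;
	if (uapi & UAPI_DUMPABLE)
		f |= VMA_DUMPABLE;
	return f;
}

int main(void)
{
	unsigned int old = VMA_READ_ONLY | VMA_INTERNAL;

	printf("%#x\n", translate(UAPI_READONLY | UAPI_DUMPABLE)); /* 0x30 */
	/* a remap inherits only the creation flags */
	printf("%#x\n", old & VMA_CREATE_MASK);			   /* 0x10 */
	return 0;
}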
diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h
index 413353e1c225..d6e2a0fdd4b3 100644
--- a/drivers/gpu/drm/xe/xe_vm_types.h
+++ b/drivers/gpu/drm/xe/xe_vm_types.h
@@ -46,6 +46,7 @@ struct xe_vm_pgtable_update_op;
#define XE_VMA_PTE_COMPACT (DRM_GPUVA_USERBITS << 7)
#define XE_VMA_DUMPABLE (DRM_GPUVA_USERBITS << 8)
#define XE_VMA_SYSTEM_ALLOCATOR (DRM_GPUVA_USERBITS << 9)
+#define XE_VMA_MADV_AUTORESET (DRM_GPUVA_USERBITS << 10)
/**
* struct xe_vma_mem_attr - memory attributes associated with vma
@@ -345,17 +346,10 @@ struct xe_vm {
struct xe_vma_op_map {
/** @vma: VMA to map */
struct xe_vma *vma;
+ /** @vma_flags: XE_VMA_* / DRM_GPUVA_* flags to set on the VMA */
+ unsigned int vma_flags;
/** @immediate: Immediate bind */
bool immediate;
/** @read_only: Read only */
- bool read_only;
- /** @is_null: is NULL binding */
- bool is_null;
- /** @is_cpu_addr_mirror: is CPU address mirror binding */
- bool is_cpu_addr_mirror;
- /** @dumpable: whether BO is dumped on GPU hang */
- bool dumpable;
- /** @invalidate: invalidate the VMA before bind */
bool invalidate_on_bind;
/** @pat_index: The pat index to use for this operation. */
u16 pat_index;
diff --git a/drivers/hwmon/cgbc-hwmon.c b/drivers/hwmon/cgbc-hwmon.c
index 772f44d56ccf..3aff4e092132 100644
--- a/drivers/hwmon/cgbc-hwmon.c
+++ b/drivers/hwmon/cgbc-hwmon.c
@@ -107,6 +107,9 @@ static int cgbc_hwmon_probe_sensors(struct device *dev, struct cgbc_hwmon_data *
nb_sensors = data[0];
hwmon->sensors = devm_kzalloc(dev, sizeof(*hwmon->sensors) * nb_sensors, GFP_KERNEL);
+ if (!hwmon->sensors)
+ return -ENOMEM;
+
sensor = hwmon->sensors;
for (i = 0; i < nb_sensors; i++) {
diff --git a/drivers/hwmon/gpd-fan.c b/drivers/hwmon/gpd-fan.c
index 644dc3ca9df7..321794807e8d 100644
--- a/drivers/hwmon/gpd-fan.c
+++ b/drivers/hwmon/gpd-fan.c
@@ -615,14 +615,14 @@ static int gpd_fan_probe(struct platform_device *pdev)
const struct device *hwdev;
res = platform_get_resource(pdev, IORESOURCE_IO, 0);
- if (IS_ERR(res))
- return dev_err_probe(dev, PTR_ERR(res),
+ if (!res)
+ return dev_err_probe(dev, -EINVAL,
"Failed to get platform resource\n");
region = devm_request_region(dev, res->start,
resource_size(res), DRIVER_NAME);
- if (IS_ERR(region))
- return dev_err_probe(dev, PTR_ERR(region),
+ if (!region)
+ return dev_err_probe(dev, -EBUSY,
"Failed to request region\n");
hwdev = devm_hwmon_device_register_with_info(dev,
@@ -631,7 +631,7 @@ static int gpd_fan_probe(struct platform_device *pdev)
&gpd_fan_chip_info,
NULL);
if (IS_ERR(hwdev))
- return dev_err_probe(dev, PTR_ERR(region),
+ return dev_err_probe(dev, PTR_ERR(hwdev),
"Failed to register hwmon device\n");
return 0;
diff --git a/drivers/hwmon/pmbus/isl68137.c b/drivers/hwmon/pmbus/isl68137.c
index 52cf62e45a86..6bba9b50c51b 100644
--- a/drivers/hwmon/pmbus/isl68137.c
+++ b/drivers/hwmon/pmbus/isl68137.c
@@ -336,10 +336,9 @@ static int isl68137_probe_from_dt(struct device *dev,
struct isl68137_data *data)
{
const struct device_node *np = dev->of_node;
- struct device_node *child;
int err;
- for_each_child_of_node(np, child) {
+ for_each_child_of_node_scoped(np, child) {
if (strcmp(child->name, "channel"))
continue;
diff --git a/drivers/hwmon/pmbus/max34440.c b/drivers/hwmon/pmbus/max34440.c
index 56834d26f8ef..ef981ed97da8 100644
--- a/drivers/hwmon/pmbus/max34440.c
+++ b/drivers/hwmon/pmbus/max34440.c
@@ -336,18 +336,18 @@ static struct pmbus_driver_info max34440_info[] = {
.format[PSC_CURRENT_IN] = direct,
.format[PSC_CURRENT_OUT] = direct,
.format[PSC_TEMPERATURE] = direct,
- .m[PSC_VOLTAGE_IN] = 1,
+ .m[PSC_VOLTAGE_IN] = 125,
.b[PSC_VOLTAGE_IN] = 0,
.R[PSC_VOLTAGE_IN] = 0,
- .m[PSC_VOLTAGE_OUT] = 1,
+ .m[PSC_VOLTAGE_OUT] = 125,
.b[PSC_VOLTAGE_OUT] = 0,
.R[PSC_VOLTAGE_OUT] = 0,
- .m[PSC_CURRENT_IN] = 1,
+ .m[PSC_CURRENT_IN] = 250,
.b[PSC_CURRENT_IN] = 0,
- .R[PSC_CURRENT_IN] = 2,
- .m[PSC_CURRENT_OUT] = 1,
+ .R[PSC_CURRENT_IN] = -1,
+ .m[PSC_CURRENT_OUT] = 250,
.b[PSC_CURRENT_OUT] = 0,
- .R[PSC_CURRENT_OUT] = 2,
+ .R[PSC_CURRENT_OUT] = -1,
.m[PSC_TEMPERATURE] = 1,
.b[PSC_TEMPERATURE] = 0,
.R[PSC_TEMPERATURE] = 2,
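PMBus direct-format readings decode as X = (Y * 10^-R - b) / m, so this coefficient change rescales every value the driver reports. A quick check of old versus new coefficients on one raw current sample; the raw value is made up (build with -lm):

#include <math.h>
#include <stdio.h>

static double pmbus_direct(int y, int m, int b, int r)
{
	return (y * pow(10, -r) - b) / m;
}

int main(void)
{
	int raw = 1500;

	printf("old (m=1,   b=0, R=2):  %g\n", pmbus_direct(raw, 1, 0, 2));	/* 15 */
	printf("new (m=250, b=0, R=-1): %g\n", pmbus_direct(raw, 250, 0, -1));	/* 60 */
	return 0;
}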
diff --git a/drivers/hwmon/sht3x.c b/drivers/hwmon/sht3x.c
index 557ad3e7752a..f36c0229328f 100644
--- a/drivers/hwmon/sht3x.c
+++ b/drivers/hwmon/sht3x.c
@@ -291,24 +291,26 @@ out:
return data;
}
-static int temp1_input_read(struct device *dev)
+static int temp1_input_read(struct device *dev, long *temp)
{
struct sht3x_data *data = sht3x_update_client(dev);
if (IS_ERR(data))
return PTR_ERR(data);
- return data->temperature;
+ *temp = data->temperature;
+ return 0;
}
-static int humidity1_input_read(struct device *dev)
+static int humidity1_input_read(struct device *dev, long *humidity)
{
struct sht3x_data *data = sht3x_update_client(dev);
if (IS_ERR(data))
return PTR_ERR(data);
- return data->humidity;
+ *humidity = data->humidity;
+ return 0;
}
/*
@@ -706,6 +708,7 @@ static int sht3x_read(struct device *dev, enum hwmon_sensor_types type,
u32 attr, int channel, long *val)
{
enum sht3x_limits index;
+ int ret;
switch (type) {
case hwmon_chip:
@@ -720,10 +723,12 @@ static int sht3x_read(struct device *dev, enum hwmon_sensor_types type,
case hwmon_temp:
switch (attr) {
case hwmon_temp_input:
- *val = temp1_input_read(dev);
- break;
+ return temp1_input_read(dev, val);
case hwmon_temp_alarm:
- *val = temp1_alarm_read(dev);
+ ret = temp1_alarm_read(dev);
+ if (ret < 0)
+ return ret;
+ *val = ret;
break;
case hwmon_temp_max:
index = limit_max;
@@ -748,10 +753,12 @@ static int sht3x_read(struct device *dev, enum hwmon_sensor_types type,
case hwmon_humidity:
switch (attr) {
case hwmon_humidity_input:
- *val = humidity1_input_read(dev);
- break;
+ return humidity1_input_read(dev, val);
case hwmon_humidity_alarm:
- *val = humidity1_alarm_read(dev);
+ ret = humidity1_alarm_read(dev);
+ if (ret < 0)
+ return ret;
+ *val = ret;
break;
case hwmon_humidity_max:
index = limit_max;
diff --git a/drivers/misc/amd-sbi/Kconfig b/drivers/misc/amd-sbi/Kconfig
index 4aae0733d0fc..ab594908cb4a 100644
--- a/drivers/misc/amd-sbi/Kconfig
+++ b/drivers/misc/amd-sbi/Kconfig
@@ -2,9 +2,11 @@
config AMD_SBRMI_I2C
tristate "AMD side band RMI support"
depends on I2C
+ depends on ARM || ARM64 || COMPILE_TEST
select REGMAP_I2C
help
Side band RMI over I2C support for AMD out of band management.
+ This driver is intended to run on the BMC, not the managed node.
This driver can also be built as a module. If so, the module will
be called sbrmi-i2c.
diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
index 621bce7e101c..ee652ef01534 100644
--- a/drivers/misc/fastrpc.c
+++ b/drivers/misc/fastrpc.c
@@ -381,6 +381,8 @@ static int fastrpc_map_lookup(struct fastrpc_user *fl, int fd,
}
spin_unlock(&fl->lock);
+ dma_buf_put(buf);
+
return ret;
}
diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
index bc40b940ae21..a4f75dc36929 100644
--- a/drivers/misc/mei/hw-me-regs.h
+++ b/drivers/misc/mei/hw-me-regs.h
@@ -120,6 +120,8 @@
#define MEI_DEV_ID_PTL_H 0xE370 /* Panther Lake H */
#define MEI_DEV_ID_PTL_P 0xE470 /* Panther Lake P */
+#define MEI_DEV_ID_WCL_P 0x4D70 /* Wildcat Lake P */
+
/*
* MEI HW Section
*/
diff --git a/drivers/misc/mei/mei_lb.c b/drivers/misc/mei/mei_lb.c
index 77686b108d3c..78717ee8ac9a 100644
--- a/drivers/misc/mei/mei_lb.c
+++ b/drivers/misc/mei/mei_lb.c
@@ -134,8 +134,7 @@ static bool mei_lb_check_response(const struct device *dev, ssize_t bytes,
return true;
}
-static int mei_lb_push_payload(struct device *dev,
- enum intel_lb_type type, u32 flags,
+static int mei_lb_push_payload(struct device *dev, u32 type, u32 flags,
const void *payload, size_t payload_size)
{
struct mei_cl_device *cldev;
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index b108a7c22388..b017ff29dbd1 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -127,6 +127,8 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
{MEI_PCI_DEVICE(MEI_DEV_ID_PTL_H, MEI_ME_PCH15_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_PTL_P, MEI_ME_PCH15_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_WCL_P, MEI_ME_PCH15_CFG)},
+
/* required last entry */
{0, }
};
diff --git a/drivers/misc/mei/pci-txe.c b/drivers/misc/mei/pci-txe.c
index c9eb5c5393e4..06b55a891c6b 100644
--- a/drivers/misc/mei/pci-txe.c
+++ b/drivers/misc/mei/pci-txe.c
@@ -109,19 +109,19 @@ static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto end;
}
+ err = mei_register(dev, &pdev->dev);
+ if (err)
+ goto release_irq;
+
if (mei_start(dev)) {
dev_err(&pdev->dev, "init hw failure.\n");
err = -ENODEV;
- goto release_irq;
+ goto deregister;
}
pm_runtime_set_autosuspend_delay(&pdev->dev, MEI_TXI_RPM_TIMEOUT);
pm_runtime_use_autosuspend(&pdev->dev);
- err = mei_register(dev, &pdev->dev);
- if (err)
- goto stop;
-
pci_set_drvdata(pdev, dev);
/*
@@ -144,8 +144,8 @@ static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
-stop:
- mei_stop(dev);
+deregister:
+ mei_deregister(dev);
release_irq:
mei_cancel_work(dev);
mei_disable_interrupts(dev);
diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
index 6df51ee8db62..cc1d18b3df5c 100644
--- a/drivers/misc/vmw_balloon.c
+++ b/drivers/misc/vmw_balloon.c
@@ -1737,7 +1737,7 @@ static int vmballoon_migratepage(struct balloon_dev_info *b_dev_info,
{
unsigned long status, flags;
struct vmballoon *b;
- int ret;
+ int ret = 0;
b = container_of(b_dev_info, struct vmballoon, b_dev_info);
@@ -1796,17 +1796,15 @@ static int vmballoon_migratepage(struct balloon_dev_info *b_dev_info,
* A failure happened. While we can deflate the page we just
* inflated, this deflation can also encounter an error. Instead
* we will decrease the size of the balloon to reflect the
- * change and report failure.
+ * change.
*/
atomic64_dec(&b->size);
- ret = -EBUSY;
} else {
/*
* Success. Take a reference for the page, and we will add it to
* the list after acquiring the lock.
*/
get_page(newpage);
- ret = 0;
}
/* Update the balloon list under the @pages_lock */
@@ -1817,7 +1815,7 @@ static int vmballoon_migratepage(struct balloon_dev_info *b_dev_info,
* If we succeed just insert it to the list and update the statistics
* under the lock.
*/
- if (!ret) {
+ if (status == VMW_BALLOON_SUCCESS) {
balloon_page_insert(&b->b_dev_info, newpage);
__count_vm_event(BALLOON_MIGRATE);
}
diff --git a/drivers/most/most_usb.c b/drivers/most/most_usb.c
index cf5be9c449a5..10064d7b7249 100644
--- a/drivers/most/most_usb.c
+++ b/drivers/most/most_usb.c
@@ -929,6 +929,10 @@ static void release_mdev(struct device *dev)
{
struct most_dev *mdev = to_mdev_from_dev(dev);
+ kfree(mdev->busy_urbs);
+ kfree(mdev->cap);
+ kfree(mdev->conf);
+ kfree(mdev->ep_address);
kfree(mdev);
}
/**
@@ -1093,7 +1097,7 @@ err_free_cap:
err_free_conf:
kfree(mdev->conf);
err_free_mdev:
- put_device(&mdev->dev);
+ kfree(mdev);
return ret;
}
@@ -1121,13 +1125,6 @@ static void hdm_disconnect(struct usb_interface *interface)
if (mdev->dci)
device_unregister(&mdev->dci->dev);
most_deregister_interface(&mdev->iface);
-
- kfree(mdev->busy_urbs);
- kfree(mdev->cap);
- kfree(mdev->conf);
- kfree(mdev->ep_address);
- put_device(&mdev->dci->dev);
- put_device(&mdev->dev);
}
static int hdm_suspend(struct usb_interface *interface, pm_message_t message)
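With the buffer frees moved into release_mdev() and err_free_mdev switched to a bare kfree(), the probe path appears to follow the usual refcount split: before the embedded device is initialized an error path frees directly, afterwards it may only drop the reference and let release clean up. A userspace sketch of that ownership split, with a counter and free() standing in for struct device and kfree():

#include <stdlib.h>

struct mdev {
	int refs;		/* stands in for the device refcount */
	void *conf;		/* buffer owned by the object */
};

static void release_mdev(struct mdev *m)
{
	free(m->conf);		/* release frees everything it owns */
	free(m);
}

static void mdev_put(struct mdev *m)
{
	if (--m->refs == 0)
		release_mdev(m);
}

int main(void)
{
	struct mdev *m = calloc(1, sizeof(*m));

	if (!m)
		return 1;
	free(m);		/* error before refcount init: plain free */

	m = calloc(1, sizeof(*m));
	if (!m)
		return 1;
	m->refs = 1;		/* "device_initialize()" */
	m->conf = malloc(16);
	mdev_put(m);		/* drops the last ref; release frees all */
	return 0;
}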
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 4da619210c1f..e95e593cd12d 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -2287,7 +2287,9 @@ skip_mac_set:
unblock_netpoll_tx();
}
- if (bond_mode_can_use_xmit_hash(bond))
+ /* broadcast mode uses the all_slaves array to loop through slaves. */
+ if (bond_mode_can_use_xmit_hash(bond) ||
+ BOND_MODE(bond) == BOND_MODE_BROADCAST)
bond_update_slave_arr(bond, NULL);
if (!slave_dev->netdev_ops->ndo_bpf ||
@@ -2463,7 +2465,8 @@ static int __bond_release_one(struct net_device *bond_dev,
bond_upper_dev_unlink(bond, slave);
- if (bond_mode_can_use_xmit_hash(bond))
+ if (bond_mode_can_use_xmit_hash(bond) ||
+ BOND_MODE(bond) == BOND_MODE_BROADCAST)
bond_update_slave_arr(bond, slave);
slave_info(bond_dev, slave_dev, "Releasing %s interface\n",
@@ -2871,7 +2874,7 @@ static void bond_mii_monitor(struct work_struct *work)
{
struct bonding *bond = container_of(work, struct bonding,
mii_work.work);
- bool should_notify_peers = false;
+ bool should_notify_peers;
bool commit;
unsigned long delay;
struct slave *slave;
@@ -2883,30 +2886,33 @@ static void bond_mii_monitor(struct work_struct *work)
goto re_arm;
rcu_read_lock();
+
should_notify_peers = bond_should_notify_peers(bond);
commit = !!bond_miimon_inspect(bond);
- if (bond->send_peer_notif) {
- rcu_read_unlock();
- if (rtnl_trylock()) {
- bond->send_peer_notif--;
- rtnl_unlock();
- }
- } else {
- rcu_read_unlock();
- }
- if (commit) {
+ rcu_read_unlock();
+
+ if (commit || bond->send_peer_notif) {
/* Race avoidance with bond_close cancel of workqueue */
if (!rtnl_trylock()) {
delay = 1;
- should_notify_peers = false;
goto re_arm;
}
- bond_for_each_slave(bond, slave, iter) {
- bond_commit_link_state(slave, BOND_SLAVE_NOTIFY_LATER);
+ if (commit) {
+ bond_for_each_slave(bond, slave, iter) {
+ bond_commit_link_state(slave,
+ BOND_SLAVE_NOTIFY_LATER);
+ }
+ bond_miimon_commit(bond);
+ }
+
+ if (bond->send_peer_notif) {
+ bond->send_peer_notif--;
+ if (should_notify_peers)
+ call_netdevice_notifiers(NETDEV_NOTIFY_PEERS,
+ bond->dev);
}
- bond_miimon_commit(bond);
rtnl_unlock(); /* might sleep, hold no other locks */
}
@@ -2914,13 +2920,6 @@ static void bond_mii_monitor(struct work_struct *work)
re_arm:
if (bond->params.miimon)
queue_delayed_work(bond->wq, &bond->mii_work, delay);
-
- if (should_notify_peers) {
- if (!rtnl_trylock())
- return;
- call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, bond->dev);
- rtnl_unlock();
- }
}
static int bond_upper_dev_walk(struct net_device *upper,
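The rework funnels both the link-state commit and the peer notification through a single rtnl_trylock() attempt and re-arms the work with a short delay when the lock is contended, rather than a second trylock at the tail that could silently drop a pending notification. A loose userspace analogue of that trylock-or-re-arm shape; pthreads stand in for RTNL and the flow is heavily simplified:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;

static void requeue(int delay) { printf("re-arm in %d tick(s)\n", delay); }

static void monitor(bool commit, int *pending_notif)
{
	if (commit || *pending_notif) {
		if (pthread_mutex_trylock(&big_lock) != 0) {
			requeue(1);	/* contended: retry soon, drop nothing */
			return;
		}
		if (commit)
			printf("commit link states\n");
		if (*pending_notif > 0) {
			(*pending_notif)--;
			printf("notify peers\n");
		}
		pthread_mutex_unlock(&big_lock);
	}
	requeue(10);			/* normal periodic re-arm */
}

int main(void)
{
	int notif = 1;

	monitor(true, &notif);
	return 0;
}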
diff --git a/drivers/net/can/bxcan.c b/drivers/net/can/bxcan.c
index bfc60eb33dc3..333ad42ea73b 100644
--- a/drivers/net/can/bxcan.c
+++ b/drivers/net/can/bxcan.c
@@ -842,7 +842,7 @@ static netdev_tx_t bxcan_start_xmit(struct sk_buff *skb,
u32 id;
int i, j;
- if (can_dropped_invalid_skb(ndev, skb))
+ if (can_dev_dropped_skb(ndev, skb))
return NETDEV_TX_OK;
if (bxcan_tx_busy(priv))
diff --git a/drivers/net/can/dev/netlink.c b/drivers/net/can/dev/netlink.c
index 0591406b6f32..6f83b87d54fc 100644
--- a/drivers/net/can/dev/netlink.c
+++ b/drivers/net/can/dev/netlink.c
@@ -452,7 +452,9 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[],
}
if (data[IFLA_CAN_RESTART_MS]) {
- if (!priv->do_set_mode) {
+ unsigned int restart_ms = nla_get_u32(data[IFLA_CAN_RESTART_MS]);
+
+ if (restart_ms != 0 && !priv->do_set_mode) {
NL_SET_ERR_MSG(extack,
"Device doesn't support restart from Bus Off");
return -EOPNOTSUPP;
@@ -461,7 +463,7 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[],
/* Do not allow changing restart delay while running */
if (dev->flags & IFF_UP)
return -EBUSY;
- priv->restart_ms = nla_get_u32(data[IFLA_CAN_RESTART_MS]);
+ priv->restart_ms = restart_ms;
}
if (data[IFLA_CAN_RESTART]) {
diff --git a/drivers/net/can/esd/esdacc.c b/drivers/net/can/esd/esdacc.c
index c80032bc1a52..73e66f9a3781 100644
--- a/drivers/net/can/esd/esdacc.c
+++ b/drivers/net/can/esd/esdacc.c
@@ -254,7 +254,7 @@ netdev_tx_t acc_start_xmit(struct sk_buff *skb, struct net_device *netdev)
u32 acc_id;
u32 acc_dlc;
- if (can_dropped_invalid_skb(netdev, skb))
+ if (can_dev_dropped_skb(netdev, skb))
return NETDEV_TX_OK;
/* Access core->tx_fifo_tail only once because it may be changed
diff --git a/drivers/net/can/rockchip/rockchip_canfd-tx.c b/drivers/net/can/rockchip/rockchip_canfd-tx.c
index 865a15e033a9..12200dcfd338 100644
--- a/drivers/net/can/rockchip/rockchip_canfd-tx.c
+++ b/drivers/net/can/rockchip/rockchip_canfd-tx.c
@@ -72,7 +72,7 @@ netdev_tx_t rkcanfd_start_xmit(struct sk_buff *skb, struct net_device *ndev)
int err;
u8 i;
- if (can_dropped_invalid_skb(ndev, skb))
+ if (can_dev_dropped_skb(ndev, skb))
return NETDEV_TX_OK;
if (!netif_subqueue_maybe_stop(priv->ndev, 0,
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c b/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
index ecd9a0bd5e18..49b57bb5fac1 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
@@ -290,9 +290,15 @@ static int ch_ipsec_xfrm_add_state(struct net_device *dev,
return -EINVAL;
}
+ if (unlikely(!try_module_get(THIS_MODULE))) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to acquire module reference");
+ return -ENODEV;
+ }
+
sa_entry = kzalloc(sizeof(*sa_entry), GFP_KERNEL);
if (!sa_entry) {
res = -ENOMEM;
+ module_put(THIS_MODULE);
goto out;
}
@@ -301,7 +307,6 @@ static int ch_ipsec_xfrm_add_state(struct net_device *dev,
sa_entry->esn = 1;
ch_ipsec_setkey(x, sa_entry);
x->xso.offload_handle = (unsigned long)sa_entry;
- try_module_get(THIS_MODULE);
out:
return res;
}
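The fix pins the module before anything is allocated, so each failure path can release exactly the reference it holds; the old flow took the reference only at the end, leaving the kzalloc() failure path with nothing to balance. A sketch of take-the-pin-first error handling, with userspace stand-ins for try_module_get() and module_put():

#include <errno.h>
#include <stdbool.h>
#include <stdlib.h>

static int module_refs;

static bool try_module_get_(void) { module_refs++; return true; }
static void module_put_(void)     { module_refs--; }

static int add_state(void **out)
{
	void *entry;

	if (!try_module_get_())
		return -ENODEV;

	entry = calloc(1, 64);
	if (!entry) {
		module_put_();	/* undo the pin on the failure path */
		return -ENOMEM;
	}

	*out = entry;		/* success: pin is held until teardown */
	return 0;
}

int main(void)
{
	void *e = NULL;

	if (!add_state(&e))
		module_put_();	/* teardown drops the pin */
	free(e);
	return module_refs;	/* 0 when the counting balances */
}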
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index 7077d705e471..6e4f17142519 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -733,7 +733,7 @@ start_xmit (struct sk_buff *skb, struct net_device *dev)
u64 tfc_vlan_tag = 0;
if (np->link_status == 0) { /* Link Down */
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
entry = np->cur_tx % TX_RING_SIZE;
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index c96d1d6ba8fe..18d86badd6ea 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -1077,8 +1077,7 @@ static int dpaa2_eth_build_single_fd(struct dpaa2_eth_priv *priv,
dma_addr_t addr;
buffer_start = skb->data - dpaa2_eth_needed_headroom(skb);
- aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN,
- DPAA2_ETH_TX_BUF_ALIGN);
+ aligned_start = PTR_ALIGN(buffer_start, DPAA2_ETH_TX_BUF_ALIGN);
if (aligned_start >= skb->head)
buffer_start = aligned_start;
else
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index aae462a0cf5a..0535e92404e3 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -1595,6 +1595,8 @@ static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
/* next descriptor to process */
i = rx_ring->next_to_clean;
+ enetc_lock_mdio();
+
while (likely(rx_frm_cnt < work_limit)) {
union enetc_rx_bd *rxbd;
struct sk_buff *skb;
@@ -1630,7 +1632,9 @@ static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
rx_byte_cnt += skb->len + ETH_HLEN;
rx_frm_cnt++;
+ enetc_unlock_mdio();
napi_gro_receive(napi, skb);
+ enetc_lock_mdio();
}
rx_ring->next_to_clean = i;
@@ -1638,6 +1642,8 @@ static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
rx_ring->stats.packets += rx_frm_cnt;
rx_ring->stats.bytes += rx_byte_cnt;
+ enetc_unlock_mdio();
+
return rx_frm_cnt;
}
@@ -1947,6 +1953,8 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
/* next descriptor to process */
i = rx_ring->next_to_clean;
+ enetc_lock_mdio();
+
while (likely(rx_frm_cnt < work_limit)) {
union enetc_rx_bd *rxbd, *orig_rxbd;
struct xdp_buff xdp_buff;
@@ -2010,7 +2018,9 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
*/
enetc_bulk_flip_buff(rx_ring, orig_i, i);
+ enetc_unlock_mdio();
napi_gro_receive(napi, skb);
+ enetc_lock_mdio();
break;
case XDP_TX:
tx_ring = priv->xdp_tx_ring[rx_ring->index];
@@ -2045,7 +2055,9 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
}
break;
case XDP_REDIRECT:
+ enetc_unlock_mdio();
err = xdp_do_redirect(rx_ring->ndev, &xdp_buff, prog);
+ enetc_lock_mdio();
if (unlikely(err)) {
enetc_xdp_drop(rx_ring, orig_i, i);
rx_ring->stats.xdp_redirect_failures++;
@@ -2065,8 +2077,11 @@ out:
rx_ring->stats.packets += rx_frm_cnt;
rx_ring->stats.bytes += rx_byte_cnt;
- if (xdp_redirect_frm_cnt)
+ if (xdp_redirect_frm_cnt) {
+ enetc_unlock_mdio();
xdp_do_flush();
+ enetc_lock_mdio();
+ }
if (xdp_tx_frm_cnt)
enetc_update_tx_ring_tail(tx_ring);
@@ -2075,6 +2090,8 @@ out:
enetc_refill_rx_ring(rx_ring, enetc_bd_unused(rx_ring) -
rx_ring->xdp.xdp_tx_in_flight);
+ enetc_unlock_mdio();
+
return rx_frm_cnt;
}
@@ -2093,6 +2110,7 @@ static int enetc_poll(struct napi_struct *napi, int budget)
for (i = 0; i < v->count_tx_rings; i++)
if (!enetc_clean_tx_ring(&v->tx_ring[i], budget))
complete = false;
+ enetc_unlock_mdio();
prog = rx_ring->xdp.prog;
if (prog)
@@ -2104,10 +2122,8 @@ static int enetc_poll(struct napi_struct *napi, int budget)
if (work_done)
v->rx_napi_work = true;
- if (!complete) {
- enetc_unlock_mdio();
+ if (!complete)
return budget;
- }
napi_complete_done(napi, work_done);
@@ -2116,6 +2132,7 @@ static int enetc_poll(struct napi_struct *napi, int budget)
v->rx_napi_work = false;
+ enetc_lock_mdio();
/* enable interrupts */
enetc_wr_reg_hot(v->rbier, ENETC_RBIER_RXTIE);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h
index 0ec010a7d640..f279fa597991 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc.h
@@ -76,7 +76,7 @@ struct enetc_lso_t {
#define ENETC_LSO_MAX_DATA_LEN SZ_256K
#define ENETC_RX_MAXFRM_SIZE ENETC_MAC_MAXFRM_SIZE
-#define ENETC_RXB_TRUESIZE 2048 /* PAGE_SIZE >> 1 */
+#define ENETC_RXB_TRUESIZE (PAGE_SIZE >> 1)
#define ENETC_RXB_PAD NET_SKB_PAD /* add extra space if needed */
#define ENETC_RXB_DMA_SIZE \
(SKB_WITH_OVERHEAD(ENETC_RXB_TRUESIZE) - ENETC_RXB_PAD)
diff --git a/drivers/net/ethernet/hisilicon/Kconfig b/drivers/net/ethernet/hisilicon/Kconfig
index 65302c41bfb1..38875c196cb6 100644
--- a/drivers/net/ethernet/hisilicon/Kconfig
+++ b/drivers/net/ethernet/hisilicon/Kconfig
@@ -148,6 +148,7 @@ config HIBMCGE
tristate "Hisilicon BMC Gigabit Ethernet Device Support"
depends on PCI && PCI_MSI
select PHYLIB
+ select FIXED_PHY
select MOTORCOMM_PHY
select REALTEK_PHY
help
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h
index ea09a09c451b..2097e4c2b3d7 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h
@@ -17,6 +17,7 @@
#define HBG_PCU_CACHE_LINE_SIZE 32
#define HBG_TX_TIMEOUT_BUF_LEN 1024
#define HBG_RX_DESCR 0x01
+#define HBG_NO_PHY 0xFF
#define HBG_PACKET_HEAD_SIZE ((HBG_RX_SKIP1 + HBG_RX_SKIP2 + \
HBG_RX_DESCR) * HBG_PCU_CACHE_LINE_SIZE)
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c
index 83cf75bf7a17..e11495b7ee98 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c
@@ -136,12 +136,11 @@ static pci_ers_result_t hbg_pci_err_detected(struct pci_dev *pdev,
{
struct net_device *netdev = pci_get_drvdata(pdev);
- netif_device_detach(netdev);
-
- if (state == pci_channel_io_perm_failure)
+ if (state == pci_channel_io_perm_failure) {
+ netif_device_detach(netdev);
return PCI_ERS_RESULT_DISCONNECT;
+ }
- pci_disable_device(pdev);
return PCI_ERS_RESULT_NEED_RESET;
}
@@ -150,6 +149,9 @@ static pci_ers_result_t hbg_pci_err_slot_reset(struct pci_dev *pdev)
struct net_device *netdev = pci_get_drvdata(pdev);
struct hbg_priv *priv = netdev_priv(netdev);
+ netif_device_detach(netdev);
+ pci_disable_device(pdev);
+
if (pci_enable_device(pdev)) {
dev_err(&pdev->dev,
"failed to re-enable PCI device after reset\n");
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c
index d0aa0661ecd4..d6e8ce8e351a 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c
@@ -244,6 +244,9 @@ void hbg_hw_adjust_link(struct hbg_priv *priv, u32 speed, u32 duplex)
hbg_hw_mac_enable(priv, HBG_STATUS_ENABLE);
+ if (priv->mac.phy_addr == HBG_NO_PHY)
+ return;
+
/* wait MAC link up */
ret = readl_poll_timeout(priv->io_base + HBG_REG_AN_NEG_STATE_ADDR,
link_status,
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.c
index 8af0bc4cca21..ae4cb35186d8 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.c
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.c
@@ -32,6 +32,7 @@ static void hbg_irq_handle_rx_buf_val(struct hbg_priv *priv,
const struct hbg_irq_info *irq_info)
{
priv->stats.rx_fifo_less_empty_thrsld_cnt++;
+ hbg_hw_irq_enable(priv, irq_info->mask, true);
}
#define HBG_IRQ_I(name, handle) \
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c
index 37791de47f6f..b6f0a2780ea8 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c
@@ -20,7 +20,6 @@
#define HBG_MDIO_OP_INTERVAL_US (5 * 1000)
#define HBG_NP_LINK_FAIL_RETRY_TIMES 5
-#define HBG_NO_PHY 0xFF
static void hbg_mdio_set_command(struct hbg_mac *mac, u32 cmd)
{
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 9d34d28ff168..782bb48c9f3d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -9429,8 +9429,7 @@ static int hclge_mii_ioctl(struct hclge_dev *hdev, struct ifreq *ifr, int cmd)
/* this command reads phy id and register at the same time */
fallthrough;
case SIOCGMIIREG:
- data->val_out = hclge_read_phy_reg(hdev, data->reg_num);
- return 0;
+ return hclge_read_phy_reg(hdev, data->reg_num, &data->val_out);
case SIOCSMIIREG:
return hclge_write_phy_reg(hdev, data->reg_num, data->val_in);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
index 96553109f44c..cf881108fa57 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
@@ -274,7 +274,7 @@ void hclge_mac_stop_phy(struct hclge_dev *hdev)
phy_stop(phydev);
}
-u16 hclge_read_phy_reg(struct hclge_dev *hdev, u16 reg_addr)
+int hclge_read_phy_reg(struct hclge_dev *hdev, u16 reg_addr, u16 *val)
{
struct hclge_phy_reg_cmd *req;
struct hclge_desc desc;
@@ -286,11 +286,14 @@ u16 hclge_read_phy_reg(struct hclge_dev *hdev, u16 reg_addr)
req->reg_addr = cpu_to_le16(reg_addr);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret)
+ if (ret) {
dev_err(&hdev->pdev->dev,
"failed to read phy reg, ret = %d.\n", ret);
+ return ret;
+ }
- return le16_to_cpu(req->reg_val);
+ *val = le16_to_cpu(req->reg_val);
+ return 0;
}
int hclge_write_phy_reg(struct hclge_dev *hdev, u16 reg_addr, u16 val)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h
index 4200d0b6d931..21d434c82475 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h
@@ -13,7 +13,7 @@ int hclge_mac_connect_phy(struct hnae3_handle *handle);
void hclge_mac_disconnect_phy(struct hnae3_handle *handle);
void hclge_mac_start_phy(struct hclge_dev *hdev);
void hclge_mac_stop_phy(struct hclge_dev *hdev);
-u16 hclge_read_phy_reg(struct hclge_dev *hdev, u16 reg_addr);
+int hclge_read_phy_reg(struct hclge_dev *hdev, u16 reg_addr, u16 *val);
int hclge_write_phy_reg(struct hclge_dev *hdev, u16 reg_addr, u16 val);
#endif
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 2250426ec91b..2532b6f82e97 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -4382,6 +4382,15 @@ int ice_get_phy_lane_number(struct ice_hw *hw)
unsigned int lane;
int err;
+ /* E82X does not have sequential IDs; the lane number is the PF ID.
+ * For the E825 device, the exception is the variant with an external
+ * PHY (0x579F), in which there is also a 1:1 pf_id -> lane_number
+ * mapping.
+ */
+ if (hw->mac_type == ICE_MAC_GENERIC ||
+ hw->device_id == ICE_DEV_ID_E825C_SGMII)
+ return hw->pf_id;
+
options = kcalloc(ICE_AQC_PORT_OPT_MAX, sizeof(*options), GFP_KERNEL);
if (!options)
return -ENOMEM;
@@ -6497,6 +6506,28 @@ u32 ice_get_link_speed(u16 index)
}
/**
+ * ice_get_dest_cgu - get destination CGU dev for given HW
+ * @hw: pointer to the HW struct
+ *
+ * Get CGU client id for CGU register read/write operations.
+ *
+ * Return: CGU device id to use in SBQ transactions.
+ */
+static enum ice_sbq_dev_id ice_get_dest_cgu(struct ice_hw *hw)
+{
+ /* On dual complex E825, only complex 0 has a functional CGU powering
+ * all the PHYs.
+ * The SBQ destination device cgu points to the CGU on the current
+ * complex; to access the primary CGU from the secondary complex, the
+ * driver should use cgu_peer as the destination device.
+ */
+ if (hw->mac_type == ICE_MAC_GENERIC_3K_E825 && ice_is_dual(hw) &&
+ !ice_is_primary(hw))
+ return ice_sbq_dev_cgu_peer;
+ return ice_sbq_dev_cgu;
+}
+
+/**
* ice_read_cgu_reg - Read a CGU register
* @hw: Pointer to the HW struct
* @addr: Register address to read
@@ -6510,8 +6541,8 @@ u32 ice_get_link_speed(u16 index)
int ice_read_cgu_reg(struct ice_hw *hw, u32 addr, u32 *val)
{
struct ice_sbq_msg_input cgu_msg = {
+ .dest_dev = ice_get_dest_cgu(hw),
.opcode = ice_sbq_msg_rd,
- .dest_dev = ice_sbq_dev_cgu,
.msg_addr_low = addr
};
int err;
@@ -6542,8 +6573,8 @@ int ice_read_cgu_reg(struct ice_hw *hw, u32 addr, u32 *val)
int ice_write_cgu_reg(struct ice_hw *hw, u32 addr, u32 val)
{
struct ice_sbq_msg_input cgu_msg = {
+ .dest_dev = ice_get_dest_cgu(hw),
.opcode = ice_sbq_msg_wr,
- .dest_dev = ice_sbq_dev_cgu,
.msg_addr_low = addr,
.data = val
};
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
index 363ae79a3620..013c93b6605e 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
+++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
@@ -1479,7 +1479,7 @@ static void ice_init_prof_masks(struct ice_hw *hw, enum ice_block blk)
per_pf = ICE_PROF_MASK_COUNT / hw->dev_caps.num_funcs;
hw->blk[blk].masks.count = per_pf;
- hw->blk[blk].masks.first = hw->pf_id * per_pf;
+ hw->blk[blk].masks.first = hw->logical_pf_id * per_pf;
memset(hw->blk[blk].masks.masks, 0, sizeof(hw->blk[blk].masks.masks));
diff --git a/drivers/net/ethernet/intel/ice/ice_sbq_cmd.h b/drivers/net/ethernet/intel/ice/ice_sbq_cmd.h
index 183dd5457d6a..21bb861febbf 100644
--- a/drivers/net/ethernet/intel/ice/ice_sbq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_sbq_cmd.h
@@ -50,6 +50,7 @@ enum ice_sbq_dev_id {
ice_sbq_dev_phy_0 = 0x02,
ice_sbq_dev_cgu = 0x06,
ice_sbq_dev_phy_0_peer = 0x0D,
+ ice_sbq_dev_cgu_peer = 0x0F,
};
enum ice_sbq_msg_opcode {
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index f8a208c84f15..10e2445e0ded 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -2281,7 +2281,7 @@ static int igb_get_sset_count(struct net_device *netdev, int sset)
case ETH_SS_PRIV_FLAGS:
return IGB_PRIV_FLAGS_STR_LEN;
default:
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
}
diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c
index f3e7218ba6f3..bb783042d1af 100644
--- a/drivers/net/ethernet/intel/igc/igc_ethtool.c
+++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c
@@ -810,7 +810,7 @@ static int igc_ethtool_get_sset_count(struct net_device *netdev, int sset)
case ETH_SS_PRIV_FLAGS:
return IGC_PRIV_FLAGS_STR_LEN;
default:
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
}
@@ -2094,6 +2094,9 @@ static void igc_ethtool_diag_test(struct net_device *netdev,
netdev_info(adapter->netdev, "Offline testing starting");
set_bit(__IGC_TESTING, &adapter->state);
+ /* power up PHY for link test */
+ igc_power_up_phy_copper(&adapter->hw);
+
/* Link test performed before hardware reset so autoneg doesn't
* interfere with test result
*/
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index ca1ccc630001..3190ce7e44c7 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -11507,10 +11507,10 @@ static int ixgbe_recovery_probe(struct ixgbe_adapter *adapter)
shutdown_aci:
mutex_destroy(&adapter->hw.aci.lock);
ixgbe_release_hw_control(adapter);
- devlink_free(adapter->devlink);
clean_up_probe:
disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
free_netdev(netdev);
+ devlink_free(adapter->devlink);
pci_release_mem_regions(pdev);
if (disable_dev)
pci_disable_device(pdev);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index 114dd88fc71c..6885d2343c48 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -641,7 +641,7 @@ static int ixgbe_ptp_feature_enable(struct ptp_clock_info *ptp,
* disabled
*/
if (rq->type != PTP_CLK_REQ_PPS || !adapter->ptp_setup_sdp)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
if (on)
adapter->flags2 |= IXGBE_FLAG2_PTP_PPS_ENABLED;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
index 3692298e10f2..c9bdee9a8b30 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
@@ -100,7 +100,7 @@ u8 mlx5e_mpwrq_umr_entry_size(enum mlx5e_mpwrq_umr_mode mode)
return sizeof(struct mlx5_ksm) * 4;
}
WARN_ONCE(1, "MPWRQ UMR mode %d is not known\n", mode);
- return 0;
+ return 1;
}
u8 mlx5e_mpwrq_log_wqe_sz(struct mlx5_core_dev *mdev, u8 page_shift,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
index 5d7c15abfcaf..f8eaaf37963b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
@@ -342,6 +342,7 @@ void mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
void mlx5e_ipsec_handle_mpv_event(int event, struct mlx5e_priv *slave_priv,
struct mlx5e_priv *master_priv);
void mlx5e_ipsec_send_event(struct mlx5e_priv *priv, int event);
+void mlx5e_ipsec_disable_events(struct mlx5e_priv *priv);
static inline struct mlx5_core_dev *
mlx5e_ipsec_sa2dev(struct mlx5e_ipsec_sa_entry *sa_entry)
@@ -387,6 +388,10 @@ static inline void mlx5e_ipsec_handle_mpv_event(int event, struct mlx5e_priv *sl
static inline void mlx5e_ipsec_send_event(struct mlx5e_priv *priv, int event)
{
}
+
+static inline void mlx5e_ipsec_disable_events(struct mlx5e_priv *priv)
+{
+}
#endif
#endif /* __MLX5E_IPSEC_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
index bf1d2769d4f1..feef86fff4bf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
@@ -2893,9 +2893,30 @@ void mlx5e_ipsec_handle_mpv_event(int event, struct mlx5e_priv *slave_priv,
void mlx5e_ipsec_send_event(struct mlx5e_priv *priv, int event)
{
- if (!priv->ipsec)
- return; /* IPsec not supported */
+ if (!priv->ipsec || mlx5_devcom_comp_get_size(priv->devcom) < 2)
+ return; /* IPsec not supported or no peers */
mlx5_devcom_send_event(priv->devcom, event, event, priv);
wait_for_completion(&priv->ipsec->comp);
}
+
+void mlx5e_ipsec_disable_events(struct mlx5e_priv *priv)
+{
+ struct mlx5_devcom_comp_dev *tmp = NULL;
+ struct mlx5e_priv *peer_priv;
+
+ if (!priv->devcom)
+ return;
+
+ if (!mlx5_devcom_for_each_peer_begin(priv->devcom))
+ goto out;
+
+ peer_priv = mlx5_devcom_get_next_peer_data(priv->devcom, &tmp);
+ if (peer_priv)
+ complete_all(&peer_priv->ipsec->comp);
+
+ mlx5_devcom_for_each_peer_end(priv->devcom);
+out:
+ mlx5_devcom_unregister_component(priv->devcom);
+ priv->devcom = NULL;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
index d7a11ff9bbdb..da2d1eb52c13 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
@@ -320,7 +320,6 @@ err_dma_unmap:
err_free:
kfree(buf);
err_out:
- priv_rx->rq_stats->tls_resync_req_skip++;
return err;
}
@@ -339,14 +338,19 @@ static void resync_handle_work(struct work_struct *work)
if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags))) {
mlx5e_ktls_priv_rx_put(priv_rx);
+ priv_rx->rq_stats->tls_resync_req_skip++;
+ tls_offload_rx_resync_async_request_cancel(&resync->core);
return;
}
c = resync->priv->channels.c[priv_rx->rxq];
sq = &c->async_icosq;
- if (resync_post_get_progress_params(sq, priv_rx))
+ if (resync_post_get_progress_params(sq, priv_rx)) {
+ priv_rx->rq_stats->tls_resync_req_skip++;
+ tls_offload_rx_resync_async_request_cancel(&resync->core);
mlx5e_ktls_priv_rx_put(priv_rx);
+ }
}
static void resync_init(struct mlx5e_ktls_rx_resync_ctx *resync,
@@ -425,14 +429,21 @@ void mlx5e_ktls_handle_get_psv_completion(struct mlx5e_icosq_wqe_info *wi,
{
struct mlx5e_ktls_rx_resync_buf *buf = wi->tls_get_params.buf;
struct mlx5e_ktls_offload_context_rx *priv_rx;
+ struct tls_offload_resync_async *async_resync;
+ struct tls_offload_context_rx *rx_ctx;
u8 tracker_state, auth_state, *ctx;
struct device *dev;
u32 hw_seq;
priv_rx = buf->priv_rx;
dev = mlx5_core_dma_dev(sq->channel->mdev);
- if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags)))
+ rx_ctx = tls_offload_ctx_rx(tls_get_ctx(priv_rx->sk));
+ async_resync = rx_ctx->resync_async;
+ if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags))) {
+ priv_rx->rq_stats->tls_resync_req_skip++;
+ tls_offload_rx_resync_async_request_cancel(async_resync);
goto out;
+ }
dma_sync_single_for_cpu(dev, buf->dma_addr, PROGRESS_PARAMS_PADDED_SIZE,
DMA_FROM_DEVICE);
@@ -443,11 +454,13 @@ void mlx5e_ktls_handle_get_psv_completion(struct mlx5e_icosq_wqe_info *wi,
if (tracker_state != MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_TRACKING ||
auth_state != MLX5E_TLS_PROGRESS_PARAMS_AUTH_STATE_NO_OFFLOAD) {
priv_rx->rq_stats->tls_resync_req_skip++;
+ tls_offload_rx_resync_async_request_cancel(async_resync);
goto out;
}
hw_seq = MLX5_GET(tls_progress_params, ctx, hw_resync_tcp_sn);
- tls_offload_rx_resync_async_request_end(priv_rx->sk, cpu_to_be32(hw_seq));
+ tls_offload_rx_resync_async_request_end(async_resync,
+ cpu_to_be32(hw_seq));
priv_rx->rq_stats->tls_resync_req_end++;
out:
mlx5e_ktls_priv_rx_put(priv_rx);
@@ -472,8 +485,10 @@ static bool resync_queue_get_psv(struct sock *sk)
resync = &priv_rx->resync;
mlx5e_ktls_priv_rx_get(priv_rx);
- if (unlikely(!queue_work(resync->priv->tls->rx_wq, &resync->work)))
+ if (unlikely(!queue_work(resync->priv->tls->rx_wq, &resync->work))) {
mlx5e_ktls_priv_rx_put(priv_rx);
+ return false;
+ }
return true;
}
@@ -482,6 +497,7 @@ static bool resync_queue_get_psv(struct sock *sk)
static void resync_update_sn(struct mlx5e_rq *rq, struct sk_buff *skb)
{
struct ethhdr *eth = (struct ethhdr *)(skb->data);
+ struct tls_offload_resync_async *resync_async;
struct net_device *netdev = rq->netdev;
struct net *net = dev_net(netdev);
struct sock *sk = NULL;
@@ -527,7 +543,8 @@ static void resync_update_sn(struct mlx5e_rq *rq, struct sk_buff *skb)
seq = th->seq;
datalen = skb->len - depth;
- tls_offload_rx_resync_async_request_start(sk, seq, datalen);
+ resync_async = tls_offload_ctx_rx(tls_get_ctx(sk))->resync_async;
+ tls_offload_rx_resync_async_request_start(resync_async, seq, datalen);
rq->stats->tls_resync_req_start++;
unref:
@@ -556,6 +573,18 @@ void mlx5e_ktls_rx_resync(struct net_device *netdev, struct sock *sk,
resync_handle_seq_match(priv_rx, c);
}
+void
+mlx5e_ktls_rx_resync_async_request_cancel(struct mlx5e_icosq_wqe_info *wi)
+{
+ struct mlx5e_ktls_offload_context_rx *priv_rx;
+ struct mlx5e_ktls_rx_resync_buf *buf;
+
+ buf = wi->tls_get_params.buf;
+ priv_rx = buf->priv_rx;
+ priv_rx->rq_stats->tls_resync_req_skip++;
+ tls_offload_rx_resync_async_request_cancel(&priv_rx->resync.core);
+}
+
/* End of resync section */
void mlx5e_ktls_handle_rx_skb(struct mlx5e_rq *rq, struct sk_buff *skb,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h
index f87b65c560ea..cb08799769ee 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h
@@ -29,6 +29,10 @@ void mlx5e_ktls_handle_get_psv_completion(struct mlx5e_icosq_wqe_info *wi,
void mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
struct mlx5e_tx_wqe_info *wi,
u32 *dma_fifo_cc);
+
+void
+mlx5e_ktls_rx_resync_async_request_cancel(struct mlx5e_icosq_wqe_info *wi);
+
static inline bool
mlx5e_ktls_tx_try_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
struct mlx5e_tx_wqe_info *wi,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index a56825921c23..9c46511e7b43 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -242,8 +242,8 @@ static int mlx5e_devcom_init_mpv(struct mlx5e_priv *priv, u64 *data)
&attr,
mlx5e_devcom_event_mpv,
priv);
- if (IS_ERR(priv->devcom))
- return PTR_ERR(priv->devcom);
+ if (!priv->devcom)
+ return -EINVAL;
if (mlx5_core_is_mp_master(priv->mdev)) {
mlx5_devcom_send_event(priv->devcom, MPV_DEVCOM_MASTER_UP,
@@ -256,7 +256,7 @@ static int mlx5e_devcom_init_mpv(struct mlx5e_priv *priv, u64 *data)
static void mlx5e_devcom_cleanup_mpv(struct mlx5e_priv *priv)
{
- if (IS_ERR_OR_NULL(priv->devcom))
+ if (!priv->devcom)
return;
if (mlx5_core_is_mp_master(priv->mdev)) {
@@ -266,6 +266,7 @@ static void mlx5e_devcom_cleanup_mpv(struct mlx5e_priv *priv)
}
mlx5_devcom_unregister_component(priv->devcom);
+ priv->devcom = NULL;
}
static int blocking_event(struct notifier_block *nb, unsigned long event, void *data)
@@ -6120,6 +6121,7 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv)
if (mlx5e_monitor_counter_supported(priv))
mlx5e_monitor_counter_cleanup(priv);
+ mlx5e_ipsec_disable_events(priv);
mlx5e_disable_blocking_events(priv);
mlx5e_disable_async_events(priv);
mlx5_lag_remove_netdev(mdev, priv->netdev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 263d5628ee44..26621a2972ec 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -1036,6 +1036,10 @@ int mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
netdev_WARN_ONCE(cq->netdev,
"Bad OP in ICOSQ CQE: 0x%x\n",
get_cqe_opcode(cqe));
+#ifdef CONFIG_MLX5_EN_TLS
+ if (wi->wqe_type == MLX5E_ICOSQ_WQE_GET_PSV_TLS)
+ mlx5e_ktls_rx_resync_async_request_cancel(wi);
+#endif
mlx5e_dump_error_cqe(&sq->cq, sq->sqn,
(struct mlx5_err_cqe *)cqe);
mlx5_wq_cyc_wqe_dump(&sq->wq, ci, wi->num_wqebbs);
@@ -1794,14 +1798,27 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
}
prog = rcu_dereference(rq->xdp_prog);
- if (prog && mlx5e_xdp_handle(rq, prog, mxbuf)) {
- if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
- struct mlx5e_wqe_frag_info *pwi;
+ if (prog) {
+ u8 nr_frags_free, old_nr_frags = sinfo->nr_frags;
+
+ if (mlx5e_xdp_handle(rq, prog, mxbuf)) {
+ if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT,
+ rq->flags)) {
+ struct mlx5e_wqe_frag_info *pwi;
+
+ wi -= old_nr_frags - sinfo->nr_frags;
+
+ for (pwi = head_wi; pwi < wi; pwi++)
+ pwi->frag_page->frags++;
+ }
+ return NULL; /* page/packet was consumed by XDP */
+ }
- for (pwi = head_wi; pwi < wi; pwi++)
- pwi->frag_page->frags++;
+ nr_frags_free = old_nr_frags - sinfo->nr_frags;
+ if (unlikely(nr_frags_free)) {
+ wi -= nr_frags_free;
+ truesize -= nr_frags_free * frag_info->frag_stride;
}
- return NULL; /* page/packet was consumed by XDP */
}
skb = mlx5e_build_linear_skb(
@@ -2027,6 +2044,7 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
u32 byte_cnt = cqe_bcnt;
struct skb_shared_info *sinfo;
unsigned int truesize = 0;
+ u32 pg_consumed_bytes;
struct bpf_prog *prog;
struct sk_buff *skb;
u32 linear_frame_sz;
@@ -2080,7 +2098,8 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
while (byte_cnt) {
/* Non-linear mode, hence non-XSK, which always uses PAGE_SIZE. */
- u32 pg_consumed_bytes = min_t(u32, PAGE_SIZE - frag_offset, byte_cnt);
+ pg_consumed_bytes =
+ min_t(u32, PAGE_SIZE - frag_offset, byte_cnt);
if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state))
truesize += pg_consumed_bytes;
@@ -2096,10 +2115,15 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
}
if (prog) {
+ u8 nr_frags_free, old_nr_frags = sinfo->nr_frags;
+ u32 len;
+
if (mlx5e_xdp_handle(rq, prog, mxbuf)) {
if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
struct mlx5e_frag_page *pfp;
+ frag_page -= old_nr_frags - sinfo->nr_frags;
+
for (pfp = head_page; pfp < frag_page; pfp++)
pfp->frags++;
@@ -2110,9 +2134,19 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
return NULL; /* page/packet was consumed by XDP */
}
+ nr_frags_free = old_nr_frags - sinfo->nr_frags;
+ if (unlikely(nr_frags_free)) {
+ frag_page -= nr_frags_free;
+ truesize -= (nr_frags_free - 1) * PAGE_SIZE +
+ ALIGN(pg_consumed_bytes,
+ BIT(rq->mpwqe.log_stride_sz));
+ }
+
+ len = mxbuf->xdp.data_end - mxbuf->xdp.data;
+
skb = mlx5e_build_linear_skb(
rq, mxbuf->xdp.data_hard_start, linear_frame_sz,
- mxbuf->xdp.data - mxbuf->xdp.data_hard_start, 0,
+ mxbuf->xdp.data - mxbuf->xdp.data_hard_start, len,
mxbuf->xdp.data - mxbuf->xdp.data_meta);
if (unlikely(!skb)) {
mlx5e_page_release_fragmented(rq->page_pool,
@@ -2137,8 +2171,11 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
do
pagep->frags++;
while (++pagep < frag_page);
+
+ headlen = min_t(u16, MLX5E_RX_MAX_HEAD - len,
+ skb->data_len);
+ __pskb_pull_tail(skb, headlen);
}
- __pskb_pull_tail(skb, headlen);
} else {
dma_addr_t addr;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index 7c029a7d0fd7..a2802cfc9b98 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -1614,7 +1614,9 @@ void mlx5e_stats_fec_get(struct mlx5e_priv *priv,
fec_set_corrected_bits_total(priv, fec_stats);
fec_set_block_stats(priv, mode, fec_stats);
- fec_set_histograms_stats(priv, mode, hist);
+
+ if (MLX5_CAP_PCAM_REG(priv->mdev, pphcr))
+ fec_set_histograms_stats(priv, mode, hist);
}
#define PPORT_ETH_EXT_OFF(c) \
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index b7227afcb51d..2702b3885f06 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -256,7 +256,7 @@ mlx5e_tx_wqe_inline_mode(struct mlx5e_txqsq *sq, struct sk_buff *skb,
u8 mode;
#ifdef CONFIG_MLX5_EN_TLS
- if (accel && accel->tls.tls_tisn)
+ if (accel->tls.tls_tisn)
return MLX5_INLINE_MODE_TCP_UDP;
#endif
@@ -982,6 +982,7 @@ void mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
struct mlx5e_tx_attr attr;
struct mlx5i_tx_wqe *wqe;
+ struct mlx5e_accel_tx_state accel = {};
struct mlx5_wqe_datagram_seg *datagram;
struct mlx5_wqe_ctrl_seg *cseg;
struct mlx5_wqe_eth_seg *eseg;
@@ -992,7 +993,7 @@ void mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
int num_dma;
u16 pi;
- mlx5e_sq_xmit_prepare(sq, skb, NULL, &attr);
+ mlx5e_sq_xmit_prepare(sq, skb, &accel, &attr);
mlx5i_sq_calc_wqe_attr(skb, &attr, &wqe_attr);
pi = mlx5e_txqsq_get_next_pi(sq, wqe_attr.num_wqebbs);
@@ -1009,7 +1010,7 @@ void mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
mlx5i_txwqe_build_datagram(av, dqpn, dqkey, datagram);
- mlx5e_txwqe_build_eseg_csum(sq, skb, NULL, eseg);
+ mlx5e_txwqe_build_eseg_csum(sq, skb, &accel, eseg);
eseg->mss = attr.mss;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c
index 76382626ad41..929adeb50a98 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c
@@ -66,7 +66,6 @@ static void esw_destroy_legacy_fdb_table(struct mlx5_eswitch *esw)
esw->fdb_table.legacy.addr_grp = NULL;
esw->fdb_table.legacy.allmulti_grp = NULL;
esw->fdb_table.legacy.promisc_grp = NULL;
- atomic64_set(&esw->user_count, 0);
}
static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 4cf995be127d..44a142a041b2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -1978,7 +1978,6 @@ static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
/* Holds true only as long as DMFS is the default */
mlx5_flow_namespace_set_mode(esw->fdb_table.offloads.ns,
MLX5_FLOW_STEERING_MODE_DMFS);
- atomic64_set(&esw->user_count, 0);
}
static int esw_get_nr_ft_offloads_steering_src_ports(struct mlx5_eswitch *esw)
@@ -3129,7 +3128,7 @@ void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw,
attr,
mlx5_esw_offloads_devcom_event,
esw);
- if (IS_ERR(esw->devcom))
+ if (!esw->devcom)
return;
mlx5_devcom_send_event(esw->devcom,
@@ -3140,7 +3139,7 @@ void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw,
void mlx5_esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw)
{
- if (IS_ERR_OR_NULL(esw->devcom))
+ if (!esw->devcom)
return;
mlx5_devcom_send_event(esw->devcom,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
index 59c00c911275..3db0387bf6dc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
@@ -1430,11 +1430,10 @@ static int mlx5_lag_register_hca_devcom_comp(struct mlx5_core_dev *dev)
mlx5_devcom_register_component(dev->priv.devc,
MLX5_DEVCOM_HCA_PORTS,
&attr, NULL, dev);
- if (IS_ERR(dev->priv.hca_devcom_comp)) {
+ if (!dev->priv.hca_devcom_comp) {
mlx5_core_err(dev,
- "Failed to register devcom HCA component, err: %ld\n",
- PTR_ERR(dev->priv.hca_devcom_comp));
- return PTR_ERR(dev->priv.hca_devcom_comp);
+ "Failed to register devcom HCA component.");
+ return -EINVAL;
}
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
index d0ba83d77cd1..29e7fa09c32c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
@@ -1444,7 +1444,7 @@ static void mlx5_shared_clock_register(struct mlx5_core_dev *mdev, u64 key)
compd = mlx5_devcom_register_component(mdev->priv.devc,
MLX5_DEVCOM_SHARED_CLOCK,
&attr, NULL, mdev);
- if (IS_ERR(compd))
+ if (!compd)
return;
mdev->clock_state->compdev = compd;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c
index faa2833602c8..e749618229bc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c
@@ -76,20 +76,18 @@ mlx5_devcom_dev_alloc(struct mlx5_core_dev *dev)
struct mlx5_devcom_dev *
mlx5_devcom_register_device(struct mlx5_core_dev *dev)
{
- struct mlx5_devcom_dev *devc;
+ struct mlx5_devcom_dev *devc = NULL;
mutex_lock(&dev_list_lock);
if (devcom_dev_exists(dev)) {
- devc = ERR_PTR(-EEXIST);
+ mlx5_core_err(dev, "devcom device already exists");
goto out;
}
devc = mlx5_devcom_dev_alloc(dev);
- if (!devc) {
- devc = ERR_PTR(-ENOMEM);
+ if (!devc)
goto out;
- }
list_add_tail(&devc->list, &devcom_dev_list);
out:
@@ -110,8 +108,10 @@ mlx5_devcom_dev_release(struct kref *ref)
void mlx5_devcom_unregister_device(struct mlx5_devcom_dev *devc)
{
- if (!IS_ERR_OR_NULL(devc))
- kref_put(&devc->ref, mlx5_devcom_dev_release);
+ if (!devc)
+ return;
+
+ kref_put(&devc->ref, mlx5_devcom_dev_release);
}
static struct mlx5_devcom_comp *
@@ -122,7 +122,7 @@ mlx5_devcom_comp_alloc(u64 id, const struct mlx5_devcom_match_attr *attr,
comp = kzalloc(sizeof(*comp), GFP_KERNEL);
if (!comp)
- return ERR_PTR(-ENOMEM);
+ return NULL;
comp->id = id;
comp->key.key = attr->key;
@@ -160,7 +160,7 @@ devcom_alloc_comp_dev(struct mlx5_devcom_dev *devc,
devcom = kzalloc(sizeof(*devcom), GFP_KERNEL);
if (!devcom)
- return ERR_PTR(-ENOMEM);
+ return NULL;
kref_get(&devc->ref);
devcom->devc = devc;
@@ -240,31 +240,28 @@ mlx5_devcom_register_component(struct mlx5_devcom_dev *devc,
mlx5_devcom_event_handler_t handler,
void *data)
{
- struct mlx5_devcom_comp_dev *devcom;
+ struct mlx5_devcom_comp_dev *devcom = NULL;
struct mlx5_devcom_comp *comp;
- if (IS_ERR_OR_NULL(devc))
- return ERR_PTR(-EINVAL);
+ if (!devc)
+ return NULL;
mutex_lock(&comp_list_lock);
comp = devcom_component_get(devc, id, attr, handler);
- if (IS_ERR(comp)) {
- devcom = ERR_PTR(-EINVAL);
+ if (IS_ERR(comp))
goto out_unlock;
- }
if (!comp) {
comp = mlx5_devcom_comp_alloc(id, attr, handler);
- if (IS_ERR(comp)) {
- devcom = ERR_CAST(comp);
+ if (!comp)
goto out_unlock;
- }
+
list_add_tail(&comp->comp_list, &devcom_comp_list);
}
mutex_unlock(&comp_list_lock);
devcom = devcom_alloc_comp_dev(devc, comp, data);
- if (IS_ERR(devcom))
+ if (!devcom)
kref_put(&comp->ref, mlx5_devcom_comp_release);
return devcom;
@@ -276,8 +273,10 @@ out_unlock:
void mlx5_devcom_unregister_component(struct mlx5_devcom_comp_dev *devcom)
{
- if (!IS_ERR_OR_NULL(devcom))
- devcom_free_comp_dev(devcom);
+ if (!devcom)
+ return;
+
+ devcom_free_comp_dev(devcom);
}
int mlx5_devcom_comp_get_size(struct mlx5_devcom_comp_dev *devcom)
@@ -296,7 +295,7 @@ int mlx5_devcom_send_event(struct mlx5_devcom_comp_dev *devcom,
int err = 0;
void *data;
- if (IS_ERR_OR_NULL(devcom))
+ if (!devcom)
return -ENODEV;
comp = devcom->comp;
@@ -338,7 +337,7 @@ void mlx5_devcom_comp_set_ready(struct mlx5_devcom_comp_dev *devcom, bool ready)
bool mlx5_devcom_comp_is_ready(struct mlx5_devcom_comp_dev *devcom)
{
- if (IS_ERR_OR_NULL(devcom))
+ if (!devcom)
return false;
return READ_ONCE(devcom->comp->ready);
@@ -348,7 +347,7 @@ bool mlx5_devcom_for_each_peer_begin(struct mlx5_devcom_comp_dev *devcom)
{
struct mlx5_devcom_comp *comp;
- if (IS_ERR_OR_NULL(devcom))
+ if (!devcom)
return false;
comp = devcom->comp;
@@ -421,21 +420,21 @@ void *mlx5_devcom_get_next_peer_data_rcu(struct mlx5_devcom_comp_dev *devcom,
void mlx5_devcom_comp_lock(struct mlx5_devcom_comp_dev *devcom)
{
- if (IS_ERR_OR_NULL(devcom))
+ if (!devcom)
return;
down_write(&devcom->comp->sem);
}
void mlx5_devcom_comp_unlock(struct mlx5_devcom_comp_dev *devcom)
{
- if (IS_ERR_OR_NULL(devcom))
+ if (!devcom)
return;
up_write(&devcom->comp->sem);
}
int mlx5_devcom_comp_trylock(struct mlx5_devcom_comp_dev *devcom)
{
- if (IS_ERR_OR_NULL(devcom))
+ if (!devcom)
return 0;
return down_write_trylock(&devcom->comp->sem);
}
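
This file is the center of the conversion seen throughout the mlx5 hunks above: the devcom API family stops encoding failures as ERR_PTR() values and returns plain NULL instead, so callers switch from IS_ERR()/IS_ERR_OR_NULL() checks to simple !ptr tests and pick their own errno. A stand-alone sketch of the two conventions, with minimal local copies of the <linux/err.h> helpers and hypothetical register_old()/register_new() functions:

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

/* Minimal local equivalents of the kernel's <linux/err.h> helpers. */
#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(intptr_t)(err))
#define PTR_ERR(ptr)	((long)(intptr_t)(ptr))
#define IS_ERR(ptr)	((uintptr_t)(ptr) >= (uintptr_t)-MAX_ERRNO)

/* Old convention: the failure reason is encoded in the pointer. */
static void *register_old(int fail) { return fail ? ERR_PTR(-12) : malloc(16); }

/* New convention: failure is just NULL; the reason is logged at the
 * point of failure instead of being propagated in the pointer. */
static void *register_new(int fail) { return fail ? NULL : malloc(16); }

int main(void)
{
	void *p = register_old(1);

	if (IS_ERR(p))
		printf("old style: err=%ld\n", PTR_ERR(p));

	p = register_new(1);
	if (!p)
		printf("new style: NULL, caller picks its own errno\n");
	return 0;
}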
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c
index f5c2701f6e87..8e17daae48af 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c
@@ -221,8 +221,8 @@ static int sd_register(struct mlx5_core_dev *dev)
attr.net = mlx5_core_net(dev);
devcom = mlx5_devcom_register_component(dev->priv.devc, MLX5_DEVCOM_SD_GROUP,
&attr, NULL, dev);
- if (IS_ERR(devcom))
- return PTR_ERR(devcom);
+ if (!devcom)
+ return -EINVAL;
sd->devcom = devcom;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index df93625c9dfa..70c156591b0b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -978,9 +978,8 @@ static int mlx5_init_once(struct mlx5_core_dev *dev)
int err;
dev->priv.devc = mlx5_devcom_register_device(dev);
- if (IS_ERR(dev->priv.devc))
- mlx5_core_warn(dev, "failed to register devcom device %pe\n",
- dev->priv.devc);
+ if (!dev->priv.devc)
+ mlx5_core_warn(dev, "failed to register devcom device\n");
err = mlx5_query_board_id(dev);
if (err) {
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 132626a3f9f7..9ef72f294117 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -2557,14 +2557,16 @@ nfp_net_alloc(struct pci_dev *pdev, const struct nfp_dev_info *dev_info,
err = nfp_net_tlv_caps_parse(&nn->pdev->dev, nn->dp.ctrl_bar,
&nn->tlv_caps);
if (err)
- goto err_free_nn;
+ goto err_free_xsk_pools;
err = nfp_ccm_mbox_alloc(nn);
if (err)
- goto err_free_nn;
+ goto err_free_xsk_pools;
return nn;
+err_free_xsk_pools:
+ kfree(nn->dp.xsk_pools);
err_free_nn:
if (nn->dp.netdev)
free_netdev(nn->dp.netdev);
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 9d3bd65b85ff..e2d7ce1a85e8 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -2211,15 +2211,35 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
skb_tx_timestamp(skb);
}
- /* Descriptor type must be set after all the above writes */
- dma_wmb();
+
if (num_tx_desc > 1) {
desc->die_dt = DT_FEND;
desc--;
+ /* When using multi-descriptors, DT_FEND must be written before
+ * DT_FSTART, but the compiler may reorder the memory
+ * writes in an attempt to optimize the code.
+ * Use a dma_wmb() barrier to make sure DT_FEND and DT_FSTART
+ * are written exactly in the order shown in the code.
+ * This is particularly important for cases where the DMA engine
+ * is already running when we are running this code. If the DMA
+ * sees DT_FSTART without the corresponding DT_FEND it will enter
+ * an error condition.
+ */
+ dma_wmb();
desc->die_dt = DT_FSTART;
} else {
+ /* Descriptor type must be set after all the above writes */
+ dma_wmb();
desc->die_dt = DT_FSINGLE;
}
+
+ /* Before ringing the doorbell we need to make sure that the latest
+ * writes have been committed to memory, otherwise it could delay
+ * things until the doorbell is rung again.
+ * This replaces the read operation mentioned in the HW
+ * manuals.
+ */
+ dma_wmb();
ravb_modify(ndev, TCCR, TCCR_TSRQ0 << q, TCCR_TSRQ0 << q);
priv->cur_tx[q] += num_tx_desc;
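
The reshuffled barriers enforce a single invariant: by the time the hardware can observe DT_FSTART (or DT_FSINGLE), every other descriptor write, DT_FEND included, is already visible in memory, and one final barrier precedes the doorbell. A compilable sketch of that ordering, with dma_wmb() reduced to a placeholder (in the kernel it is a real write barrier against DMA masters) and a hypothetical publish_multi() helper:

#include <stdio.h>

#define DT_FEND    1
#define DT_FSTART  2

/* Stub: in the kernel, dma_wmb() orders CPU writes as seen by a DMA
 * master; here it only documents where the barrier must sit. */
#define dma_wmb() do { } while (0)

struct tx_desc { unsigned char die_dt; };

static void publish_multi(struct tx_desc *first, struct tx_desc *last)
{
	last->die_dt = DT_FEND;    /* end marker first ... */
	dma_wmb();                 /* ... made visible before ... */
	first->die_dt = DT_FSTART; /* ... the start marker arms the DMA */
	dma_wmb();                 /* commit before ringing the doorbell */
	/* the write to the TCCR doorbell register would follow here */
}

int main(void)
{
	struct tx_desc ring[2] = { {0}, {0} };

	publish_multi(&ring[0], &ring[1]);
	printf("desc0=%u desc1=%u\n", ring[0].die_dt, ring[1].die_dt);
	return 0;
}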
diff --git a/drivers/net/ethernet/sfc/mae.c b/drivers/net/ethernet/sfc/mae.c
index 6fd0c1e9a7d5..7cfd9000f79d 100644
--- a/drivers/net/ethernet/sfc/mae.c
+++ b/drivers/net/ethernet/sfc/mae.c
@@ -1090,6 +1090,9 @@ void efx_mae_remove_mport(void *desc, void *arg)
kfree(mport);
}
+/*
+ * Takes ownership of @desc, even if it returns an error
+ */
static int efx_mae_process_mport(struct efx_nic *efx,
struct mae_mport_desc *desc)
{
@@ -1100,6 +1103,7 @@ static int efx_mae_process_mport(struct efx_nic *efx,
if (!IS_ERR_OR_NULL(mport)) {
netif_err(efx, drv, efx->net_dev,
"mport with id %u does exist!!!\n", desc->mport_id);
+ kfree(desc);
return -EEXIST;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
index 51ea0caf16c1..0786816e05f0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -1446,14 +1446,15 @@ static int gmac_clk_enable(struct rk_priv_data *bsp_priv, bool enable)
}
} else {
if (bsp_priv->clk_enabled) {
+ if (bsp_priv->ops && bsp_priv->ops->set_clock_selection) {
+ bsp_priv->ops->set_clock_selection(bsp_priv,
+ bsp_priv->clock_input, false);
+ }
+
clk_bulk_disable_unprepare(bsp_priv->num_clks,
bsp_priv->clks);
clk_disable_unprepare(bsp_priv->clk_phy);
- if (bsp_priv->ops && bsp_priv->ops->set_clock_selection)
- bsp_priv->ops->set_clock_selection(bsp_priv,
- bsp_priv->clock_input, false);
-
bsp_priv->clk_enabled = false;
}
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 650d75b73e0b..7b90ecd3a55e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -4089,18 +4089,11 @@ static int stmmac_release(struct net_device *dev)
static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
struct stmmac_tx_queue *tx_q)
{
- u16 tag = 0x0, inner_tag = 0x0;
- u32 inner_type = 0x0;
struct dma_desc *p;
+ u16 tag = 0x0;
- if (!priv->dma_cap.vlins)
+ if (!priv->dma_cap.vlins || !skb_vlan_tag_present(skb))
return false;
- if (!skb_vlan_tag_present(skb))
- return false;
- if (skb->vlan_proto == htons(ETH_P_8021AD)) {
- inner_tag = skb_vlan_tag_get(skb);
- inner_type = STMMAC_VLAN_INSERT;
- }
tag = skb_vlan_tag_get(skb);
@@ -4109,7 +4102,7 @@ static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
else
p = &tx_q->dma_tx[tx_q->cur_tx];
- if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
+ if (stmmac_set_desc_vlan_tag(priv, p, tag, 0x0, 0x0))
return false;
stmmac_set_tx_owner(priv, p);
@@ -4507,6 +4500,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
bool has_vlan, set_ic;
int entry, first_tx;
dma_addr_t des;
+ u32 sdu_len;
tx_q = &priv->dma_conf.tx_queue[queue];
txq_stats = &priv->xstats.txq_stats[queue];
@@ -4524,10 +4518,15 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
}
if (priv->est && priv->est->enable &&
- priv->est->max_sdu[queue] &&
- skb->len > priv->est->max_sdu[queue]){
- priv->xstats.max_sdu_txq_drop[queue]++;
- goto max_sdu_err;
+ priv->est->max_sdu[queue]) {
+ sdu_len = skb->len;
+ /* Add VLAN tag length if VLAN tag insertion offload is requested */
+ if (priv->dma_cap.vlins && skb_vlan_tag_present(skb))
+ sdu_len += VLAN_HLEN;
+ if (sdu_len > priv->est->max_sdu[queue]) {
+ priv->xstats.max_sdu_txq_drop[queue]++;
+ goto max_sdu_err;
+ }
}
if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
@@ -7573,11 +7572,8 @@ int stmmac_dvr_probe(struct device *device,
ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
}
- if (priv->dma_cap.vlins) {
+ if (priv->dma_cap.vlins)
ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
- if (priv->dma_cap.dvlan)
- ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
- }
#endif
priv->msg_enable = netif_msg_init(debug, default_msg_level);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
index 97e89a604abd..3b4d4696afe9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
@@ -981,7 +981,7 @@ static int tc_taprio_configure(struct stmmac_priv *priv,
if (qopt->cmd == TAPRIO_CMD_DESTROY)
goto disable;
- if (qopt->num_entries >= dep)
+ if (qopt->num_entries > dep)
return -EINVAL;
if (!qopt->cycle_time)
return -ERANGE;
@@ -1012,7 +1012,7 @@ static int tc_taprio_configure(struct stmmac_priv *priv,
s64 delta_ns = qopt->entries[i].interval;
u32 gates = qopt->entries[i].gate_mask;
- if (delta_ns > GENMASK(wid, 0))
+ if (delta_ns > GENMASK(wid - 1, 0))
return -ERANGE;
if (gates > GENMASK(31 - wid, 0))
return -ERANGE;
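
Both stmmac_tc.c changes are off-by-one fixes: a schedule may legitimately use all dep entries, and an interval field that is wid bits wide can hold at most GENMASK(wid - 1, 0); GENMASK(wid, 0) is one bit too wide. A small demo with a local GENMASK() equivalent (the real macro lives in <linux/bits.h>):

#include <stdio.h>

/* Local equivalent of the kernel's GENMASK() for 64-bit values. */
#define GENMASK(h, l) \
	(((~0ULL) >> (63 - (h))) & (~0ULL << (l)))

int main(void)
{
	unsigned int wid = 24; /* e.g. a 24-bit interval field */

	/* Largest value a wid-bit field can actually hold: */
	printf("GENMASK(wid - 1, 0) = 0x%llx\n", GENMASK(wid - 1, 0));
	/* The old bound was one bit too generous: */
	printf("GENMASK(wid, 0)     = 0x%llx\n", GENMASK(wid, 0));
	return 0;
}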
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.c
index 0b6f6228ae35..ff02a79c00d4 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.c
@@ -212,7 +212,7 @@ static void vlan_enable(struct mac_device_info *hw, u32 type)
value = readl(ioaddr + VLAN_INCL);
value |= VLAN_VLTI;
- value |= VLAN_CSVL; /* Only use SVLAN */
+ value &= ~VLAN_CSVL; /* Only use CVLAN */
value &= ~VLAN_VLC;
value |= (type << VLAN_VLC_SHIFT) & VLAN_VLC;
writel(value, ioaddr + VLAN_INCL);
diff --git a/drivers/net/ethernet/ti/am65-cpts.c b/drivers/net/ethernet/ti/am65-cpts.c
index 59d6ab989c55..8ffbfaa3ab18 100644
--- a/drivers/net/ethernet/ti/am65-cpts.c
+++ b/drivers/net/ethernet/ti/am65-cpts.c
@@ -163,7 +163,9 @@ struct am65_cpts {
struct device_node *clk_mux_np;
struct clk *refclk;
u32 refclk_freq;
- struct list_head events;
+ /* separate lists to handle TX and RX timestamps independently */
+ struct list_head events_tx;
+ struct list_head events_rx;
struct list_head pool;
struct am65_cpts_event pool_data[AM65_CPTS_MAX_EVENTS];
spinlock_t lock; /* protects events lists */
@@ -227,6 +229,24 @@ static void am65_cpts_disable(struct am65_cpts *cpts)
am65_cpts_write32(cpts, 0, int_enable);
}
+static int am65_cpts_purge_event_list(struct am65_cpts *cpts,
+ struct list_head *events)
+{
+ struct list_head *this, *next;
+ struct am65_cpts_event *event;
+ int removed = 0;
+
+ list_for_each_safe(this, next, events) {
+ event = list_entry(this, struct am65_cpts_event, list);
+ if (time_after(jiffies, event->tmo)) {
+ list_del_init(&event->list);
+ list_add(&event->list, &cpts->pool);
+ ++removed;
+ }
+ }
+ return removed;
+}
+
static int am65_cpts_event_get_port(struct am65_cpts_event *event)
{
return (event->event1 & AM65_CPTS_EVENT_1_PORT_NUMBER_MASK) >>
@@ -239,20 +259,12 @@ static int am65_cpts_event_get_type(struct am65_cpts_event *event)
AM65_CPTS_EVENT_1_EVENT_TYPE_SHIFT;
}
-static int am65_cpts_cpts_purge_events(struct am65_cpts *cpts)
+static int am65_cpts_purge_events(struct am65_cpts *cpts)
{
- struct list_head *this, *next;
- struct am65_cpts_event *event;
int removed = 0;
- list_for_each_safe(this, next, &cpts->events) {
- event = list_entry(this, struct am65_cpts_event, list);
- if (time_after(jiffies, event->tmo)) {
- list_del_init(&event->list);
- list_add(&event->list, &cpts->pool);
- ++removed;
- }
- }
+ removed += am65_cpts_purge_event_list(cpts, &cpts->events_tx);
+ removed += am65_cpts_purge_event_list(cpts, &cpts->events_rx);
if (removed)
dev_dbg(cpts->dev, "event pool cleaned up %d\n", removed);
@@ -287,7 +299,7 @@ static int __am65_cpts_fifo_read(struct am65_cpts *cpts)
struct am65_cpts_event, list);
if (!event) {
- if (am65_cpts_cpts_purge_events(cpts)) {
+ if (am65_cpts_purge_events(cpts)) {
dev_err(cpts->dev, "cpts: event pool empty\n");
ret = -1;
goto out;
@@ -306,11 +318,21 @@ static int __am65_cpts_fifo_read(struct am65_cpts *cpts)
cpts->timestamp);
break;
case AM65_CPTS_EV_RX:
+ event->tmo = jiffies +
+ msecs_to_jiffies(AM65_CPTS_EVENT_RX_TX_TIMEOUT);
+
+ list_move_tail(&event->list, &cpts->events_rx);
+
+ dev_dbg(cpts->dev,
+ "AM65_CPTS_EV_RX e1:%08x e2:%08x t:%lld\n",
+ event->event1, event->event2,
+ event->timestamp);
+ break;
case AM65_CPTS_EV_TX:
event->tmo = jiffies +
msecs_to_jiffies(AM65_CPTS_EVENT_RX_TX_TIMEOUT);
- list_move_tail(&event->list, &cpts->events);
+ list_move_tail(&event->list, &cpts->events_tx);
dev_dbg(cpts->dev,
"AM65_CPTS_EV_TX e1:%08x e2:%08x t:%lld\n",
@@ -828,7 +850,7 @@ static bool am65_cpts_match_tx_ts(struct am65_cpts *cpts,
return found;
}
-static void am65_cpts_find_ts(struct am65_cpts *cpts)
+static void am65_cpts_find_tx_ts(struct am65_cpts *cpts)
{
struct am65_cpts_event *event;
struct list_head *this, *next;
@@ -837,7 +859,7 @@ static void am65_cpts_find_ts(struct am65_cpts *cpts)
LIST_HEAD(events);
spin_lock_irqsave(&cpts->lock, flags);
- list_splice_init(&cpts->events, &events);
+ list_splice_init(&cpts->events_tx, &events);
spin_unlock_irqrestore(&cpts->lock, flags);
list_for_each_safe(this, next, &events) {
@@ -850,7 +872,7 @@ static void am65_cpts_find_ts(struct am65_cpts *cpts)
}
spin_lock_irqsave(&cpts->lock, flags);
- list_splice_tail(&events, &cpts->events);
+ list_splice_tail(&events, &cpts->events_tx);
list_splice_tail(&events_free, &cpts->pool);
spin_unlock_irqrestore(&cpts->lock, flags);
}
@@ -861,7 +883,7 @@ static long am65_cpts_ts_work(struct ptp_clock_info *ptp)
unsigned long flags;
long delay = -1;
- am65_cpts_find_ts(cpts);
+ am65_cpts_find_tx_ts(cpts);
spin_lock_irqsave(&cpts->txq.lock, flags);
if (!skb_queue_empty(&cpts->txq))
@@ -905,7 +927,7 @@ static u64 am65_cpts_find_rx_ts(struct am65_cpts *cpts, u32 skb_mtype_seqid)
spin_lock_irqsave(&cpts->lock, flags);
__am65_cpts_fifo_read(cpts);
- list_for_each_safe(this, next, &cpts->events) {
+ list_for_each_safe(this, next, &cpts->events_rx) {
event = list_entry(this, struct am65_cpts_event, list);
if (time_after(jiffies, event->tmo)) {
list_move(&event->list, &cpts->pool);
@@ -1155,7 +1177,8 @@ struct am65_cpts *am65_cpts_create(struct device *dev, void __iomem *regs,
return ERR_PTR(ret);
mutex_init(&cpts->ptp_clk_lock);
- INIT_LIST_HEAD(&cpts->events);
+ INIT_LIST_HEAD(&cpts->events_tx);
+ INIT_LIST_HEAD(&cpts->events_rx);
INIT_LIST_HEAD(&cpts->pool);
spin_lock_init(&cpts->lock);
skb_queue_head_init(&cpts->txq);
diff --git a/drivers/net/mctp/mctp-usb.c b/drivers/net/mctp/mctp-usb.c
index 36ccc53b1797..ef860cfc629f 100644
--- a/drivers/net/mctp/mctp-usb.c
+++ b/drivers/net/mctp/mctp-usb.c
@@ -96,11 +96,13 @@ static netdev_tx_t mctp_usb_start_xmit(struct sk_buff *skb,
skb->data, skb->len,
mctp_usb_out_complete, skb);
+ /* Stop the TX queue first to prevent a race with URB completion */
+ netif_stop_queue(dev);
rc = usb_submit_urb(urb, GFP_ATOMIC);
- if (rc)
+ if (rc) {
+ netif_wake_queue(dev);
goto err_drop;
- else
- netif_stop_queue(dev);
+ }
return NETDEV_TX_OK;
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index 194570443493..5d8d0214786c 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -886,8 +886,11 @@ static ssize_t userdatum_value_show(struct config_item *item, char *buf)
static void update_userdata(struct netconsole_target *nt)
{
- int complete_idx = 0, child_count = 0;
struct list_head *entry;
+ int child_count = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&target_list_lock, flags);
/* Clear the current string in case the last userdatum was deleted */
nt->userdata_length = 0;
@@ -897,8 +900,11 @@ static void update_userdata(struct netconsole_target *nt)
struct userdatum *udm_item;
struct config_item *item;
- if (WARN_ON_ONCE(child_count >= MAX_EXTRADATA_ITEMS))
- break;
+ if (child_count >= MAX_EXTRADATA_ITEMS) {
+ spin_unlock_irqrestore(&target_list_lock, flags);
+ WARN_ON_ONCE(1);
+ return;
+ }
child_count++;
item = container_of(entry, struct config_item, ci_entry);
@@ -912,12 +918,11 @@ static void update_userdata(struct netconsole_target *nt)
* one entry length (1/MAX_EXTRADATA_ITEMS long), entry count is
* checked to not exceed MAX items with child_count above
*/
- complete_idx += scnprintf(&nt->extradata_complete[complete_idx],
- MAX_EXTRADATA_ENTRY_LEN, " %s=%s\n",
- item->ci_name, udm_item->value);
+ nt->userdata_length += scnprintf(&nt->extradata_complete[nt->userdata_length],
+ MAX_EXTRADATA_ENTRY_LEN, " %s=%s\n",
+ item->ci_name, udm_item->value);
}
- nt->userdata_length = strnlen(nt->extradata_complete,
- sizeof(nt->extradata_complete));
+ spin_unlock_irqrestore(&target_list_lock, flags);
}
static ssize_t userdatum_value_store(struct config_item *item, const char *buf,
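
Besides taking target_list_lock, the rewrite drops the trailing strnlen() and instead accumulates the scnprintf() return values, so the write offset and the recorded length can never disagree. A user-space approximation using snprintf(), which for a sufficiently large buffer returns the same byte counts scnprintf() would (the key/value data here is made up for illustration):

#include <stdio.h>

int main(void)
{
	char buf[256];
	const char *keys[] = { "rack", "role" };
	const char *vals[] = { "r12", "edge" };
	int len = 0;

	/* Accumulate the offset from each format call instead of
	 * re-measuring the buffer with strnlen() afterwards. */
	for (int i = 0; i < 2; i++)
		len += snprintf(buf + len, sizeof(buf) - len,
				" %s=%s\n", keys[i], vals[i]);

	printf("userdata_length=%d\n%s", len, buf);
	return 0;
}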
diff --git a/drivers/net/ovpn/tcp.c b/drivers/net/ovpn/tcp.c
index 289f62c5d2c7..0d7f30360d87 100644
--- a/drivers/net/ovpn/tcp.c
+++ b/drivers/net/ovpn/tcp.c
@@ -560,16 +560,34 @@ static void ovpn_tcp_close(struct sock *sk, long timeout)
static __poll_t ovpn_tcp_poll(struct file *file, struct socket *sock,
poll_table *wait)
{
- __poll_t mask = datagram_poll(file, sock, wait);
+ struct sk_buff_head *queue = &sock->sk->sk_receive_queue;
struct ovpn_socket *ovpn_sock;
+ struct ovpn_peer *peer = NULL;
+ __poll_t mask;
rcu_read_lock();
ovpn_sock = rcu_dereference_sk_user_data(sock->sk);
- if (ovpn_sock && ovpn_sock->peer &&
- !skb_queue_empty(&ovpn_sock->peer->tcp.user_queue))
- mask |= EPOLLIN | EPOLLRDNORM;
+ /* if we landed in this callback, we expect to have a
+ * meaningful state. The ovpn_socket lifecycle would
+ * otherwise have prevented us from getting here.
+ */
+ if (WARN(!ovpn_sock || !ovpn_sock->peer,
+ "ovpn: null state in ovpn_tcp_poll!")) {
+ rcu_read_unlock();
+ return 0;
+ }
+
+ if (ovpn_peer_hold(ovpn_sock->peer)) {
+ peer = ovpn_sock->peer;
+ queue = &peer->tcp.user_queue;
+ }
rcu_read_unlock();
+ mask = datagram_poll_queue(file, sock, wait, queue);
+
+ if (peer)
+ ovpn_peer_put(peer);
+
return mask;
}
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index deeefb962566..36a0c1b7f59c 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -738,6 +738,12 @@ static int dp83867_config_init(struct phy_device *phydev)
return ret;
}
+ /* Although the DP83867 reports EEE capability through the
+ * MDIO_PCS_EEE_ABLE and MDIO_AN_EEE_ADV registers, the feature
+ * is not actually implemented in hardware.
+ */
+ phy_disable_eee(phydev);
+
if (phy_interface_is_rgmii(phydev) ||
phydev->interface == PHY_INTERFACE_MODE_SGMII) {
val = phy_read(phydev, MII_DP83867_PHYCTRL);
diff --git a/drivers/net/phy/dp83869.c b/drivers/net/phy/dp83869.c
index a2cd1cc35cde..1f381d7b13ff 100644
--- a/drivers/net/phy/dp83869.c
+++ b/drivers/net/phy/dp83869.c
@@ -84,7 +84,7 @@
#define DP83869_CLK_DELAY_DEF 7
/* STRAP_STS1 bits */
-#define DP83869_STRAP_OP_MODE_MASK GENMASK(2, 0)
+#define DP83869_STRAP_OP_MODE_MASK GENMASK(11, 9)
#define DP83869_STRAP_STS1_RESERVED BIT(11)
#define DP83869_STRAP_MIRROR_ENABLED BIT(12)
@@ -528,7 +528,7 @@ static int dp83869_set_strapped_mode(struct phy_device *phydev)
if (val < 0)
return val;
- dp83869->mode = val & DP83869_STRAP_OP_MODE_MASK;
+ dp83869->mode = FIELD_GET(DP83869_STRAP_OP_MODE_MASK, val);
return 0;
}
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 79ce3eb6752b..604b5de0c158 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -4262,6 +4262,8 @@ static int __lan8814_ptp_probe_once(struct phy_device *phydev, char *pin_name,
{
struct lan8814_shared_priv *shared = phy_package_get_priv(phydev);
+ shared->phydev = phydev;
+
/* Initialise shared lock for clock */
mutex_init(&shared->shared_lock);
@@ -4317,8 +4319,6 @@ static int __lan8814_ptp_probe_once(struct phy_device *phydev, char *pin_name,
phydev_dbg(phydev, "successfully registered ptp clock\n");
- shared->phydev = phydev;
-
/* The EP.4 is shared between all the PHYs in the package and also it
* can be accessed by any of the PHYs
*/
diff --git a/drivers/net/phy/realtek/realtek_main.c b/drivers/net/phy/realtek/realtek_main.c
index a724b21b4fe7..16a347084293 100644
--- a/drivers/net/phy/realtek/realtek_main.c
+++ b/drivers/net/phy/realtek/realtek_main.c
@@ -154,7 +154,7 @@
#define RTL_8211FVD_PHYID 0x001cc878
#define RTL_8221B 0x001cc840
#define RTL_8221B_VB_CG 0x001cc849
-#define RTL_8221B_VN_CG 0x001cc84a
+#define RTL_8221B_VM_CG 0x001cc84a
#define RTL_8251B 0x001cc862
#define RTL_8261C 0x001cc890
@@ -1523,16 +1523,16 @@ static int rtl8221b_vb_cg_c45_match_phy_device(struct phy_device *phydev,
return rtlgen_is_c45_match(phydev, RTL_8221B_VB_CG, true);
}
-static int rtl8221b_vn_cg_c22_match_phy_device(struct phy_device *phydev,
+static int rtl8221b_vm_cg_c22_match_phy_device(struct phy_device *phydev,
const struct phy_driver *phydrv)
{
- return rtlgen_is_c45_match(phydev, RTL_8221B_VN_CG, false);
+ return rtlgen_is_c45_match(phydev, RTL_8221B_VM_CG, false);
}
-static int rtl8221b_vn_cg_c45_match_phy_device(struct phy_device *phydev,
+static int rtl8221b_vm_cg_c45_match_phy_device(struct phy_device *phydev,
const struct phy_driver *phydrv)
{
- return rtlgen_is_c45_match(phydev, RTL_8221B_VN_CG, true);
+ return rtlgen_is_c45_match(phydev, RTL_8221B_VM_CG, true);
}
static int rtl_internal_nbaset_match_phy_device(struct phy_device *phydev,
@@ -1879,7 +1879,7 @@ static struct phy_driver realtek_drvs[] = {
.suspend = genphy_c45_pma_suspend,
.resume = rtlgen_c45_resume,
}, {
- .match_phy_device = rtl8221b_vn_cg_c22_match_phy_device,
+ .match_phy_device = rtl8221b_vm_cg_c22_match_phy_device,
.name = "RTL8221B-VM-CG 2.5Gbps PHY (C22)",
.probe = rtl822x_probe,
.get_features = rtl822x_get_features,
@@ -1892,8 +1892,8 @@ static struct phy_driver realtek_drvs[] = {
.read_page = rtl821x_read_page,
.write_page = rtl821x_write_page,
}, {
- .match_phy_device = rtl8221b_vn_cg_c45_match_phy_device,
- .name = "RTL8221B-VN-CG 2.5Gbps PHY (C45)",
+ .match_phy_device = rtl8221b_vm_cg_c45_match_phy_device,
+ .name = "RTL8221B-VM-CG 2.5Gbps PHY (C45)",
.probe = rtl822x_probe,
.config_init = rtl822xb_config_init,
.get_rate_matching = rtl822xb_get_rate_matching,
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index 85bd5d845409..232bbd79a4de 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -230,7 +230,9 @@ static int ax88172_bind(struct usbnet *dev, struct usb_interface *intf)
int i;
unsigned long gpio_bits = dev->driver_info->data;
- usbnet_get_endpoints(dev,intf);
+ ret = usbnet_get_endpoints(dev, intf);
+ if (ret)
+ goto out;
/* Toggle the GPIOs in a manufacturer/model specific way */
for (i = 2; i >= 0; i--) {
@@ -848,7 +850,9 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
dev->driver_priv = priv;
- usbnet_get_endpoints(dev, intf);
+ ret = usbnet_get_endpoints(dev, intf);
+ if (ret)
+ return ret;
/* Maybe the boot loader passed the MAC address via device tree */
if (!eth_platform_get_mac_address(&dev->udev->dev, buf)) {
@@ -1281,7 +1285,9 @@ static int ax88178_bind(struct usbnet *dev, struct usb_interface *intf)
int ret;
u8 buf[ETH_ALEN] = {0};
- usbnet_get_endpoints(dev,intf);
+ ret = usbnet_get_endpoints(dev, intf);
+ if (ret)
+ return ret;
/* Get the MAC address */
ret = asix_read_cmd(dev, AX_CMD_READ_NODE_ID, 0, 0, ETH_ALEN, buf, 0);
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
index 92add3daadbb..278e6cb6f4d9 100644
--- a/drivers/net/usb/rtl8150.c
+++ b/drivers/net/usb/rtl8150.c
@@ -685,9 +685,16 @@ static netdev_tx_t rtl8150_start_xmit(struct sk_buff *skb,
rtl8150_t *dev = netdev_priv(netdev);
int count, res;
+ /* pad the frame and ensure terminating USB packet, datasheet 9.2.3 */
+ count = max(skb->len, ETH_ZLEN);
+ if (count % 64 == 0)
+ count++;
+ if (skb_padto(skb, count)) {
+ netdev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+ }
+
netif_stop_queue(netdev);
- count = (skb->len < 60) ? 60 : skb->len;
- count = (count & 0x3f) ? count : count + 1;
dev->tx_skb = skb;
usb_fill_bulk_urb(dev->tx_urb, dev->udev, usb_sndbulkpipe(dev->udev, 2),
skb->data, count, write_bulk_callback, dev);
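
The length rule itself is unchanged: pad short frames up to the 60-byte Ethernet minimum, then bump any length that is a multiple of the 64-byte bulk packet size so the transfer ends in a short, terminating packet. What the fix adds is skb_padto(), which extends the buffer itself, so the adapter no longer transmits uninitialized bytes past skb->len. The arithmetic as a standalone sketch (the 64-byte max packet size is an assumption taken from the old `count & 0x3f` test):

    #include <stdio.h>

    #define ETH_ZLEN   60   /* minimum Ethernet frame length */
    #define USB_MAXPKT 64   /* assumed bulk endpoint max packet size */

    static int tx_len(int skb_len)
    {
        int count = skb_len < ETH_ZLEN ? ETH_ZLEN : skb_len;

        if (count % USB_MAXPKT == 0)    /* would end on a packet boundary */
            count++;                    /* force a short terminating packet */
        return count;
    }

    int main(void)
    {
        printf("%d -> %d\n",  10, tx_len(10));   /*  10 -> 60 */
        printf("%d -> %d\n",  64, tx_len(64));   /*  64 -> 65 */
        printf("%d -> %d\n", 128, tx_len(128));  /* 128 -> 129 */
        return 0;
    }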
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index bf01f2728531..697cd9d866d3 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -1659,6 +1659,8 @@ void usbnet_disconnect (struct usb_interface *intf)
net = dev->net;
unregister_netdev (net);
+ cancel_work_sync(&dev->kevent);
+
while ((urb = usb_get_from_anchor(&dev->deferred))) {
dev_kfree_skb(urb->context);
kfree(urb->sg);
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index a757cbcab87f..8e8a179aaa49 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1379,9 +1379,14 @@ static struct sk_buff *virtnet_receive_xsk_merge(struct net_device *dev, struct
ret = XDP_PASS;
rcu_read_lock();
prog = rcu_dereference(rq->xdp_prog);
- /* TODO: support multi buffer. */
- if (prog && num_buf == 1)
- ret = virtnet_xdp_handler(prog, xdp, dev, xdp_xmit, stats);
+ if (prog) {
+ /* TODO: support multi buffer. */
+ if (num_buf == 1)
+ ret = virtnet_xdp_handler(prog, xdp, dev, xdp_xmit,
+ stats);
+ else
+ ret = XDP_ABORTED;
+ }
rcu_read_unlock();
switch (ret) {
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index e595b0979a56..b3b00d324075 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -1937,6 +1937,7 @@ int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id)
if (cmd_id == WMI_CMD_UNSUPPORTED) {
ath10k_warn(ar, "wmi command %d is not supported by firmware\n",
cmd_id);
+ dev_kfree_skb_any(skb);
return ret;
}
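
The one-liner fixes a leak in a routine that takes ownership of the skb: every return path, including the early "unsupported command" bail-out, must consume the buffer, because callers assume ownership transferred on entry. A user-space analog of the rule (names hypothetical):

    #include <stdlib.h>

    struct cmd_buf { char data[64]; };

    static int cmd_supported(int cmd)
    {
        return cmd != 42;               /* arbitrary demo rule */
    }

    /* Takes ownership of buf on every path, success or failure. */
    static int send_cmd(struct cmd_buf *buf, int cmd)
    {
        if (!cmd_supported(cmd)) {
            free(buf);                  /* the fix: free on early error, too */
            return -1;
        }
        free(buf);                      /* stand-in for the queue consuming it */
        return 0;
    }

    int main(void)
    {
        struct cmd_buf *buf = malloc(sizeof(*buf));

        if (buf)
            send_cmd(buf, 42);          /* error path, no leak */
        return 0;
    }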
diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c
index 2810752260f2..812686173ac8 100644
--- a/drivers/net/wireless/ath/ath11k/core.c
+++ b/drivers/net/wireless/ath/ath11k/core.c
@@ -912,42 +912,84 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
static const struct dmi_system_id ath11k_pm_quirk_table[] = {
{
.driver_data = (void *)ATH11K_PM_WOW,
- .matches = {
+ .matches = { /* X13 G4 AMD #1 */
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "21J3"),
+ },
+ },
+ {
+ .driver_data = (void *)ATH11K_PM_WOW,
+ .matches = { /* X13 G4 AMD #2 */
DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "21J4"),
},
},
{
.driver_data = (void *)ATH11K_PM_WOW,
- .matches = {
+ .matches = { /* T14 G4 AMD #1 */
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "21K3"),
+ },
+ },
+ {
+ .driver_data = (void *)ATH11K_PM_WOW,
+ .matches = { /* T14 G4 AMD #2 */
DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "21K4"),
},
},
{
.driver_data = (void *)ATH11K_PM_WOW,
- .matches = {
+ .matches = { /* P14s G4 AMD #1 */
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "21K5"),
+ },
+ },
+ {
+ .driver_data = (void *)ATH11K_PM_WOW,
+ .matches = { /* P14s G4 AMD #2 */
DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "21K6"),
},
},
{
.driver_data = (void *)ATH11K_PM_WOW,
- .matches = {
+ .matches = { /* T16 G2 AMD #1 */
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "21K7"),
+ },
+ },
+ {
+ .driver_data = (void *)ATH11K_PM_WOW,
+ .matches = { /* T16 G2 AMD #2 */
DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "21K8"),
},
},
{
.driver_data = (void *)ATH11K_PM_WOW,
- .matches = {
+ .matches = { /* P16s G2 AMD #1 */
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "21K9"),
+ },
+ },
+ {
+ .driver_data = (void *)ATH11K_PM_WOW,
+ .matches = { /* P16s G2 AMD #2 */
DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "21KA"),
},
},
{
.driver_data = (void *)ATH11K_PM_WOW,
- .matches = {
+ .matches = { /* T14s G4 AMD #1 */
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "21F8"),
+ },
+ },
+ {
+ .driver_data = (void *)ATH11K_PM_WOW,
+ .matches = { /* T14s G4 AMD #2 */
DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "21F9"),
},
diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
index 106e2530b64e..0e41b5a91d66 100644
--- a/drivers/net/wireless/ath/ath11k/mac.c
+++ b/drivers/net/wireless/ath/ath11k/mac.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
#include <net/mac80211.h>
@@ -4417,9 +4417,9 @@ static int ath11k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
}
if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
- flags |= WMI_KEY_PAIRWISE;
+ flags = WMI_KEY_PAIRWISE;
else
- flags |= WMI_KEY_GROUP;
+ flags = WMI_KEY_GROUP;
ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
"%s for peer %pM on vdev %d flags 0x%X, type = %d, num_sta %d\n",
@@ -4456,7 +4456,7 @@ static int ath11k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
is_ap_with_no_sta = (vif->type == NL80211_IFTYPE_AP &&
!arvif->num_stations);
- if ((flags & WMI_KEY_PAIRWISE) || cmd == SET_KEY || is_ap_with_no_sta) {
+ if (flags == WMI_KEY_PAIRWISE || cmd == SET_KEY || is_ap_with_no_sta) {
ret = ath11k_install_key(arvif, key, cmd, peer_addr, flags);
if (ret) {
ath11k_warn(ab, "ath11k_install_key failed (%d)\n", ret);
@@ -4470,7 +4470,7 @@ static int ath11k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
goto exit;
}
- if ((flags & WMI_KEY_GROUP) && cmd == SET_KEY && is_ap_with_no_sta)
+ if (flags == WMI_KEY_GROUP && cmd == SET_KEY && is_ap_with_no_sta)
arvif->reinstall_group_keys = true;
}
diff --git a/drivers/net/wireless/ath/ath12k/mac.c b/drivers/net/wireless/ath/ath12k/mac.c
index 1d7b60aa5cb0..eacab798630a 100644
--- a/drivers/net/wireless/ath/ath12k/mac.c
+++ b/drivers/net/wireless/ath/ath12k/mac.c
@@ -8290,23 +8290,32 @@ static void ath12k_mgmt_over_wmi_tx_drop(struct ath12k *ar, struct sk_buff *skb)
wake_up(&ar->txmgmt_empty_waitq);
}
-int ath12k_mac_tx_mgmt_pending_free(int buf_id, void *skb, void *ctx)
+static void ath12k_mac_tx_mgmt_free(struct ath12k *ar, int buf_id)
{
- struct sk_buff *msdu = skb;
+ struct sk_buff *msdu;
struct ieee80211_tx_info *info;
- struct ath12k *ar = ctx;
- struct ath12k_base *ab = ar->ab;
spin_lock_bh(&ar->txmgmt_idr_lock);
- idr_remove(&ar->txmgmt_idr, buf_id);
+ msdu = idr_remove(&ar->txmgmt_idr, buf_id);
spin_unlock_bh(&ar->txmgmt_idr_lock);
- dma_unmap_single(ab->dev, ATH12K_SKB_CB(msdu)->paddr, msdu->len,
+
+ if (!msdu)
+ return;
+
+ dma_unmap_single(ar->ab->dev, ATH12K_SKB_CB(msdu)->paddr, msdu->len,
DMA_TO_DEVICE);
info = IEEE80211_SKB_CB(msdu);
memset(&info->status, 0, sizeof(info->status));
- ath12k_mgmt_over_wmi_tx_drop(ar, skb);
+ ath12k_mgmt_over_wmi_tx_drop(ar, msdu);
+}
+
+int ath12k_mac_tx_mgmt_pending_free(int buf_id, void *skb, void *ctx)
+{
+ struct ath12k *ar = ctx;
+
+ ath12k_mac_tx_mgmt_free(ar, buf_id);
return 0;
}
@@ -8315,17 +8324,10 @@ static int ath12k_mac_vif_txmgmt_idr_remove(int buf_id, void *skb, void *ctx)
{
struct ieee80211_vif *vif = ctx;
struct ath12k_skb_cb *skb_cb = ATH12K_SKB_CB(skb);
- struct sk_buff *msdu = skb;
struct ath12k *ar = skb_cb->ar;
- struct ath12k_base *ab = ar->ab;
- if (skb_cb->vif == vif) {
- spin_lock_bh(&ar->txmgmt_idr_lock);
- idr_remove(&ar->txmgmt_idr, buf_id);
- spin_unlock_bh(&ar->txmgmt_idr_lock);
- dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len,
- DMA_TO_DEVICE);
- }
+ if (skb_cb->vif == vif)
+ ath12k_mac_tx_mgmt_free(ar, buf_id);
return 0;
}
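
The refactor leans on idr_remove() returning the entry it removed. By acting only on that returned pointer, taken under txmgmt_idr_lock, two racing callers cannot both unmap and free the same msdu: the loser of the race sees NULL and returns. A user-space analog of the remove-and-claim pattern (a fixed slot table stands in for the IDR):

    #include <stdlib.h>
    #include <pthread.h>

    static void *slots[8];
    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    static void *slot_remove(int id)
    {
        void *p;

        pthread_mutex_lock(&lock);
        p = slots[id];
        slots[id] = NULL;   /* mirrors idr_remove() returning the old entry */
        pthread_mutex_unlock(&lock);
        return p;
    }

    static void release(int id)
    {
        void *p = slot_remove(id);

        if (!p)             /* somebody else already claimed and freed it */
            return;
        free(p);
    }

    int main(void)
    {
        slots[3] = malloc(16);
        release(3);
        release(3);         /* safe: second call sees NULL and returns */
        return 0;
    }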
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index 8afaffe31031..bb96b87b2a6e 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -5627,8 +5627,7 @@ brcmf_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
*cookie, le16_to_cpu(action_frame->len),
le32_to_cpu(af_params->channel));
- ack = brcmf_p2p_send_action_frame(cfg, cfg_to_ndev(cfg),
- af_params);
+ ack = brcmf_p2p_send_action_frame(vif->ifp, af_params);
cfg80211_mgmt_tx_status(wdev, *cookie, buf, len, ack,
GFP_KERNEL);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
index 0dc9d28cd77b..e1752a513c73 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
@@ -1529,6 +1529,7 @@ int brcmf_p2p_notify_action_tx_complete(struct brcmf_if *ifp,
/**
* brcmf_p2p_tx_action_frame() - send action frame over fil.
*
+ * @ifp: interface to transmit on.
* @p2p: p2p info struct for vif.
* @af_params: action frame data/info.
*
@@ -1538,12 +1539,11 @@ int brcmf_p2p_notify_action_tx_complete(struct brcmf_if *ifp,
* The WLC_E_ACTION_FRAME_COMPLETE event will be received when the action
* frame is transmitted.
*/
-static s32 brcmf_p2p_tx_action_frame(struct brcmf_p2p_info *p2p,
+static s32 brcmf_p2p_tx_action_frame(struct brcmf_if *ifp,
+ struct brcmf_p2p_info *p2p,
struct brcmf_fil_af_params_le *af_params)
{
struct brcmf_pub *drvr = p2p->cfg->pub;
- struct brcmf_cfg80211_vif *vif;
- struct brcmf_p2p_action_frame *p2p_af;
s32 err = 0;
brcmf_dbg(TRACE, "Enter\n");
@@ -1552,14 +1552,7 @@ static s32 brcmf_p2p_tx_action_frame(struct brcmf_p2p_info *p2p,
clear_bit(BRCMF_P2P_STATUS_ACTION_TX_COMPLETED, &p2p->status);
clear_bit(BRCMF_P2P_STATUS_ACTION_TX_NOACK, &p2p->status);
- /* check if it is a p2p_presence response */
- p2p_af = (struct brcmf_p2p_action_frame *)af_params->action_frame.data;
- if (p2p_af->subtype == P2P_AF_PRESENCE_RSP)
- vif = p2p->bss_idx[P2PAPI_BSSCFG_CONNECTION].vif;
- else
- vif = p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif;
-
- err = brcmf_fil_bsscfg_data_set(vif->ifp, "actframe", af_params,
+ err = brcmf_fil_bsscfg_data_set(ifp, "actframe", af_params,
sizeof(*af_params));
if (err) {
bphy_err(drvr, " sending action frame has failed\n");
@@ -1711,16 +1704,14 @@ static bool brcmf_p2p_check_dwell_overflow(u32 requested_dwell,
/**
* brcmf_p2p_send_action_frame() - send action frame .
*
- * @cfg: driver private data for cfg80211 interface.
- * @ndev: net device to transmit on.
+ * @ifp: interface to transmit on.
* @af_params: configuration data for action frame.
*/
-bool brcmf_p2p_send_action_frame(struct brcmf_cfg80211_info *cfg,
- struct net_device *ndev,
+bool brcmf_p2p_send_action_frame(struct brcmf_if *ifp,
struct brcmf_fil_af_params_le *af_params)
{
+ struct brcmf_cfg80211_info *cfg = ifp->drvr->config;
struct brcmf_p2p_info *p2p = &cfg->p2p;
- struct brcmf_if *ifp = netdev_priv(ndev);
struct brcmf_fil_action_frame_le *action_frame;
struct brcmf_config_af_params config_af_params;
struct afx_hdl *afx_hdl = &p2p->afx_hdl;
@@ -1857,7 +1848,7 @@ bool brcmf_p2p_send_action_frame(struct brcmf_cfg80211_info *cfg,
if (af_params->channel)
msleep(P2P_AF_RETRY_DELAY_TIME);
- ack = !brcmf_p2p_tx_action_frame(p2p, af_params);
+ ack = !brcmf_p2p_tx_action_frame(ifp, p2p, af_params);
tx_retry++;
dwell_overflow = brcmf_p2p_check_dwell_overflow(requested_dwell,
dwell_jiffies);
@@ -2217,7 +2208,6 @@ static struct wireless_dev *brcmf_p2p_create_p2pdev(struct brcmf_p2p_info *p2p,
WARN_ON(p2p_ifp->bsscfgidx != bsscfgidx);
- init_completion(&p2p->send_af_done);
INIT_WORK(&p2p->afx_hdl.afx_work, brcmf_p2p_afx_handler);
init_completion(&p2p->afx_hdl.act_frm_scan);
init_completion(&p2p->wait_next_af);
@@ -2513,6 +2503,8 @@ s32 brcmf_p2p_attach(struct brcmf_cfg80211_info *cfg, bool p2pdev_forced)
pri_ifp = brcmf_get_ifp(cfg->pub, 0);
p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif = pri_ifp->vif;
+ init_completion(&p2p->send_af_done);
+
if (p2pdev_forced) {
err_ptr = brcmf_p2p_create_p2pdev(p2p, NULL, NULL);
if (IS_ERR(err_ptr)) {
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h
index d2ecee565bf2..d3137ebd7158 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h
@@ -168,8 +168,7 @@ int brcmf_p2p_notify_action_frame_rx(struct brcmf_if *ifp,
int brcmf_p2p_notify_action_tx_complete(struct brcmf_if *ifp,
const struct brcmf_event_msg *e,
void *data);
-bool brcmf_p2p_send_action_frame(struct brcmf_cfg80211_info *cfg,
- struct net_device *ndev,
+bool brcmf_p2p_send_action_frame(struct brcmf_if *ifp,
struct brcmf_fil_af_params_le *af_params);
bool brcmf_p2p_scan_finding_common_channel(struct brcmf_cfg80211_info *cfg,
struct brcmf_bss_info_le *bi);
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/link.c b/drivers/net/wireless/intel/iwlwifi/mld/link.c
index 738f80fe0c50..60d814bf5779 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/link.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/link.c
@@ -501,6 +501,7 @@ void iwl_mld_remove_link(struct iwl_mld *mld,
struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(bss_conf->vif);
struct iwl_mld_link *link = iwl_mld_link_from_mac80211(bss_conf);
bool is_deflink = link == &mld_vif->deflink;
+ u8 fw_id = link->fw_id;
if (WARN_ON(!link || link->active))
return;
@@ -513,10 +514,10 @@ void iwl_mld_remove_link(struct iwl_mld *mld,
RCU_INIT_POINTER(mld_vif->link[bss_conf->link_id], NULL);
- if (WARN_ON(link->fw_id >= mld->fw->ucode_capa.num_links))
+ if (WARN_ON(fw_id >= mld->fw->ucode_capa.num_links))
return;
- RCU_INIT_POINTER(mld->fw_id_to_bss_conf[link->fw_id], NULL);
+ RCU_INIT_POINTER(mld->fw_id_to_bss_conf[fw_id], NULL);
}
void iwl_mld_handle_missed_beacon_notif(struct iwl_mld *mld,
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index c916176bd9f0..72fb675a696f 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1042,7 +1042,7 @@ static blk_status_t nvme_map_data(struct request *req)
return nvme_pci_setup_data_prp(req, &iter);
}
-static blk_status_t nvme_pci_setup_meta_sgls(struct request *req)
+static blk_status_t nvme_pci_setup_meta_iter(struct request *req)
{
struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
unsigned int entries = req->nr_integrity_segments;
@@ -1072,8 +1072,12 @@ static blk_status_t nvme_pci_setup_meta_sgls(struct request *req)
* descriptor provides an explicit length, so we're relying on that
* mechanism to catch any misunderstandings between the application and
* device.
+ *
+ * P2P DMA also needs to use the blk_dma_iter method, so mptr setup
+ * leverages this routine when that happens.
*/
- if (entries == 1 && !(nvme_req(req)->flags & NVME_REQ_USERCMD)) {
+ if (!nvme_ctrl_meta_sgl_supported(&dev->ctrl) ||
+ (entries == 1 && !(nvme_req(req)->flags & NVME_REQ_USERCMD))) {
iod->cmd.common.metadata = cpu_to_le64(iter.addr);
iod->meta_total_len = iter.len;
iod->meta_dma = iter.addr;
@@ -1114,6 +1118,9 @@ static blk_status_t nvme_pci_setup_meta_mptr(struct request *req)
struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
struct bio_vec bv = rq_integrity_vec(req);
+ if (is_pci_p2pdma_page(bv.bv_page))
+ return nvme_pci_setup_meta_iter(req);
+
iod->meta_dma = dma_map_bvec(nvmeq->dev->dev, &bv, rq_dma_dir(req), 0);
if (dma_mapping_error(nvmeq->dev->dev, iod->meta_dma))
return BLK_STS_IOERR;
@@ -1128,7 +1135,7 @@ static blk_status_t nvme_map_metadata(struct request *req)
if ((iod->cmd.common.flags & NVME_CMD_SGL_METABUF) &&
nvme_pci_metadata_use_sgls(req))
- return nvme_pci_setup_meta_sgls(req);
+ return nvme_pci_setup_meta_iter(req);
return nvme_pci_setup_meta_mptr(req);
}
diff --git a/drivers/nvme/target/auth.c b/drivers/nvme/target/auth.c
index b340380f3892..ceba21684e82 100644
--- a/drivers/nvme/target/auth.c
+++ b/drivers/nvme/target/auth.c
@@ -298,7 +298,7 @@ int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response,
const char *hash_name;
u8 *challenge = req->sq->dhchap_c1;
struct nvme_dhchap_key *transformed_key;
- u8 buf[4];
+ u8 buf[4], sc_c = ctrl->concat ? 1 : 0;
int ret;
hash_name = nvme_auth_hmac_name(ctrl->shash_id);
@@ -367,13 +367,14 @@ int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response,
ret = crypto_shash_update(shash, buf, 2);
if (ret)
goto out;
- memset(buf, 0, 4);
+ *buf = sc_c;
ret = crypto_shash_update(shash, buf, 1);
if (ret)
goto out;
ret = crypto_shash_update(shash, "HostHost", 8);
if (ret)
goto out;
+ memset(buf, 0, 4);
ret = crypto_shash_update(shash, ctrl->hostnqn, strlen(ctrl->hostnqn));
if (ret)
goto out;
diff --git a/drivers/nvmem/rcar-efuse.c b/drivers/nvmem/rcar-efuse.c
index f24bdb9cb5a7..d9a96a1d59c8 100644
--- a/drivers/nvmem/rcar-efuse.c
+++ b/drivers/nvmem/rcar-efuse.c
@@ -127,6 +127,7 @@ static const struct of_device_id rcar_fuse_match[] = {
{ .compatible = "renesas,r8a779h0-otp", .data = &rcar_fuse_v4m },
{ /* sentinel */ }
};
+MODULE_DEVICE_TABLE(of, rcar_fuse_match);
static struct platform_driver rcar_fuse_driver = {
.probe = rcar_fuse_probe,
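
A sketch of why the added MODULE_DEVICE_TABLE() line matters (hypothetical names; kernel build context assumed): the macro exports the match table as modalias strings, so a driver built as a module can be autoloaded by udev when a matching DT node is probed; without it the driver only binds if it is already loaded.

    #include <linux/module.h>
    #include <linux/of.h>
    #include <linux/platform_device.h>

    static int demo_probe(struct platform_device *pdev)
    {
        return 0;
    }

    static const struct of_device_id demo_of_match[] = {
        { .compatible = "vendor,demo-device" },
        { /* sentinel */ }
    };
    MODULE_DEVICE_TABLE(of, demo_of_match);   /* without this: no autoload */

    static struct platform_driver demo_driver = {
        .probe = demo_probe,
        .driver = {
            .name = "demo",
            .of_match_table = demo_of_match,
        },
    };
    module_platform_driver(demo_driver);

    MODULE_DESCRIPTION("MODULE_DEVICE_TABLE illustration");
    MODULE_LICENSE("GPL");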
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index 65c3c23255b7..1cd93549d093 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -671,6 +671,36 @@ err:
}
}
+static int of_check_msi_parent(struct device_node *dev_node, struct device_node **msi_node)
+{
+ struct of_phandle_args msi_spec;
+ int ret;
+
+ /*
+	 * An msi-parent phandle with a missing or zero #msi-cells
+ * property identifies a 1:1 ID translation mapping.
+ *
+ * Set the msi controller node if the firmware matches this
+ * condition.
+ */
+ ret = of_parse_phandle_with_optional_args(dev_node, "msi-parent", "#msi-cells",
+ 0, &msi_spec);
+ if (ret)
+ return ret;
+
+ if ((*msi_node && *msi_node != msi_spec.np) || msi_spec.args_count != 0)
+ ret = -EINVAL;
+
+ if (!ret) {
+ /* Return with a node reference held */
+ *msi_node = msi_spec.np;
+ return 0;
+ }
+ of_node_put(msi_spec.np);
+
+ return ret;
+}
+
/**
* of_msi_xlate - map a MSI ID and find relevant MSI controller node
* @dev: device for which the mapping is to be done.
@@ -678,7 +708,7 @@ err:
* @id_in: Device ID.
*
* Walk up the device hierarchy looking for devices with a "msi-map"
- * property. If found, apply the mapping to @id_in.
+ * or "msi-parent" property. If found, apply the mapping to @id_in.
* If @msi_np points to a non-NULL device node pointer, only entries targeting
* that node will be matched; if it points to a NULL value, it will receive the
* device node of the first matching target phandle, with a reference held.
@@ -692,14 +722,18 @@ u32 of_msi_xlate(struct device *dev, struct device_node **msi_np, u32 id_in)
/*
* Walk up the device parent links looking for one with a
- * "msi-map" property.
+ * "msi-map" or an "msi-parent" property.
*/
- for (parent_dev = dev; parent_dev; parent_dev = parent_dev->parent)
+ for (parent_dev = dev; parent_dev; parent_dev = parent_dev->parent) {
if (!of_map_id(parent_dev->of_node, id_in, "msi-map",
"msi-map-mask", msi_np, &id_out))
break;
+ if (!of_check_msi_parent(parent_dev->of_node, msi_np))
+ break;
+ }
return id_out;
}
+EXPORT_SYMBOL_GPL(of_msi_xlate);
/**
* of_msi_map_get_device_domain - Use msi-map to find the relevant MSI domain
@@ -741,8 +775,10 @@ struct irq_domain *of_msi_get_domain(struct device *dev,
of_for_each_phandle(&it, err, np, "msi-parent", "#msi-cells", 0) {
d = irq_find_matching_host(it.node, token);
- if (d)
+ if (d) {
+ of_node_put(it.node);
return d;
+ }
}
return NULL;
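
Both hunks apply the usual OF reference-counting convention: helpers that hand back a device_node pass a held reference, and every early exit must put any reference it will not return, including the one a phandle iterator holds for the current element when the loop is left early. The shape of of_check_msi_parent(), reduced to that rule (kernel context assumed, helper name hypothetical):

    #include <linux/of.h>

    /* Hypothetical: return the 1:1-mapping MSI parent, or NULL. */
    static struct device_node *demo_msi_parent(struct device_node *np)
    {
        struct of_phandle_args spec;

        if (of_parse_phandle_with_optional_args(np, "msi-parent",
                                                "#msi-cells", 0, &spec))
            return NULL;

        if (spec.args_count != 0) {
            of_node_put(spec.np);   /* drop the reference we were handed */
            return NULL;
        }
        return spec.np;             /* reference transferred to the caller */
    }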
diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
index 20c9333bcb1c..e92513c5bda5 100644
--- a/drivers/pci/controller/dwc/pcie-designware-host.c
+++ b/drivers/pci/controller/dwc/pcie-designware-host.c
@@ -23,6 +23,7 @@
#include "pcie-designware.h"
static struct pci_ops dw_pcie_ops;
+static struct pci_ops dw_pcie_ecam_ops;
static struct pci_ops dw_child_pcie_ops;
#define DW_PCIE_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
@@ -471,9 +472,6 @@ static int dw_pcie_create_ecam_window(struct dw_pcie_rp *pp, struct resource *re
if (IS_ERR(pp->cfg))
return PTR_ERR(pp->cfg);
- pci->dbi_base = pp->cfg->win;
- pci->dbi_phys_addr = res->start;
-
return 0;
}
@@ -529,7 +527,7 @@ static int dw_pcie_host_get_resources(struct dw_pcie_rp *pp)
if (ret)
return ret;
- pp->bridge->ops = (struct pci_ops *)&pci_generic_ecam_ops.pci_ops;
+ pp->bridge->ops = &dw_pcie_ecam_ops;
pp->bridge->sysdata = pp->cfg;
pp->cfg->priv = pp;
} else {
@@ -842,12 +840,34 @@ void __iomem *dw_pcie_own_conf_map_bus(struct pci_bus *bus, unsigned int devfn,
}
EXPORT_SYMBOL_GPL(dw_pcie_own_conf_map_bus);
+static void __iomem *dw_pcie_ecam_conf_map_bus(struct pci_bus *bus, unsigned int devfn, int where)
+{
+ struct pci_config_window *cfg = bus->sysdata;
+ struct dw_pcie_rp *pp = cfg->priv;
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ unsigned int busn = bus->number;
+
+ if (busn > 0)
+ return pci_ecam_map_bus(bus, devfn, where);
+
+ if (PCI_SLOT(devfn) > 0)
+ return NULL;
+
+ return pci->dbi_base + where;
+}
+
static struct pci_ops dw_pcie_ops = {
.map_bus = dw_pcie_own_conf_map_bus,
.read = pci_generic_config_read,
.write = pci_generic_config_write,
};
+static struct pci_ops dw_pcie_ecam_ops = {
+ .map_bus = dw_pcie_ecam_conf_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+};
+
static int dw_pcie_iatu_setup(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
index 805edbbfe7eb..c48a20602d7f 100644
--- a/drivers/pci/controller/dwc/pcie-qcom.c
+++ b/drivers/pci/controller/dwc/pcie-qcom.c
@@ -55,7 +55,6 @@
#define PARF_AXI_MSTR_WR_ADDR_HALT_V2 0x1a8
#define PARF_Q2A_FLUSH 0x1ac
#define PARF_LTSSM 0x1b0
-#define PARF_SLV_DBI_ELBI 0x1b4
#define PARF_INT_ALL_STATUS 0x224
#define PARF_INT_ALL_CLEAR 0x228
#define PARF_INT_ALL_MASK 0x22c
@@ -65,16 +64,6 @@
#define PARF_DBI_BASE_ADDR_V2_HI 0x354
#define PARF_SLV_ADDR_SPACE_SIZE_V2 0x358
#define PARF_SLV_ADDR_SPACE_SIZE_V2_HI 0x35c
-#define PARF_BLOCK_SLV_AXI_WR_BASE 0x360
-#define PARF_BLOCK_SLV_AXI_WR_BASE_HI 0x364
-#define PARF_BLOCK_SLV_AXI_WR_LIMIT 0x368
-#define PARF_BLOCK_SLV_AXI_WR_LIMIT_HI 0x36c
-#define PARF_BLOCK_SLV_AXI_RD_BASE 0x370
-#define PARF_BLOCK_SLV_AXI_RD_BASE_HI 0x374
-#define PARF_BLOCK_SLV_AXI_RD_LIMIT 0x378
-#define PARF_BLOCK_SLV_AXI_RD_LIMIT_HI 0x37c
-#define PARF_ECAM_BASE 0x380
-#define PARF_ECAM_BASE_HI 0x384
#define PARF_NO_SNOOP_OVERRIDE 0x3d4
#define PARF_ATU_BASE_ADDR 0x634
#define PARF_ATU_BASE_ADDR_HI 0x638
@@ -98,7 +87,6 @@
/* PARF_SYS_CTRL register fields */
#define MAC_PHY_POWERDOWN_IN_P2_D_MUX_EN BIT(29)
-#define PCIE_ECAM_BLOCKER_EN BIT(26)
#define MST_WAKEUP_EN BIT(13)
#define SLV_WAKEUP_EN BIT(12)
#define MSTR_ACLK_CGC_DIS BIT(10)
@@ -146,9 +134,6 @@
/* PARF_LTSSM register fields */
#define LTSSM_EN BIT(8)
-/* PARF_SLV_DBI_ELBI */
-#define SLV_DBI_ELBI_ADDR_BASE GENMASK(11, 0)
-
/* PARF_INT_ALL_{STATUS/CLEAR/MASK} register fields */
#define PARF_INT_ALL_LINK_UP BIT(13)
#define PARF_INT_MSI_DEV_0_7 GENMASK(30, 23)
@@ -262,6 +247,7 @@ struct qcom_pcie_ops {
int (*get_resources)(struct qcom_pcie *pcie);
int (*init)(struct qcom_pcie *pcie);
int (*post_init)(struct qcom_pcie *pcie);
+ void (*host_post_init)(struct qcom_pcie *pcie);
void (*deinit)(struct qcom_pcie *pcie);
void (*ltssm_enable)(struct qcom_pcie *pcie);
int (*config_sid)(struct qcom_pcie *pcie);
@@ -326,47 +312,6 @@ static void qcom_ep_reset_deassert(struct qcom_pcie *pcie)
qcom_perst_assert(pcie, false);
}
-static void qcom_pci_config_ecam(struct dw_pcie_rp *pp)
-{
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct qcom_pcie *pcie = to_qcom_pcie(pci);
- u64 addr, addr_end;
- u32 val;
-
- writel_relaxed(lower_32_bits(pci->dbi_phys_addr), pcie->parf + PARF_ECAM_BASE);
- writel_relaxed(upper_32_bits(pci->dbi_phys_addr), pcie->parf + PARF_ECAM_BASE_HI);
-
- /*
- * The only device on the root bus is a single Root Port. If we try to
- * access any devices other than Device/Function 00.0 on Bus 0, the TLP
- * will go outside of the controller to the PCI bus. But with CFG Shift
- * Feature (ECAM) enabled in iATU, there is no guarantee that the
- * response is going to be all F's. Hence, to make sure that the
- * requester gets all F's response for accesses other than the Root
- * Port, configure iATU to block the transactions starting from
- * function 1 of the root bus to the end of the root bus (i.e., from
- * dbi_base + 4KB to dbi_base + 1MB).
- */
- addr = pci->dbi_phys_addr + SZ_4K;
- writel_relaxed(lower_32_bits(addr), pcie->parf + PARF_BLOCK_SLV_AXI_WR_BASE);
- writel_relaxed(upper_32_bits(addr), pcie->parf + PARF_BLOCK_SLV_AXI_WR_BASE_HI);
-
- writel_relaxed(lower_32_bits(addr), pcie->parf + PARF_BLOCK_SLV_AXI_RD_BASE);
- writel_relaxed(upper_32_bits(addr), pcie->parf + PARF_BLOCK_SLV_AXI_RD_BASE_HI);
-
- addr_end = pci->dbi_phys_addr + SZ_1M - 1;
-
- writel_relaxed(lower_32_bits(addr_end), pcie->parf + PARF_BLOCK_SLV_AXI_WR_LIMIT);
- writel_relaxed(upper_32_bits(addr_end), pcie->parf + PARF_BLOCK_SLV_AXI_WR_LIMIT_HI);
-
- writel_relaxed(lower_32_bits(addr_end), pcie->parf + PARF_BLOCK_SLV_AXI_RD_LIMIT);
- writel_relaxed(upper_32_bits(addr_end), pcie->parf + PARF_BLOCK_SLV_AXI_RD_LIMIT_HI);
-
- val = readl_relaxed(pcie->parf + PARF_SYS_CTRL);
- val |= PCIE_ECAM_BLOCKER_EN;
- writel_relaxed(val, pcie->parf + PARF_SYS_CTRL);
-}
-
static int qcom_pcie_start_link(struct dw_pcie *pci)
{
struct qcom_pcie *pcie = to_qcom_pcie(pci);
@@ -1094,6 +1039,25 @@ static int qcom_pcie_post_init_2_7_0(struct qcom_pcie *pcie)
return 0;
}
+static int qcom_pcie_enable_aspm(struct pci_dev *pdev, void *userdata)
+{
+ /*
+ * Downstream devices need to be in D0 state before enabling PCI PM
+ * substates.
+ */
+ pci_set_power_state_locked(pdev, PCI_D0);
+ pci_enable_link_state_locked(pdev, PCIE_LINK_STATE_ALL);
+
+ return 0;
+}
+
+static void qcom_pcie_host_post_init_2_7_0(struct qcom_pcie *pcie)
+{
+ struct dw_pcie_rp *pp = &pcie->pci->pp;
+
+ pci_walk_bus(pp->bridge->bus, qcom_pcie_enable_aspm, NULL);
+}
+
static void qcom_pcie_deinit_2_7_0(struct qcom_pcie *pcie)
{
struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
@@ -1320,7 +1284,6 @@ static int qcom_pcie_host_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct qcom_pcie *pcie = to_qcom_pcie(pci);
- u16 offset;
int ret;
qcom_ep_reset_assert(pcie);
@@ -1329,17 +1292,6 @@ static int qcom_pcie_host_init(struct dw_pcie_rp *pp)
if (ret)
return ret;
- if (pp->ecam_enabled) {
- /*
- * Override ELBI when ECAM is enabled, as when ECAM is enabled,
- * ELBI moves under the 'config' space.
- */
- offset = FIELD_GET(SLV_DBI_ELBI_ADDR_BASE, readl(pcie->parf + PARF_SLV_DBI_ELBI));
- pci->elbi_base = pci->dbi_base + offset;
-
- qcom_pci_config_ecam(pp);
- }
-
ret = qcom_pcie_phy_power_on(pcie);
if (ret)
goto err_deinit;
@@ -1380,9 +1332,19 @@ static void qcom_pcie_host_deinit(struct dw_pcie_rp *pp)
pcie->cfg->ops->deinit(pcie);
}
+static void qcom_pcie_host_post_init(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct qcom_pcie *pcie = to_qcom_pcie(pci);
+
+ if (pcie->cfg->ops->host_post_init)
+ pcie->cfg->ops->host_post_init(pcie);
+}
+
static const struct dw_pcie_host_ops qcom_pcie_dw_ops = {
.init = qcom_pcie_host_init,
.deinit = qcom_pcie_host_deinit,
+ .post_init = qcom_pcie_host_post_init,
};
/* Qcom IP rev.: 2.1.0 Synopsys IP rev.: 4.01a */
@@ -1444,6 +1406,7 @@ static const struct qcom_pcie_ops ops_1_9_0 = {
.get_resources = qcom_pcie_get_resources_2_7_0,
.init = qcom_pcie_init_2_7_0,
.post_init = qcom_pcie_post_init_2_7_0,
+ .host_post_init = qcom_pcie_host_post_init_2_7_0,
.deinit = qcom_pcie_deinit_2_7_0,
.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
.config_sid = qcom_pcie_config_sid_1_9_0,
@@ -1454,6 +1417,7 @@ static const struct qcom_pcie_ops ops_1_21_0 = {
.get_resources = qcom_pcie_get_resources_2_7_0,
.init = qcom_pcie_init_2_7_0,
.post_init = qcom_pcie_post_init_2_7_0,
+ .host_post_init = qcom_pcie_host_post_init_2_7_0,
.deinit = qcom_pcie_deinit_2_7_0,
.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 7cc8281e7011..79b965158473 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -243,8 +243,7 @@ struct pcie_link_state {
/* Clock PM state */
u32 clkpm_capable:1; /* Clock PM capable? */
u32 clkpm_enabled:1; /* Current Clock PM state */
- u32 clkpm_default:1; /* Default Clock PM state by BIOS or
- override */
+ u32 clkpm_default:1; /* Default Clock PM state by BIOS */
u32 clkpm_disable:1; /* Clock PM disabled */
};
@@ -376,18 +375,6 @@ static void pcie_set_clkpm(struct pcie_link_state *link, int enable)
pcie_set_clkpm_nocheck(link, enable);
}
-static void pcie_clkpm_override_default_link_state(struct pcie_link_state *link,
- int enabled)
-{
- struct pci_dev *pdev = link->downstream;
-
- /* For devicetree platforms, enable ClockPM by default */
- if (of_have_populated_dt() && !enabled) {
- link->clkpm_default = 1;
- pci_info(pdev, "ASPM: DT platform, enabling ClockPM\n");
- }
-}
-
static void pcie_clkpm_cap_init(struct pcie_link_state *link, int blacklist)
{
int capable = 1, enabled = 1;
@@ -410,7 +397,6 @@ static void pcie_clkpm_cap_init(struct pcie_link_state *link, int blacklist)
}
link->clkpm_enabled = enabled;
link->clkpm_default = enabled;
- pcie_clkpm_override_default_link_state(link, enabled);
link->clkpm_capable = capable;
link->clkpm_disable = blacklist ? 1 : 0;
}
@@ -811,19 +797,17 @@ static void pcie_aspm_override_default_link_state(struct pcie_link_state *link)
struct pci_dev *pdev = link->downstream;
u32 override;
- /* For devicetree platforms, enable all ASPM states by default */
+ /* For devicetree platforms, enable L0s and L1 by default */
if (of_have_populated_dt()) {
- link->aspm_default = PCIE_LINK_STATE_ASPM_ALL;
+ if (link->aspm_support & PCIE_LINK_STATE_L0S)
+ link->aspm_default |= PCIE_LINK_STATE_L0S;
+ if (link->aspm_support & PCIE_LINK_STATE_L1)
+ link->aspm_default |= PCIE_LINK_STATE_L1;
override = link->aspm_default & ~link->aspm_enabled;
if (override)
- pci_info(pdev, "ASPM: DT platform, enabling%s%s%s%s%s%s%s\n",
- FLAG(override, L0S_UP, " L0s-up"),
- FLAG(override, L0S_DW, " L0s-dw"),
- FLAG(override, L1, " L1"),
- FLAG(override, L1_1, " ASPM-L1.1"),
- FLAG(override, L1_2, " ASPM-L1.2"),
- FLAG(override, L1_1_PCIPM, " PCI-PM-L1.1"),
- FLAG(override, L1_2_PCIPM, " PCI-PM-L1.2"));
+ pci_info(pdev, "ASPM: default states%s%s\n",
+ FLAG(override, L0S, " L0s"),
+ FLAG(override, L1, " L1"));
}
}
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 4a8735b275e4..3645f392a9fd 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -1604,7 +1604,7 @@ void __pci_bus_size_bridges(struct pci_bus *bus, struct list_head *realloc_head)
pbus_size_io(bus, realloc_head ? 0 : additional_io_size,
additional_io_size, realloc_head);
- if (pref) {
+ if (pref && (pref->flags & IORESOURCE_PREFETCH)) {
pbus_size_mem(bus,
IORESOURCE_MEM | IORESOURCE_PREFETCH |
(pref->flags & IORESOURCE_MEM_64),
diff --git a/drivers/platform/mellanox/mlxbf-pmc.c b/drivers/platform/mellanox/mlxbf-pmc.c
index 4776013e0764..16a2fd9fdd9b 100644
--- a/drivers/platform/mellanox/mlxbf-pmc.c
+++ b/drivers/platform/mellanox/mlxbf-pmc.c
@@ -2015,6 +2015,7 @@ static int mlxbf_pmc_init_perftype_counter(struct device *dev, unsigned int blk_
if (pmc->block[blk_num].type == MLXBF_PMC_TYPE_CRSPACE) {
/* Program crspace counters to count clock cycles using "count_clock" sysfs */
attr = &pmc->block[blk_num].attr_count_clock;
+ sysfs_attr_init(&attr->dev_attr.attr);
attr->dev_attr.attr.mode = 0644;
attr->dev_attr.show = mlxbf_pmc_count_clock_show;
attr->dev_attr.store = mlxbf_pmc_count_clock_store;
diff --git a/drivers/platform/x86/dell/alienware-wmi-wmax.c b/drivers/platform/x86/dell/alienware-wmi-wmax.c
index 31f9643a6a3b..f417dcc9af35 100644
--- a/drivers/platform/x86/dell/alienware-wmi-wmax.c
+++ b/drivers/platform/x86/dell/alienware-wmi-wmax.c
@@ -210,6 +210,14 @@ static const struct dmi_system_id awcc_dmi_table[] __initconst = {
.driver_data = &g_series_quirks,
},
{
+ .ident = "Dell Inc. G15 5530",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Dell G15 5530"),
+ },
+ .driver_data = &g_series_quirks,
+ },
+ {
.ident = "Dell Inc. G16 7630",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
@@ -1639,7 +1647,7 @@ static int wmax_wmi_probe(struct wmi_device *wdev, const void *context)
static int wmax_wmi_suspend(struct device *dev)
{
- if (awcc->hwmon)
+ if (awcc && awcc->hwmon)
awcc_hwmon_suspend(dev);
return 0;
@@ -1647,7 +1655,7 @@ static int wmax_wmi_suspend(struct device *dev)
static int wmax_wmi_resume(struct device *dev)
{
- if (awcc->hwmon)
+ if (awcc && awcc->hwmon)
awcc_hwmon_resume(dev);
return 0;
diff --git a/drivers/ptp/ptp_ocp.c b/drivers/ptp/ptp_ocp.c
index 794ec6e71990..a5c363252986 100644
--- a/drivers/ptp/ptp_ocp.c
+++ b/drivers/ptp/ptp_ocp.c
@@ -2548,7 +2548,7 @@ ptp_ocp_sma_fb_init(struct ptp_ocp *bp)
for (i = 0; i < OCP_SMA_NUM; i++) {
bp->sma[i].fixed_fcn = true;
bp->sma[i].fixed_dir = true;
- bp->sma[1].dpll_prop.capabilities &=
+ bp->sma[i].dpll_prop.capabilities &=
~DPLL_PIN_CAPABILITIES_DIRECTION_CAN_CHANGE;
}
return;
diff --git a/drivers/regulator/bd718x7-regulator.c b/drivers/regulator/bd718x7-regulator.c
index 022d98f3c32a..ea9c4058ee6a 100644
--- a/drivers/regulator/bd718x7-regulator.c
+++ b/drivers/regulator/bd718x7-regulator.c
@@ -1613,6 +1613,8 @@ static int setup_feedback_loop(struct device *dev, struct device_node *np,
step /= r1;
new[j].min = min;
+ new[j].min_sel = desc->linear_ranges[j].min_sel;
+ new[j].max_sel = desc->linear_ranges[j].max_sel;
new[j].step = step;
dev_dbg(dev, "%s: old range min %d, step %d\n",
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index cc5d05dc395c..17173239301e 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -611,8 +611,9 @@ int scsi_host_busy(struct Scsi_Host *shost)
{
int cnt = 0;
- blk_mq_tagset_busy_iter(&shost->tag_set,
- scsi_host_check_in_flight, &cnt);
+ if (shost->tag_set.ops)
+ blk_mq_tagset_busy_iter(&shost->tag_set,
+ scsi_host_check_in_flight, &cnt);
return cnt;
}
EXPORT_SYMBOL(scsi_host_busy);
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index 16d0f02af1e4..31d08c115521 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -503,7 +503,7 @@ static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
host_bcode = FC_ERROR;
goto err;
}
- if (offset + len > fsp->data_len) {
+ if (size_add(offset, len) > fsp->data_len) {
/* this should never happen */
if ((fr_flags(fp) & FCPHF_CRC_UNCHECKED) &&
fc_frame_crc_check(fp))
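
size_add() saturates at SIZE_MAX instead of wrapping, so a hostile offset plus length can no longer wrap around and slip past the bounds check. A user-space illustration of the failure mode being closed:

    #include <stdio.h>
    #include <stdint.h>

    static size_t size_add_demo(size_t a, size_t b)
    {
        size_t sum = a + b;

        return sum < a ? SIZE_MAX : sum;    /* saturate on overflow */
    }

    int main(void)
    {
        size_t data_len = 4096;
        size_t offset = SIZE_MAX - 8, len = 64;

        /* wrapping: offset + len == 55, so the bounds check wrongly passes */
        printf("wrapped add rejects:   %d\n", offset + len > data_len);
        printf("saturated add rejects: %d\n",
               size_add_demo(offset, len) > data_len);
        return 0;
    }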
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index a761c0aa5127..83ff66f954e6 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -4104,7 +4104,7 @@ void qla4xxx_srb_compl(struct kref *ref)
* The mid-level driver tries to ensure that queuecommand never gets
* invoked concurrently with itself or the interrupt handler (although
* the interrupt handler may call this routine as part of request-
- * completion handling). Unfortunely, it sometimes calls the scheduler
+ * completion handling). Unfortunately, it sometimes calls the scheduler
* in interrupt context which is a big NO! NO!.
**/
static int qla4xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
@@ -4647,7 +4647,7 @@ static int qla4xxx_cmd_wait(struct scsi_qla_host *ha)
cmd = scsi_host_find_tag(ha->host, index);
/*
* We cannot just check if the index is valid,
- * becase if we are run from the scsi eh, then
+ * because if we are run from the scsi eh, then
* the scsi/block layer is going to prevent
* the tag from being released.
*/
@@ -4952,7 +4952,7 @@ recover_ha_init_adapter:
/* Upon successful firmware/chip reset, re-initialize the adapter */
if (status == QLA_SUCCESS) {
/* For ISP-4xxx, force function 1 to always initialize
- * before function 3 to prevent both funcions from
+ * before function 3 to prevent both functions from
* stepping on top of the other */
if (is_qla40XX(ha) && (ha->mac_index == 3))
ssleep(6);
@@ -6914,7 +6914,7 @@ static int qla4xxx_sess_conn_setup(struct scsi_qla_host *ha,
struct ddb_entry *ddb_entry = NULL;
/* Create session object, with INVALID_ENTRY,
- * the targer_id would get set when we issue the login
+ * the target_id would get set when we issue the login
*/
cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport, ha->host,
cmds_max, sizeof(struct ddb_entry),
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 746ff6a1f309..1c13812a3f03 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -554,9 +554,9 @@ enum scsi_disposition scsi_check_sense(struct scsi_cmnd *scmd)
* happened, even if someone else gets the sense data.
*/
if (sshdr.asc == 0x28)
- scmd->device->ua_new_media_ctr++;
+ atomic_inc(&sdev->ua_new_media_ctr);
else if (sshdr.asc == 0x29)
- scmd->device->ua_por_ctr++;
+ atomic_inc(&sdev->ua_por_ctr);
}
if (scsi_sense_is_deferred(&sshdr))
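
The counters move from a plain post-increment to atomic_inc() because sense-data handling can run concurrently for multiple commands; a plain ++ is a read-modify-write that can silently lose updates. A user-space demonstration with C11 atomics:

    #include <stdio.h>
    #include <pthread.h>
    #include <stdatomic.h>

    static long plain_ctr;
    static atomic_long atomic_ctr;

    static void *worker(void *arg)
    {
        for (int i = 0; i < 1000000; i++) {
            plain_ctr++;                        /* racy read-modify-write */
            atomic_fetch_add(&atomic_ctr, 1);   /* indivisible increment */
        }
        return NULL;
    }

    int main(void)
    {
        pthread_t t[4];

        for (int i = 0; i < 4; i++)
            pthread_create(&t[i], NULL, worker, NULL);
        for (int i = 0; i < 4; i++)
            pthread_join(t[i], NULL);

        /* plain_ctr typically < 4000000; atomic_ctr is exactly 4000000 */
        printf("plain=%ld atomic=%ld\n", plain_ctr, atomic_load(&atomic_ctr));
        return 0;
    }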
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 567f9cd29102..6e4112143c76 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -1406,14 +1406,19 @@ static struct vmbus_channel *get_og_chn(struct storvsc_device *stor_device,
}
/*
- * Our channel array is sparsley populated and we
+	 * Our channel array could be sparsely populated and we
* initiated I/O on a processor/hw-q that does not
* currently have a designated channel. Fix this.
* The strategy is simple:
- * I. Ensure NUMA locality
- * II. Distribute evenly (best effort)
+ * I. Prefer the channel associated with the current CPU
+ * II. Ensure NUMA locality
+ * III. Distribute evenly (best effort)
*/
+ /* Prefer the channel on the I/O issuing processor/hw-q */
+ if (cpumask_test_cpu(q_num, &stor_device->alloced_cpus))
+ return stor_device->stor_chns[q_num];
+
node_mask = cpumask_of_node(cpu_to_node(q_num));
num_channels = 0;
@@ -1469,59 +1474,48 @@ static int storvsc_do_io(struct hv_device *device,
/* See storvsc_change_target_cpu(). */
outgoing_channel = READ_ONCE(stor_device->stor_chns[q_num]);
if (outgoing_channel != NULL) {
- if (outgoing_channel->target_cpu == q_num) {
- /*
- * Ideally, we want to pick a different channel if
- * available on the same NUMA node.
- */
- node_mask = cpumask_of_node(cpu_to_node(q_num));
- for_each_cpu_wrap(tgt_cpu,
- &stor_device->alloced_cpus, q_num + 1) {
- if (!cpumask_test_cpu(tgt_cpu, node_mask))
- continue;
- if (tgt_cpu == q_num)
- continue;
- channel = READ_ONCE(
- stor_device->stor_chns[tgt_cpu]);
- if (channel == NULL)
- continue;
- if (hv_get_avail_to_write_percent(
- &channel->outbound)
- > ring_avail_percent_lowater) {
- outgoing_channel = channel;
- goto found_channel;
- }
- }
+ if (hv_get_avail_to_write_percent(&outgoing_channel->outbound)
+ > ring_avail_percent_lowater)
+ goto found_channel;
- /*
- * All the other channels on the same NUMA node are
- * busy. Try to use the channel on the current CPU
- */
- if (hv_get_avail_to_write_percent(
- &outgoing_channel->outbound)
- > ring_avail_percent_lowater)
+ /*
+	 * Channel is busy; try to find a channel on the same NUMA node.
+ */
+ node_mask = cpumask_of_node(cpu_to_node(q_num));
+ for_each_cpu_wrap(tgt_cpu, &stor_device->alloced_cpus,
+ q_num + 1) {
+ if (!cpumask_test_cpu(tgt_cpu, node_mask))
+ continue;
+ channel = READ_ONCE(stor_device->stor_chns[tgt_cpu]);
+ if (!channel)
+ continue;
+ if (hv_get_avail_to_write_percent(&channel->outbound)
+ > ring_avail_percent_lowater) {
+ outgoing_channel = channel;
goto found_channel;
+ }
+ }
- /*
- * If we reach here, all the channels on the current
- * NUMA node are busy. Try to find a channel in
- * other NUMA nodes
- */
- for_each_cpu(tgt_cpu, &stor_device->alloced_cpus) {
- if (cpumask_test_cpu(tgt_cpu, node_mask))
- continue;
- channel = READ_ONCE(
- stor_device->stor_chns[tgt_cpu]);
- if (channel == NULL)
- continue;
- if (hv_get_avail_to_write_percent(
- &channel->outbound)
- > ring_avail_percent_lowater) {
- outgoing_channel = channel;
- goto found_channel;
- }
+ /*
+ * If we reach here, all the channels on the current
+ * NUMA node are busy. Try to find a channel in
+ * all NUMA nodes
+ */
+ for_each_cpu_wrap(tgt_cpu, &stor_device->alloced_cpus,
+ q_num + 1) {
+ channel = READ_ONCE(stor_device->stor_chns[tgt_cpu]);
+ if (!channel)
+ continue;
+ if (hv_get_avail_to_write_percent(&channel->outbound)
+ > ring_avail_percent_lowater) {
+ outgoing_channel = channel;
+ goto found_channel;
}
}
+ /*
+ * If we reach here, all the channels are busy. Use the
+ * original channel found.
+ */
} else {
spin_lock_irqsave(&stor_device->lock, flags);
outgoing_channel = stor_device->stor_chns[q_num];
diff --git a/drivers/spi/spi-airoha-snfi.c b/drivers/spi/spi-airoha-snfi.c
index dbe640986825..b78163eaed61 100644
--- a/drivers/spi/spi-airoha-snfi.c
+++ b/drivers/spi/spi-airoha-snfi.c
@@ -192,6 +192,14 @@
#define SPI_NAND_OP_RESET 0xff
#define SPI_NAND_OP_DIE_SELECT 0xc2
+/* SNAND FIFO commands */
+#define SNAND_FIFO_TX_BUSWIDTH_SINGLE 0x08
+#define SNAND_FIFO_TX_BUSWIDTH_DUAL 0x09
+#define SNAND_FIFO_TX_BUSWIDTH_QUAD 0x0a
+#define SNAND_FIFO_RX_BUSWIDTH_SINGLE 0x0c
+#define SNAND_FIFO_RX_BUSWIDTH_DUAL 0x0e
+#define SNAND_FIFO_RX_BUSWIDTH_QUAD 0x0f
+
#define SPI_NAND_CACHE_SIZE (SZ_4K + SZ_256)
#define SPI_MAX_TRANSFER_SIZE 511
@@ -387,10 +395,26 @@ static int airoha_snand_set_mode(struct airoha_snand_ctrl *as_ctrl,
return regmap_write(as_ctrl->regmap_ctrl, REG_SPI_CTRL_DUMMY, 0);
}
-static int airoha_snand_write_data(struct airoha_snand_ctrl *as_ctrl, u8 cmd,
- const u8 *data, int len)
+static int airoha_snand_write_data(struct airoha_snand_ctrl *as_ctrl,
+ const u8 *data, int len, int buswidth)
{
int i, data_len;
+ u8 cmd;
+
+ switch (buswidth) {
+ case 0:
+ case 1:
+ cmd = SNAND_FIFO_TX_BUSWIDTH_SINGLE;
+ break;
+ case 2:
+ cmd = SNAND_FIFO_TX_BUSWIDTH_DUAL;
+ break;
+ case 4:
+ cmd = SNAND_FIFO_TX_BUSWIDTH_QUAD;
+ break;
+ default:
+ return -EINVAL;
+ }
for (i = 0; i < len; i += data_len) {
int err;
@@ -409,16 +433,32 @@ static int airoha_snand_write_data(struct airoha_snand_ctrl *as_ctrl, u8 cmd,
return 0;
}
-static int airoha_snand_read_data(struct airoha_snand_ctrl *as_ctrl, u8 *data,
- int len)
+static int airoha_snand_read_data(struct airoha_snand_ctrl *as_ctrl,
+ u8 *data, int len, int buswidth)
{
int i, data_len;
+ u8 cmd;
+
+ switch (buswidth) {
+ case 0:
+ case 1:
+ cmd = SNAND_FIFO_RX_BUSWIDTH_SINGLE;
+ break;
+ case 2:
+ cmd = SNAND_FIFO_RX_BUSWIDTH_DUAL;
+ break;
+ case 4:
+ cmd = SNAND_FIFO_RX_BUSWIDTH_QUAD;
+ break;
+ default:
+ return -EINVAL;
+ }
for (i = 0; i < len; i += data_len) {
int err;
data_len = min(len - i, SPI_MAX_TRANSFER_SIZE);
- err = airoha_snand_set_fifo_op(as_ctrl, 0xc, data_len);
+ err = airoha_snand_set_fifo_op(as_ctrl, cmd, data_len);
if (err)
return err;
@@ -618,6 +658,10 @@ static int airoha_snand_dirmap_create(struct spi_mem_dirmap_desc *desc)
if (desc->info.offset + desc->info.length > U32_MAX)
return -EINVAL;
+ /* continuous reading is not supported */
+ if (desc->info.length > SPI_NAND_CACHE_SIZE)
+ return -E2BIG;
+
if (!airoha_snand_supports_op(desc->mem, &desc->info.op_tmpl))
return -EOPNOTSUPP;
@@ -654,13 +698,13 @@ static ssize_t airoha_snand_dirmap_read(struct spi_mem_dirmap_desc *desc,
err = airoha_snand_nfi_config(as_ctrl);
if (err)
- return err;
+ goto error_dma_mode_off;
dma_addr = dma_map_single(as_ctrl->dev, txrx_buf, SPI_NAND_CACHE_SIZE,
DMA_FROM_DEVICE);
err = dma_mapping_error(as_ctrl->dev, dma_addr);
if (err)
- return err;
+ goto error_dma_mode_off;
/* set dma addr */
err = regmap_write(as_ctrl->regmap_nfi, REG_SPI_NFI_STRADDR,
@@ -689,8 +733,9 @@ static ssize_t airoha_snand_dirmap_read(struct spi_mem_dirmap_desc *desc,
if (err)
goto error_dma_unmap;
- /* set read addr */
- err = regmap_write(as_ctrl->regmap_nfi, REG_SPI_NFI_RD_CTL3, 0x0);
+ /* set read addr: zero page offset + descriptor read offset */
+ err = regmap_write(as_ctrl->regmap_nfi, REG_SPI_NFI_RD_CTL3,
+ desc->info.offset);
if (err)
goto error_dma_unmap;
@@ -760,6 +805,8 @@ static ssize_t airoha_snand_dirmap_read(struct spi_mem_dirmap_desc *desc,
error_dma_unmap:
dma_unmap_single(as_ctrl->dev, dma_addr, SPI_NAND_CACHE_SIZE,
DMA_FROM_DEVICE);
+error_dma_mode_off:
+ airoha_snand_set_mode(as_ctrl, SPI_MODE_MANUAL);
return err;
}
@@ -824,7 +871,9 @@ static ssize_t airoha_snand_dirmap_write(struct spi_mem_dirmap_desc *desc,
if (err)
goto error_dma_unmap;
- err = regmap_write(as_ctrl->regmap_nfi, REG_SPI_NFI_PG_CTL2, 0x0);
+ /* set write addr: zero page offset + descriptor write offset */
+ err = regmap_write(as_ctrl->regmap_nfi, REG_SPI_NFI_PG_CTL2,
+ desc->info.offset);
if (err)
goto error_dma_unmap;
@@ -892,18 +941,35 @@ static ssize_t airoha_snand_dirmap_write(struct spi_mem_dirmap_desc *desc,
error_dma_unmap:
dma_unmap_single(as_ctrl->dev, dma_addr, SPI_NAND_CACHE_SIZE,
DMA_TO_DEVICE);
+ airoha_snand_set_mode(as_ctrl, SPI_MODE_MANUAL);
return err;
}
static int airoha_snand_exec_op(struct spi_mem *mem,
const struct spi_mem_op *op)
{
- u8 data[8], cmd, opcode = op->cmd.opcode;
struct airoha_snand_ctrl *as_ctrl;
+ int op_len, addr_len, dummy_len;
+ u8 buf[20], *data;
int i, err;
as_ctrl = spi_controller_get_devdata(mem->spi->controller);
+ op_len = op->cmd.nbytes;
+ addr_len = op->addr.nbytes;
+ dummy_len = op->dummy.nbytes;
+
+ if (op_len + dummy_len + addr_len > sizeof(buf))
+ return -EIO;
+
+ data = buf;
+ for (i = 0; i < op_len; i++)
+ *data++ = op->cmd.opcode >> (8 * (op_len - i - 1));
+ for (i = 0; i < addr_len; i++)
+ *data++ = op->addr.val >> (8 * (addr_len - i - 1));
+ for (i = 0; i < dummy_len; i++)
+ *data++ = 0xff;
+
/* switch to manual mode */
err = airoha_snand_set_mode(as_ctrl, SPI_MODE_MANUAL);
if (err < 0)
@@ -914,40 +980,40 @@ static int airoha_snand_exec_op(struct spi_mem *mem,
return err;
/* opcode */
- err = airoha_snand_write_data(as_ctrl, 0x8, &opcode, sizeof(opcode));
+ data = buf;
+ err = airoha_snand_write_data(as_ctrl, data, op_len,
+ op->cmd.buswidth);
if (err)
return err;
/* addr part */
- cmd = opcode == SPI_NAND_OP_GET_FEATURE ? 0x11 : 0x8;
- put_unaligned_be64(op->addr.val, data);
-
- for (i = ARRAY_SIZE(data) - op->addr.nbytes;
- i < ARRAY_SIZE(data); i++) {
- err = airoha_snand_write_data(as_ctrl, cmd, &data[i],
- sizeof(data[0]));
+ data += op_len;
+ if (addr_len) {
+ err = airoha_snand_write_data(as_ctrl, data, addr_len,
+ op->addr.buswidth);
if (err)
return err;
}
/* dummy */
- data[0] = 0xff;
- for (i = 0; i < op->dummy.nbytes; i++) {
- err = airoha_snand_write_data(as_ctrl, 0x8, &data[0],
- sizeof(data[0]));
+ data += addr_len;
+ if (dummy_len) {
+ err = airoha_snand_write_data(as_ctrl, data, dummy_len,
+ op->dummy.buswidth);
if (err)
return err;
}
/* data */
- if (op->data.dir == SPI_MEM_DATA_IN) {
- err = airoha_snand_read_data(as_ctrl, op->data.buf.in,
- op->data.nbytes);
- if (err)
- return err;
- } else {
- err = airoha_snand_write_data(as_ctrl, 0x8, op->data.buf.out,
- op->data.nbytes);
+ if (op->data.nbytes) {
+ if (op->data.dir == SPI_MEM_DATA_IN)
+ err = airoha_snand_read_data(as_ctrl, op->data.buf.in,
+ op->data.nbytes,
+ op->data.buswidth);
+ else
+ err = airoha_snand_write_data(as_ctrl, op->data.buf.out,
+ op->data.nbytes,
+ op->data.buswidth);
if (err)
return err;
}
diff --git a/drivers/spi/spi-amlogic-spifc-a4.c b/drivers/spi/spi-amlogic-spifc-a4.c
index 4338d00e56a6..35a7c4965e11 100644
--- a/drivers/spi/spi-amlogic-spifc-a4.c
+++ b/drivers/spi/spi-amlogic-spifc-a4.c
@@ -286,7 +286,7 @@ static int aml_sfc_set_bus_width(struct aml_sfc *sfc, u8 buswidth, u32 mask)
for (i = 0; i <= LANE_MAX; i++) {
if (buswidth == 1 << i) {
- conf = i << __bf_shf(mask);
+ conf = i << __ffs(mask);
return regmap_update_bits(sfc->regmap_base, SFC_SPI_CFG,
mask, conf);
}
@@ -566,7 +566,7 @@ static int aml_sfc_raw_io_op(struct aml_sfc *sfc, const struct spi_mem_op *op)
if (!op->data.nbytes)
goto end_xfer;
- conf = (op->data.nbytes >> RAW_SIZE_BW) << __bf_shf(RAW_EXT_SIZE);
+ conf = (op->data.nbytes >> RAW_SIZE_BW) << __ffs(RAW_EXT_SIZE);
ret = regmap_update_bits(sfc->regmap_base, SFC_SPI_CFG, RAW_EXT_SIZE, conf);
if (ret)
goto err_out;
diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c
index 8fb13df8ff87..81017402bc56 100644
--- a/drivers/spi/spi-cadence-quadspi.c
+++ b/drivers/spi/spi-cadence-quadspi.c
@@ -1995,7 +1995,7 @@ static int cqspi_probe(struct platform_device *pdev)
if (cqspi->use_direct_mode) {
ret = cqspi_request_mmap_dma(cqspi);
if (ret == -EPROBE_DEFER)
- goto probe_setup_failed;
+ goto probe_dma_failed;
}
if (!(ddata && (ddata->quirks & CQSPI_DISABLE_RUNTIME_PM))) {
@@ -2019,9 +2019,10 @@ static int cqspi_probe(struct platform_device *pdev)
return 0;
probe_setup_failed:
- cqspi_controller_enable(cqspi, 0);
if (!(ddata && (ddata->quirks & CQSPI_DISABLE_RUNTIME_PM)))
pm_runtime_disable(dev);
+probe_dma_failed:
+ cqspi_controller_enable(cqspi, 0);
probe_reset_failed:
if (cqspi->is_jh7110)
cqspi_jh7110_disable_clk(pdev, cqspi);
diff --git a/drivers/spi/spi-dw-mmio.c b/drivers/spi/spi-dw-mmio.c
index f0f576fac77a..7a5197586919 100644
--- a/drivers/spi/spi-dw-mmio.c
+++ b/drivers/spi/spi-dw-mmio.c
@@ -358,7 +358,9 @@ static int dw_spi_mmio_probe(struct platform_device *pdev)
if (IS_ERR(dwsmmio->rstc))
return PTR_ERR(dwsmmio->rstc);
- reset_control_deassert(dwsmmio->rstc);
+ ret = reset_control_deassert(dwsmmio->rstc);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "Failed to deassert resets\n");
dws->bus_num = pdev->id;
diff --git a/drivers/spi/spi-intel-pci.c b/drivers/spi/spi-intel-pci.c
index 4b63cb98df9c..b8c572394aac 100644
--- a/drivers/spi/spi-intel-pci.c
+++ b/drivers/spi/spi-intel-pci.c
@@ -75,10 +75,13 @@ static const struct pci_device_id intel_spi_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0x38a4), (unsigned long)&bxt_info },
{ PCI_VDEVICE(INTEL, 0x43a4), (unsigned long)&cnl_info },
{ PCI_VDEVICE(INTEL, 0x4b24), (unsigned long)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0x4d23), (unsigned long)&cnl_info },
{ PCI_VDEVICE(INTEL, 0x4da4), (unsigned long)&bxt_info },
{ PCI_VDEVICE(INTEL, 0x51a4), (unsigned long)&cnl_info },
{ PCI_VDEVICE(INTEL, 0x54a4), (unsigned long)&cnl_info },
{ PCI_VDEVICE(INTEL, 0x5794), (unsigned long)&cnl_info },
+ { PCI_VDEVICE(INTEL, 0x5825), (unsigned long)&cnl_info },
+ { PCI_VDEVICE(INTEL, 0x7723), (unsigned long)&cnl_info },
{ PCI_VDEVICE(INTEL, 0x7a24), (unsigned long)&cnl_info },
{ PCI_VDEVICE(INTEL, 0x7aa4), (unsigned long)&cnl_info },
{ PCI_VDEVICE(INTEL, 0x7e23), (unsigned long)&cnl_info },
diff --git a/drivers/spi/spi-intel.c b/drivers/spi/spi-intel.c
index 13bbb2133507..1775ad39e633 100644
--- a/drivers/spi/spi-intel.c
+++ b/drivers/spi/spi-intel.c
@@ -132,6 +132,7 @@
#define FLCOMP_C0DEN_16M 0x05
#define FLCOMP_C0DEN_32M 0x06
#define FLCOMP_C0DEN_64M 0x07
+#define FLCOMP_C0DEN_128M 0x08
#define INTEL_SPI_TIMEOUT 5000 /* ms */
#define INTEL_SPI_FIFO_SZ 64
@@ -1347,7 +1348,12 @@ static int intel_spi_read_desc(struct intel_spi *ispi)
case FLCOMP_C0DEN_64M:
ispi->chip0_size = SZ_64M;
break;
+ case FLCOMP_C0DEN_128M:
+ ispi->chip0_size = SZ_128M;
+ break;
default:
+ dev_warn(ispi->dev, "unsupported C0DEN: %#lx\n",
+ flcomp & FLCOMP_C0DEN_MASK);
return -EINVAL;
}
diff --git a/drivers/spi/spi-nxp-fspi.c b/drivers/spi/spi-nxp-fspi.c
index f9371f98a65b..b6c79e50d842 100644
--- a/drivers/spi/spi-nxp-fspi.c
+++ b/drivers/spi/spi-nxp-fspi.c
@@ -404,6 +404,10 @@ struct nxp_fspi {
#define FSPI_NEED_INIT BIT(0)
#define FSPI_DTR_MODE BIT(1)
int flags;
+ /* save the previous operation clock rate */
+ unsigned long pre_op_rate;
+	/* the maximum clock rate the FSPI outputs to the device */
+ unsigned long max_rate;
};
static inline int needs_ip_only(struct nxp_fspi *f)
@@ -685,10 +689,13 @@ static void nxp_fspi_select_rx_sample_clk_source(struct nxp_fspi *f,
* change the mode back to mode 0.
*/
reg = fspi_readl(f, f->iobase + FSPI_MCR0);
- if (op_is_dtr)
+ if (op_is_dtr) {
reg |= FSPI_MCR0_RXCLKSRC(3);
- else /*select mode 0 */
+ f->max_rate = 166000000;
+	} else { /* select mode 0 */
reg &= ~FSPI_MCR0_RXCLKSRC(3);
+ f->max_rate = 66000000;
+ }
fspi_writel(f, reg, f->iobase + FSPI_MCR0);
}
@@ -719,6 +726,12 @@ static void nxp_fspi_dll_calibration(struct nxp_fspi *f)
0, POLL_TOUT, true);
if (ret)
dev_warn(f->dev, "DLL lock failed, please fix it!\n");
+
+ /*
+ * For ERR050272, DLL lock status bit is not accurate,
+ * wait for 4us more as a workaround.
+ */
+ udelay(4);
}
/*
@@ -780,11 +793,17 @@ static void nxp_fspi_select_mem(struct nxp_fspi *f, struct spi_device *spi,
uint64_t size_kb;
/*
- * Return, if previously selected target device is same as current
- * requested target device. Also the DTR or STR mode do not change.
+	 * Return when all of the following conditions are met:
+	 * 1. the previously selected target device is the same as the
+	 *    currently requested target device,
+	 * 2. the DTR or STR mode does not change,
+	 * 3. the previous operation's max rate equals the current one.
+	 *
+	 * Otherwise, a re-configuration is needed.
*/
if ((f->selected == spi_get_chipselect(spi, 0)) &&
- (!!(f->flags & FSPI_DTR_MODE) == op_is_dtr))
+ (!!(f->flags & FSPI_DTR_MODE) == op_is_dtr) &&
+ (f->pre_op_rate == op->max_freq))
return;
/* Reset FLSHxxCR0 registers */
@@ -802,6 +821,7 @@ static void nxp_fspi_select_mem(struct nxp_fspi *f, struct spi_device *spi,
dev_dbg(f->dev, "Target device [CS:%x] selected\n", spi_get_chipselect(spi, 0));
nxp_fspi_select_rx_sample_clk_source(f, op_is_dtr);
+ rate = min(f->max_rate, op->max_freq);
if (op_is_dtr) {
f->flags |= FSPI_DTR_MODE;
@@ -832,6 +852,8 @@ static void nxp_fspi_select_mem(struct nxp_fspi *f, struct spi_device *spi,
else
nxp_fspi_dll_override(f);
+ f->pre_op_rate = op->max_freq;
+
f->selected = spi_get_chipselect(spi, 0);
}
diff --git a/drivers/spi/spi-rockchip-sfc.c b/drivers/spi/spi-rockchip-sfc.c
index 9eba5c0a60f2..b3c2b03b1153 100644
--- a/drivers/spi/spi-rockchip-sfc.c
+++ b/drivers/spi/spi-rockchip-sfc.c
@@ -704,7 +704,12 @@ static int rockchip_sfc_probe(struct platform_device *pdev)
ret = -ENOMEM;
goto err_dma;
}
- sfc->dma_buffer = virt_to_phys(sfc->buffer);
+ sfc->dma_buffer = dma_map_single(dev, sfc->buffer,
+ sfc->max_iosize, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev, sfc->dma_buffer)) {
+ ret = -ENOMEM;
+ goto err_dma_map;
+ }
}
ret = devm_spi_register_controller(dev, host);
@@ -715,6 +720,9 @@ static int rockchip_sfc_probe(struct platform_device *pdev)
return 0;
err_register:
+ dma_unmap_single(dev, sfc->dma_buffer, sfc->max_iosize,
+ DMA_BIDIRECTIONAL);
+err_dma_map:
free_pages((unsigned long)sfc->buffer, get_order(sfc->max_iosize));
err_dma:
pm_runtime_get_sync(dev);
@@ -736,6 +744,8 @@ static void rockchip_sfc_remove(struct platform_device *pdev)
struct spi_controller *host = sfc->host;
spi_unregister_controller(host);
+ dma_unmap_single(&pdev->dev, sfc->dma_buffer, sfc->max_iosize,
+ DMA_BIDIRECTIONAL);
free_pages((unsigned long)sfc->buffer, get_order(sfc->max_iosize));
clk_disable_unprepare(sfc->clk);
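
The probe fix above replaces a bare virt_to_phys() with a streaming DMA mapping, so the DMA API can apply IOMMU translation and cache maintenance, and it unmaps on every exit path. A sketch of the general pattern in kernel context, with illustrative names:

    dma_addr_t handle;

    handle = dma_map_single(dev, buf, len, DMA_BIDIRECTIONAL);
    if (dma_mapping_error(dev, handle))
        return -ENOMEM;             /* never use the handle on error */

    /* ... program 'handle' into the controller ... */

    dma_unmap_single(dev, handle, len, DMA_BIDIRECTIONAL);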
diff --git a/drivers/staging/gpib/agilent_82350b/agilent_82350b.c b/drivers/staging/gpib/agilent_82350b/agilent_82350b.c
index 94bbb3b6576d..01a5bb43cd2d 100644
--- a/drivers/staging/gpib/agilent_82350b/agilent_82350b.c
+++ b/drivers/staging/gpib/agilent_82350b/agilent_82350b.c
@@ -182,10 +182,12 @@ static int agilent_82350b_accel_write(struct gpib_board *board, u8 *buffer,
return retval;
#endif
- retval = agilent_82350b_write(board, buffer, 1, 0, &num_bytes);
- *bytes_written += num_bytes;
- if (retval < 0)
- return retval;
+ if (fifotransferlength > 0) {
+ retval = agilent_82350b_write(board, buffer, 1, 0, &num_bytes);
+ *bytes_written += num_bytes;
+ if (retval < 0)
+ return retval;
+ }
write_byte(tms_priv, tms_priv->imr0_bits & ~HR_BOIE, IMR0);
for (i = 1; i < fifotransferlength;) {
@@ -217,7 +219,7 @@ static int agilent_82350b_accel_write(struct gpib_board *board, u8 *buffer,
break;
}
write_byte(tms_priv, tms_priv->imr0_bits, IMR0);
- if (retval)
+ if (retval < 0)
return retval;
if (send_eoi) {
diff --git a/drivers/staging/gpib/fmh_gpib/fmh_gpib.c b/drivers/staging/gpib/fmh_gpib/fmh_gpib.c
index 164dcfc3c9ef..f7bfb4a8e553 100644
--- a/drivers/staging/gpib/fmh_gpib/fmh_gpib.c
+++ b/drivers/staging/gpib/fmh_gpib/fmh_gpib.c
@@ -1517,6 +1517,11 @@ void fmh_gpib_detach(struct gpib_board *board)
resource_size(e_priv->gpib_iomem_res));
}
fmh_gpib_generic_detach(board);
+
+ if (board->dev) {
+ put_device(board->dev);
+ board->dev = NULL;
+ }
}
static int fmh_gpib_pci_attach_impl(struct gpib_board *board,
diff --git a/drivers/staging/gpib/ni_usb/ni_usb_gpib.c b/drivers/staging/gpib/ni_usb/ni_usb_gpib.c
index 4dec87d12687..1f8412de9fa3 100644
--- a/drivers/staging/gpib/ni_usb/ni_usb_gpib.c
+++ b/drivers/staging/gpib/ni_usb/ni_usb_gpib.c
@@ -327,7 +327,10 @@ static void ni_usb_soft_update_status(struct gpib_board *board, unsigned int ni_
board->status &= ~clear_mask;
board->status &= ~ni_usb_ibsta_mask;
board->status |= ni_usb_ibsta & ni_usb_ibsta_mask;
- // FIXME should generate events on DTAS and DCAS
+ if (ni_usb_ibsta & DCAS)
+ push_gpib_event(board, EVENT_DEV_CLR);
+ if (ni_usb_ibsta & DTAS)
+ push_gpib_event(board, EVENT_DEV_TRG);
spin_lock_irqsave(&board->spinlock, flags);
/* remove set status bits from monitored set -- why? */
@@ -694,8 +697,12 @@ static int ni_usb_read(struct gpib_board *board, u8 *buffer, size_t length,
*/
break;
case NIUSB_ATN_STATE_ERROR:
- retval = -EIO;
- dev_err(&usb_dev->dev, "read when ATN set\n");
+ if (status.ibsta & DCAS) {
+ retval = -EINTR;
+ } else {
+ retval = -EIO;
+ dev_dbg(&usb_dev->dev, "read when ATN set stat: 0x%06x\n", status.ibsta);
+ }
break;
case NIUSB_ADDRESSING_ERROR:
retval = -EIO;
diff --git a/drivers/tee/qcomtee/Kconfig b/drivers/tee/qcomtee/Kconfig
index 927686abceb1..9f19dee08db4 100644
--- a/drivers/tee/qcomtee/Kconfig
+++ b/drivers/tee/qcomtee/Kconfig
@@ -2,6 +2,7 @@
# Qualcomm Trusted Execution Environment Configuration
config QCOMTEE
tristate "Qualcomm TEE Support"
+ depends on ARCH_QCOM || COMPILE_TEST
depends on !CPU_BIG_ENDIAN
select QCOM_SCM
select QCOM_TZMEM_MODE_SHMBRIDGE
diff --git a/drivers/tee/qcomtee/call.c b/drivers/tee/qcomtee/call.c
index cc17a48d0ab7..ac134452cc9c 100644
--- a/drivers/tee/qcomtee/call.c
+++ b/drivers/tee/qcomtee/call.c
@@ -308,7 +308,7 @@ out_failed:
}
/* Release any IO and OO objects not processed. */
- for (; u[i].type && i < num_params; i++) {
+ for (; i < num_params && u[i].type; i++) {
if (u[i].type == QCOMTEE_ARG_TYPE_OO ||
u[i].type == QCOMTEE_ARG_TYPE_IO)
qcomtee_object_put(u[i].o);
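
The one-line call.c fix reorders a short-circuit condition: the original `u[i].type && i < num_params` dereferences u[i] before checking the bound, reading one element past the array on the final iteration. Because && evaluates left to right, the bounds test must come first (release() below stands in for the put calls):

    /* wrong: may index past the array before the bound is checked */
    for (; u[i].type && i < num_params; i++)
        release(&u[i]);

    /* right: the bound check short-circuits the dereference */
    for (; i < num_params && u[i].type; i++)
        release(&u[i]);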
diff --git a/drivers/tee/qcomtee/core.c b/drivers/tee/qcomtee/core.c
index 783acc59cfa9..b6715ada7700 100644
--- a/drivers/tee/qcomtee/core.c
+++ b/drivers/tee/qcomtee/core.c
@@ -424,7 +424,7 @@ static int qcomtee_prepare_msg(struct qcomtee_object_invoke_ctx *oic,
if (!(u[i].flags & QCOMTEE_ARG_FLAGS_UADDR))
memcpy(msgptr, u[i].b.addr, u[i].b.size);
else if (copy_from_user(msgptr, u[i].b.uaddr, u[i].b.size))
- return -EINVAL;
+ return -EFAULT;
offset += qcomtee_msg_offset_align(u[i].b.size);
ib++;
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
index a53ba04d9770..710ae4d40aec 100644
--- a/drivers/tty/serial/8250/8250_dw.c
+++ b/drivers/tty/serial/8250/8250_dw.c
@@ -635,7 +635,9 @@ static int dw8250_probe(struct platform_device *pdev)
if (IS_ERR(data->rst))
return PTR_ERR(data->rst);
- reset_control_deassert(data->rst);
+ err = reset_control_deassert(data->rst);
+ if (err)
+ return dev_err_probe(dev, err, "failed to deassert resets\n");
err = devm_add_action_or_reset(dev, dw8250_reset_control_assert, data->rst);
if (err)
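
The 8250_dw change stops ignoring the return value of reset_control_deassert() and routes it through dev_err_probe(), which logs the failure (quietly for -EPROBE_DEFER) and returns the error code in one expression. A sketch of the idiom in a probe path:

    static int example_probe_step(struct device *dev,
                                  struct reset_control *rst)
    {
        int err = reset_control_deassert(rst);

        if (err)
            return dev_err_probe(dev, err,
                                 "failed to deassert resets\n");
        return 0;
    }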
diff --git a/drivers/tty/serial/8250/8250_exar.c b/drivers/tty/serial/8250/8250_exar.c
index 04a0cbab02c2..b9cc0b786ca6 100644
--- a/drivers/tty/serial/8250/8250_exar.c
+++ b/drivers/tty/serial/8250/8250_exar.c
@@ -40,6 +40,8 @@
#define PCI_DEVICE_ID_ACCESSIO_COM_4SM 0x10db
#define PCI_DEVICE_ID_ACCESSIO_COM_8SM 0x10ea
+#define PCI_DEVICE_ID_ADVANTECH_XR17V352 0x0018
+
#define PCI_DEVICE_ID_COMMTECH_4224PCI335 0x0002
#define PCI_DEVICE_ID_COMMTECH_4222PCI335 0x0004
#define PCI_DEVICE_ID_COMMTECH_2324PCI335 0x000a
@@ -1622,6 +1624,12 @@ static const struct exar8250_board pbn_fastcom35x_8 = {
.exit = pci_xr17v35x_exit,
};
+static const struct exar8250_board pbn_adv_XR17V352 = {
+ .num_ports = 2,
+ .setup = pci_xr17v35x_setup,
+ .exit = pci_xr17v35x_exit,
+};
+
static const struct exar8250_board pbn_exar_XR17V4358 = {
.num_ports = 12,
.setup = pci_xr17v35x_setup,
@@ -1696,6 +1704,9 @@ static const struct pci_device_id exar_pci_tbl[] = {
USR_DEVICE(XR17C152, 2980, pbn_exar_XR17C15x),
USR_DEVICE(XR17C152, 2981, pbn_exar_XR17C15x),
+ /* ADVANTECH devices */
+ EXAR_DEVICE(ADVANTECH, XR17V352, pbn_adv_XR17V352),
+
/* Exar Corp. XR17C15[248] Dual/Quad/Octal UART */
EXAR_DEVICE(EXAR, XR17C152, pbn_exar_XR17C15x),
EXAR_DEVICE(EXAR, XR17C154, pbn_exar_XR17C15x),
diff --git a/drivers/tty/serial/8250/8250_mtk.c b/drivers/tty/serial/8250/8250_mtk.c
index b44de2ed7413..5875a7b9b4b1 100644
--- a/drivers/tty/serial/8250/8250_mtk.c
+++ b/drivers/tty/serial/8250/8250_mtk.c
@@ -435,6 +435,7 @@ static int __maybe_unused mtk8250_runtime_suspend(struct device *dev)
while
(serial_in(up, MTK_UART_DEBUG0));
+ clk_disable_unprepare(data->uart_clk);
clk_disable_unprepare(data->bus_clk);
return 0;
@@ -445,6 +446,7 @@ static int __maybe_unused mtk8250_runtime_resume(struct device *dev)
struct mtk8250_data *data = dev_get_drvdata(dev);
clk_prepare_enable(data->bus_clk);
+ clk_prepare_enable(data->uart_clk);
return 0;
}
@@ -475,13 +477,13 @@ static int mtk8250_probe_of(struct platform_device *pdev, struct uart_port *p,
int dmacnt;
#endif
- data->uart_clk = devm_clk_get(&pdev->dev, "baud");
+ data->uart_clk = devm_clk_get_enabled(&pdev->dev, "baud");
if (IS_ERR(data->uart_clk)) {
/*
* For compatibility with older device trees try unnamed
* clk when no baud clk can be found.
*/
- data->uart_clk = devm_clk_get(&pdev->dev, NULL);
+ data->uart_clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(data->uart_clk)) {
dev_warn(&pdev->dev, "Can't get uart clock\n");
return PTR_ERR(data->uart_clk);
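
The mtk8250 hunks pair two things: the baud clock is now obtained with devm_clk_get_enabled(), which prepares and enables it and registers a devres action to undo both on driver detach, and the runtime-PM callbacks gate the same clock so it is off while the port is suspended. The acquisition idiom, sketched:

    clk = devm_clk_get_enabled(dev, "baud");
    if (IS_ERR(clk))
        return PTR_ERR(clk);
    /* clk is prepared and enabled here; devres disables and
     * unprepares it automatically when the driver unbinds */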
diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
index 1a2c4c14f6aa..c7435595dce1 100644
--- a/drivers/tty/serial/sc16is7xx.c
+++ b/drivers/tty/serial/sc16is7xx.c
@@ -588,13 +588,6 @@ static int sc16is7xx_set_baud(struct uart_port *port, int baud)
div /= prescaler;
}
- /* Enable enhanced features */
- sc16is7xx_efr_lock(port);
- sc16is7xx_port_update(port, SC16IS7XX_EFR_REG,
- SC16IS7XX_EFR_ENABLE_BIT,
- SC16IS7XX_EFR_ENABLE_BIT);
- sc16is7xx_efr_unlock(port);
-
/* If bit MCR_CLKSEL is set, the divide by 4 prescaler is activated. */
sc16is7xx_port_update(port, SC16IS7XX_MCR_REG,
SC16IS7XX_MCR_CLKSEL_BIT,
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 538b2f991609..62bb62b82cbe 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -1014,16 +1014,18 @@ static int sci_handle_fifo_overrun(struct uart_port *port)
struct sci_port *s = to_sci_port(port);
const struct plat_sci_reg *reg;
int copied = 0;
- u16 status;
+ u32 status;
- reg = sci_getreg(port, s->params->overrun_reg);
- if (!reg->size)
- return 0;
+ if (s->type != SCI_PORT_RSCI) {
+ reg = sci_getreg(port, s->params->overrun_reg);
+ if (!reg->size)
+ return 0;
+ }
- status = sci_serial_in(port, s->params->overrun_reg);
+ status = s->ops->read_reg(port, s->params->overrun_reg);
if (status & s->params->overrun_mask) {
status &= ~s->params->overrun_mask;
- sci_serial_out(port, s->params->overrun_reg, status);
+ s->ops->write_reg(port, s->params->overrun_reg, status);
port->icount.overrun++;
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index 8339fec975b9..9ca27de4767a 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -4282,8 +4282,8 @@ int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
get, UIC_GET_ATTR_ID(attr_sel),
UFS_UIC_COMMAND_RETRIES - retries);
- if (mib_val && !ret)
- *mib_val = uic_cmd.argument3;
+ if (mib_val)
+ *mib_val = ret == 0 ? uic_cmd.argument3 : 0;
if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)
&& pwr_mode_change)
@@ -4999,7 +4999,7 @@ EXPORT_SYMBOL_GPL(ufshcd_hba_enable);
static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer)
{
- int tx_lanes = 0, i, err = 0;
+ int tx_lanes, i, err = 0;
if (!peer)
ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
@@ -6673,6 +6673,20 @@ static void ufshcd_err_handler(struct work_struct *work)
hba->saved_uic_err, hba->force_reset,
ufshcd_is_link_broken(hba) ? "; link is broken" : "");
+ /*
+ * Use ufshcd_rpm_get_noresume() here to safely perform link recovery
+ * even if an error occurs during runtime suspend or runtime resume.
+ * This avoids potential deadlocks that could happen if we tried to
+ * resume the device while a PM operation is already in progress.
+ */
+ ufshcd_rpm_get_noresume(hba);
+ if (hba->pm_op_in_progress) {
+ ufshcd_link_recovery(hba);
+ ufshcd_rpm_put(hba);
+ return;
+ }
+ ufshcd_rpm_put(hba);
+
down(&hba->host_sem);
spin_lock_irqsave(hba->host->host_lock, flags);
if (ufshcd_err_handling_should_stop(hba)) {
@@ -6684,14 +6698,6 @@ static void ufshcd_err_handler(struct work_struct *work)
}
spin_unlock_irqrestore(hba->host->host_lock, flags);
- ufshcd_rpm_get_noresume(hba);
- if (hba->pm_op_in_progress) {
- ufshcd_link_recovery(hba);
- ufshcd_rpm_put(hba);
- return;
- }
- ufshcd_rpm_put(hba);
-
ufshcd_err_handling_prepare(hba);
spin_lock_irqsave(hba->host->host_lock, flags);
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index f5bc53875330..47f589c4104a 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -467,6 +467,8 @@ static const struct usb_device_id usb_quirk_list[] = {
/* Huawei 4G LTE module */
{ USB_DEVICE(0x12d1, 0x15bb), .driver_info =
USB_QUIRK_DISCONNECT_SUSPEND },
+ { USB_DEVICE(0x12d1, 0x15c1), .driver_info =
+ USB_QUIRK_DISCONNECT_SUSPEND },
{ USB_DEVICE(0x12d1, 0x15c3), .driver_info =
USB_QUIRK_DISCONNECT_SUSPEND },
diff --git a/drivers/usb/dwc3/dwc3-generic-plat.c b/drivers/usb/dwc3/dwc3-generic-plat.c
index d96b20570002..f8ad79c08c4e 100644
--- a/drivers/usb/dwc3/dwc3-generic-plat.c
+++ b/drivers/usb/dwc3/dwc3-generic-plat.c
@@ -85,11 +85,8 @@ static int dwc3_generic_probe(struct platform_device *pdev)
static void dwc3_generic_remove(struct platform_device *pdev)
{
struct dwc3 *dwc = platform_get_drvdata(pdev);
- struct dwc3_generic *dwc3g = to_dwc3_generic(dwc);
dwc3_core_remove(dwc);
-
- clk_bulk_disable_unprepare(dwc3g->num_clocks, dwc3g->clks);
}
static int dwc3_generic_suspend(struct device *dev)
diff --git a/drivers/usb/gadget/legacy/raw_gadget.c b/drivers/usb/gadget/legacy/raw_gadget.c
index 20165e1582d9..b71680c58de6 100644
--- a/drivers/usb/gadget/legacy/raw_gadget.c
+++ b/drivers/usb/gadget/legacy/raw_gadget.c
@@ -667,8 +667,6 @@ static void *raw_alloc_io_data(struct usb_raw_ep_io *io, void __user *ptr,
return ERR_PTR(-EINVAL);
if (!usb_raw_io_flags_valid(io->flags))
return ERR_PTR(-EINVAL);
- if (io->length > PAGE_SIZE)
- return ERR_PTR(-EINVAL);
if (get_from_user)
data = memdup_user(ptr + sizeof(*io), io->length);
else {
diff --git a/drivers/usb/host/xhci-dbgcap.c b/drivers/usb/host/xhci-dbgcap.c
index 63edf2d8f245..ecda964e018a 100644
--- a/drivers/usb/host/xhci-dbgcap.c
+++ b/drivers/usb/host/xhci-dbgcap.c
@@ -892,7 +892,8 @@ static enum evtreturn xhci_dbc_do_handle_events(struct xhci_dbc *dbc)
dev_info(dbc->dev, "DbC configured\n");
portsc = readl(&dbc->regs->portsc);
writel(portsc, &dbc->regs->portsc);
- return EVT_GSER;
+ ret = EVT_GSER;
+ break;
}
return EVT_DONE;
@@ -954,7 +955,8 @@ static enum evtreturn xhci_dbc_do_handle_events(struct xhci_dbc *dbc)
break;
case TRB_TYPE(TRB_TRANSFER):
dbc_handle_xfer_event(dbc, evt);
- ret = EVT_XFER_DONE;
+ if (ret != EVT_GSER)
+ ret = EVT_XFER_DONE;
break;
default:
break;
@@ -1390,8 +1392,15 @@ int xhci_dbc_suspend(struct xhci_hcd *xhci)
if (!dbc)
return 0;
- if (dbc->state == DS_CONFIGURED)
+ switch (dbc->state) {
+ case DS_ENABLED:
+ case DS_CONNECTED:
+ case DS_CONFIGURED:
dbc->resume_required = 1;
+ break;
+ default:
+ break;
+ }
xhci_dbc_stop(dbc);
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 5c8ab519f497..f67a4d956204 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -582,6 +582,8 @@ static int xhci_pci_setup(struct usb_hcd *hcd)
if (!usb_hcd_is_primary_hcd(hcd))
return 0;
+ xhci->allow_single_roothub = 1;
+
if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
xhci_pme_acpi_rtd3_enable(pdev);
@@ -637,7 +639,6 @@ int xhci_pci_common_probe(struct pci_dev *dev, const struct pci_device_id *id)
xhci = hcd_to_xhci(hcd);
xhci->reset = reset;
- xhci->allow_single_roothub = 1;
if (!xhci_has_one_roothub(xhci)) {
xhci->shared_hcd = usb_create_shared_hcd(&xhci_pci_hc_driver, &dev->dev,
pci_name(dev), hcd);
diff --git a/drivers/usb/misc/Kconfig b/drivers/usb/misc/Kconfig
index 09ac6f1c985f..0b56b773dbdf 100644
--- a/drivers/usb/misc/Kconfig
+++ b/drivers/usb/misc/Kconfig
@@ -182,6 +182,7 @@ config USB_LJCA
config USB_USBIO
tristate "Intel USBIO Bridge support"
depends on USB && ACPI
+ depends on X86 || COMPILE_TEST
select AUXILIARY_BUS
help
This adds support for Intel USBIO drivers.
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 62e984d20e59..5de856f65f0d 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -273,6 +273,7 @@ static void option_instat_callback(struct urb *urb);
#define QUECTEL_PRODUCT_EM05CN 0x0312
#define QUECTEL_PRODUCT_EM05G_GR 0x0313
#define QUECTEL_PRODUCT_EM05G_RS 0x0314
+#define QUECTEL_PRODUCT_RG255C 0x0316
#define QUECTEL_PRODUCT_EM12 0x0512
#define QUECTEL_PRODUCT_RM500Q 0x0800
#define QUECTEL_PRODUCT_RM520N 0x0801
@@ -617,6 +618,7 @@ static void option_instat_callback(struct urb *urb);
#define UNISOC_VENDOR_ID 0x1782
/* TOZED LT70-C based on UNISOC SL8563 uses UNISOC's vendor ID */
#define TOZED_PRODUCT_LT70C 0x4055
+#define UNISOC_PRODUCT_UIS7720 0x4064
/* Luat Air72*U series based on UNISOC UIS8910 uses UNISOC's vendor ID */
#define LUAT_PRODUCT_AIR720U 0x4e00
@@ -1270,6 +1272,9 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500K, 0xff, 0x00, 0x00) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RG650V, 0xff, 0xff, 0x30) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RG650V, 0xff, 0, 0) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RG255C, 0xff, 0xff, 0x30) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RG255C, 0xff, 0, 0) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RG255C, 0xff, 0xff, 0x40) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
@@ -1398,10 +1403,14 @@ static const struct usb_device_id option_ids[] = {
.driver_info = RSVD(0) | NCTRL(3) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a2, 0xff), /* Telit FN920C04 (MBIM) */
.driver_info = NCTRL(4) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a3, 0xff), /* Telit FN920C04 (ECM) */
+ .driver_info = NCTRL(4) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a4, 0xff), /* Telit FN20C04 (rmnet) */
.driver_info = RSVD(0) | NCTRL(3) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a7, 0xff), /* Telit FN920C04 (MBIM) */
.driver_info = NCTRL(4) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a8, 0xff), /* Telit FN920C04 (ECM) */
+ .driver_info = NCTRL(4) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a9, 0xff), /* Telit FN20C04 (rmnet) */
.driver_info = RSVD(0) | NCTRL(2) | RSVD(3) | RSVD(4) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10aa, 0xff), /* Telit FN920C04 (MBIM) */
@@ -2466,6 +2475,7 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9291, 0xff, 0xff, 0x30) },
{ USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9291, 0xff, 0xff, 0x40) },
{ USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, TOZED_PRODUCT_LT70C, 0xff, 0, 0) },
+ { USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, UNISOC_PRODUCT_UIS7720, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, LUAT_PRODUCT_AIR720U, 0xff, 0, 0) },
{ USB_DEVICE_INTERFACE_CLASS(0x1bbb, 0x0530, 0xff), /* TCL IK512 MBIM */
.driver_info = NCTRL(1) },
diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
index b2a568a5bc9b..cc78770509db 100644
--- a/drivers/usb/typec/tcpm/tcpm.c
+++ b/drivers/usb/typec/tcpm/tcpm.c
@@ -7876,9 +7876,9 @@ struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
port->partner_desc.identity = &port->partner_ident;
- port->role_sw = usb_role_switch_get(port->dev);
+ port->role_sw = fwnode_usb_role_switch_get(tcpc->fwnode);
if (!port->role_sw)
- port->role_sw = fwnode_usb_role_switch_get(tcpc->fwnode);
+ port->role_sw = usb_role_switch_get(port->dev);
if (IS_ERR(port->role_sw)) {
err = PTR_ERR(port->role_sw);
goto out_destroy_wq;
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 916cad80941c..5167bec14e36 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -38,6 +38,7 @@
#include <linux/workqueue.h>
#include <linux/notifier.h>
#include <linux/mm_inline.h>
+#include <linux/overflow.h>
#include "vfio.h"
#define DRIVER_VERSION "0.2"
@@ -167,12 +168,14 @@ static struct vfio_dma *vfio_find_dma(struct vfio_iommu *iommu,
{
struct rb_node *node = iommu->dma_list.rb_node;
+ WARN_ON(!size);
+
while (node) {
struct vfio_dma *dma = rb_entry(node, struct vfio_dma, node);
- if (start + size <= dma->iova)
+ if (start + size - 1 < dma->iova)
node = node->rb_left;
- else if (start >= dma->iova + dma->size)
+ else if (start > dma->iova + dma->size - 1)
node = node->rb_right;
else
return dma;
@@ -182,16 +185,19 @@ static struct vfio_dma *vfio_find_dma(struct vfio_iommu *iommu,
}
static struct rb_node *vfio_find_dma_first_node(struct vfio_iommu *iommu,
- dma_addr_t start, u64 size)
+ dma_addr_t start,
+ dma_addr_t end)
{
struct rb_node *res = NULL;
struct rb_node *node = iommu->dma_list.rb_node;
struct vfio_dma *dma_res = NULL;
+ WARN_ON(end < start);
+
while (node) {
struct vfio_dma *dma = rb_entry(node, struct vfio_dma, node);
- if (start < dma->iova + dma->size) {
+ if (start <= dma->iova + dma->size - 1) {
res = node;
dma_res = dma;
if (start >= dma->iova)
@@ -201,7 +207,7 @@ static struct rb_node *vfio_find_dma_first_node(struct vfio_iommu *iommu,
node = node->rb_right;
}
}
- if (res && size && dma_res->iova >= start + size)
+ if (res && dma_res->iova > end)
res = NULL;
return res;
}
@@ -211,11 +217,13 @@ static void vfio_link_dma(struct vfio_iommu *iommu, struct vfio_dma *new)
struct rb_node **link = &iommu->dma_list.rb_node, *parent = NULL;
struct vfio_dma *dma;
+ WARN_ON(new->size != 0);
+
while (*link) {
parent = *link;
dma = rb_entry(parent, struct vfio_dma, node);
- if (new->iova + new->size <= dma->iova)
+ if (new->iova <= dma->iova)
link = &(*link)->rb_left;
else
link = &(*link)->rb_right;
@@ -895,14 +903,20 @@ static int vfio_iommu_type1_pin_pages(void *iommu_data,
unsigned long remote_vaddr;
struct vfio_dma *dma;
bool do_accounting;
+ dma_addr_t iova_end;
+ size_t iova_size;
- if (!iommu || !pages)
+ if (!iommu || !pages || npage <= 0)
return -EINVAL;
/* Supported for v2 version only */
if (!iommu->v2)
return -EACCES;
+ if (check_mul_overflow(npage, PAGE_SIZE, &iova_size) ||
+ check_add_overflow(user_iova, iova_size - 1, &iova_end))
+ return -EOVERFLOW;
+
mutex_lock(&iommu->lock);
if (WARN_ONCE(iommu->vaddr_invalid_count,
@@ -1008,12 +1022,21 @@ static void vfio_iommu_type1_unpin_pages(void *iommu_data,
{
struct vfio_iommu *iommu = iommu_data;
bool do_accounting;
+ dma_addr_t iova_end;
+ size_t iova_size;
int i;
/* Supported for v2 version only */
if (WARN_ON(!iommu->v2))
return;
+ if (WARN_ON(npage <= 0))
+ return;
+
+ if (WARN_ON(check_mul_overflow(npage, PAGE_SIZE, &iova_size) ||
+ check_add_overflow(user_iova, iova_size - 1, &iova_end)))
+ return;
+
mutex_lock(&iommu->lock);
do_accounting = list_empty(&iommu->domain_list);
@@ -1067,7 +1090,7 @@ static long vfio_sync_unpin(struct vfio_dma *dma, struct vfio_domain *domain,
#define VFIO_IOMMU_TLB_SYNC_MAX 512
static size_t unmap_unpin_fast(struct vfio_domain *domain,
- struct vfio_dma *dma, dma_addr_t *iova,
+ struct vfio_dma *dma, dma_addr_t iova,
size_t len, phys_addr_t phys, long *unlocked,
struct list_head *unmapped_list,
int *unmapped_cnt,
@@ -1077,18 +1100,17 @@ static size_t unmap_unpin_fast(struct vfio_domain *domain,
struct vfio_regions *entry = kzalloc(sizeof(*entry), GFP_KERNEL);
if (entry) {
- unmapped = iommu_unmap_fast(domain->domain, *iova, len,
+ unmapped = iommu_unmap_fast(domain->domain, iova, len,
iotlb_gather);
if (!unmapped) {
kfree(entry);
} else {
- entry->iova = *iova;
+ entry->iova = iova;
entry->phys = phys;
entry->len = unmapped;
list_add_tail(&entry->list, unmapped_list);
- *iova += unmapped;
(*unmapped_cnt)++;
}
}
@@ -1107,18 +1129,17 @@ static size_t unmap_unpin_fast(struct vfio_domain *domain,
}
static size_t unmap_unpin_slow(struct vfio_domain *domain,
- struct vfio_dma *dma, dma_addr_t *iova,
+ struct vfio_dma *dma, dma_addr_t iova,
size_t len, phys_addr_t phys,
long *unlocked)
{
- size_t unmapped = iommu_unmap(domain->domain, *iova, len);
+ size_t unmapped = iommu_unmap(domain->domain, iova, len);
if (unmapped) {
- *unlocked += vfio_unpin_pages_remote(dma, *iova,
+ *unlocked += vfio_unpin_pages_remote(dma, iova,
phys >> PAGE_SHIFT,
unmapped >> PAGE_SHIFT,
false);
- *iova += unmapped;
cond_resched();
}
return unmapped;
@@ -1127,12 +1148,12 @@ static size_t unmap_unpin_slow(struct vfio_domain *domain,
static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma,
bool do_accounting)
{
- dma_addr_t iova = dma->iova, end = dma->iova + dma->size;
struct vfio_domain *domain, *d;
LIST_HEAD(unmapped_region_list);
struct iommu_iotlb_gather iotlb_gather;
int unmapped_region_cnt = 0;
long unlocked = 0;
+ size_t pos = 0;
if (!dma->size)
return 0;
@@ -1156,13 +1177,14 @@ static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma,
}
iommu_iotlb_gather_init(&iotlb_gather);
- while (iova < end) {
+ while (pos < dma->size) {
size_t unmapped, len;
phys_addr_t phys, next;
+ dma_addr_t iova = dma->iova + pos;
phys = iommu_iova_to_phys(domain->domain, iova);
if (WARN_ON(!phys)) {
- iova += PAGE_SIZE;
+ pos += PAGE_SIZE;
continue;
}
@@ -1171,7 +1193,7 @@ static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma,
* may require hardware cache flushing, try to find the
* largest contiguous physical memory chunk to unmap.
*/
- for (len = PAGE_SIZE; iova + len < end; len += PAGE_SIZE) {
+ for (len = PAGE_SIZE; pos + len < dma->size; len += PAGE_SIZE) {
next = iommu_iova_to_phys(domain->domain, iova + len);
if (next != phys + len)
break;
@@ -1181,16 +1203,18 @@ static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma,
* First, try to use fast unmap/unpin. In case of failure,
* switch to slow unmap/unpin path.
*/
- unmapped = unmap_unpin_fast(domain, dma, &iova, len, phys,
+ unmapped = unmap_unpin_fast(domain, dma, iova, len, phys,
&unlocked, &unmapped_region_list,
&unmapped_region_cnt,
&iotlb_gather);
if (!unmapped) {
- unmapped = unmap_unpin_slow(domain, dma, &iova, len,
+ unmapped = unmap_unpin_slow(domain, dma, iova, len,
phys, &unlocked);
if (WARN_ON(!unmapped))
break;
}
+
+ pos += unmapped;
}
dma->iommu_mapped = false;
@@ -1282,7 +1306,7 @@ static int update_user_bitmap(u64 __user *bitmap, struct vfio_iommu *iommu,
}
static int vfio_iova_dirty_bitmap(u64 __user *bitmap, struct vfio_iommu *iommu,
- dma_addr_t iova, size_t size, size_t pgsize)
+ dma_addr_t iova, dma_addr_t iova_end, size_t pgsize)
{
struct vfio_dma *dma;
struct rb_node *n;
@@ -1299,8 +1323,8 @@ static int vfio_iova_dirty_bitmap(u64 __user *bitmap, struct vfio_iommu *iommu,
if (dma && dma->iova != iova)
return -EINVAL;
- dma = vfio_find_dma(iommu, iova + size - 1, 0);
- if (dma && dma->iova + dma->size != iova + size)
+ dma = vfio_find_dma(iommu, iova_end, 1);
+ if (dma && dma->iova + dma->size - 1 != iova_end)
return -EINVAL;
for (n = rb_first(&iommu->dma_list); n; n = rb_next(n)) {
@@ -1309,7 +1333,7 @@ static int vfio_iova_dirty_bitmap(u64 __user *bitmap, struct vfio_iommu *iommu,
if (dma->iova < iova)
continue;
- if (dma->iova > iova + size - 1)
+ if (dma->iova > iova_end)
break;
ret = update_user_bitmap(bitmap, iommu, dma, iova, pgsize);
@@ -1374,7 +1398,8 @@ static int vfio_dma_do_unmap(struct vfio_iommu *iommu,
int ret = -EINVAL, retries = 0;
unsigned long pgshift;
dma_addr_t iova = unmap->iova;
- u64 size = unmap->size;
+ dma_addr_t iova_end;
+ size_t size = unmap->size;
bool unmap_all = unmap->flags & VFIO_DMA_UNMAP_FLAG_ALL;
bool invalidate_vaddr = unmap->flags & VFIO_DMA_UNMAP_FLAG_VADDR;
struct rb_node *n, *first_n;
@@ -1387,6 +1412,11 @@ static int vfio_dma_do_unmap(struct vfio_iommu *iommu,
goto unlock;
}
+ if (iova != unmap->iova || size != unmap->size) {
+ ret = -EOVERFLOW;
+ goto unlock;
+ }
+
pgshift = __ffs(iommu->pgsize_bitmap);
pgsize = (size_t)1 << pgshift;
@@ -1396,10 +1426,15 @@ static int vfio_dma_do_unmap(struct vfio_iommu *iommu,
if (unmap_all) {
if (iova || size)
goto unlock;
- size = U64_MAX;
- } else if (!size || size & (pgsize - 1) ||
- iova + size - 1 < iova || size > SIZE_MAX) {
- goto unlock;
+ iova_end = ~(dma_addr_t)0;
+ } else {
+ if (!size || size & (pgsize - 1))
+ goto unlock;
+
+ if (check_add_overflow(iova, size - 1, &iova_end)) {
+ ret = -EOVERFLOW;
+ goto unlock;
+ }
}
/* When dirty tracking is enabled, allow only min supported pgsize */
@@ -1446,17 +1481,17 @@ again:
if (dma && dma->iova != iova)
goto unlock;
- dma = vfio_find_dma(iommu, iova + size - 1, 0);
- if (dma && dma->iova + dma->size != iova + size)
+ dma = vfio_find_dma(iommu, iova_end, 1);
+ if (dma && dma->iova + dma->size - 1 != iova_end)
goto unlock;
}
ret = 0;
- n = first_n = vfio_find_dma_first_node(iommu, iova, size);
+ n = first_n = vfio_find_dma_first_node(iommu, iova, iova_end);
while (n) {
dma = rb_entry(n, struct vfio_dma, node);
- if (dma->iova >= iova + size)
+ if (dma->iova > iova_end)
break;
if (!iommu->v2 && iova > dma->iova)
@@ -1648,7 +1683,9 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
{
bool set_vaddr = map->flags & VFIO_DMA_MAP_FLAG_VADDR;
dma_addr_t iova = map->iova;
+ dma_addr_t iova_end;
unsigned long vaddr = map->vaddr;
+ unsigned long vaddr_end;
size_t size = map->size;
int ret = 0, prot = 0;
size_t pgsize;
@@ -1656,8 +1693,15 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
/* Verify that none of our __u64 fields overflow */
if (map->size != size || map->vaddr != vaddr || map->iova != iova)
+ return -EOVERFLOW;
+
+ if (!size)
return -EINVAL;
+ if (check_add_overflow(iova, size - 1, &iova_end) ||
+ check_add_overflow(vaddr, size - 1, &vaddr_end))
+ return -EOVERFLOW;
+
/* READ/WRITE from device perspective */
if (map->flags & VFIO_DMA_MAP_FLAG_WRITE)
prot |= IOMMU_WRITE;
@@ -1673,13 +1717,7 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
WARN_ON((pgsize - 1) & PAGE_MASK);
- if (!size || (size | iova | vaddr) & (pgsize - 1)) {
- ret = -EINVAL;
- goto out_unlock;
- }
-
- /* Don't allow IOVA or virtual address wrap */
- if (iova + size - 1 < iova || vaddr + size - 1 < vaddr) {
+ if ((size | iova | vaddr) & (pgsize - 1)) {
ret = -EINVAL;
goto out_unlock;
}
@@ -1710,7 +1748,7 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
goto out_unlock;
}
- if (!vfio_iommu_iova_dma_valid(iommu, iova, iova + size - 1)) {
+ if (!vfio_iommu_iova_dma_valid(iommu, iova, iova_end)) {
ret = -EINVAL;
goto out_unlock;
}
@@ -1783,12 +1821,12 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,
for (; n; n = rb_next(n)) {
struct vfio_dma *dma;
- dma_addr_t iova;
+ size_t pos = 0;
dma = rb_entry(n, struct vfio_dma, node);
- iova = dma->iova;
- while (iova < dma->iova + dma->size) {
+ while (pos < dma->size) {
+ dma_addr_t iova = dma->iova + pos;
phys_addr_t phys;
size_t size;
@@ -1804,14 +1842,14 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,
phys = iommu_iova_to_phys(d->domain, iova);
if (WARN_ON(!phys)) {
- iova += PAGE_SIZE;
+ pos += PAGE_SIZE;
continue;
}
size = PAGE_SIZE;
p = phys + size;
i = iova + size;
- while (i < dma->iova + dma->size &&
+ while (pos + size < dma->size &&
p == iommu_iova_to_phys(d->domain, i)) {
size += PAGE_SIZE;
p += PAGE_SIZE;
@@ -1819,9 +1857,8 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,
}
} else {
unsigned long pfn;
- unsigned long vaddr = dma->vaddr +
- (iova - dma->iova);
- size_t n = dma->iova + dma->size - iova;
+ unsigned long vaddr = dma->vaddr + pos;
+ size_t n = dma->size - pos;
long npage;
npage = vfio_pin_pages_remote(dma, vaddr,
@@ -1852,7 +1889,7 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,
goto unwind;
}
- iova += size;
+ pos += size;
}
}
@@ -1869,29 +1906,29 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,
unwind:
for (; n; n = rb_prev(n)) {
struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node);
- dma_addr_t iova;
+ size_t pos = 0;
if (dma->iommu_mapped) {
iommu_unmap(domain->domain, dma->iova, dma->size);
continue;
}
- iova = dma->iova;
- while (iova < dma->iova + dma->size) {
+ while (pos < dma->size) {
+ dma_addr_t iova = dma->iova + pos;
phys_addr_t phys, p;
size_t size;
dma_addr_t i;
phys = iommu_iova_to_phys(domain->domain, iova);
if (!phys) {
- iova += PAGE_SIZE;
+ pos += PAGE_SIZE;
continue;
}
size = PAGE_SIZE;
p = phys + size;
i = iova + size;
- while (i < dma->iova + dma->size &&
+ while (pos + size < dma->size &&
p == iommu_iova_to_phys(domain->domain, i)) {
size += PAGE_SIZE;
p += PAGE_SIZE;
@@ -2977,7 +3014,8 @@ static int vfio_iommu_type1_dirty_pages(struct vfio_iommu *iommu,
struct vfio_iommu_type1_dirty_bitmap_get range;
unsigned long pgshift;
size_t data_size = dirty.argsz - minsz;
- size_t iommu_pgsize;
+ size_t size, iommu_pgsize;
+ dma_addr_t iova, iova_end;
if (!data_size || data_size < sizeof(range))
return -EINVAL;
@@ -2986,14 +3024,24 @@ static int vfio_iommu_type1_dirty_pages(struct vfio_iommu *iommu,
sizeof(range)))
return -EFAULT;
- if (range.iova + range.size < range.iova)
+ iova = range.iova;
+ size = range.size;
+
+ if (iova != range.iova || size != range.size)
+ return -EOVERFLOW;
+
+ if (!size)
return -EINVAL;
+
+ if (check_add_overflow(iova, size - 1, &iova_end))
+ return -EOVERFLOW;
+
if (!access_ok((void __user *)range.bitmap.data,
range.bitmap.size))
return -EINVAL;
pgshift = __ffs(range.bitmap.pgsize);
- ret = verify_bitmap_size(range.size >> pgshift,
+ ret = verify_bitmap_size(size >> pgshift,
range.bitmap.size);
if (ret)
return ret;
@@ -3007,19 +3055,18 @@ static int vfio_iommu_type1_dirty_pages(struct vfio_iommu *iommu,
ret = -EINVAL;
goto out_unlock;
}
- if (range.iova & (iommu_pgsize - 1)) {
+ if (iova & (iommu_pgsize - 1)) {
ret = -EINVAL;
goto out_unlock;
}
- if (!range.size || range.size & (iommu_pgsize - 1)) {
+ if (size & (iommu_pgsize - 1)) {
ret = -EINVAL;
goto out_unlock;
}
if (iommu->dirty_page_tracking)
ret = vfio_iova_dirty_bitmap(range.bitmap.data,
- iommu, range.iova,
- range.size,
+ iommu, iova, iova_end,
range.bitmap.pgsize);
else
ret = -EINVAL;
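
The vfio rework above applies one theme throughout: each user-supplied range is validated once with check_add_overflow() and then handled as an inclusive [start, end] pair, so no later comparison computes `start + size` and silently wraps at the top of the address space. A condensed sketch of the scheme, using the same <linux/overflow.h> helper:

    /* Validate a user range once; reject empty and wrapping spans. */
    static int range_end(dma_addr_t start, size_t size, dma_addr_t *end)
    {
        if (!size)
            return -EINVAL;
        if (check_add_overflow(start, size - 1, end))
            return -EOVERFLOW;
        return 0;
    }

    /* With inclusive ends, [a0,a1] and [b0,b1] overlap iff
     * a0 <= b1 && b0 <= a1 -- no addition, so nothing can wrap. */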
diff --git a/drivers/video/fbdev/aty/atyfb_base.c b/drivers/video/fbdev/aty/atyfb_base.c
index 210fd3ac18a4..56ef1d88e003 100644
--- a/drivers/video/fbdev/aty/atyfb_base.c
+++ b/drivers/video/fbdev/aty/atyfb_base.c
@@ -2614,8 +2614,12 @@ static int aty_init(struct fb_info *info)
pr_cont("\n");
}
#endif
- if (par->pll_ops->init_pll)
- par->pll_ops->init_pll(info, &par->pll);
+ if (par->pll_ops->init_pll) {
+ ret = par->pll_ops->init_pll(info, &par->pll);
+ if (ret)
+ return ret;
+ }
+
if (par->pll_ops->resume_pll)
par->pll_ops->resume_pll(info, &par->pll);
diff --git a/drivers/video/fbdev/core/bitblit.c b/drivers/video/fbdev/core/bitblit.c
index a9ec7f488522..dc5ad3fcc7be 100644
--- a/drivers/video/fbdev/core/bitblit.c
+++ b/drivers/video/fbdev/core/bitblit.c
@@ -79,12 +79,16 @@ static inline void bit_putcs_aligned(struct vc_data *vc, struct fb_info *info,
struct fb_image *image, u8 *buf, u8 *dst)
{
u16 charmask = vc->vc_hi_font_mask ? 0x1ff : 0xff;
+ unsigned int charcnt = vc->vc_font.charcount;
u32 idx = vc->vc_font.width >> 3;
u8 *src;
while (cnt--) {
- src = vc->vc_font.data + (scr_readw(s++)&
- charmask)*cellsize;
+ u16 ch = scr_readw(s++) & charmask;
+
+ if (ch >= charcnt)
+ ch = 0;
+ src = vc->vc_font.data + (unsigned int)ch * cellsize;
if (attr) {
update_attr(buf, src, attr, vc);
@@ -112,14 +116,18 @@ static inline void bit_putcs_unaligned(struct vc_data *vc,
u8 *dst)
{
u16 charmask = vc->vc_hi_font_mask ? 0x1ff : 0xff;
+ unsigned int charcnt = vc->vc_font.charcount;
u32 shift_low = 0, mod = vc->vc_font.width % 8;
u32 shift_high = 8;
u32 idx = vc->vc_font.width >> 3;
u8 *src;
while (cnt--) {
- src = vc->vc_font.data + (scr_readw(s++)&
- charmask)*cellsize;
+ u16 ch = scr_readw(s++) & charmask;
+
+ if (ch >= charcnt)
+ ch = 0;
+ src = vc->vc_font.data + (unsigned int)ch * cellsize;
if (attr) {
update_attr(buf, src, attr, vc);
diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
index 96cc9b389246..9bd3c3814b5c 100644
--- a/drivers/video/fbdev/core/fbcon.c
+++ b/drivers/video/fbdev/core/fbcon.c
@@ -2810,6 +2810,25 @@ int fbcon_mode_deleted(struct fb_info *info,
return found;
}
+static void fbcon_delete_mode(struct fb_videomode *m)
+{
+ struct fbcon_display *p;
+
+ for (int i = first_fb_vc; i <= last_fb_vc; i++) {
+ p = &fb_display[i];
+ if (p->mode == m)
+ p->mode = NULL;
+ }
+}
+
+void fbcon_delete_modelist(struct list_head *head)
+{
+ struct fb_modelist *modelist;
+
+ list_for_each_entry(modelist, head, list)
+ fbcon_delete_mode(&modelist->mode);
+}
+
#ifdef CONFIG_VT_HW_CONSOLE_BINDING
static void fbcon_unbind(void)
{
diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
index 53f1719b1ae1..eff757ebbed1 100644
--- a/drivers/video/fbdev/core/fbmem.c
+++ b/drivers/video/fbdev/core/fbmem.c
@@ -544,6 +544,7 @@ static void do_unregister_framebuffer(struct fb_info *fb_info)
fb_info->pixmap.addr = NULL;
}
+ fbcon_delete_modelist(&fb_info->modelist);
fb_destroy_modelist(&fb_info->modelist);
registered_fb[fb_info->node] = NULL;
num_registered_fb--;
diff --git a/drivers/video/fbdev/pvr2fb.c b/drivers/video/fbdev/pvr2fb.c
index cbdb1caf61bd..0b8d23c12b77 100644
--- a/drivers/video/fbdev/pvr2fb.c
+++ b/drivers/video/fbdev/pvr2fb.c
@@ -192,7 +192,7 @@ static unsigned long pvr2fb_map;
#ifdef CONFIG_PVR2_DMA
static unsigned int shdma = PVR2_CASCADE_CHAN;
-static unsigned int pvr2dma = ONCHIP_NR_DMA_CHANNELS;
+static unsigned int pvr2dma = CONFIG_NR_ONCHIP_DMA_CHANNELS;
#endif
static struct fb_videomode pvr2_modedb[] = {
diff --git a/drivers/video/fbdev/valkyriefb.c b/drivers/video/fbdev/valkyriefb.c
index 91d070ef6989..6ff059ee1694 100644
--- a/drivers/video/fbdev/valkyriefb.c
+++ b/drivers/video/fbdev/valkyriefb.c
@@ -329,11 +329,13 @@ static int __init valkyriefb_init(void)
if (of_address_to_resource(dp, 0, &r)) {
printk(KERN_ERR "can't find address for valkyrie\n");
+ of_node_put(dp);
return 0;
}
frame_buffer_phys = r.start;
cmap_regs_phys = r.start + 0x304000;
+ of_node_put(dp);
}
#endif /* ppc (!CONFIG_MAC) */
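
The valkyriefb fix balances the reference taken when the device-tree node was looked up: of_node_put() must run on the early-return path as well as on success. The general pattern, sketched with an illustrative lookup (the real code obtains dp earlier):

    struct device_node *dp = of_find_node_by_name(NULL, "valkyrie");

    if (dp) {
        /* ... use dp; every exit path must drop the reference ... */
        of_node_put(dp);
    }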
diff --git a/fs/9p/vfs_dentry.c b/fs/9p/vfs_dentry.c
index f3248a3e5402..c1acbc98465d 100644
--- a/fs/9p/vfs_dentry.c
+++ b/fs/9p/vfs_dentry.c
@@ -66,7 +66,6 @@ static int __v9fs_lookup_revalidate(struct dentry *dentry, unsigned int flags)
struct p9_fid *fid;
struct inode *inode;
struct v9fs_inode *v9inode;
- unsigned int cached;
if (flags & LOOKUP_RCU)
return -ECHILD;
@@ -76,11 +75,7 @@ static int __v9fs_lookup_revalidate(struct dentry *dentry, unsigned int flags)
goto out_valid;
v9inode = V9FS_I(inode);
- struct v9fs_session_info *v9ses = v9fs_inode2v9ses(inode);
-
- cached = v9ses->cache & (CACHE_META | CACHE_LOOSE);
-
- if (!cached || v9inode->cache_validity & V9FS_INO_INVALID_ATTR) {
+ if (v9inode->cache_validity & V9FS_INO_INVALID_ATTR) {
int retval;
struct v9fs_session_info *v9ses;
@@ -114,6 +109,7 @@ static int __v9fs_lookup_revalidate(struct dentry *dentry, unsigned int flags)
p9_debug(P9_DEBUG_VFS,
"refresh inode: dentry = %pd (%p), got error %pe\n",
dentry, dentry, ERR_PTR(retval));
+ if (retval < 0)
return retval;
}
}
@@ -150,8 +146,6 @@ const struct dentry_operations v9fs_cached_dentry_operations = {
};
const struct dentry_operations v9fs_dentry_operations = {
- .d_revalidate = v9fs_lookup_revalidate,
- .d_weak_revalidate = __v9fs_lookup_revalidate,
.d_release = v9fs_dentry_release,
.d_unalias_trylock = v9fs_dentry_unalias_trylock,
.d_unalias_unlock = v9fs_dentry_unalias_unlock,
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index 69f378a83775..d0c77ec31b1d 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -1339,14 +1339,8 @@ int v9fs_refresh_inode(struct p9_fid *fid, struct inode *inode)
* Don't update inode if the file type is different
*/
umode = p9mode2unixmode(v9ses, st, &rdev);
- if (inode_wrong_type(inode, umode)) {
- /*
- * Do this as a way of letting the caller know the inode should not
- * be reused
- */
- v9fs_invalidate_inode_attr(inode);
+ if (inode_wrong_type(inode, umode))
goto out;
- }
/*
* We don't want to refresh inode->i_size,
diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c
index 0b404e8484d2..be297e335468 100644
--- a/fs/9p/vfs_inode_dotl.c
+++ b/fs/9p/vfs_inode_dotl.c
@@ -897,14 +897,8 @@ int v9fs_refresh_inode_dotl(struct p9_fid *fid, struct inode *inode)
/*
* Don't update inode if the file type is different
*/
- if (inode_wrong_type(inode, st->st_mode)) {
- /*
- * Do this as a way of letting the caller know the inode should not
- * be reused
- */
- v9fs_invalidate_inode_attr(inode);
+ if (inode_wrong_type(inode, st->st_mode))
goto out;
- }
/*
* We don't want to refresh inode->i_size,
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index 41e37f7f67cc..3df7b9d7fbe8 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -2110,9 +2110,9 @@ void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
for (int i = 0; i < count; i++) {
__btrfs_kill_delayed_node(delayed_nodes[i]);
+ btrfs_delayed_node_ref_tracker_dir_print(delayed_nodes[i]);
btrfs_release_delayed_node(delayed_nodes[i],
&delayed_node_trackers[i]);
- btrfs_delayed_node_ref_tracker_dir_print(delayed_nodes[i]);
}
}
}
diff --git a/fs/btrfs/delayed-inode.h b/fs/btrfs/delayed-inode.h
index 0d949edc0caf..b09d4ec8c77d 100644
--- a/fs/btrfs/delayed-inode.h
+++ b/fs/btrfs/delayed-inode.h
@@ -219,6 +219,13 @@ static inline void btrfs_delayed_node_ref_tracker_dir_print(struct btrfs_delayed
if (!btrfs_test_opt(node->root->fs_info, REF_TRACKER))
return;
+ /*
+ * Only print if there are leaked references. The caller is
+ * holding one reference, so if refs == 1 there is no leak.
+ */
+ if (refcount_read(&node->refs) == 1)
+ return;
+
ref_tracker_dir_print(&node->ref_dir.dir,
BTRFS_DELAYED_NODE_REF_TRACKER_DISPLAY_LIMIT);
}
diff --git a/fs/btrfs/ref-verify.c b/fs/btrfs/ref-verify.c
index de4cb0f3fbd0..e9224145d754 100644
--- a/fs/btrfs/ref-verify.c
+++ b/fs/btrfs/ref-verify.c
@@ -982,7 +982,7 @@ int btrfs_build_ref_tree(struct btrfs_fs_info *fs_info)
extent_root = btrfs_extent_root(fs_info, 0);
/* If the extent tree is damaged we cannot ignore it (IGNOREBADROOTS). */
- if (IS_ERR(extent_root)) {
+ if (!extent_root) {
btrfs_warn(fs_info, "ref-verify: extent tree not available, disabling");
btrfs_clear_opt(fs_info->mount_opt, REF_VERIFY);
return 0;
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index 6144e66661f5..96a030d28e09 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -4102,6 +4102,48 @@ out:
return ret;
}
+static int rbtree_check_dir_ref_comp(const void *k, const struct rb_node *node)
+{
+ const struct recorded_ref *data = k;
+ const struct recorded_ref *ref = rb_entry(node, struct recorded_ref, node);
+
+ if (data->dir > ref->dir)
+ return 1;
+ if (data->dir < ref->dir)
+ return -1;
+ if (data->dir_gen > ref->dir_gen)
+ return 1;
+ if (data->dir_gen < ref->dir_gen)
+ return -1;
+ return 0;
+}
+
+static bool rbtree_check_dir_ref_less(struct rb_node *node, const struct rb_node *parent)
+{
+ const struct recorded_ref *entry = rb_entry(node, struct recorded_ref, node);
+
+ return rbtree_check_dir_ref_comp(entry, parent) < 0;
+}
+
+static int record_check_dir_ref_in_tree(struct rb_root *root,
+ struct recorded_ref *ref, struct list_head *list)
+{
+ struct recorded_ref *tmp_ref;
+ int ret;
+
+ if (rb_find(ref, root, rbtree_check_dir_ref_comp))
+ return 0;
+
+ ret = dup_ref(ref, list);
+ if (ret < 0)
+ return ret;
+
+ tmp_ref = list_last_entry(list, struct recorded_ref, list);
+ rb_add(&tmp_ref->node, root, rbtree_check_dir_ref_less);
+ tmp_ref->root = root;
+ return 0;
+}
+
static int rename_current_inode(struct send_ctx *sctx,
struct fs_path *current_path,
struct fs_path *new_path)
@@ -4129,11 +4171,11 @@ static int process_recorded_refs(struct send_ctx *sctx, int *pending_move)
struct recorded_ref *cur;
struct recorded_ref *cur2;
LIST_HEAD(check_dirs);
+ struct rb_root rbtree_check_dirs = RB_ROOT;
struct fs_path *valid_path = NULL;
u64 ow_inode = 0;
u64 ow_gen;
u64 ow_mode;
- u64 last_dir_ino_rm = 0;
bool did_overwrite = false;
bool is_orphan = false;
bool can_rename = true;
@@ -4437,7 +4479,7 @@ static int process_recorded_refs(struct send_ctx *sctx, int *pending_move)
goto out;
}
}
- ret = dup_ref(cur, &check_dirs);
+ ret = record_check_dir_ref_in_tree(&rbtree_check_dirs, cur, &check_dirs);
if (ret < 0)
goto out;
}
@@ -4465,7 +4507,7 @@ static int process_recorded_refs(struct send_ctx *sctx, int *pending_move)
}
list_for_each_entry(cur, &sctx->deleted_refs, list) {
- ret = dup_ref(cur, &check_dirs);
+ ret = record_check_dir_ref_in_tree(&rbtree_check_dirs, cur, &check_dirs);
if (ret < 0)
goto out;
}
@@ -4475,7 +4517,7 @@ static int process_recorded_refs(struct send_ctx *sctx, int *pending_move)
* We have a moved dir. Add the old parent to check_dirs
*/
cur = list_first_entry(&sctx->deleted_refs, struct recorded_ref, list);
- ret = dup_ref(cur, &check_dirs);
+ ret = record_check_dir_ref_in_tree(&rbtree_check_dirs, cur, &check_dirs);
if (ret < 0)
goto out;
} else if (!S_ISDIR(sctx->cur_inode_mode)) {
@@ -4509,7 +4551,7 @@ static int process_recorded_refs(struct send_ctx *sctx, int *pending_move)
if (is_current_inode_path(sctx, cur->full_path))
fs_path_reset(&sctx->cur_inode_path);
}
- ret = dup_ref(cur, &check_dirs);
+ ret = record_check_dir_ref_in_tree(&rbtree_check_dirs, cur, &check_dirs);
if (ret < 0)
goto out;
}
@@ -4552,8 +4594,7 @@ static int process_recorded_refs(struct send_ctx *sctx, int *pending_move)
ret = cache_dir_utimes(sctx, cur->dir, cur->dir_gen);
if (ret < 0)
goto out;
- } else if (ret == inode_state_did_delete &&
- cur->dir != last_dir_ino_rm) {
+ } else if (ret == inode_state_did_delete) {
ret = can_rmdir(sctx, cur->dir, cur->dir_gen);
if (ret < 0)
goto out;
@@ -4565,7 +4606,6 @@ static int process_recorded_refs(struct send_ctx *sctx, int *pending_move)
ret = send_rmdir(sctx, valid_path);
if (ret < 0)
goto out;
- last_dir_ino_rm = cur->dir;
}
}
}
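
record_check_dir_ref_in_tree() above is the standard rb_find()/rb_add() pairing: one three-way comparator drives lookups, and a boolean "less" wrapper built on it drives insertion. A stripped-down sketch of the pairing, with an illustrative entry type:

    struct entry {
        struct rb_node node;
        u64 key;
    };

    static int entry_cmp(const void *k, const struct rb_node *n)
    {
        const struct entry *a = k;
        const struct entry *b = rb_entry(n, struct entry, node);

        if (a->key > b->key)
            return 1;
        if (a->key < b->key)
            return -1;
        return 0;
    }

    static bool entry_less(struct rb_node *n, const struct rb_node *parent)
    {
        return entry_cmp(rb_entry(n, struct entry, node), parent) < 0;
    }

    /* lookup: rb_find(&needle, &root, entry_cmp)
     * insert: rb_add(&e->node, &root, entry_less) */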
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index aadc02374b2a..430e7419349c 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -2068,7 +2068,13 @@ static int btrfs_get_tree_subvol(struct fs_context *fc)
fs_info->super_copy = kzalloc(BTRFS_SUPER_INFO_SIZE, GFP_KERNEL);
fs_info->super_for_commit = kzalloc(BTRFS_SUPER_INFO_SIZE, GFP_KERNEL);
if (!fs_info->super_copy || !fs_info->super_for_commit) {
- btrfs_free_fs_info(fs_info);
+ /*
+ * Don't call btrfs_free_fs_info() here, as fs_info is still
+ * only partially initialized.
+ */
+ kfree(fs_info->super_copy);
+ kfree(fs_info->super_for_commit);
+ kvfree(fs_info);
return -ENOMEM;
}
btrfs_init_fs_info(fs_info);
diff --git a/fs/erofs/zmap.c b/fs/erofs/zmap.c
index e5581dbeb4c2..c8d8e129eb4b 100644
--- a/fs/erofs/zmap.c
+++ b/fs/erofs/zmap.c
@@ -55,10 +55,6 @@ static int z_erofs_load_full_lcluster(struct z_erofs_maprecorder *m,
} else {
m->partialref = !!(advise & Z_EROFS_LI_PARTIAL_REF);
m->clusterofs = le16_to_cpu(di->di_clusterofs);
- if (m->clusterofs >= 1 << vi->z_lclusterbits) {
- DBG_BUGON(1);
- return -EFSCORRUPTED;
- }
m->pblk = le32_to_cpu(di->di_u.blkaddr);
}
return 0;
@@ -240,21 +236,29 @@ static int z_erofs_load_compact_lcluster(struct z_erofs_maprecorder *m,
static int z_erofs_load_lcluster_from_disk(struct z_erofs_maprecorder *m,
unsigned int lcn, bool lookahead)
{
+ struct erofs_inode *vi = EROFS_I(m->inode);
+ int err;
+
+ if (vi->datalayout == EROFS_INODE_COMPRESSED_COMPACT) {
+ err = z_erofs_load_compact_lcluster(m, lcn, lookahead);
+ } else {
+ DBG_BUGON(vi->datalayout != EROFS_INODE_COMPRESSED_FULL);
+ err = z_erofs_load_full_lcluster(m, lcn);
+ }
+ if (err)
+ return err;
+
if (m->type >= Z_EROFS_LCLUSTER_TYPE_MAX) {
erofs_err(m->inode->i_sb, "unknown type %u @ lcn %u of nid %llu",
- m->type, lcn, EROFS_I(m->inode)->nid);
+ m->type, lcn, EROFS_I(m->inode)->nid);
DBG_BUGON(1);
return -EOPNOTSUPP;
+ } else if (m->type != Z_EROFS_LCLUSTER_TYPE_NONHEAD &&
+ m->clusterofs >= (1 << vi->z_lclusterbits)) {
+ DBG_BUGON(1);
+ return -EFSCORRUPTED;
}
-
- switch (EROFS_I(m->inode)->datalayout) {
- case EROFS_INODE_COMPRESSED_FULL:
- return z_erofs_load_full_lcluster(m, lcn);
- case EROFS_INODE_COMPRESSED_COMPACT:
- return z_erofs_load_compact_lcluster(m, lcn, lookahead);
- default:
- return -EINVAL;
- }
+ return 0;
}
static int z_erofs_extent_lookback(struct z_erofs_maprecorder *m,
@@ -268,20 +272,19 @@ static int z_erofs_extent_lookback(struct z_erofs_maprecorder *m,
unsigned long lcn = m->lcn - lookback_distance;
int err;
+ if (!lookback_distance)
+ break;
+
err = z_erofs_load_lcluster_from_disk(m, lcn, false);
if (err)
return err;
-
if (m->type == Z_EROFS_LCLUSTER_TYPE_NONHEAD) {
lookback_distance = m->delta[0];
- if (!lookback_distance)
- break;
continue;
- } else {
- m->headtype = m->type;
- m->map->m_la = (lcn << lclusterbits) | m->clusterofs;
- return 0;
}
+ m->headtype = m->type;
+ m->map->m_la = (lcn << lclusterbits) | m->clusterofs;
+ return 0;
}
erofs_err(sb, "bogus lookback distance %u @ lcn %lu of nid %llu",
lookback_distance, m->lcn, vi->nid);
@@ -431,13 +434,6 @@ static int z_erofs_map_blocks_fo(struct inode *inode,
end = inode->i_size;
} else {
if (m.type != Z_EROFS_LCLUSTER_TYPE_NONHEAD) {
- /* m.lcn should be >= 1 if endoff < m.clusterofs */
- if (!m.lcn) {
- erofs_err(sb, "invalid logical cluster 0 at nid %llu",
- vi->nid);
- err = -EFSCORRUPTED;
- goto unmap_out;
- }
end = (m.lcn << lclusterbits) | m.clusterofs;
map->m_flags |= EROFS_MAP_FULL_MAPPED;
m.delta[0] = 1;
@@ -596,7 +592,7 @@ static int z_erofs_map_blocks_ext(struct inode *inode,
vi->z_fragmentoff = map->m_plen;
if (recsz > offsetof(struct z_erofs_extent, pstart_lo))
vi->z_fragmentoff |= map->m_pa << 32;
- } else if (map->m_plen) {
+ } else if (map->m_plen & Z_EROFS_EXTENT_PLEN_MASK) {
map->m_flags |= EROFS_MAP_MAPPED |
EROFS_MAP_FULL_MAPPED | EROFS_MAP_ENCODED;
fmt = map->m_plen >> Z_EROFS_EXTENT_PLEN_FMT_BIT;
@@ -715,6 +711,7 @@ static int z_erofs_map_sanity_check(struct inode *inode,
struct erofs_map_blocks *map)
{
struct erofs_sb_info *sbi = EROFS_I_SB(inode);
+ u64 pend;
if (!(map->m_flags & EROFS_MAP_ENCODED))
return 0;
@@ -732,6 +729,10 @@ static int z_erofs_map_sanity_check(struct inode *inode,
if (unlikely(map->m_plen > Z_EROFS_PCLUSTER_MAX_SIZE ||
map->m_llen > Z_EROFS_PCLUSTER_MAX_DSIZE))
return -EOPNOTSUPP;
+ /* Physical extents beyond 48-bit block addresses are invalid */
+ if (unlikely(check_add_overflow(map->m_pa, map->m_plen, &pend) ||
+ (pend >> sbi->blkszbits) >= BIT_ULL(48)))
+ return -EFSCORRUPTED;
return 0;
}
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 9c94ed8c3ab0..f42548ee9083 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -478,14 +478,6 @@ hugetlb_vmdelete_list(struct rb_root_cached *root, pgoff_t start, pgoff_t end,
if (!hugetlb_vma_trylock_write(vma))
continue;
- /*
- * Skip VMAs without shareable locks. Per the design in commit
- * 40549ba8f8e0, these will be handled by remove_inode_hugepages()
- * called after this function with proper locking.
- */
- if (!__vma_shareable_lock(vma))
- goto skip;
-
v_start = vma_offset_start(vma, start);
v_end = vma_offset_end(vma, end);
@@ -496,7 +488,6 @@ hugetlb_vmdelete_list(struct rb_root_cached *root, pgoff_t start, pgoff_t end,
* vmas. Therefore, lock is not held when calling
* unmap_hugepage_range for private vmas.
*/
-skip:
hugetlb_vma_unlock_write(vma);
}
}
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index e466cf52d7d7..7f7e6bb23a90 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -988,10 +988,11 @@ nfsd4_read(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
static void
nfsd4_read_release(union nfsd4_op_u *u)
{
- if (u->read.rd_nf)
+ if (u->read.rd_nf) {
+ trace_nfsd_read_done(u->read.rd_rqstp, u->read.rd_fhp,
+ u->read.rd_offset, u->read.rd_length);
nfsd_file_put(u->read.rd_nf);
- trace_nfsd_read_done(u->read.rd_rqstp, u->read.rd_fhp,
- u->read.rd_offset, u->read.rd_length);
+ }
}
static __be32
@@ -2892,10 +2893,20 @@ nfsd4_proc_compound(struct svc_rqst *rqstp)
rqstp->rq_lease_breaker = (void **)&cstate->clp;
- trace_nfsd_compound(rqstp, args->tag, args->taglen, args->opcnt);
+ trace_nfsd_compound(rqstp, args->tag, args->taglen, args->client_opcnt);
while (!status && resp->opcnt < args->opcnt) {
op = &args->ops[resp->opcnt++];
+ if (unlikely(resp->opcnt == NFSD_MAX_OPS_PER_COMPOUND)) {
+ /* If there are still more operations to process,
+ * stop here and report NFS4ERR_RESOURCE. */
+ if (cstate->minorversion == 0 &&
+ args->client_opcnt > resp->opcnt) {
+ op->status = nfserr_resource;
+ goto encode_op;
+ }
+ }
+
/*
* The XDR decode routines may have pre-set op->status;
* for example, if there is a miscellaneous XDR error
@@ -2972,7 +2983,7 @@ encode_op:
status = op->status;
}
- trace_nfsd_compound_status(args->opcnt, resp->opcnt,
+ trace_nfsd_compound_status(args->client_opcnt, resp->opcnt,
status, nfsd4_op_name(op->opnum));
nfsd4_cstate_clear_replay(cstate);
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 81fa7cc6c77b..c1b54322c412 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -3902,6 +3902,7 @@ static __be32 check_forechannel_attrs(struct nfsd4_channel_attrs *ca, struct nfs
ca->headerpadsz = 0;
ca->maxreq_sz = min_t(u32, ca->maxreq_sz, maxrpc);
ca->maxresp_sz = min_t(u32, ca->maxresp_sz, maxrpc);
+ ca->maxops = min_t(u32, ca->maxops, NFSD_MAX_OPS_PER_COMPOUND);
ca->maxresp_cached = min_t(u32, ca->maxresp_cached,
NFSD_SLOT_CACHE_SIZE + NFSD_MIN_HDR_SEQ_SZ);
ca->maxreqs = min_t(u32, ca->maxreqs, NFSD_MAX_SLOTS_PER_SESSION);
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index c0a3c6a7c8bb..6040a6145dad 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -2488,8 +2488,10 @@ nfsd4_decode_compound(struct nfsd4_compoundargs *argp)
if (xdr_stream_decode_u32(argp->xdr, &argp->minorversion) < 0)
return false;
- if (xdr_stream_decode_u32(argp->xdr, &argp->opcnt) < 0)
+ if (xdr_stream_decode_u32(argp->xdr, &argp->client_opcnt) < 0)
return false;
+ argp->opcnt = min_t(u32, argp->client_opcnt,
+ NFSD_MAX_OPS_PER_COMPOUND);
if (argp->opcnt > ARRAY_SIZE(argp->iops)) {
argp->ops = vcalloc(argp->opcnt, sizeof(*argp->ops));
@@ -2628,10 +2630,8 @@ static __be32 nfsd4_encode_components_esc(struct xdr_stream *xdr, char sep,
__be32 *p;
__be32 pathlen;
int pathlen_offset;
- int strlen, count=0;
char *str, *end, *next;
-
- dprintk("nfsd4_encode_components(%s)\n", components);
+ int count = 0;
pathlen_offset = xdr->buf->len;
p = xdr_reserve_space(xdr, 4);
@@ -2658,9 +2658,8 @@ static __be32 nfsd4_encode_components_esc(struct xdr_stream *xdr, char sep,
for (; *end && (*end != sep); end++)
/* find sep or end of string */;
- strlen = end - str;
- if (strlen) {
- if (xdr_stream_encode_opaque(xdr, str, strlen) < 0)
+ if (end > str) {
+ if (xdr_stream_encode_opaque(xdr, str, end - str) < 0)
return nfserr_resource;
count++;
} else
@@ -2939,6 +2938,12 @@ struct nfsd4_fattr_args {
typedef __be32(*nfsd4_enc_attr)(struct xdr_stream *xdr,
const struct nfsd4_fattr_args *args);
+static __be32 nfsd4_encode_fattr4__inval(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ return nfserr_inval;
+}
+
static __be32 nfsd4_encode_fattr4__noop(struct xdr_stream *xdr,
const struct nfsd4_fattr_args *args)
{
@@ -3560,6 +3565,8 @@ static const nfsd4_enc_attr nfsd4_enc_fattr4_encode_ops[] = {
[FATTR4_MODE_UMASK] = nfsd4_encode_fattr4__noop,
[FATTR4_XATTR_SUPPORT] = nfsd4_encode_fattr4_xattr_support,
+ [FATTR4_TIME_DELEG_ACCESS] = nfsd4_encode_fattr4__inval,
+ [FATTR4_TIME_DELEG_MODIFY] = nfsd4_encode_fattr4__inval,
[FATTR4_OPEN_ARGUMENTS] = nfsd4_encode_fattr4_open_arguments,
};
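
The XDR change keeps the raw wire value in client_opcnt and clamps only the count actually processed, so later code (the NFS4ERR_RESOURCE check in nfs4proc.c above) can still tell that the client asked for more than the server will run. The clamp idiom, sketched with an illustrative helper:

    u32 wire_count = decode_u32(xdr);       /* untrusted client value */
    u32 opcnt = min_t(u32, wire_count, NFSD_MAX_OPS_PER_COMPOUND);

    if (wire_count > opcnt)
        ;   /* report resource exhaustion rather than truncating silently */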
diff --git a/fs/nfsd/nfsd.h b/fs/nfsd/nfsd.h
index ea87b42894dd..f19320018639 100644
--- a/fs/nfsd/nfsd.h
+++ b/fs/nfsd/nfsd.h
@@ -57,6 +57,9 @@ struct readdir_cd {
__be32 err; /* 0, nfserr, or nfserr_eof */
};
+/* Maximum number of operations per session compound */
+#define NFSD_MAX_OPS_PER_COMPOUND 200
+
struct nfsd_genl_rqstp {
struct sockaddr rq_daddr;
struct sockaddr rq_saddr;
diff --git a/fs/nfsd/xdr4.h b/fs/nfsd/xdr4.h
index d4b48602b2b0..ee0570cbdd9e 100644
--- a/fs/nfsd/xdr4.h
+++ b/fs/nfsd/xdr4.h
@@ -903,6 +903,7 @@ struct nfsd4_compoundargs {
char * tag;
u32 taglen;
u32 minorversion;
+ u32 client_opcnt;
u32 opcnt;
bool splice_ok;
struct nfsd4_op *ops;
diff --git a/fs/ocfs2/move_extents.c b/fs/ocfs2/move_extents.c
index 86f2631e6360..10923bf7c8b8 100644
--- a/fs/ocfs2/move_extents.c
+++ b/fs/ocfs2/move_extents.c
@@ -867,6 +867,11 @@ static int __ocfs2_move_extents_range(struct buffer_head *di_bh,
mlog_errno(ret);
goto out;
}
+ /*
+ * Invalidate extent cache after moving/defragging to prevent
+ * stale cached data with outdated extent flags.
+ */
+ ocfs2_extent_map_trunc(inode, cpos);
context->clusters_moved += alloc_size;
next:
diff --git a/fs/resctrl/monitor.c b/fs/resctrl/monitor.c
index 4076336fbba6..572a9925bd6c 100644
--- a/fs/resctrl/monitor.c
+++ b/fs/resctrl/monitor.c
@@ -1782,15 +1782,13 @@ int resctrl_mon_resource_init(void)
mba_mbps_default_event = QOS_L3_MBM_TOTAL_EVENT_ID;
if (r->mon.mbm_cntr_assignable) {
- if (!resctrl_is_mon_event_enabled(QOS_L3_MBM_TOTAL_EVENT_ID))
- resctrl_enable_mon_event(QOS_L3_MBM_TOTAL_EVENT_ID);
- if (!resctrl_is_mon_event_enabled(QOS_L3_MBM_LOCAL_EVENT_ID))
- resctrl_enable_mon_event(QOS_L3_MBM_LOCAL_EVENT_ID);
- mon_event_all[QOS_L3_MBM_TOTAL_EVENT_ID].evt_cfg = r->mon.mbm_cfg_mask;
- mon_event_all[QOS_L3_MBM_LOCAL_EVENT_ID].evt_cfg = r->mon.mbm_cfg_mask &
- (READS_TO_LOCAL_MEM |
- READS_TO_LOCAL_S_MEM |
- NON_TEMP_WRITE_TO_LOCAL_MEM);
+ if (resctrl_is_mon_event_enabled(QOS_L3_MBM_TOTAL_EVENT_ID))
+ mon_event_all[QOS_L3_MBM_TOTAL_EVENT_ID].evt_cfg = r->mon.mbm_cfg_mask;
+ if (resctrl_is_mon_event_enabled(QOS_L3_MBM_LOCAL_EVENT_ID))
+ mon_event_all[QOS_L3_MBM_LOCAL_EVENT_ID].evt_cfg = r->mon.mbm_cfg_mask &
+ (READS_TO_LOCAL_MEM |
+ READS_TO_LOCAL_S_MEM |
+ NON_TEMP_WRITE_TO_LOCAL_MEM);
r->mon.mbm_assign_on_mkdir = true;
resctrl_file_fflags_init("num_mbm_cntrs",
RFTYPE_MON_INFO | RFTYPE_RES_CACHE);
diff --git a/fs/smb/client/cifsfs.c b/fs/smb/client/cifsfs.c
index 4f959f1e08d2..185ac41bd7e9 100644
--- a/fs/smb/client/cifsfs.c
+++ b/fs/smb/client/cifsfs.c
@@ -173,7 +173,7 @@ module_param(enable_oplocks, bool, 0644);
MODULE_PARM_DESC(enable_oplocks, "Enable or disable oplocks. Default: y/Y/1");
module_param(enable_gcm_256, bool, 0644);
-MODULE_PARM_DESC(enable_gcm_256, "Enable requesting strongest (256 bit) GCM encryption. Default: y/Y/0");
+MODULE_PARM_DESC(enable_gcm_256, "Enable requesting strongest (256 bit) GCM encryption. Default: y/Y/1");
module_param(require_gcm_256, bool, 0644);
MODULE_PARM_DESC(require_gcm_256, "Require strongest (256 bit) GCM encryption. Default: n/N/0");
diff --git a/fs/smb/client/cifsglob.h b/fs/smb/client/cifsglob.h
index 16a00a61fd2c..203e2aaa3c25 100644
--- a/fs/smb/client/cifsglob.h
+++ b/fs/smb/client/cifsglob.h
@@ -534,8 +534,6 @@ struct smb_version_operations {
void (*new_lease_key)(struct cifs_fid *);
int (*generate_signingkey)(struct cifs_ses *ses,
struct TCP_Server_Info *server);
- int (*calc_signature)(struct smb_rqst *, struct TCP_Server_Info *,
- bool allocate_crypto);
int (*set_integrity)(const unsigned int, struct cifs_tcon *tcon,
struct cifsFileInfo *src_file);
int (*enum_snapshots)(const unsigned int xid, struct cifs_tcon *tcon,
@@ -732,7 +730,7 @@ struct TCP_Server_Info {
bool nosharesock;
bool tcp_nodelay;
bool terminate;
- unsigned int credits; /* send no more requests at once */
+ int credits; /* send no more requests at once */
unsigned int max_credits; /* can override large 32000 default at mnt */
unsigned int in_flight; /* number of requests on the wire to server */
unsigned int max_in_flight; /* max number of requests that were on wire */
diff --git a/fs/smb/client/cifsproto.h b/fs/smb/client/cifsproto.h
index 4976be2c47c1..3528c365a452 100644
--- a/fs/smb/client/cifsproto.h
+++ b/fs/smb/client/cifsproto.h
@@ -9,6 +9,7 @@
#define _CIFSPROTO_H
#include <linux/nls.h>
#include <linux/ctype.h>
+#include "cifsglob.h"
#include "trace.h"
#ifdef CONFIG_CIFS_DFS_UPCALL
#include "dfs_cache.h"
@@ -615,6 +616,8 @@ extern int E_md4hash(const unsigned char *passwd, unsigned char *p16,
extern struct TCP_Server_Info *
cifs_find_tcp_session(struct smb3_fs_context *ctx);
+struct cifs_tcon *cifs_setup_ipc(struct cifs_ses *ses, bool seal);
+
void __cifs_put_smb_ses(struct cifs_ses *ses);
extern struct cifs_ses *
diff --git a/fs/smb/client/cifssmb.c b/fs/smb/client/cifssmb.c
index 2881efcbe09a..7da194f29fef 100644
--- a/fs/smb/client/cifssmb.c
+++ b/fs/smb/client/cifssmb.c
@@ -1311,6 +1311,8 @@ cifs_readv_callback(struct mid_q_entry *mid)
.rreq_debug_id = rdata->rreq->debug_id,
.rreq_debug_index = rdata->subreq.debug_index,
};
+ unsigned int rreq_debug_id = rdata->rreq->debug_id;
+ unsigned int subreq_debug_index = rdata->subreq.debug_index;
cifs_dbg(FYI, "%s: mid=%llu state=%d result=%d bytes=%zu\n",
__func__, mid->mid, mid->mid_state, rdata->result,
@@ -1374,6 +1376,9 @@ do_retry:
__set_bit(NETFS_SREQ_MADE_PROGRESS, &rdata->subreq.flags);
}
+ trace_smb3_rw_credits(rreq_debug_id, subreq_debug_index, rdata->credits.value,
+ server->credits, server->in_flight,
+ 0, cifs_trace_rw_credits_read_response_clear);
rdata->credits.value = 0;
rdata->subreq.error = rdata->result;
rdata->subreq.transferred += rdata->got_bytes;
@@ -1381,6 +1386,9 @@ do_retry:
netfs_read_subreq_terminated(&rdata->subreq);
release_mid(mid);
add_credits(server, &credits, 0);
+ trace_smb3_rw_credits(rreq_debug_id, subreq_debug_index, 0,
+ server->credits, server->in_flight,
+ credits.value, cifs_trace_rw_credits_read_response_add);
}
/* cifs_async_readv - send an async read, and set up mid to handle result */
diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c
index dd12f3eb61dc..55cb4b0cbd48 100644
--- a/fs/smb/client/connect.c
+++ b/fs/smb/client/connect.c
@@ -310,6 +310,8 @@ cifs_abort_connection(struct TCP_Server_Info *server)
server->ssocket->flags);
sock_release(server->ssocket);
server->ssocket = NULL;
+ } else if (cifs_rdma_enabled(server)) {
+ smbd_destroy(server);
}
server->sequence_number = 0;
server->session_estab = false;
@@ -338,12 +340,6 @@ cifs_abort_connection(struct TCP_Server_Info *server)
mid_execute_callback(mid);
release_mid(mid);
}
-
- if (cifs_rdma_enabled(server)) {
- cifs_server_lock(server);
- smbd_destroy(server);
- cifs_server_unlock(server);
- }
}
static bool cifs_tcp_ses_needs_reconnect(struct TCP_Server_Info *server, int num_targets)
@@ -2015,39 +2011,31 @@ static int match_session(struct cifs_ses *ses,
/**
* cifs_setup_ipc - helper to setup the IPC tcon for the session
* @ses: smb session to issue the request on
- * @ctx: the superblock configuration context to use for building the
- * new tree connection for the IPC (interprocess communication RPC)
+ * @seal: whether encryption is requested
*
* A new IPC connection is made and stored in the session
* tcon_ipc. The IPC tcon has the same lifetime as the session.
*/
-static int
-cifs_setup_ipc(struct cifs_ses *ses, struct smb3_fs_context *ctx)
+struct cifs_tcon *cifs_setup_ipc(struct cifs_ses *ses, bool seal)
{
int rc = 0, xid;
struct cifs_tcon *tcon;
char unc[SERVER_NAME_LENGTH + sizeof("//x/IPC$")] = {0};
- bool seal = false;
struct TCP_Server_Info *server = ses->server;
/*
* If the mount request that resulted in the creation of the
* session requires encryption, force IPC to be encrypted too.
*/
- if (ctx->seal) {
- if (server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION)
- seal = true;
- else {
- cifs_server_dbg(VFS,
- "IPC: server doesn't support encryption\n");
- return -EOPNOTSUPP;
- }
+ if (seal && !(server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION)) {
+ cifs_server_dbg(VFS, "IPC: server doesn't support encryption\n");
+ return ERR_PTR(-EOPNOTSUPP);
}
/* no need to setup directory caching on IPC share, so pass in false */
tcon = tcon_info_alloc(false, netfs_trace_tcon_ref_new_ipc);
if (tcon == NULL)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
spin_lock(&server->srv_lock);
scnprintf(unc, sizeof(unc), "\\\\%s\\IPC$", server->hostname);
@@ -2057,13 +2045,13 @@ cifs_setup_ipc(struct cifs_ses *ses, struct smb3_fs_context *ctx)
tcon->ses = ses;
tcon->ipc = true;
tcon->seal = seal;
- rc = server->ops->tree_connect(xid, ses, unc, tcon, ctx->local_nls);
+ rc = server->ops->tree_connect(xid, ses, unc, tcon, ses->local_nls);
free_xid(xid);
if (rc) {
- cifs_server_dbg(VFS, "failed to connect to IPC (rc=%d)\n", rc);
+ cifs_server_dbg(VFS | ONCE, "failed to connect to IPC (rc=%d)\n", rc);
tconInfoFree(tcon, netfs_trace_tcon_ref_free_ipc_fail);
- goto out;
+ return ERR_PTR(rc);
}
cifs_dbg(FYI, "IPC tcon rc=%d ipc tid=0x%x\n", rc, tcon->tid);
@@ -2071,9 +2059,7 @@ cifs_setup_ipc(struct cifs_ses *ses, struct smb3_fs_context *ctx)
spin_lock(&tcon->tc_lock);
tcon->status = TID_GOOD;
spin_unlock(&tcon->tc_lock);
- ses->tcon_ipc = tcon;
-out:
- return rc;
+ return tcon;
}
static struct cifs_ses *
@@ -2347,6 +2333,7 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
{
struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)&server->dstaddr;
struct sockaddr_in *addr = (struct sockaddr_in *)&server->dstaddr;
+ struct cifs_tcon *ipc;
struct cifs_ses *ses;
unsigned int xid;
int retries = 0;
@@ -2525,7 +2512,12 @@ retry_new_session:
list_add(&ses->smb_ses_list, &server->smb_ses_list);
spin_unlock(&cifs_tcp_ses_lock);
- cifs_setup_ipc(ses, ctx);
+ ipc = cifs_setup_ipc(ses, ctx->seal);
+ spin_lock(&cifs_tcp_ses_lock);
+ spin_lock(&ses->ses_lock);
+ ses->tcon_ipc = !IS_ERR(ipc) ? ipc : NULL;
+ spin_unlock(&ses->ses_lock);
+ spin_unlock(&cifs_tcp_ses_lock);
free_xid(xid);
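cifs_setup_ipc() now returns the IPC tcon directly, encoding errno values in the pointer via the kernel's ERR_PTR convention, and the caller publishes ses->tcon_ipc under the session locks. A stand-alone model of that convention; the ERR_PTR/IS_ERR stand-ins mirror the kernel helpers, and setup_ipc here is purely illustrative:

#include <stdio.h>
#include <stdint.h>

/* Minimal stand-ins for the kernel's ERR_PTR/IS_ERR helpers. */
#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

struct tcon { int id; };

/* Returns a tcon on success or an encoded errno on failure. */
static struct tcon *setup_ipc(int fail)
{
	static struct tcon t = { .id = 1 };

	if (fail)
		return ERR_PTR(-95 /* -EOPNOTSUPP */);
	return &t;
}

int main(void)
{
	struct tcon *ipc = setup_ipc(0);

	/* Mirror of the caller pattern: store NULL on error. */
	struct tcon *cached = !IS_ERR(ipc) ? ipc : NULL;

	printf("cached=%p\n", (void *)cached);
	return 0;
}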
diff --git a/fs/smb/client/dfs_cache.c b/fs/smb/client/dfs_cache.c
index 4dada26d56b5..f2ad0ccd08a7 100644
--- a/fs/smb/client/dfs_cache.c
+++ b/fs/smb/client/dfs_cache.c
@@ -1120,24 +1120,63 @@ static bool target_share_equal(struct cifs_tcon *tcon, const char *s1)
return match;
}
-static bool is_ses_good(struct cifs_ses *ses)
+static bool is_ses_good(struct cifs_tcon *tcon, struct cifs_ses *ses)
{
struct TCP_Server_Info *server = ses->server;
- struct cifs_tcon *tcon = ses->tcon_ipc;
+ struct cifs_tcon *ipc = NULL;
bool ret;
+ spin_lock(&cifs_tcp_ses_lock);
spin_lock(&ses->ses_lock);
spin_lock(&ses->chan_lock);
+
ret = !cifs_chan_needs_reconnect(ses, server) &&
- ses->ses_status == SES_GOOD &&
- !tcon->need_reconnect;
+ ses->ses_status == SES_GOOD;
+
spin_unlock(&ses->chan_lock);
+
+ if (!ret)
+ goto out;
+
+ if (likely(ses->tcon_ipc)) {
+ if (ses->tcon_ipc->need_reconnect) {
+ ret = false;
+ goto out;
+ }
+ } else {
+ spin_unlock(&ses->ses_lock);
+ spin_unlock(&cifs_tcp_ses_lock);
+
+ ipc = cifs_setup_ipc(ses, tcon->seal);
+
+ spin_lock(&cifs_tcp_ses_lock);
+ spin_lock(&ses->ses_lock);
+ if (!IS_ERR(ipc)) {
+ if (!ses->tcon_ipc) {
+ ses->tcon_ipc = ipc;
+ ipc = NULL;
+ }
+ } else {
+ ret = false;
+ ipc = NULL;
+ }
+ }
+
+out:
spin_unlock(&ses->ses_lock);
+ spin_unlock(&cifs_tcp_ses_lock);
+ if (ipc && server->ops->tree_disconnect) {
+ unsigned int xid = get_xid();
+
+ (void)server->ops->tree_disconnect(xid, ipc);
+ _free_xid(xid);
+ }
+ tconInfoFree(ipc, netfs_trace_tcon_ref_free_ipc);
return ret;
}
/* Refresh dfs referral of @ses */
-static void refresh_ses_referral(struct cifs_ses *ses)
+static void refresh_ses_referral(struct cifs_tcon *tcon, struct cifs_ses *ses)
{
struct cache_entry *ce;
unsigned int xid;
@@ -1153,7 +1192,7 @@ static void refresh_ses_referral(struct cifs_ses *ses)
}
ses = CIFS_DFS_ROOT_SES(ses);
- if (!is_ses_good(ses)) {
+ if (!is_ses_good(tcon, ses)) {
cifs_dbg(FYI, "%s: skip cache refresh due to disconnected ipc\n",
__func__);
goto out;
@@ -1241,7 +1280,7 @@ static void refresh_tcon_referral(struct cifs_tcon *tcon, bool force_refresh)
up_read(&htable_rw_lock);
ses = CIFS_DFS_ROOT_SES(ses);
- if (!is_ses_good(ses)) {
+ if (!is_ses_good(tcon, ses)) {
cifs_dbg(FYI, "%s: skip cache refresh due to disconnected ipc\n",
__func__);
goto out;
@@ -1309,7 +1348,7 @@ void dfs_cache_refresh(struct work_struct *work)
tcon = container_of(work, struct cifs_tcon, dfs_cache_work.work);
list_for_each_entry(ses, &tcon->dfs_ses_list, dlist)
- refresh_ses_referral(ses);
+ refresh_ses_referral(tcon, ses);
refresh_tcon_referral(tcon, false);
queue_delayed_work(dfscache_wq, &tcon->dfs_cache_work,
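is_ses_good() now creates the IPC tcon lazily when a session lacks one; since cifs_setup_ipc() can sleep, the spinlocks are dropped around the call and the result is installed only if tcon_ipc is still unset, with the losing tcon disconnected and freed afterwards. A minimal user-space model of that drop-build-retake-publish pattern, using a pthread mutex in place of the spinlocks (all names illustrative):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static void *shared; /* stands in for ses->tcon_ipc */

static void *build_slow(void) { return malloc(1); }

static void ensure_shared(void)
{
	pthread_mutex_lock(&lock);
	if (!shared) {
		pthread_mutex_unlock(&lock);
		void *candidate = build_slow(); /* may sleep; no locks held */
		pthread_mutex_lock(&lock);
		if (!shared)
			shared = candidate;     /* we won the race */
		else
			free(candidate);        /* someone beat us; discard */
	}
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	ensure_shared();
	printf("shared=%p\n", shared);
	return 0;
}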
diff --git a/fs/smb/client/inode.c b/fs/smb/client/inode.c
index 098a79b7a959..cac355364e43 100644
--- a/fs/smb/client/inode.c
+++ b/fs/smb/client/inode.c
@@ -2484,11 +2484,8 @@ cifs_do_rename(const unsigned int xid, struct dentry *from_dentry,
}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
do_rename_exit:
- if (rc == 0) {
+ if (rc == 0)
d_move(from_dentry, to_dentry);
- /* Force a new lookup */
- d_drop(from_dentry);
- }
cifs_put_tlink(tlink);
return rc;
}
diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
index 95cd484cfbba..1e39f2165e42 100644
--- a/fs/smb/client/smb2ops.c
+++ b/fs/smb/client/smb2ops.c
@@ -2799,11 +2799,12 @@ smb2_query_info_compound(const unsigned int xid, struct cifs_tcon *tcon,
struct cifs_fid fid;
int rc;
__le16 *utf16_path;
- struct cached_fid *cfid = NULL;
+ struct cached_fid *cfid;
int retries = 0, cur_sleep = 1;
replay_again:
/* reinitialize for possible replay */
+ cfid = NULL;
flags = CIFS_CP_CREATE_CLOSE_OP;
oplock = SMB2_OPLOCK_LEVEL_NONE;
server = cifs_pick_channel(ses);
@@ -5446,7 +5447,6 @@ struct smb_version_operations smb20_operations = {
.get_lease_key = smb2_get_lease_key,
.set_lease_key = smb2_set_lease_key,
.new_lease_key = smb2_new_lease_key,
- .calc_signature = smb2_calc_signature,
.is_read_op = smb2_is_read_op,
.set_oplock_level = smb2_set_oplock_level,
.create_lease_buf = smb2_create_lease_buf,
@@ -5550,7 +5550,6 @@ struct smb_version_operations smb21_operations = {
.get_lease_key = smb2_get_lease_key,
.set_lease_key = smb2_set_lease_key,
.new_lease_key = smb2_new_lease_key,
- .calc_signature = smb2_calc_signature,
.is_read_op = smb21_is_read_op,
.set_oplock_level = smb21_set_oplock_level,
.create_lease_buf = smb2_create_lease_buf,
@@ -5660,7 +5659,6 @@ struct smb_version_operations smb30_operations = {
.set_lease_key = smb2_set_lease_key,
.new_lease_key = smb2_new_lease_key,
.generate_signingkey = generate_smb30signingkey,
- .calc_signature = smb3_calc_signature,
.set_integrity = smb3_set_integrity,
.is_read_op = smb21_is_read_op,
.set_oplock_level = smb3_set_oplock_level,
@@ -5777,7 +5775,6 @@ struct smb_version_operations smb311_operations = {
.set_lease_key = smb2_set_lease_key,
.new_lease_key = smb2_new_lease_key,
.generate_signingkey = generate_smb311signingkey,
- .calc_signature = smb3_calc_signature,
.set_integrity = smb3_set_integrity,
.is_read_op = smb21_is_read_op,
.set_oplock_level = smb3_set_oplock_level,
diff --git a/fs/smb/client/smb2proto.h b/fs/smb/client/smb2proto.h
index 6eb86d134abc..5241daaae543 100644
--- a/fs/smb/client/smb2proto.h
+++ b/fs/smb/client/smb2proto.h
@@ -39,12 +39,6 @@ extern struct mid_q_entry *smb2_setup_async_request(
struct TCP_Server_Info *server, struct smb_rqst *rqst);
extern struct cifs_tcon *smb2_find_smb_tcon(struct TCP_Server_Info *server,
__u64 ses_id, __u32 tid);
-extern int smb2_calc_signature(struct smb_rqst *rqst,
- struct TCP_Server_Info *server,
- bool allocate_crypto);
-extern int smb3_calc_signature(struct smb_rqst *rqst,
- struct TCP_Server_Info *server,
- bool allocate_crypto);
extern void smb2_echo_request(struct work_struct *work);
extern __le32 smb2_get_lease_state(struct cifsInodeInfo *cinode);
extern bool smb2_is_valid_oplock_break(char *buffer,
diff --git a/fs/smb/client/smb2transport.c b/fs/smb/client/smb2transport.c
index ad6068e17a2a..6a9b80385b86 100644
--- a/fs/smb/client/smb2transport.c
+++ b/fs/smb/client/smb2transport.c
@@ -209,9 +209,9 @@ smb2_find_smb_tcon(struct TCP_Server_Info *server, __u64 ses_id, __u32 tid)
return tcon;
}
-int
+static int
smb2_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server,
- bool allocate_crypto)
+ bool allocate_crypto)
{
int rc;
unsigned char smb2_signature[SMB2_HMACSHA256_SIZE];
@@ -465,9 +465,9 @@ generate_smb311signingkey(struct cifs_ses *ses,
return generate_smb3signingkey(ses, server, &triplet);
}
-int
+static int
smb3_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server,
- bool allocate_crypto)
+ bool allocate_crypto)
{
int rc;
unsigned char smb3_signature[SMB2_CMACAES_SIZE];
@@ -477,6 +477,9 @@ smb3_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server,
struct smb_rqst drqst;
u8 key[SMB3_SIGN_KEY_SIZE];
+ if (server->vals->protocol_id <= SMB21_PROT_ID)
+ return smb2_calc_signature(rqst, server, allocate_crypto);
+
rc = smb3_get_sign_key(le64_to_cpu(shdr->SessionId), server, key);
if (unlikely(rc)) {
cifs_server_dbg(FYI, "%s: Could not get signing key\n", __func__);
@@ -547,7 +550,6 @@ out:
static int
smb2_sign_rqst(struct smb_rqst *rqst, struct TCP_Server_Info *server)
{
- int rc = 0;
struct smb2_hdr *shdr;
struct smb2_sess_setup_req *ssr;
bool is_binding;
@@ -574,9 +576,7 @@ smb2_sign_rqst(struct smb_rqst *rqst, struct TCP_Server_Info *server)
return 0;
}
- rc = server->ops->calc_signature(rqst, server, false);
-
- return rc;
+ return smb3_calc_signature(rqst, server, false);
}
int
@@ -612,7 +612,7 @@ smb2_verify_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server)
memset(shdr->Signature, 0, SMB2_SIGNATURE_SIZE);
- rc = server->ops->calc_signature(rqst, server, true);
+ rc = smb3_calc_signature(rqst, server, true);
if (rc)
return rc;
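With the per-dialect calc_signature operation removed, smb3_calc_signature() performs the dialect check itself and falls back to the SMB 2.0/2.1 HMAC-SHA256 routine for older dialects. A hedged stand-alone sketch of that dispatch shape; the protocol ID constants follow the values in smb2pdu.h, and the signing bodies are stubs:

#include <stdio.h>

/* Dialect IDs as defined in smb2pdu.h. */
enum { SMB20_PROT_ID = 0x0202, SMB21_PROT_ID = 0x0210, SMB30_PROT_ID = 0x0300 };

static int smb2_sign(void) { puts("HMAC-SHA256"); return 0; }

static int smb3_sign(int protocol_id)
{
	/* SMB 2.0/2.1 still use HMAC-SHA256; SMB3+ uses AES-CMAC. */
	if (protocol_id <= SMB21_PROT_ID)
		return smb2_sign();
	puts("AES-CMAC");
	return 0;
}

int main(void)
{
	smb3_sign(SMB21_PROT_ID);
	smb3_sign(SMB30_PROT_ID);
	return 0;
}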
diff --git a/fs/smb/client/smbdirect.c b/fs/smb/client/smbdirect.c
index 49e2df3ad1f0..85a4c55b61b8 100644
--- a/fs/smb/client/smbdirect.c
+++ b/fs/smb/client/smbdirect.c
@@ -172,6 +172,7 @@ static void smbd_disconnect_wake_up_all(struct smbdirect_socket *sc)
* in order to notice the broken connection.
*/
wake_up_all(&sc->status_wait);
+ wake_up_all(&sc->send_io.lcredits.wait_queue);
wake_up_all(&sc->send_io.credits.wait_queue);
wake_up_all(&sc->send_io.pending.dec_wait_queue);
wake_up_all(&sc->send_io.pending.zero_wait_queue);
@@ -495,6 +496,7 @@ static void send_done(struct ib_cq *cq, struct ib_wc *wc)
struct smbdirect_send_io *request =
container_of(wc->wr_cqe, struct smbdirect_send_io, cqe);
struct smbdirect_socket *sc = request->socket;
+ int lcredits = 0;
log_rdma_send(INFO, "smbdirect_send_io 0x%p completed wc->status=%s\n",
request, ib_wc_status_msg(wc->status));
@@ -504,22 +506,24 @@ static void send_done(struct ib_cq *cq, struct ib_wc *wc)
request->sge[i].addr,
request->sge[i].length,
DMA_TO_DEVICE);
+ mempool_free(request, sc->send_io.mem.pool);
+ lcredits += 1;
if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_SEND) {
if (wc->status != IB_WC_WR_FLUSH_ERR)
log_rdma_send(ERR, "wc->status=%s wc->opcode=%d\n",
ib_wc_status_msg(wc->status), wc->opcode);
- mempool_free(request, sc->send_io.mem.pool);
smbd_disconnect_rdma_connection(sc);
return;
}
+ atomic_add(lcredits, &sc->send_io.lcredits.count);
+ wake_up(&sc->send_io.lcredits.wait_queue);
+
if (atomic_dec_and_test(&sc->send_io.pending.count))
wake_up(&sc->send_io.pending.zero_wait_queue);
wake_up(&sc->send_io.pending.dec_wait_queue);
-
- mempool_free(request, sc->send_io.mem.pool);
}
static void dump_smbdirect_negotiate_resp(struct smbdirect_negotiate_resp *resp)
@@ -567,6 +571,7 @@ static bool process_negotiation_response(
log_rdma_event(ERR, "error: credits_granted==0\n");
return false;
}
+ atomic_set(&sc->send_io.lcredits.count, sp->send_credit_target);
atomic_set(&sc->send_io.credits.count, le16_to_cpu(packet->credits_granted));
if (le32_to_cpu(packet->preferred_send_size) > sp->max_recv_size) {
@@ -1114,6 +1119,24 @@ static int smbd_post_send_iter(struct smbdirect_socket *sc,
struct smbdirect_data_transfer *packet;
int new_credits = 0;
+wait_lcredit:
+ /* Wait for local send credits */
+ rc = wait_event_interruptible(sc->send_io.lcredits.wait_queue,
+ atomic_read(&sc->send_io.lcredits.count) > 0 ||
+ sc->status != SMBDIRECT_SOCKET_CONNECTED);
+ if (rc)
+ goto err_wait_lcredit;
+
+ if (sc->status != SMBDIRECT_SOCKET_CONNECTED) {
+ log_outgoing(ERR, "disconnected not sending on wait_credit\n");
+ rc = -EAGAIN;
+ goto err_wait_lcredit;
+ }
+ if (unlikely(atomic_dec_return(&sc->send_io.lcredits.count) < 0)) {
+ atomic_inc(&sc->send_io.lcredits.count);
+ goto wait_lcredit;
+ }
+
wait_credit:
/* Wait for send credits. A SMBD packet needs one credit */
rc = wait_event_interruptible(sc->send_io.credits.wait_queue,
@@ -1132,23 +1155,6 @@ wait_credit:
goto wait_credit;
}
-wait_send_queue:
- wait_event(sc->send_io.pending.dec_wait_queue,
- atomic_read(&sc->send_io.pending.count) < sp->send_credit_target ||
- sc->status != SMBDIRECT_SOCKET_CONNECTED);
-
- if (sc->status != SMBDIRECT_SOCKET_CONNECTED) {
- log_outgoing(ERR, "disconnected not sending on wait_send_queue\n");
- rc = -EAGAIN;
- goto err_wait_send_queue;
- }
-
- if (unlikely(atomic_inc_return(&sc->send_io.pending.count) >
- sp->send_credit_target)) {
- atomic_dec(&sc->send_io.pending.count);
- goto wait_send_queue;
- }
-
request = mempool_alloc(sc->send_io.mem.pool, GFP_KERNEL);
if (!request) {
rc = -ENOMEM;
@@ -1229,10 +1235,21 @@ wait_send_queue:
le32_to_cpu(packet->data_length),
le32_to_cpu(packet->remaining_data_length));
+ /*
+ * Now that we hold both a local and a remote credit,
+ * account this request as pending.
+ */
+ atomic_inc(&sc->send_io.pending.count);
+
rc = smbd_post_send(sc, request);
if (!rc)
return 0;
+ if (atomic_dec_and_test(&sc->send_io.pending.count))
+ wake_up(&sc->send_io.pending.zero_wait_queue);
+
+ wake_up(&sc->send_io.pending.dec_wait_queue);
+
err_dma:
for (i = 0; i < request->num_sge; i++)
if (request->sge[i].addr)
@@ -1246,14 +1263,14 @@ err_dma:
atomic_sub(new_credits, &sc->recv_io.credits.count);
err_alloc:
- if (atomic_dec_and_test(&sc->send_io.pending.count))
- wake_up(&sc->send_io.pending.zero_wait_queue);
-
-err_wait_send_queue:
- /* roll back send credits and pending */
atomic_inc(&sc->send_io.credits.count);
+ wake_up(&sc->send_io.credits.wait_queue);
err_wait_credit:
+ atomic_inc(&sc->send_io.lcredits.count);
+ wake_up(&sc->send_io.lcredits.wait_queue);
+
+err_wait_lcredit:
return rc;
}
@@ -1767,6 +1784,7 @@ static struct smbd_connection *_smbd_get_connection(
struct smbdirect_socket *sc;
struct smbdirect_socket_parameters *sp;
struct rdma_conn_param conn_param;
+ struct ib_qp_cap qp_cap;
struct ib_qp_init_attr qp_attr;
struct sockaddr_in *addr_in = (struct sockaddr_in *) dstaddr;
struct ib_port_immutable port_immutable;
@@ -1838,6 +1856,25 @@ static struct smbd_connection *_smbd_get_connection(
goto config_failed;
}
+ sp->responder_resources =
+ min_t(u8, sp->responder_resources,
+ sc->ib.dev->attrs.max_qp_rd_atom);
+ log_rdma_mr(INFO, "responder_resources=%d\n",
+ sp->responder_resources);
+
+ /*
+ * We allocate sp->responder_resources * 2 MRs,
+ * and each MR needs WRs for both REG and INV,
+ * so we multiply by 4 in total.
+ *
+ * +1 for ib_drain_qp()
+ */
+ memset(&qp_cap, 0, sizeof(qp_cap));
+ qp_cap.max_send_wr = sp->send_credit_target + sp->responder_resources * 4 + 1;
+ qp_cap.max_recv_wr = sp->recv_credit_max + 1;
+ qp_cap.max_send_sge = SMBDIRECT_SEND_IO_MAX_SGE;
+ qp_cap.max_recv_sge = SMBDIRECT_RECV_IO_MAX_SGE;
+
sc->ib.pd = ib_alloc_pd(sc->ib.dev, 0);
if (IS_ERR(sc->ib.pd)) {
rc = PTR_ERR(sc->ib.pd);
@@ -1848,7 +1885,7 @@ static struct smbd_connection *_smbd_get_connection(
sc->ib.send_cq =
ib_alloc_cq_any(sc->ib.dev, sc,
- sp->send_credit_target, IB_POLL_SOFTIRQ);
+ qp_cap.max_send_wr, IB_POLL_SOFTIRQ);
if (IS_ERR(sc->ib.send_cq)) {
sc->ib.send_cq = NULL;
goto alloc_cq_failed;
@@ -1856,7 +1893,7 @@ static struct smbd_connection *_smbd_get_connection(
sc->ib.recv_cq =
ib_alloc_cq_any(sc->ib.dev, sc,
- sp->recv_credit_max, IB_POLL_SOFTIRQ);
+ qp_cap.max_recv_wr, IB_POLL_SOFTIRQ);
if (IS_ERR(sc->ib.recv_cq)) {
sc->ib.recv_cq = NULL;
goto alloc_cq_failed;
@@ -1865,11 +1902,7 @@ static struct smbd_connection *_smbd_get_connection(
memset(&qp_attr, 0, sizeof(qp_attr));
qp_attr.event_handler = smbd_qp_async_error_upcall;
qp_attr.qp_context = sc;
- qp_attr.cap.max_send_wr = sp->send_credit_target;
- qp_attr.cap.max_recv_wr = sp->recv_credit_max;
- qp_attr.cap.max_send_sge = SMBDIRECT_SEND_IO_MAX_SGE;
- qp_attr.cap.max_recv_sge = SMBDIRECT_RECV_IO_MAX_SGE;
- qp_attr.cap.max_inline_data = 0;
+ qp_attr.cap = qp_cap;
qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
qp_attr.qp_type = IB_QPT_RC;
qp_attr.send_cq = sc->ib.send_cq;
@@ -1883,12 +1916,6 @@ static struct smbd_connection *_smbd_get_connection(
}
sc->ib.qp = sc->rdma.cm_id->qp;
- sp->responder_resources =
- min_t(u8, sp->responder_resources,
- sc->ib.dev->attrs.max_qp_rd_atom);
- log_rdma_mr(INFO, "responder_resources=%d\n",
- sp->responder_resources);
-
memset(&conn_param, 0, sizeof(conn_param));
conn_param.initiator_depth = sp->initiator_depth;
conn_param.responder_resources = sp->responder_resources;
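The new lcredits counter caps the number of send WRs outstanding on the QP locally, separate from the peer-granted send credits, and send_done() returns one local credit per completed WR. A user-space model of that accounting with C11 atomics, with the kernel wait queues reduced to comments (all names illustrative):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int lcredits;

static int take_lcredit(void)
{
	/* Mirrors the wait_lcredit loop, minus the wait queue. */
	for (;;) {
		if (atomic_load(&lcredits) <= 0)
			return -1; /* would sleep on lcredits.wait_queue */
		if (atomic_fetch_sub(&lcredits, 1) > 0)
			return 0;
		atomic_fetch_add(&lcredits, 1); /* lost the race, retry */
	}
}

static void send_done(int completed)
{
	/* Each completed send WR returns one local credit. */
	atomic_fetch_add(&lcredits, completed);
	/* would wake_up(lcredits.wait_queue) here */
}

int main(void)
{
	atomic_store(&lcredits, 2); /* send_credit_target */
	printf("take=%d take=%d take=%d\n",
	       take_lcredit(), take_lcredit(), take_lcredit());
	send_done(1);
	printf("after completion take=%d\n", take_lcredit());
	return 0;
}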
diff --git a/fs/smb/client/trace.c b/fs/smb/client/trace.c
index 465483787193..16b0e719731f 100644
--- a/fs/smb/client/trace.c
+++ b/fs/smb/client/trace.c
@@ -4,5 +4,6 @@
*
* Author(s): Steve French <stfrench@microsoft.com>
*/
+#include "cifsglob.h"
#define CREATE_TRACE_POINTS
#include "trace.h"
diff --git a/fs/smb/common/smbdirect/smbdirect_socket.h b/fs/smb/common/smbdirect/smbdirect_socket.h
index 361db7f9f623..ee5a90d691c8 100644
--- a/fs/smb/common/smbdirect/smbdirect_socket.h
+++ b/fs/smb/common/smbdirect/smbdirect_socket.h
@@ -142,7 +142,15 @@ struct smbdirect_socket {
} mem;
/*
- * The credit state for the send side
+ * The local credit state for ib_post_send()
+ */
+ struct {
+ atomic_t count;
+ wait_queue_head_t wait_queue;
+ } lcredits;
+
+ /*
+ * The remote credit state for the send side
*/
struct {
atomic_t count;
@@ -337,6 +345,9 @@ static __always_inline void smbdirect_socket_init(struct smbdirect_socket *sc)
INIT_DELAYED_WORK(&sc->idle.timer_work, __smbdirect_socket_disabled_work);
disable_delayed_work_sync(&sc->idle.timer_work);
+ atomic_set(&sc->send_io.lcredits.count, 0);
+ init_waitqueue_head(&sc->send_io.lcredits.wait_queue);
+
atomic_set(&sc->send_io.credits.count, 0);
init_waitqueue_head(&sc->send_io.credits.wait_queue);
diff --git a/fs/smb/server/transport_ipc.c b/fs/smb/server/transport_ipc.c
index 46f87fd1ce1c..2c08cccfa680 100644
--- a/fs/smb/server/transport_ipc.c
+++ b/fs/smb/server/transport_ipc.c
@@ -263,10 +263,16 @@ static void ipc_msg_handle_free(int handle)
static int handle_response(int type, void *payload, size_t sz)
{
- unsigned int handle = *(unsigned int *)payload;
+ unsigned int handle;
struct ipc_msg_table_entry *entry;
int ret = 0;
+ /* Prevent 4-byte read beyond declared payload size */
+ if (sz < sizeof(unsigned int))
+ return -EINVAL;
+
+ handle = *(unsigned int *)payload;
+
ipc_update_last_active();
down_read(&ipc_msg_table_lock);
hash_for_each_possible(ipc_msg_table, entry, ipc_table_hlist, handle) {
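handle_response() now refuses payloads shorter than the leading handle before reading it. A stand-alone sketch of the same guard; memcpy() is used here instead of the kernel's direct dereference to keep the example portable:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Reject payloads too short to contain the leading handle. */
static int parse_handle(const void *payload, size_t sz, unsigned int *handle)
{
	if (sz < sizeof(unsigned int))
		return -22; /* -EINVAL */
	memcpy(handle, payload, sizeof(*handle));
	return 0;
}

int main(void)
{
	unsigned char buf[8] = { 42, 0, 0, 0 };
	unsigned int handle;

	if (parse_handle(buf, sizeof(buf), &handle) == 0)
		printf("handle=%u\n", handle);
	printf("short=%d\n", parse_handle(buf, 2, &handle));
	return 0;
}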
diff --git a/fs/smb/server/transport_rdma.c b/fs/smb/server/transport_rdma.c
index a201c5871a77..7d86553fcc7c 100644
--- a/fs/smb/server/transport_rdma.c
+++ b/fs/smb/server/transport_rdma.c
@@ -219,6 +219,7 @@ static void smb_direct_disconnect_wake_up_all(struct smbdirect_socket *sc)
* in order to notice the broken connection.
*/
wake_up_all(&sc->status_wait);
+ wake_up_all(&sc->send_io.lcredits.wait_queue);
wake_up_all(&sc->send_io.credits.wait_queue);
wake_up_all(&sc->send_io.pending.zero_wait_queue);
wake_up_all(&sc->recv_io.reassembly.wait_queue);
@@ -417,9 +418,6 @@ static struct smb_direct_transport *alloc_transport(struct rdma_cm_id *cm_id)
sc->ib.dev = sc->rdma.cm_id->device;
- INIT_WORK(&sc->recv_io.posted.refill_work,
- smb_direct_post_recv_credits);
- INIT_WORK(&sc->idle.immediate_work, smb_direct_send_immediate_work);
INIT_DELAYED_WORK(&sc->idle.timer_work, smb_direct_idle_connection_timer);
conn = ksmbd_conn_alloc();
@@ -450,11 +448,10 @@ static void free_transport(struct smb_direct_transport *t)
struct smbdirect_recv_io *recvmsg;
disable_work_sync(&sc->disconnect_work);
- if (sc->status < SMBDIRECT_SOCKET_DISCONNECTING) {
+ if (sc->status < SMBDIRECT_SOCKET_DISCONNECTING)
smb_direct_disconnect_rdma_work(&sc->disconnect_work);
- wait_event_interruptible(sc->status_wait,
- sc->status == SMBDIRECT_SOCKET_DISCONNECTED);
- }
+ if (sc->status < SMBDIRECT_SOCKET_DISCONNECTED)
+ wait_event(sc->status_wait, sc->status == SMBDIRECT_SOCKET_DISCONNECTED);
/*
* Wake up all waiters in all wait queues
@@ -469,9 +466,11 @@ static void free_transport(struct smb_direct_transport *t)
disable_delayed_work_sync(&sc->idle.timer_work);
disable_work_sync(&sc->idle.immediate_work);
+ if (sc->rdma.cm_id)
+ rdma_lock_handler(sc->rdma.cm_id);
+
if (sc->ib.qp) {
ib_drain_qp(sc->ib.qp);
- ib_mr_pool_destroy(sc->ib.qp, &sc->ib.qp->rdma_mrs);
sc->ib.qp = NULL;
rdma_destroy_qp(sc->rdma.cm_id);
}
@@ -498,8 +497,10 @@ static void free_transport(struct smb_direct_transport *t)
ib_free_cq(sc->ib.recv_cq);
if (sc->ib.pd)
ib_dealloc_pd(sc->ib.pd);
- if (sc->rdma.cm_id)
+ if (sc->rdma.cm_id) {
+ rdma_unlock_handler(sc->rdma.cm_id);
rdma_destroy_id(sc->rdma.cm_id);
+ }
smb_direct_destroy_pools(sc);
ksmbd_conn_free(KSMBD_TRANS(t)->conn);
@@ -524,6 +525,12 @@ static void smb_direct_free_sendmsg(struct smbdirect_socket *sc,
{
int i;
+ /*
+ * The list needs to be empty!
+ * The caller should take care of it.
+ */
+ WARN_ON_ONCE(!list_empty(&msg->sibling_list));
+
if (msg->num_sge > 0) {
ib_dma_unmap_single(sc->ib.dev,
msg->sge[0].addr, msg->sge[0].length,
@@ -909,9 +916,9 @@ static void smb_direct_post_recv_credits(struct work_struct *work)
static void send_done(struct ib_cq *cq, struct ib_wc *wc)
{
- struct smbdirect_send_io *sendmsg, *sibling;
+ struct smbdirect_send_io *sendmsg, *sibling, *next;
struct smbdirect_socket *sc;
- struct list_head *pos, *prev, *end;
+ int lcredits = 0;
sendmsg = container_of(wc->wr_cqe, struct smbdirect_send_io, cqe);
sc = sendmsg->socket;
@@ -920,27 +927,31 @@ static void send_done(struct ib_cq *cq, struct ib_wc *wc)
ib_wc_status_msg(wc->status), wc->status,
wc->opcode);
+ /*
+ * Free possible siblings and then the main send_io
+ */
+ list_for_each_entry_safe(sibling, next, &sendmsg->sibling_list, sibling_list) {
+ list_del_init(&sibling->sibling_list);
+ smb_direct_free_sendmsg(sc, sibling);
+ lcredits += 1;
+ }
+ /* Note this frees wc->wr_cqe, but not wc */
+ smb_direct_free_sendmsg(sc, sendmsg);
+ lcredits += 1;
+
if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_SEND) {
pr_err("Send error. status='%s (%d)', opcode=%d\n",
ib_wc_status_msg(wc->status), wc->status,
wc->opcode);
smb_direct_disconnect_rdma_connection(sc);
+ return;
}
+ atomic_add(lcredits, &sc->send_io.lcredits.count);
+ wake_up(&sc->send_io.lcredits.wait_queue);
+
if (atomic_dec_and_test(&sc->send_io.pending.count))
wake_up(&sc->send_io.pending.zero_wait_queue);
-
- /* iterate and free the list of messages in reverse. the list's head
- * is invalid.
- */
- for (pos = &sendmsg->sibling_list, prev = pos->prev, end = sendmsg->sibling_list.next;
- prev != end; pos = prev, prev = prev->prev) {
- sibling = container_of(pos, struct smbdirect_send_io, sibling_list);
- smb_direct_free_sendmsg(sc, sibling);
- }
-
- sibling = container_of(pos, struct smbdirect_send_io, sibling_list);
- smb_direct_free_sendmsg(sc, sibling);
}
static int manage_credits_prior_sending(struct smbdirect_socket *sc)
@@ -988,8 +999,6 @@ static int smb_direct_post_send(struct smbdirect_socket *sc,
ret = ib_post_send(sc->ib.qp, wr, NULL);
if (ret) {
pr_err("failed to post send: %d\n", ret);
- if (atomic_dec_and_test(&sc->send_io.pending.count))
- wake_up(&sc->send_io.pending.zero_wait_queue);
smb_direct_disconnect_rdma_connection(sc);
}
return ret;
@@ -1032,19 +1041,29 @@ static int smb_direct_flush_send_list(struct smbdirect_socket *sc,
last->wr.send_flags = IB_SEND_SIGNALED;
last->wr.wr_cqe = &last->cqe;
+ /*
+ * Remove last from send_ctx->msg_list
+ * and splice the rest of send_ctx->msg_list
+ * to last->sibling_list.
+ *
+ * send_ctx->msg_list is a valid empty list
+ * at the end.
+ */
+ list_del_init(&last->sibling_list);
+ list_splice_tail_init(&send_ctx->msg_list, &last->sibling_list);
+ send_ctx->wr_cnt = 0;
+
ret = smb_direct_post_send(sc, &first->wr);
- if (!ret) {
- smb_direct_send_ctx_init(send_ctx,
- send_ctx->need_invalidate_rkey,
- send_ctx->remote_key);
- } else {
- atomic_add(send_ctx->wr_cnt, &sc->send_io.credits.count);
- wake_up(&sc->send_io.credits.wait_queue);
- list_for_each_entry_safe(first, last, &send_ctx->msg_list,
- sibling_list) {
- smb_direct_free_sendmsg(sc, first);
+ if (ret) {
+ struct smbdirect_send_io *sibling, *next;
+
+ list_for_each_entry_safe(sibling, next, &last->sibling_list, sibling_list) {
+ list_del_init(&sibling->sibling_list);
+ smb_direct_free_sendmsg(sc, sibling);
}
+ smb_direct_free_sendmsg(sc, last);
}
+
return ret;
}
@@ -1070,6 +1089,23 @@ static int wait_for_credits(struct smbdirect_socket *sc,
} while (true);
}
+static int wait_for_send_lcredit(struct smbdirect_socket *sc,
+ struct smbdirect_send_batch *send_ctx)
+{
+ if (send_ctx && (atomic_read(&sc->send_io.lcredits.count) <= 1)) {
+ int ret;
+
+ ret = smb_direct_flush_send_list(sc, send_ctx, false);
+ if (ret)
+ return ret;
+ }
+
+ return wait_for_credits(sc,
+ &sc->send_io.lcredits.wait_queue,
+ &sc->send_io.lcredits.count,
+ 1);
+}
+
static int wait_for_send_credits(struct smbdirect_socket *sc,
struct smbdirect_send_batch *send_ctx)
{
@@ -1257,9 +1293,13 @@ static int smb_direct_post_send_data(struct smbdirect_socket *sc,
int data_length;
struct scatterlist sg[SMBDIRECT_SEND_IO_MAX_SGE - 1];
+ ret = wait_for_send_lcredit(sc, send_ctx);
+ if (ret)
+ goto lcredit_failed;
+
ret = wait_for_send_credits(sc, send_ctx);
if (ret)
- return ret;
+ goto credit_failed;
data_length = 0;
for (i = 0; i < niov; i++)
@@ -1267,10 +1307,8 @@ static int smb_direct_post_send_data(struct smbdirect_socket *sc,
ret = smb_direct_create_header(sc, data_length, remaining_data_length,
&msg);
- if (ret) {
- atomic_inc(&sc->send_io.credits.count);
- return ret;
- }
+ if (ret)
+ goto header_failed;
for (i = 0; i < niov; i++) {
struct ib_sge *sge;
@@ -1308,7 +1346,11 @@ static int smb_direct_post_send_data(struct smbdirect_socket *sc,
return 0;
err:
smb_direct_free_sendmsg(sc, msg);
+header_failed:
atomic_inc(&sc->send_io.credits.count);
+credit_failed:
+ atomic_inc(&sc->send_io.lcredits.count);
+lcredit_failed:
return ret;
}
@@ -1687,10 +1729,10 @@ static int smb_direct_cm_handler(struct rdma_cm_id *cm_id,
}
case RDMA_CM_EVENT_DEVICE_REMOVAL:
case RDMA_CM_EVENT_DISCONNECTED: {
- ib_drain_qp(sc->ib.qp);
-
sc->status = SMBDIRECT_SOCKET_DISCONNECTED;
smb_direct_disconnect_rdma_work(&sc->disconnect_work);
+ if (sc->ib.qp)
+ ib_drain_qp(sc->ib.qp);
break;
}
case RDMA_CM_EVENT_CONNECT_ERROR: {
@@ -1864,27 +1906,17 @@ static int smb_direct_prepare_negotiation(struct smbdirect_socket *sc)
goto out_err;
}
- smb_direct_post_recv_credits(&sc->recv_io.posted.refill_work);
return 0;
out_err:
put_recvmsg(sc, recvmsg);
return ret;
}
-static unsigned int smb_direct_get_max_fr_pages(struct smbdirect_socket *sc)
-{
- return min_t(unsigned int,
- sc->ib.dev->attrs.max_fast_reg_page_list_len,
- 256);
-}
-
-static int smb_direct_init_params(struct smbdirect_socket *sc,
- struct ib_qp_cap *cap)
+static int smb_direct_init_params(struct smbdirect_socket *sc)
{
struct smbdirect_socket_parameters *sp = &sc->parameters;
- struct ib_device *device = sc->ib.dev;
- int max_send_sges, max_rw_wrs, max_send_wrs;
- unsigned int max_sge_per_wr, wrs_per_credit;
+ int max_send_sges;
+ unsigned int maxpages;
/* need 3 more sge. because a SMB_DIRECT header, SMB2 header,
* SMB2 response could be mapped.
@@ -1895,67 +1927,20 @@ static int smb_direct_init_params(struct smbdirect_socket *sc,
return -EINVAL;
}
- /* Calculate the number of work requests for RDMA R/W.
- * The maximum number of pages which can be registered
- * with one Memory region can be transferred with one
- * R/W credit. And at least 4 work requests for each credit
- * are needed for MR registration, RDMA R/W, local & remote
- * MR invalidation.
- */
- sc->rw_io.credits.num_pages = smb_direct_get_max_fr_pages(sc);
- sc->rw_io.credits.max = DIV_ROUND_UP(sp->max_read_write_size,
- (sc->rw_io.credits.num_pages - 1) *
- PAGE_SIZE);
-
- max_sge_per_wr = min_t(unsigned int, device->attrs.max_send_sge,
- device->attrs.max_sge_rd);
- max_sge_per_wr = max_t(unsigned int, max_sge_per_wr,
- max_send_sges);
- wrs_per_credit = max_t(unsigned int, 4,
- DIV_ROUND_UP(sc->rw_io.credits.num_pages,
- max_sge_per_wr) + 1);
- max_rw_wrs = sc->rw_io.credits.max * wrs_per_credit;
-
- max_send_wrs = sp->send_credit_target + max_rw_wrs;
- if (max_send_wrs > device->attrs.max_cqe ||
- max_send_wrs > device->attrs.max_qp_wr) {
- pr_err("consider lowering send_credit_target = %d\n",
- sp->send_credit_target);
- pr_err("Possible CQE overrun, device reporting max_cqe %d max_qp_wr %d\n",
- device->attrs.max_cqe, device->attrs.max_qp_wr);
- return -EINVAL;
- }
+ atomic_set(&sc->send_io.lcredits.count, sp->send_credit_target);
- if (sp->recv_credit_max > device->attrs.max_cqe ||
- sp->recv_credit_max > device->attrs.max_qp_wr) {
- pr_err("consider lowering receive_credit_max = %d\n",
- sp->recv_credit_max);
- pr_err("Possible CQE overrun, device reporting max_cpe %d max_qp_wr %d\n",
- device->attrs.max_cqe, device->attrs.max_qp_wr);
- return -EINVAL;
- }
-
- if (device->attrs.max_send_sge < SMBDIRECT_SEND_IO_MAX_SGE) {
- pr_err("warning: device max_send_sge = %d too small\n",
- device->attrs.max_send_sge);
- return -EINVAL;
- }
- if (device->attrs.max_recv_sge < SMBDIRECT_RECV_IO_MAX_SGE) {
- pr_err("warning: device max_recv_sge = %d too small\n",
- device->attrs.max_recv_sge);
- return -EINVAL;
- }
+ maxpages = DIV_ROUND_UP(sp->max_read_write_size, PAGE_SIZE);
+ sc->rw_io.credits.max = rdma_rw_mr_factor(sc->ib.dev,
+ sc->rdma.cm_id->port_num,
+ maxpages);
+ sc->rw_io.credits.num_pages = DIV_ROUND_UP(maxpages, sc->rw_io.credits.max);
+ /* add one extra in order to handle unaligned pages */
+ sc->rw_io.credits.max += 1;
sc->recv_io.credits.target = 1;
atomic_set(&sc->rw_io.credits.count, sc->rw_io.credits.max);
- cap->max_send_wr = max_send_wrs;
- cap->max_recv_wr = sp->recv_credit_max;
- cap->max_send_sge = SMBDIRECT_SEND_IO_MAX_SGE;
- cap->max_recv_sge = SMBDIRECT_RECV_IO_MAX_SGE;
- cap->max_inline_data = 0;
- cap->max_rdma_ctxs = sc->rw_io.credits.max;
return 0;
}
@@ -2029,13 +2014,129 @@ err:
return -ENOMEM;
}
-static int smb_direct_create_qpair(struct smbdirect_socket *sc,
- struct ib_qp_cap *cap)
+static u32 smb_direct_rdma_rw_send_wrs(struct ib_device *dev, const struct ib_qp_init_attr *attr)
+{
+ /*
+ * This could be split out of rdma_rw_init_qp()
+ * and be a helper function next to rdma_rw_mr_factor()
+ *
+ * We can't check unlikely(rdma_rw_force_mr) here,
+ * but that is most likely 0 anyway.
+ */
+ u32 factor;
+
+ WARN_ON_ONCE(attr->port_num == 0);
+
+ /*
+ * Each context needs at least one RDMA READ or WRITE WR.
+ *
+ * For some hardware we might need more, eventually we should ask the
+ * HCA driver for a multiplier here.
+ */
+ factor = 1;
+
+ /*
+ * If the device needs MRs to perform RDMA READ or WRITE operations,
+ * we'll need two additional MRs for the registrations and the
+ * invalidation.
+ */
+ if (rdma_protocol_iwarp(dev, attr->port_num) || dev->attrs.max_sgl_rd)
+ factor += 2; /* inv + reg */
+
+ return factor * attr->cap.max_rdma_ctxs;
+}
+
+static int smb_direct_create_qpair(struct smbdirect_socket *sc)
{
struct smbdirect_socket_parameters *sp = &sc->parameters;
int ret;
+ struct ib_qp_cap qp_cap;
struct ib_qp_init_attr qp_attr;
- int pages_per_rw;
+ u32 max_send_wr;
+ u32 rdma_send_wr;
+
+ /*
+ * Note that {rdma,ib}_create_qp() will call
+ * rdma_rw_init_qp() if cap->max_rdma_ctxs is not 0.
+ * It will adjust cap->max_send_wr to the required
+ * number of additional WRs for the RDMA RW operations.
+ * It will cap cap->max_send_wr to the device limit.
+ *
+ * +1 for ib_drain_qp
+ */
+ qp_cap.max_send_wr = sp->send_credit_target + 1;
+ qp_cap.max_recv_wr = sp->recv_credit_max + 1;
+ qp_cap.max_send_sge = SMBDIRECT_SEND_IO_MAX_SGE;
+ qp_cap.max_recv_sge = SMBDIRECT_RECV_IO_MAX_SGE;
+ qp_cap.max_inline_data = 0;
+ qp_cap.max_rdma_ctxs = sc->rw_io.credits.max;
+
+ /*
+ * Find out the number of max_send_wr
+ * after rdma_rw_init_qp() adjusted it.
+ *
+ * We only do it on a temporary variable,
+ * as rdma_create_qp() will trigger
+ * rdma_rw_init_qp() again.
+ */
+ memset(&qp_attr, 0, sizeof(qp_attr));
+ qp_attr.cap = qp_cap;
+ qp_attr.port_num = sc->rdma.cm_id->port_num;
+ rdma_send_wr = smb_direct_rdma_rw_send_wrs(sc->ib.dev, &qp_attr);
+ max_send_wr = qp_cap.max_send_wr + rdma_send_wr;
+
+ if (qp_cap.max_send_wr > sc->ib.dev->attrs.max_cqe ||
+ qp_cap.max_send_wr > sc->ib.dev->attrs.max_qp_wr) {
+ pr_err("Possible CQE overrun: max_send_wr %d\n",
+ qp_cap.max_send_wr);
+ pr_err("device %.*s reporting max_cqe %d max_qp_wr %d\n",
+ IB_DEVICE_NAME_MAX,
+ sc->ib.dev->name,
+ sc->ib.dev->attrs.max_cqe,
+ sc->ib.dev->attrs.max_qp_wr);
+ pr_err("consider lowering send_credit_target = %d\n",
+ sp->send_credit_target);
+ return -EINVAL;
+ }
+
+ if (qp_cap.max_rdma_ctxs &&
+ (max_send_wr >= sc->ib.dev->attrs.max_cqe ||
+ max_send_wr >= sc->ib.dev->attrs.max_qp_wr)) {
+ pr_err("Possible CQE overrun: rdma_send_wr %d + max_send_wr %d = %d\n",
+ rdma_send_wr, qp_cap.max_send_wr, max_send_wr);
+ pr_err("device %.*s reporting max_cqe %d max_qp_wr %d\n",
+ IB_DEVICE_NAME_MAX,
+ sc->ib.dev->name,
+ sc->ib.dev->attrs.max_cqe,
+ sc->ib.dev->attrs.max_qp_wr);
+ pr_err("consider lowering send_credit_target = %d, max_rdma_ctxs = %d\n",
+ sp->send_credit_target, qp_cap.max_rdma_ctxs);
+ return -EINVAL;
+ }
+
+ if (qp_cap.max_recv_wr > sc->ib.dev->attrs.max_cqe ||
+ qp_cap.max_recv_wr > sc->ib.dev->attrs.max_qp_wr) {
+ pr_err("Possible CQE overrun: max_recv_wr %d\n",
+ qp_cap.max_recv_wr);
+ pr_err("device %.*s reporting max_cqe %d max_qp_wr %d\n",
+ IB_DEVICE_NAME_MAX,
+ sc->ib.dev->name,
+ sc->ib.dev->attrs.max_cqe,
+ sc->ib.dev->attrs.max_qp_wr);
+ pr_err("consider lowering receive_credit_max = %d\n",
+ sp->recv_credit_max);
+ return -EINVAL;
+ }
+
+ if (qp_cap.max_send_sge > sc->ib.dev->attrs.max_send_sge ||
+ qp_cap.max_recv_sge > sc->ib.dev->attrs.max_recv_sge) {
+ pr_err("device %.*s max_send_sge/max_recv_sge = %d/%d too small\n",
+ IB_DEVICE_NAME_MAX,
+ sc->ib.dev->name,
+ sc->ib.dev->attrs.max_send_sge,
+ sc->ib.dev->attrs.max_recv_sge);
+ return -EINVAL;
+ }
sc->ib.pd = ib_alloc_pd(sc->ib.dev, 0);
if (IS_ERR(sc->ib.pd)) {
@@ -2046,8 +2147,7 @@ static int smb_direct_create_qpair(struct smbdirect_socket *sc,
}
sc->ib.send_cq = ib_alloc_cq_any(sc->ib.dev, sc,
- sp->send_credit_target +
- cap->max_rdma_ctxs,
+ max_send_wr,
IB_POLL_WORKQUEUE);
if (IS_ERR(sc->ib.send_cq)) {
pr_err("Can't create RDMA send CQ\n");
@@ -2057,7 +2157,7 @@ static int smb_direct_create_qpair(struct smbdirect_socket *sc,
}
sc->ib.recv_cq = ib_alloc_cq_any(sc->ib.dev, sc,
- sp->recv_credit_max,
+ qp_cap.max_recv_wr,
IB_POLL_WORKQUEUE);
if (IS_ERR(sc->ib.recv_cq)) {
pr_err("Can't create RDMA recv CQ\n");
@@ -2066,10 +2166,18 @@ static int smb_direct_create_qpair(struct smbdirect_socket *sc,
goto err;
}
+ /*
+ * We reset completely here, as the above use was
+ * only temporary to calculate max_send_wr and
+ * rdma_send_wr.
+ *
+ * rdma_create_qp() will trigger rdma_rw_init_qp()
+ * again if max_rdma_ctxs is not 0.
+ */
memset(&qp_attr, 0, sizeof(qp_attr));
qp_attr.event_handler = smb_direct_qpair_handler;
qp_attr.qp_context = sc;
- qp_attr.cap = *cap;
+ qp_attr.cap = qp_cap;
qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
qp_attr.qp_type = IB_QPT_RC;
qp_attr.send_cq = sc->ib.send_cq;
@@ -2085,18 +2193,6 @@ static int smb_direct_create_qpair(struct smbdirect_socket *sc,
sc->ib.qp = sc->rdma.cm_id->qp;
sc->rdma.cm_id->event_handler = smb_direct_cm_handler;
- pages_per_rw = DIV_ROUND_UP(sp->max_read_write_size, PAGE_SIZE) + 1;
- if (pages_per_rw > sc->ib.dev->attrs.max_sgl_rd) {
- ret = ib_mr_pool_init(sc->ib.qp, &sc->ib.qp->rdma_mrs,
- sc->rw_io.credits.max, IB_MR_TYPE_MEM_REG,
- sc->rw_io.credits.num_pages, 0);
- if (ret) {
- pr_err("failed to init mr pool count %zu pages %zu\n",
- sc->rw_io.credits.max, sc->rw_io.credits.num_pages);
- goto err;
- }
- }
-
return 0;
err:
if (sc->ib.qp) {
@@ -2154,8 +2250,8 @@ static int smb_direct_prepare(struct ksmbd_transport *t)
return -ECONNABORTED;
ret = smb_direct_check_recvmsg(recvmsg);
- if (ret == -ECONNABORTED)
- goto out;
+ if (ret)
+ goto put;
req = (struct smbdirect_negotiate_req *)recvmsg->packet;
sp->max_recv_size = min_t(int, sp->max_recv_size,
@@ -2170,23 +2266,46 @@ static int smb_direct_prepare(struct ksmbd_transport *t)
sc->recv_io.credits.target = min_t(u16, sc->recv_io.credits.target, sp->recv_credit_max);
sc->recv_io.credits.target = max_t(u16, sc->recv_io.credits.target, 1);
- ret = smb_direct_send_negotiate_response(sc, ret);
-out:
+put:
spin_lock_irqsave(&sc->recv_io.reassembly.lock, flags);
sc->recv_io.reassembly.queue_length--;
list_del(&recvmsg->list);
spin_unlock_irqrestore(&sc->recv_io.reassembly.lock, flags);
put_recvmsg(sc, recvmsg);
+ if (ret == -ECONNABORTED)
+ return ret;
+
+ if (ret)
+ goto respond;
+
+ /*
+ * We negotiated with success, so we need to refill the recv queue.
+ * We do that with sc->idle.immediate_work still being disabled
+ * via smbdirect_socket_init(), so that queue_work(sc->workqueue,
+ * &sc->idle.immediate_work) in smb_direct_post_recv_credits()
+ * is a no-op.
+ *
+ * The message that grants the credits to the client is
+ * the negotiate response.
+ */
+ INIT_WORK(&sc->recv_io.posted.refill_work, smb_direct_post_recv_credits);
+ smb_direct_post_recv_credits(&sc->recv_io.posted.refill_work);
+ if (unlikely(sc->first_error))
+ return sc->first_error;
+ INIT_WORK(&sc->idle.immediate_work, smb_direct_send_immediate_work);
+
+respond:
+ ret = smb_direct_send_negotiate_response(sc, ret);
+
return ret;
}
static int smb_direct_connect(struct smbdirect_socket *sc)
{
- struct ib_qp_cap qp_cap;
int ret;
- ret = smb_direct_init_params(sc, &qp_cap);
+ ret = smb_direct_init_params(sc);
if (ret) {
pr_err("Can't configure RDMA parameters\n");
return ret;
@@ -2198,7 +2317,7 @@ static int smb_direct_connect(struct smbdirect_socket *sc)
return ret;
}
- ret = smb_direct_create_qpair(sc, &qp_cap);
+ ret = smb_direct_create_qpair(sc);
if (ret) {
pr_err("Can't accept RDMA client: %d\n", ret);
return ret;
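smb_direct_rdma_rw_send_wrs() predicts how many extra send WRs rdma_rw_init_qp() will add: one READ/WRITE WR per RDMA R/W context, plus REG and INV WRs when the device requires MRs. A small worked check of that arithmetic (the numbers are invented for illustration):

#include <stdio.h>

int main(void)
{
	unsigned send_credit_target = 255;
	unsigned max_rdma_ctxs = 16;   /* sc->rw_io.credits.max */
	int needs_mrs = 1;             /* e.g. iWARP or max_sgl_rd set */

	unsigned factor = 1 + (needs_mrs ? 2 : 0); /* R/W + reg + inv */
	unsigned rdma_send_wr = factor * max_rdma_ctxs;
	/* +1 for ib_drain_qp(), as in the qp_cap setup above */
	unsigned max_send_wr = (send_credit_target + 1) + rdma_send_wr;

	printf("rdma_send_wr=%u max_send_wr=%u\n", rdma_send_wr, max_send_wr);
	return 0;
}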
diff --git a/fs/sysfs/group.c b/fs/sysfs/group.c
index 2d78e94072a0..e142bac4f9f8 100644
--- a/fs/sysfs/group.c
+++ b/fs/sysfs/group.c
@@ -498,17 +498,26 @@ int compat_only_sysfs_link_entry_to_kobj(struct kobject *kobj,
}
EXPORT_SYMBOL_GPL(compat_only_sysfs_link_entry_to_kobj);
-static int sysfs_group_attrs_change_owner(struct kernfs_node *grp_kn,
+static int sysfs_group_attrs_change_owner(struct kobject *kobj,
+ struct kernfs_node *grp_kn,
const struct attribute_group *grp,
struct iattr *newattrs)
{
struct kernfs_node *kn;
- int error;
+ int error, i;
+ umode_t mode;
if (grp->attrs) {
struct attribute *const *attr;
- for (attr = grp->attrs; *attr; attr++) {
+ for (i = 0, attr = grp->attrs; *attr; i++, attr++) {
+ if (grp->is_visible) {
+ mode = grp->is_visible(kobj, *attr, i);
+ if (mode & SYSFS_GROUP_INVISIBLE)
+ break;
+ if (!mode)
+ continue;
+ }
kn = kernfs_find_and_get(grp_kn, (*attr)->name);
if (!kn)
return -ENOENT;
@@ -523,7 +532,14 @@ static int sysfs_group_attrs_change_owner(struct kernfs_node *grp_kn,
if (grp->bin_attrs) {
const struct bin_attribute *const *bin_attr;
- for (bin_attr = grp->bin_attrs; *bin_attr; bin_attr++) {
+ for (i = 0, bin_attr = grp->bin_attrs; *bin_attr; i++, bin_attr++) {
+ if (grp->is_bin_visible) {
+ mode = grp->is_bin_visible(kobj, *bin_attr, i);
+ if (mode & SYSFS_GROUP_INVISIBLE)
+ break;
+ if (!mode)
+ continue;
+ }
kn = kernfs_find_and_get(grp_kn, (*bin_attr)->attr.name);
if (!kn)
return -ENOENT;
@@ -573,7 +589,7 @@ int sysfs_group_change_owner(struct kobject *kobj,
error = kernfs_setattr(grp_kn, &newattrs);
if (!error)
- error = sysfs_group_attrs_change_owner(grp_kn, grp, &newattrs);
+ error = sysfs_group_attrs_change_owner(kobj, grp_kn, grp, &newattrs);
kernfs_put(grp_kn);
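The ownership walk now consults the group's is_visible()/is_bin_visible() callbacks, skipping hidden attributes and aborting the group when SYSFS_GROUP_INVISIBLE is returned. A condensed stand-alone model of the loop shape (the flag value and callback here are illustrative stand-ins for the sysfs definitions):

#include <stdio.h>

#define SYSFS_GROUP_INVISIBLE 020 /* illustrative; see include/linux/sysfs.h */

struct attr { const char *name; };

/* 0 = hidden, mode bits = visible; the group bit aborts the whole walk. */
static unsigned short is_visible(const struct attr *a, int i)
{
	return i == 1 ? 0 : 0644; /* hide the second attribute, say */
}

int main(void)
{
	struct attr attrs[] = { { "one" }, { "two" }, { "three" }, { NULL } };

	for (int i = 0; attrs[i].name; i++) {
		unsigned short mode = is_visible(&attrs[i], i);

		if (mode & SYSFS_GROUP_INVISIBLE)
			break;          /* whole group hidden */
		if (!mode)
			continue;       /* this attribute hidden */
		printf("chown %s\n", attrs[i].name);
	}
	return 0;
}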
diff --git a/fs/xfs/Kconfig b/fs/xfs/Kconfig
index 8930d5254e1d..b99da294e9a3 100644
--- a/fs/xfs/Kconfig
+++ b/fs/xfs/Kconfig
@@ -119,6 +119,15 @@ config XFS_RT
See the xfs man page in section 5 for additional information.
+ This option is mandatory to support zoned block devices. For these
+ devices, the realtime subvolume must be backed by a zoned block
+ device and a regular block device used as the main device (for
+ metadata). If the zoned block device is a host-managed SMR hard-disk
+ containing conventional zones at the beginning of its address space,
+ XFS will use the disk conventional zones as the main device and the
+ remaining sequential write required zones as the backing storage for
+ the realtime subvolume.
+
If unsure, say N.
config XFS_DRAIN_INTENTS
@@ -156,7 +165,7 @@ config XFS_ONLINE_SCRUB_STATS
bool "XFS online metadata check usage data collection"
default y
depends on XFS_ONLINE_SCRUB
- select DEBUG_FS
+ depends on DEBUG_FS
help
If you say Y here, the kernel will gather usage data about
the online metadata check subsystem. This includes the number
diff --git a/fs/xfs/libxfs/xfs_rtgroup.h b/fs/xfs/libxfs/xfs_rtgroup.h
index d36a6ae0abe5..d4fcf591e63d 100644
--- a/fs/xfs/libxfs/xfs_rtgroup.h
+++ b/fs/xfs/libxfs/xfs_rtgroup.h
@@ -50,6 +50,12 @@ struct xfs_rtgroup {
uint8_t *rtg_rsum_cache;
struct xfs_open_zone *rtg_open_zone;
};
+
+ /*
+ * Count of outstanding GC operations for zoned XFS. Any RTG with a
+ * non-zero rtg_gccount will not be picked as new GC victim.
+ */
+ atomic_t rtg_gccount;
};
/*
diff --git a/fs/xfs/scrub/nlinks.c b/fs/xfs/scrub/nlinks.c
index 26721fab5cab..091c79e432e5 100644
--- a/fs/xfs/scrub/nlinks.c
+++ b/fs/xfs/scrub/nlinks.c
@@ -376,6 +376,36 @@ out_incomplete:
return error;
}
+static uint
+xchk_nlinks_ilock_dir(
+ struct xfs_inode *ip)
+{
+ uint lock_mode = XFS_ILOCK_SHARED;
+
+ /*
+ * We're going to scan the directory entries, so we must be ready to
+ * pull the data fork mappings into memory if they aren't already.
+ */
+ if (xfs_need_iread_extents(&ip->i_df))
+ lock_mode = XFS_ILOCK_EXCL;
+
+ /*
+ * We're going to scan the parent pointers, so we must be ready to
+ * pull the attr fork mappings into memory if they aren't already.
+ */
+ if (xfs_has_parent(ip->i_mount) && xfs_inode_has_attr_fork(ip) &&
+ xfs_need_iread_extents(&ip->i_af))
+ lock_mode = XFS_ILOCK_EXCL;
+
+ /*
+ * Take the IOLOCK so that other threads cannot start a directory
+ * update while we're scanning.
+ */
+ lock_mode |= XFS_IOLOCK_SHARED;
+ xfs_ilock(ip, lock_mode);
+ return lock_mode;
+}
+
/* Walk a directory to bump the observed link counts of the children. */
STATIC int
xchk_nlinks_collect_dir(
@@ -394,8 +424,7 @@ xchk_nlinks_collect_dir(
return 0;
/* Prevent anyone from changing this directory while we walk it. */
- xfs_ilock(dp, XFS_IOLOCK_SHARED);
- lock_mode = xfs_ilock_data_map_shared(dp);
+ lock_mode = xchk_nlinks_ilock_dir(dp);
/*
* The dotdot entry of an unlinked directory still points to the last
@@ -452,7 +481,6 @@ out_abort:
xchk_iscan_abort(&xnc->collect_iscan);
out_unlock:
xfs_iunlock(dp, lock_mode);
- xfs_iunlock(dp, XFS_IOLOCK_SHARED);
return error;
}
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index 773d959965dc..47edf3041631 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -1751,7 +1751,7 @@ xfs_init_buftarg(
const char *descr)
{
/* The maximum size of the buftarg is only known once the sb is read. */
- btp->bt_nr_sectors = (xfs_daddr_t)-1;
+ btp->bt_nr_sectors = XFS_BUF_DADDR_MAX;
/* Set up device logical sector size mask */
btp->bt_logical_sectorsize = logical_sectorsize;
diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h
index 8fa7bdf59c91..e25cd2a160f3 100644
--- a/fs/xfs/xfs_buf.h
+++ b/fs/xfs/xfs_buf.h
@@ -22,6 +22,7 @@ extern struct kmem_cache *xfs_buf_cache;
*/
struct xfs_buf;
+#define XFS_BUF_DADDR_MAX ((xfs_daddr_t) S64_MAX)
#define XFS_BUF_DADDR_NULL ((xfs_daddr_t) (-1LL))
#define XBF_READ (1u << 0) /* buffer intended for reading from device */
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index f046d1215b04..b871dfde372b 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -236,7 +236,6 @@ typedef struct xfs_mount {
bool m_update_sb; /* sb needs update in mount */
unsigned int m_max_open_zones;
unsigned int m_zonegc_low_space;
- struct xfs_mru_cache *m_zone_cache; /* Inode to open zone cache */
/* max_atomic_write mount option value */
unsigned long long m_awu_max_bytes;
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index e85a156dc17d..1067ebb3b001 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -102,7 +102,7 @@ static const struct constant_table dax_param_enums[] = {
* Table driven mount option parser.
*/
enum {
- Opt_logbufs, Opt_logbsize, Opt_logdev, Opt_rtdev,
+ Op_deprecated, Opt_logbufs, Opt_logbsize, Opt_logdev, Opt_rtdev,
Opt_wsync, Opt_noalign, Opt_swalloc, Opt_sunit, Opt_swidth, Opt_nouuid,
Opt_grpid, Opt_nogrpid, Opt_bsdgroups, Opt_sysvgroups,
Opt_allocsize, Opt_norecovery, Opt_inode64, Opt_inode32,
@@ -114,7 +114,21 @@ enum {
Opt_lifetime, Opt_nolifetime, Opt_max_atomic_write,
};
+#define fsparam_dead(NAME) \
+ __fsparam(NULL, (NAME), Op_deprecated, fs_param_deprecated, NULL)
+
static const struct fs_parameter_spec xfs_fs_parameters[] = {
+ /*
+ * These mount options were supposed to be deprecated in September 2025
+ * but the deprecation warning was buggy, so not all users were
+ * notified. The deprecation is now obnoxiously loud and postponed to
+ * September 2030.
+ */
+ fsparam_dead("attr2"),
+ fsparam_dead("noattr2"),
+ fsparam_dead("ikeep"),
+ fsparam_dead("noikeep"),
+
fsparam_u32("logbufs", Opt_logbufs),
fsparam_string("logbsize", Opt_logbsize),
fsparam_string("logdev", Opt_logdev),
@@ -786,6 +800,12 @@ xfs_fs_evict_inode(
truncate_inode_pages_final(&inode->i_data);
clear_inode(inode);
+
+ if (IS_ENABLED(CONFIG_XFS_RT) &&
+ S_ISREG(inode->i_mode) && inode->i_private) {
+ xfs_open_zone_put(inode->i_private);
+ inode->i_private = NULL;
+ }
}
static void
@@ -1373,16 +1393,25 @@ suffix_kstrtoull(
static inline void
xfs_fs_warn_deprecated(
struct fs_context *fc,
- struct fs_parameter *param,
- uint64_t flag,
- bool value)
+ struct fs_parameter *param)
{
- /* Don't print the warning if reconfiguring and current mount point
- * already had the flag set
+ /*
+ * Always warn about someone passing in a deprecated mount option.
+ * Previously we wouldn't print the warning if we were reconfiguring
+ * and current mount point already had the flag set, but that was not
+ * the right thing to do.
+ *
+ * Many distributions mount the root filesystem with no options in the
+ * initramfs and rely on mount -a to remount the root fs with the
+ * options in fstab. However, the old behavior meant that there would
+ * never be a warning about deprecated mount options for the root fs in
+ * /etc/fstab. On a single-fs system, that means no warning at all.
+ *
+ * Compounding this problem are distribution scripts that copy
+ * /proc/mounts to fstab, which means that we can't remove mount
+ * options unless we're 100% sure they have only ever been advertised
+ * in /proc/mounts in response to explicitly provided mount options.
*/
- if ((fc->purpose & FS_CONTEXT_FOR_RECONFIGURE) &&
- !!(XFS_M(fc->root->d_sb)->m_features & flag) == value)
- return;
xfs_warn(fc->s_fs_info, "%s mount option is deprecated.", param->key);
}
@@ -1408,6 +1437,9 @@ xfs_fs_parse_param(
return opt;
switch (opt) {
+ case Op_deprecated:
+ xfs_fs_warn_deprecated(fc, param);
+ return 0;
case Opt_logbufs:
parsing_mp->m_logbufs = result.uint_32;
return 0;
@@ -1528,7 +1560,6 @@ xfs_fs_parse_param(
xfs_mount_set_dax_mode(parsing_mp, result.uint_32);
return 0;
#endif
- /* Following mount options will be removed in September 2025 */
case Opt_max_open_zones:
parsing_mp->m_max_open_zones = result.uint_32;
return 0;
@@ -2221,7 +2252,7 @@ xfs_init_fs_context(
struct xfs_mount *mp;
int i;
- mp = kzalloc(sizeof(struct xfs_mount), GFP_KERNEL | __GFP_NOFAIL);
+ mp = kzalloc(sizeof(struct xfs_mount), GFP_KERNEL);
if (!mp)
return -ENOMEM;
diff --git a/fs/xfs/xfs_zone_alloc.c b/fs/xfs/xfs_zone_alloc.c
index 1147bacb2da8..040402240807 100644
--- a/fs/xfs/xfs_zone_alloc.c
+++ b/fs/xfs/xfs_zone_alloc.c
@@ -26,14 +26,22 @@
#include "xfs_trace.h"
#include "xfs_mru_cache.h"
+static void
+xfs_open_zone_free_rcu(
+ struct callback_head *cb)
+{
+ struct xfs_open_zone *oz = container_of(cb, typeof(*oz), oz_rcu);
+
+ xfs_rtgroup_rele(oz->oz_rtg);
+ kfree(oz);
+}
+
void
xfs_open_zone_put(
struct xfs_open_zone *oz)
{
- if (atomic_dec_and_test(&oz->oz_ref)) {
- xfs_rtgroup_rele(oz->oz_rtg);
- kfree(oz);
- }
+ if (atomic_dec_and_test(&oz->oz_ref))
+ call_rcu(&oz->oz_rcu, xfs_open_zone_free_rcu);
}
static inline uint32_t
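Deferring the final free through call_rcu() lets lockless readers in xfs_get_cached_zone() dereference i_private without risking use-after-free. A user-space model of the put path, with the RCU grace period elided and the callback invoked directly (illustrative only):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct open_zone {
	atomic_int ref;
};

/* Stands in for the RCU callback; a real grace period is elided here. */
static void zone_free_cb(struct open_zone *oz)
{
	free(oz);
}

static void zone_put(struct open_zone *oz)
{
	/* The last reference schedules the deferred free (call_rcu in XFS). */
	if (atomic_fetch_sub(&oz->ref, 1) == 1)
		zone_free_cb(oz);
}

int main(void)
{
	struct open_zone *oz = malloc(sizeof(*oz));

	atomic_store(&oz->ref, 2);
	zone_put(oz);
	zone_put(oz); /* final put frees via the callback */
	return 0;
}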
@@ -238,6 +246,14 @@ xfs_zoned_map_extent(
* If a data write raced with this GC write, keep the existing data in
* the data fork, mark our newly written GC extent as reclaimable, then
* move on to the next extent.
+ *
+ * Note that this can also happen when racing with operations that do
+ * not actually invalidate the data, but just move it to a different
+ * inode (XFS_IOC_EXCHANGE_RANGE), or to a different offset inside the
+ * inode (FALLOC_FL_COLLAPSE_RANGE / FALLOC_FL_INSERT_RANGE). If the
+ * data was just moved around, GC fails to free the zone, but the zone
+ * becomes a GC candidate again as soon as all previous GC I/O has
+ * finished and these blocks will be moved out eventually.
*/
if (old_startblock != NULLFSBLOCK &&
old_startblock != data.br_startblock)
@@ -614,14 +630,25 @@ static inline enum rw_hint xfs_inode_write_hint(struct xfs_inode *ip)
}
/*
- * Try to pack inodes that are written back after they were closed tight instead
- * of trying to open new zones for them or spread them to the least recently
- * used zone. This optimizes the data layout for workloads that untar or copy
- * a lot of small files. Right now this does not separate multiple such
+ * Try to tightly pack small files that are written back after they were closed
+ * instead of trying to open new zones for them or spread them to the least
+ * recently used zone. This optimizes the data layout for workloads that untar
+ * or copy a lot of small files. Right now this does not separate multiple such
* streams.
*/
static inline bool xfs_zoned_pack_tight(struct xfs_inode *ip)
{
+ struct xfs_mount *mp = ip->i_mount;
+ size_t zone_capacity =
+ XFS_FSB_TO_B(mp, mp->m_groups[XG_TYPE_RTG].blocks);
+
+ /*
+ * Do not pack writes to files that already use a full zone, to avoid
+ * fragmentation.
+ */
+ if (i_size_read(VFS_I(ip)) >= zone_capacity)
+ return false;
+
return !inode_is_open_for_write(VFS_I(ip)) &&
!(ip->i_diflags & XFS_DIFLAG_APPEND);
}
@@ -746,97 +773,54 @@ xfs_mark_rtg_boundary(
}
/*
- * Cache the last zone written to for an inode so that it is considered first
- * for subsequent writes.
- */
-struct xfs_zone_cache_item {
- struct xfs_mru_cache_elem mru;
- struct xfs_open_zone *oz;
-};
-
-static inline struct xfs_zone_cache_item *
-xfs_zone_cache_item(struct xfs_mru_cache_elem *mru)
-{
- return container_of(mru, struct xfs_zone_cache_item, mru);
-}
-
-static void
-xfs_zone_cache_free_func(
- void *data,
- struct xfs_mru_cache_elem *mru)
-{
- struct xfs_zone_cache_item *item = xfs_zone_cache_item(mru);
-
- xfs_open_zone_put(item->oz);
- kfree(item);
-}
-
-/*
* Check if we have a cached last open zone available for the inode and
* if yes return a reference to it.
*/
static struct xfs_open_zone *
-xfs_cached_zone(
- struct xfs_mount *mp,
- struct xfs_inode *ip)
+xfs_get_cached_zone(
+ struct xfs_inode *ip)
{
- struct xfs_mru_cache_elem *mru;
- struct xfs_open_zone *oz;
+ struct xfs_open_zone *oz;
- mru = xfs_mru_cache_lookup(mp->m_zone_cache, ip->i_ino);
- if (!mru)
- return NULL;
- oz = xfs_zone_cache_item(mru)->oz;
+ rcu_read_lock();
+ oz = VFS_I(ip)->i_private;
if (oz) {
/*
* GC only steals open zones at mount time, so no GC zones
* should end up in the cache.
*/
ASSERT(!oz->oz_is_gc);
- ASSERT(atomic_read(&oz->oz_ref) > 0);
- atomic_inc(&oz->oz_ref);
+ if (!atomic_inc_not_zero(&oz->oz_ref))
+ oz = NULL;
}
- xfs_mru_cache_done(mp->m_zone_cache);
+ rcu_read_unlock();
+
return oz;
}
/*
- * Update the last used zone cache for a given inode.
+ * Stash our zone in the inode so that it is reused for future allocations.
*
- * The caller must have a reference on the open zone.
+ * The open_zone structure will be pinned until either the inode is freed or
+ * the cached open zone is replaced with a different one because the current
+ * one was full when we tried to use it.  This means we keep an open zone
+ * around as long as any inode whose last write used it is cached, which
+ * slightly increases the memory use of cached inodes that were ever written
+ * to, but significantly simplifies the cached zone lookup.  Because the
+ * open_zone is clearly marked as full when all data in the underlying RTG
+ * was written, the caching is always safe.
*/
static void
-xfs_zone_cache_create_association(
- struct xfs_inode *ip,
- struct xfs_open_zone *oz)
+xfs_set_cached_zone(
+ struct xfs_inode *ip,
+ struct xfs_open_zone *oz)
{
- struct xfs_mount *mp = ip->i_mount;
- struct xfs_zone_cache_item *item = NULL;
- struct xfs_mru_cache_elem *mru;
+ struct xfs_open_zone *old_oz;
- ASSERT(atomic_read(&oz->oz_ref) > 0);
atomic_inc(&oz->oz_ref);
-
- mru = xfs_mru_cache_lookup(mp->m_zone_cache, ip->i_ino);
- if (mru) {
- /*
- * If we have an association already, update it to point to the
- * new zone.
- */
- item = xfs_zone_cache_item(mru);
- xfs_open_zone_put(item->oz);
- item->oz = oz;
- xfs_mru_cache_done(mp->m_zone_cache);
- return;
- }
-
- item = kmalloc(sizeof(*item), GFP_KERNEL);
- if (!item) {
- xfs_open_zone_put(oz);
- return;
- }
- item->oz = oz;
- xfs_mru_cache_insert(mp->m_zone_cache, ip->i_ino, &item->mru);
+ old_oz = xchg(&VFS_I(ip)->i_private, oz);
+ if (old_oz)
+ xfs_open_zone_put(old_oz);
}
static void
@@ -880,15 +864,14 @@ xfs_zone_alloc_and_submit(
* the inode is still associated with a zone and use that if so.
*/
if (!*oz)
- *oz = xfs_cached_zone(mp, ip);
+ *oz = xfs_get_cached_zone(ip);
if (!*oz) {
select_zone:
*oz = xfs_select_zone(mp, write_hint, pack_tight);
if (!*oz)
goto out_error;
-
- xfs_zone_cache_create_association(ip, *oz);
+ xfs_set_cached_zone(ip, *oz);
}
alloc_len = xfs_zone_alloc_blocks(*oz, XFS_B_TO_FSB(mp, ioend->io_size),
@@ -966,6 +949,12 @@ xfs_free_open_zones(
xfs_open_zone_put(oz);
}
spin_unlock(&zi->zi_open_zones_lock);
+
+ /*
+ * Wait for all open zones to be freed so that they drop the group
+	 * references.
+ */
+ rcu_barrier();
}
struct xfs_init_zones {
@@ -1279,14 +1268,6 @@ xfs_mount_zones(
error = xfs_zone_gc_mount(mp);
if (error)
goto out_free_zone_info;
-
- /*
- * Set up a mru cache to track inode to open zone for data placement
- * purposes. The magic values for group count and life time is the
- * same as the defaults for file streams, which seems sane enough.
- */
- xfs_mru_cache_create(&mp->m_zone_cache, mp,
- 5000, 10, xfs_zone_cache_free_func);
return 0;
out_free_zone_info:
@@ -1300,5 +1281,4 @@ xfs_unmount_zones(
{
xfs_zone_gc_unmount(mp);
xfs_free_zone_info(mp->m_zone_info);
- xfs_mru_cache_destroy(mp->m_zone_cache);
}
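The MRU-cache removal above works only because xfs_open_zone_put() now defers the free through call_rcu(): a reader under rcu_read_lock() may observe a zone whose last reference is concurrently being dropped, so it must take a reference only if the count is still non-zero. A minimal userspace sketch of that increment-if-not-zero step, using C11 atomics (the harness and names are illustrative, not the kernel's refcount API):

	#include <stdatomic.h>
	#include <stdbool.h>

	/* Mirrors the atomic_inc_not_zero() call in xfs_get_cached_zone():
	 * a zero refcount means the object is already on its way to being
	 * freed via RCU, so the cached pointer must be treated as dead. */
	static bool refcount_inc_not_zero(atomic_int *ref)
	{
		int old = atomic_load(ref);

		while (old != 0) {
			/* On failure, 'old' is reloaded with the current value. */
			if (atomic_compare_exchange_weak(ref, &old, old + 1))
				return true;
		}
		return false;
	}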
diff --git a/fs/xfs/xfs_zone_gc.c b/fs/xfs/xfs_zone_gc.c
index 064cd1a857a0..4ade54445532 100644
--- a/fs/xfs/xfs_zone_gc.c
+++ b/fs/xfs/xfs_zone_gc.c
@@ -114,6 +114,8 @@ struct xfs_gc_bio {
/* Open Zone being written to */
struct xfs_open_zone *oz;
+ struct xfs_rtgroup *victim_rtg;
+
/* Bio used for reads and writes, including the bvec used by it */
struct bio_vec bv;
struct bio bio; /* must be last */
@@ -264,6 +266,7 @@ xfs_zone_gc_iter_init(
iter->rec_count = 0;
iter->rec_idx = 0;
iter->victim_rtg = victim_rtg;
+ atomic_inc(&victim_rtg->rtg_gccount);
}
/*
@@ -362,6 +365,7 @@ xfs_zone_gc_query(
return 0;
done:
+ atomic_dec(&iter->victim_rtg->rtg_gccount);
xfs_rtgroup_rele(iter->victim_rtg);
iter->victim_rtg = NULL;
return 0;
@@ -451,6 +455,20 @@ xfs_zone_gc_pick_victim_from(
if (!rtg)
continue;
+ /*
+ * If the zone is already undergoing GC, don't pick it again.
+ *
+ * This prevents us from picking one of the zones for which we
+ * already submitted GC I/O, but for which the remapping hasn't
+ * concluded yet. This won't cause data corruption, but
+ * increases write amplification and slows down GC, so this is
+ * a bad thing.
+ */
+ if (atomic_read(&rtg->rtg_gccount)) {
+ xfs_rtgroup_rele(rtg);
+ continue;
+ }
+
/* skip zones that are just waiting for a reset */
if (rtg_rmap(rtg)->i_used_blocks == 0 ||
rtg_rmap(rtg)->i_used_blocks >= victim_used) {
@@ -491,21 +509,6 @@ xfs_zone_gc_select_victim(
struct xfs_rtgroup *victim_rtg = NULL;
unsigned int bucket;
- if (xfs_is_shutdown(mp))
- return false;
-
- if (iter->victim_rtg)
- return true;
-
- /*
- * Don't start new work if we are asked to stop or park.
- */
- if (kthread_should_stop() || kthread_should_park())
- return false;
-
- if (!xfs_zoned_need_gc(mp))
- return false;
-
spin_lock(&zi->zi_used_buckets_lock);
for (bucket = 0; bucket < XFS_ZONE_USED_BUCKETS; bucket++) {
victim_rtg = xfs_zone_gc_pick_victim_from(mp, bucket);
@@ -703,6 +706,9 @@ xfs_zone_gc_start_chunk(
chunk->scratch = &data->scratch[data->scratch_idx];
chunk->data = data;
chunk->oz = oz;
+ chunk->victim_rtg = iter->victim_rtg;
+ atomic_inc(&chunk->victim_rtg->rtg_group.xg_active_ref);
+ atomic_inc(&chunk->victim_rtg->rtg_gccount);
bio->bi_iter.bi_sector = xfs_rtb_to_daddr(mp, chunk->old_startblock);
bio->bi_end_io = xfs_zone_gc_end_io;
@@ -725,6 +731,8 @@ static void
xfs_zone_gc_free_chunk(
struct xfs_gc_bio *chunk)
{
+ atomic_dec(&chunk->victim_rtg->rtg_gccount);
+ xfs_rtgroup_rele(chunk->victim_rtg);
list_del(&chunk->entry);
xfs_open_zone_put(chunk->oz);
xfs_irele(chunk->ip);
@@ -785,6 +793,10 @@ xfs_zone_gc_split_write(
split_chunk->oz = chunk->oz;
atomic_inc(&chunk->oz->oz_ref);
+ split_chunk->victim_rtg = chunk->victim_rtg;
+ atomic_inc(&chunk->victim_rtg->rtg_group.xg_active_ref);
+ atomic_inc(&chunk->victim_rtg->rtg_gccount);
+
chunk->offset += split_len;
chunk->len -= split_len;
chunk->old_startblock += XFS_B_TO_FSB(data->mp, split_len);
@@ -975,6 +987,27 @@ xfs_zone_gc_reset_zones(
} while (next);
}
+static bool
+xfs_zone_gc_should_start_new_work(
+ struct xfs_zone_gc_data *data)
+{
+ if (xfs_is_shutdown(data->mp))
+ return false;
+ if (!xfs_zone_gc_space_available(data))
+ return false;
+
+ if (!data->iter.victim_rtg) {
+ if (kthread_should_stop() || kthread_should_park())
+ return false;
+ if (!xfs_zoned_need_gc(data->mp))
+ return false;
+ if (!xfs_zone_gc_select_victim(data))
+ return false;
+ }
+
+ return true;
+}
+
/*
* Handle the work to read and write data for GC and to reset the zones,
* including handling all completions.
@@ -982,7 +1015,7 @@ xfs_zone_gc_reset_zones(
* Note that the order of the chunks is preserved so that we don't undo the
* optimal order established by xfs_zone_gc_query().
*/
-static bool
+static void
xfs_zone_gc_handle_work(
struct xfs_zone_gc_data *data)
{
@@ -996,30 +1029,22 @@ xfs_zone_gc_handle_work(
zi->zi_reset_list = NULL;
spin_unlock(&zi->zi_reset_list_lock);
- if (!xfs_zone_gc_select_victim(data) ||
- !xfs_zone_gc_space_available(data)) {
- if (list_empty(&data->reading) &&
- list_empty(&data->writing) &&
- list_empty(&data->resetting) &&
- !reset_list)
- return false;
- }
-
- __set_current_state(TASK_RUNNING);
- try_to_freeze();
-
- if (reset_list)
+ if (reset_list) {
+ set_current_state(TASK_RUNNING);
xfs_zone_gc_reset_zones(data, reset_list);
+ }
list_for_each_entry_safe(chunk, next, &data->resetting, entry) {
if (READ_ONCE(chunk->state) != XFS_GC_BIO_DONE)
break;
+ set_current_state(TASK_RUNNING);
xfs_zone_gc_finish_reset(chunk);
}
list_for_each_entry_safe(chunk, next, &data->writing, entry) {
if (READ_ONCE(chunk->state) != XFS_GC_BIO_DONE)
break;
+ set_current_state(TASK_RUNNING);
xfs_zone_gc_finish_chunk(chunk);
}
@@ -1027,15 +1052,18 @@ xfs_zone_gc_handle_work(
list_for_each_entry_safe(chunk, next, &data->reading, entry) {
if (READ_ONCE(chunk->state) != XFS_GC_BIO_DONE)
break;
+ set_current_state(TASK_RUNNING);
xfs_zone_gc_write_chunk(chunk);
}
blk_finish_plug(&plug);
- blk_start_plug(&plug);
- while (xfs_zone_gc_start_chunk(data))
- ;
- blk_finish_plug(&plug);
- return true;
+ if (xfs_zone_gc_should_start_new_work(data)) {
+ set_current_state(TASK_RUNNING);
+ blk_start_plug(&plug);
+ while (xfs_zone_gc_start_chunk(data))
+ ;
+ blk_finish_plug(&plug);
+ }
}
/*
@@ -1059,8 +1087,18 @@ xfs_zoned_gcd(
for (;;) {
set_current_state(TASK_INTERRUPTIBLE | TASK_FREEZABLE);
xfs_set_zonegc_running(mp);
- if (xfs_zone_gc_handle_work(data))
+
+ xfs_zone_gc_handle_work(data);
+
+ /*
+	 * Only sleep if nothing set the state to running.  Otherwise check
+	 * for work again, as someone might have queued up more work and
+	 * woken us in the meantime.
+ */
+ if (get_current_state() == TASK_RUNNING) {
+ try_to_freeze();
continue;
+ }
if (list_empty(&data->reading) &&
list_empty(&data->writing) &&
diff --git a/fs/xfs/xfs_zone_priv.h b/fs/xfs/xfs_zone_priv.h
index 35e6de3d25ed..4322e26dd99a 100644
--- a/fs/xfs/xfs_zone_priv.h
+++ b/fs/xfs/xfs_zone_priv.h
@@ -44,6 +44,8 @@ struct xfs_open_zone {
* the life time of an open zone.
*/
struct xfs_rtgroup *oz_rtg;
+
+ struct rcu_head oz_rcu;
};
/*
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 8a9a2e732a65..e04d56a5332e 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -832,7 +832,7 @@ defined(CONFIG_AUTOFDO_CLANG) || defined(CONFIG_PROPELLER_CLANG)
/* Required sections not related to debugging. */
#define ELF_DETAILS \
- .modinfo : { *(.modinfo) } \
+ .modinfo : { *(.modinfo) . = ALIGN(8); } \
.comment 0 : { *(.comment) } \
.symtab 0 : { *(.symtab) } \
.strtab 0 : { *(.strtab) } \
diff --git a/include/linux/arm_ffa.h b/include/linux/arm_ffa.h
index cd7ee4df9045..81e603839c4a 100644
--- a/include/linux/arm_ffa.h
+++ b/include/linux/arm_ffa.h
@@ -338,6 +338,7 @@ struct ffa_mem_region_attributes {
* an `struct ffa_mem_region_addr_range`.
*/
u32 composite_off;
+ u8 impdef_val[16];
u64 reserved;
};
@@ -417,15 +418,31 @@ struct ffa_mem_region {
#define CONSTITUENTS_OFFSET(x) \
(offsetof(struct ffa_composite_mem_region, constituents[x]))
+#define FFA_EMAD_HAS_IMPDEF_FIELD(version) ((version) >= FFA_VERSION_1_2)
+#define FFA_MEM_REGION_HAS_EP_MEM_OFFSET(version) ((version) > FFA_VERSION_1_0)
+
+static inline u32 ffa_emad_size_get(u32 ffa_version)
+{
+ u32 sz;
+ struct ffa_mem_region_attributes *ep_mem_access;
+
+ if (FFA_EMAD_HAS_IMPDEF_FIELD(ffa_version))
+ sz = sizeof(*ep_mem_access);
+ else
+ sz = sizeof(*ep_mem_access) - sizeof(ep_mem_access->impdef_val);
+
+ return sz;
+}
+
static inline u32
ffa_mem_desc_offset(struct ffa_mem_region *buf, int count, u32 ffa_version)
{
- u32 offset = count * sizeof(struct ffa_mem_region_attributes);
+ u32 offset = count * ffa_emad_size_get(ffa_version);
/*
	 * Prior to v1.1, the endpoint memory descriptor array started at
	 * offset 32 (i.e. the offset of ep_mem_offset in the current structure)
*/
- if (ffa_version <= FFA_VERSION_1_0)
+ if (!FFA_MEM_REGION_HAS_EP_MEM_OFFSET(ffa_version))
offset += offsetof(struct ffa_mem_region, ep_mem_offset);
else
offset += sizeof(struct ffa_mem_region);
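Since the implementation-defined field was added to the endpoint descriptor in FF-A v1.2, the descriptor stride on the wire depends on the negotiated version, which is what ffa_emad_size_get() captures. A compilable sketch of the size arithmetic (the field layout here is a stand-in, not the exact spec layout):

	#include <stdint.h>
	#include <stdio.h>

	/* Stand-in for struct ffa_mem_region_attributes, reduced to what the
	 * size computation needs. */
	struct emad_demo {
		uint32_t header[2];	/* receiver/attrs/flag + composite_off */
		uint8_t  impdef_val[16];
		uint64_t reserved;
	};

	static uint32_t emad_size_demo(int has_impdef)
	{
		if (has_impdef)
			return sizeof(struct emad_demo);
		return sizeof(struct emad_demo) -
		       sizeof(((struct emad_demo *)0)->impdef_val);
	}

	int main(void)
	{
		printf("v1.2+: %u bytes, pre-v1.2: %u bytes\n",
		       (unsigned)emad_size_demo(1), (unsigned)emad_size_demo(0));
		return 0;
	}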
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 8e8d1cc8b06c..44c30183ecc3 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -341,15 +341,15 @@ enum req_op {
/* write the zero filled sector many times */
REQ_OP_WRITE_ZEROES = (__force blk_opf_t)9,
/* Open a zone */
- REQ_OP_ZONE_OPEN = (__force blk_opf_t)10,
+ REQ_OP_ZONE_OPEN = (__force blk_opf_t)11,
/* Close a zone */
- REQ_OP_ZONE_CLOSE = (__force blk_opf_t)11,
+ REQ_OP_ZONE_CLOSE = (__force blk_opf_t)13,
/* Transition a zone to full */
- REQ_OP_ZONE_FINISH = (__force blk_opf_t)13,
+ REQ_OP_ZONE_FINISH = (__force blk_opf_t)15,
/* reset a zone write pointer */
- REQ_OP_ZONE_RESET = (__force blk_opf_t)15,
+ REQ_OP_ZONE_RESET = (__force blk_opf_t)17,
/* reset all the zone present on the device */
- REQ_OP_ZONE_RESET_ALL = (__force blk_opf_t)17,
+ REQ_OP_ZONE_RESET_ALL = (__force blk_opf_t)19,
/* Driver private requests */
REQ_OP_DRV_IN = (__force blk_opf_t)34,
@@ -478,6 +478,7 @@ static inline bool op_is_zone_mgmt(enum req_op op)
{
switch (op & REQ_OP_MASK) {
case REQ_OP_ZONE_RESET:
+ case REQ_OP_ZONE_RESET_ALL:
case REQ_OP_ZONE_OPEN:
case REQ_OP_ZONE_CLOSE:
case REQ_OP_ZONE_FINISH:
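The renumbering is not arbitrary: in blk_types.h the low bit of the opcode encodes the data direction, so op_is_write() reduces to testing op & 1, and moving the zone-management opcodes to odd values classifies them as operations that modify the device. A self-contained check of that property (simplified from the kernel's definitions):

	#include <assert.h>

	/* Simplified from blk_types.h: odd opcodes modify the device. */
	static int op_is_write(unsigned int op)
	{
		return op & 1;
	}

	int main(void)
	{
		/* The new REQ_OP_ZONE_* values from the hunk above. */
		unsigned int zone_ops[] = { 11, 13, 15, 17, 19 };

		for (unsigned int i = 0; i < 5; i++)
			assert(op_is_write(zone_ops[i]));
		return 0;
	}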
diff --git a/include/linux/fbcon.h b/include/linux/fbcon.h
index 81f0e698acbf..f206370060e1 100644
--- a/include/linux/fbcon.h
+++ b/include/linux/fbcon.h
@@ -18,6 +18,7 @@ void fbcon_suspended(struct fb_info *info);
void fbcon_resumed(struct fb_info *info);
int fbcon_mode_deleted(struct fb_info *info,
struct fb_videomode *mode);
+void fbcon_delete_modelist(struct list_head *head);
void fbcon_new_modelist(struct fb_info *info);
void fbcon_get_requirement(struct fb_info *info,
struct fb_blit_caps *caps);
@@ -38,6 +39,7 @@ static inline void fbcon_suspended(struct fb_info *info) {}
static inline void fbcon_resumed(struct fb_info *info) {}
static inline int fbcon_mode_deleted(struct fb_info *info,
struct fb_videomode *mode) { return 0; }
+static inline void fbcon_delete_modelist(struct list_head *head) {}
static inline void fbcon_new_modelist(struct fb_info *info) {}
static inline void fbcon_get_requirement(struct fb_info *info,
struct fb_blit_caps *caps) {}
diff --git a/include/linux/gpio/regmap.h b/include/linux/gpio/regmap.h
index 622a2939ebe0..87983a5f3681 100644
--- a/include/linux/gpio/regmap.h
+++ b/include/linux/gpio/regmap.h
@@ -38,6 +38,10 @@ struct regmap;
* offset to a register/bitmask pair. If not
* given the default gpio_regmap_simple_xlate()
* is used.
+ * @fixed_direction_output:
+ * (Optional) Bitmap representing the fixed direction of
+ * the GPIO lines. Useful when there are GPIO lines with a
+ * fixed direction mixed together in the same register.
* @drvdata: (Optional) Pointer to driver specific data which is
 *			not used by gpio-regmap but is provided "as is" to the
* driver callback(s).
@@ -85,6 +89,7 @@ struct gpio_regmap_config {
int reg_stride;
int ngpio_per_reg;
struct irq_domain *irq_domain;
+ unsigned long *fixed_direction_output;
#ifdef CONFIG_REGMAP_IRQ
struct regmap_irq_chip *regmap_irq_chip;
diff --git a/include/linux/hung_task.h b/include/linux/hung_task.h
index 34e615c76ca5..c4403eeb7144 100644
--- a/include/linux/hung_task.h
+++ b/include/linux/hung_task.h
@@ -20,6 +20,10 @@
* always zero. So we can use these bits to encode the specific blocking
* type.
*
+ * Note that on architectures where this is not guaranteed, or for any
+ * unaligned lock, this tracking mechanism is silently skipped for that
+ * lock.
+ *
* Type encoding:
* 00 - Blocked on mutex (BLOCKER_TYPE_MUTEX)
* 01 - Blocked on semaphore (BLOCKER_TYPE_SEM)
@@ -45,7 +49,7 @@ static inline void hung_task_set_blocker(void *lock, unsigned long type)
* If the lock pointer matches the BLOCKER_TYPE_MASK, return
* without writing anything.
*/
- if (WARN_ON_ONCE(lock_ptr & BLOCKER_TYPE_MASK))
+ if (lock_ptr & BLOCKER_TYPE_MASK)
return;
WRITE_ONCE(current->blocker, lock_ptr | type);
@@ -53,8 +57,6 @@ static inline void hung_task_set_blocker(void *lock, unsigned long type)
static inline void hung_task_clear_blocker(void)
{
- WARN_ON_ONCE(!READ_ONCE(current->blocker));
-
WRITE_ONCE(current->blocker, 0UL);
}
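The encoding works because mutex and semaphore pointers are word-aligned, leaving the two low bits free for the blocker type; the change above downgrades the aligned-pointer expectation from a warning to a silent skip. A sketch of the tagged-pointer scheme (constants mirror the type encoding quoted in the comment):

	#include <stdint.h>

	#define BLOCKER_TYPE_MUTEX	0x0UL
	#define BLOCKER_TYPE_SEM	0x1UL
	#define BLOCKER_TYPE_MASK	0x3UL

	/* Returns 0 (not tracked) for an unaligned lock, mirroring the early
	 * return in hung_task_set_blocker() above. */
	static uintptr_t encode_blocker(const void *lock, uintptr_t type)
	{
		uintptr_t p = (uintptr_t)lock;

		if (p & BLOCKER_TYPE_MASK)
			return 0;
		return p | type;
	}

	static const void *blocker_lock(uintptr_t blocker)
	{
		return (const void *)(blocker & ~BLOCKER_TYPE_MASK);
	}

	int main(void)
	{
		long lock;
		uintptr_t b = encode_blocker(&lock, BLOCKER_TYPE_SEM);

		return blocker_lock(b) == (void *)&lock ? 0 : 1;
	}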
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 07614cd95bed..1b0b36aa2a76 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -10833,7 +10833,9 @@ struct mlx5_ifc_pcam_regs_5000_to_507f_bits {
u8 port_access_reg_cap_mask_127_to_96[0x20];
u8 port_access_reg_cap_mask_95_to_64[0x20];
- u8 port_access_reg_cap_mask_63_to_36[0x1c];
+ u8 port_access_reg_cap_mask_63[0x1];
+ u8 pphcr[0x1];
+ u8 port_access_reg_cap_mask_61_to_36[0x1a];
u8 pplm[0x1];
u8 port_access_reg_cap_mask_34_to_32[0x3];
diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
index a3f44f6c2da1..0b436e15f4cd 100644
--- a/include/linux/pm_runtime.h
+++ b/include/linux/pm_runtime.h
@@ -629,13 +629,13 @@ DEFINE_GUARD(pm_runtime_active_auto, struct device *,
* device.
*/
DEFINE_GUARD_COND(pm_runtime_active, _try,
- pm_runtime_get_active(_T, RPM_TRANSPARENT))
+ pm_runtime_get_active(_T, RPM_TRANSPARENT), _RET == 0)
DEFINE_GUARD_COND(pm_runtime_active, _try_enabled,
- pm_runtime_resume_and_get(_T))
+ pm_runtime_resume_and_get(_T), _RET == 0)
DEFINE_GUARD_COND(pm_runtime_active_auto, _try,
- pm_runtime_get_active(_T, RPM_TRANSPARENT))
+ pm_runtime_get_active(_T, RPM_TRANSPARENT), _RET == 0)
DEFINE_GUARD_COND(pm_runtime_active_auto, _try_enabled,
- pm_runtime_resume_and_get(_T))
+ pm_runtime_resume_and_get(_T), _RET == 0)
/**
* pm_runtime_put_sync - Drop device usage counter and run "idle check" if 0.
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index 4e1ac1fbcec4..55343795644b 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -1643,7 +1643,7 @@ struct regmap_irq_chip_data;
* @status_invert: Inverted status register: cleared bits are active interrupts.
 * @status_is_level: Status register is actual signal level: Xor status
* register with previous value to get active interrupts.
- * @wake_invert: Inverted wake register: cleared bits are wake enabled.
+ * @wake_invert: Inverted wake register: cleared bits are wake disabled.
* @type_in_mask: Use the mask registers for controlling irq type. Use this if
* the hardware provides separate bits for rising/falling edge
* or low/high level interrupts and they should be combined into
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index fb3fec9affaa..a7cc3d1f4fd1 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -4204,6 +4204,9 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk,
struct sk_buff_head *sk_queue,
unsigned int flags, int *off, int *err);
struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned int flags, int *err);
+__poll_t datagram_poll_queue(struct file *file, struct socket *sock,
+ struct poll_table_struct *wait,
+ struct sk_buff_head *rcv_queue);
__poll_t datagram_poll(struct file *file, struct socket *sock,
struct poll_table_struct *wait);
int skb_copy_datagram_iter(const struct sk_buff *from, int offset,
diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h
index 20e0584db1dd..4d1780848d0e 100644
--- a/include/linux/virtio_net.h
+++ b/include/linux/virtio_net.h
@@ -401,6 +401,10 @@ virtio_net_hdr_tnl_from_skb(const struct sk_buff *skb,
if (!tnl_hdr_negotiated)
return -EINVAL;
+ vhdr->hash_hdr.hash_value = 0;
+ vhdr->hash_hdr.hash_report = 0;
+ vhdr->hash_hdr.padding = 0;
+
/* Let the basic parsing deal with plain GSO features. */
skb_shinfo(skb)->gso_type &= ~tnl_gso_type;
ret = virtio_net_hdr_from_skb(skb, hdr, true, false, vlan_hlen);
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index 9ecc70baaca9..8d0e703bc929 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -434,6 +434,7 @@ enum {
HCI_USER_CHANNEL,
HCI_EXT_CONFIGURED,
HCI_LE_ADV,
+ HCI_LE_ADV_0,
HCI_LE_PER_ADV,
HCI_LE_SCAN,
HCI_SSP_ENABLED,
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index 2924c2bf2a98..b8100dbfe5d7 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -244,6 +244,7 @@ struct adv_info {
bool enabled;
bool pending;
bool periodic;
+ bool periodic_enabled;
__u8 mesh;
__u8 instance;
__u8 handle;
diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
index 4bb0eaedda18..00e182a22720 100644
--- a/include/net/bluetooth/l2cap.h
+++ b/include/net/bluetooth/l2cap.h
@@ -38,8 +38,8 @@
#define L2CAP_DEFAULT_TX_WINDOW 63
#define L2CAP_DEFAULT_EXT_WINDOW 0x3FFF
#define L2CAP_DEFAULT_MAX_TX 3
-#define L2CAP_DEFAULT_RETRANS_TO 2 /* seconds */
-#define L2CAP_DEFAULT_MONITOR_TO 12 /* seconds */
+#define L2CAP_DEFAULT_RETRANS_TO 2000 /* 2 seconds */
+#define L2CAP_DEFAULT_MONITOR_TO 12000 /* 12 seconds */
#define L2CAP_DEFAULT_MAX_PDU_SIZE 1492 /* Sized for AMP packet */
#define L2CAP_DEFAULT_ACK_TO 200
#define L2CAP_DEFAULT_MAX_SDU_SIZE 0xFFFF
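Assuming these constants feed a millisecond-based timer helper such as msecs_to_jiffies() (not shown in this hunk), the old second-denominated values armed the retransmit and monitor timers a thousand times too short. Quick illustrative arithmetic:

	#include <stdio.h>

	#define HZ 250	/* example tick rate, for illustration only */

	static unsigned long msecs_to_jiffies_demo(unsigned int msecs)
	{
		return ((unsigned long)msecs * HZ + 999) / 1000;
	}

	int main(void)
	{
		/* Old value 2 (intended as seconds) vs. new value 2000 ms. */
		printf("retrans: old %lu jiffies, new %lu jiffies\n",
		       msecs_to_jiffies_demo(2), msecs_to_jiffies_demo(2000));
		return 0;
	}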
diff --git a/include/net/bluetooth/mgmt.h b/include/net/bluetooth/mgmt.h
index 74edea06985b..bca0333f1e99 100644
--- a/include/net/bluetooth/mgmt.h
+++ b/include/net/bluetooth/mgmt.h
@@ -853,7 +853,7 @@ struct mgmt_cp_set_mesh {
__le16 window;
__le16 period;
__u8 num_ad_types;
- __u8 ad_types[];
+ __u8 ad_types[] __counted_by(num_ad_types);
} __packed;
#define MGMT_SET_MESH_RECEIVER_SIZE 6
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 5ca230ed526a..ab20f549b8f9 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -370,7 +370,7 @@ void tcp_delack_timer_handler(struct sock *sk);
int tcp_ioctl(struct sock *sk, int cmd, int *karg);
enum skb_drop_reason tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb);
void tcp_rcv_established(struct sock *sk, struct sk_buff *skb);
-void tcp_rcvbuf_grow(struct sock *sk);
+void tcp_rcvbuf_grow(struct sock *sk, u32 newval);
void tcp_rcv_space_adjust(struct sock *sk);
int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);
void tcp_twsk_destructor(struct sock *sk);
diff --git a/include/net/tls.h b/include/net/tls.h
index 857340338b69..c7bcdb3afad7 100644
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@ -451,25 +451,26 @@ static inline void tls_offload_rx_resync_request(struct sock *sk, __be32 seq)
/* Log all TLS record header TCP sequences in [seq, seq+len] */
static inline void
-tls_offload_rx_resync_async_request_start(struct sock *sk, __be32 seq, u16 len)
+tls_offload_rx_resync_async_request_start(struct tls_offload_resync_async *resync_async,
+ __be32 seq, u16 len)
{
- struct tls_context *tls_ctx = tls_get_ctx(sk);
- struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);
-
- atomic64_set(&rx_ctx->resync_async->req, ((u64)ntohl(seq) << 32) |
+ atomic64_set(&resync_async->req, ((u64)ntohl(seq) << 32) |
((u64)len << 16) | RESYNC_REQ | RESYNC_REQ_ASYNC);
- rx_ctx->resync_async->loglen = 0;
- rx_ctx->resync_async->rcd_delta = 0;
+ resync_async->loglen = 0;
+ resync_async->rcd_delta = 0;
}
static inline void
-tls_offload_rx_resync_async_request_end(struct sock *sk, __be32 seq)
+tls_offload_rx_resync_async_request_end(struct tls_offload_resync_async *resync_async,
+ __be32 seq)
{
- struct tls_context *tls_ctx = tls_get_ctx(sk);
- struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);
+ atomic64_set(&resync_async->req, ((u64)ntohl(seq) << 32) | RESYNC_REQ);
+}
- atomic64_set(&rx_ctx->resync_async->req,
- ((u64)ntohl(seq) << 32) | RESYNC_REQ);
+static inline void
+tls_offload_rx_resync_async_request_cancel(struct tls_offload_resync_async *resync_async)
+{
+ atomic64_set(&resync_async->req, 0);
}
static inline void
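The request word packs sequence, length, and flags into one 64-bit value so a single atomic64 store publishes (or, for the new cancel helper, clears) the whole request at once. A sketch of the packing, with the flag bit positions assumed for the demo:

	#include <stdint.h>
	#include <stdio.h>

	#define RESYNC_REQ		(1ULL << 0)	/* bit values assumed */
	#define RESYNC_REQ_ASYNC	(1ULL << 1)

	static uint64_t resync_pack(uint32_t seq, uint16_t len)
	{
		return ((uint64_t)seq << 32) | ((uint64_t)len << 16) |
		       RESYNC_REQ | RESYNC_REQ_ASYNC;
	}

	int main(void)
	{
		uint64_t req = resync_pack(0xdeadbeef, 1500);

		printf("seq=%#x len=%u async=%d\n",
		       (unsigned)(req >> 32), (unsigned)(uint16_t)(req >> 16),
		       !!(req & RESYNC_REQ_ASYNC));
		return 0;
	}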
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index 6d6500148c4b..993008cdea65 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -252,8 +252,8 @@ struct scsi_device {
unsigned int queue_stopped; /* request queue is quiesced */
bool offline_already; /* Device offline message logged */
- unsigned int ua_new_media_ctr; /* Counter for New Media UNIT ATTENTIONs */
- unsigned int ua_por_ctr; /* Counter for Power On / Reset UAs */
+ atomic_t ua_new_media_ctr; /* Counter for New Media UNIT ATTENTIONs */
+ atomic_t ua_por_ctr; /* Counter for Power On / Reset UAs */
atomic_t disk_events_disable_depth; /* disable depth for disk events */
@@ -693,10 +693,8 @@ static inline int scsi_device_busy(struct scsi_device *sdev)
}
/* Macros to access the UNIT ATTENTION counters */
-#define scsi_get_ua_new_media_ctr(sdev) \
- ((const unsigned int)(sdev->ua_new_media_ctr))
-#define scsi_get_ua_por_ctr(sdev) \
- ((const unsigned int)(sdev->ua_por_ctr))
+#define scsi_get_ua_new_media_ctr(sdev) atomic_read(&sdev->ua_new_media_ctr)
+#define scsi_get_ua_por_ctr(sdev) atomic_read(&sdev->ua_por_ctr)
#define MODULE_ALIAS_SCSI_DEVICE(type) \
MODULE_ALIAS("scsi:t-" __stringify(type) "*")
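The switch to atomic_t matters because a plain integer increment is a read-modify-write sequence; two contexts bumping the same counter can each load the same old value and one update is lost. A userspace demonstration of the difference (names are illustrative):

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdio.h>

	static unsigned int plain_ctr;		/* racy increments */
	static atomic_uint atomic_ctr;		/* never loses updates */

	static void *bump(void *arg)
	{
		(void)arg;
		for (int i = 0; i < 1000000; i++) {
			plain_ctr++;
			atomic_fetch_add(&atomic_ctr, 1);
		}
		return NULL;
	}

	int main(void)
	{
		pthread_t a, b;

		pthread_create(&a, NULL, bump, NULL);
		pthread_create(&b, NULL, bump, NULL);
		pthread_join(a, NULL);
		pthread_join(b, NULL);
		/* plain_ctr typically falls short of 2000000. */
		printf("plain=%u atomic=%u\n", plain_ctr,
		       atomic_load(&atomic_ctr));
		return 0;
	}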
diff --git a/include/trace/events/tcp.h b/include/trace/events/tcp.h
index 9d2c36c6a0ed..6757233bd064 100644
--- a/include/trace/events/tcp.h
+++ b/include/trace/events/tcp.h
@@ -218,6 +218,9 @@ TRACE_EVENT(tcp_rcvbuf_grow,
__field(__u32, space)
__field(__u32, ooo_space)
__field(__u32, rcvbuf)
+ __field(__u32, rcv_ssthresh)
+ __field(__u32, window_clamp)
+ __field(__u32, rcv_wnd)
__field(__u8, scaling_ratio)
__field(__u16, sport)
__field(__u16, dport)
@@ -245,6 +248,9 @@ TRACE_EVENT(tcp_rcvbuf_grow,
tp->rcv_nxt;
__entry->rcvbuf = sk->sk_rcvbuf;
+ __entry->rcv_ssthresh = tp->rcv_ssthresh;
+ __entry->window_clamp = tp->window_clamp;
+ __entry->rcv_wnd = tp->rcv_wnd;
__entry->scaling_ratio = tp->scaling_ratio;
__entry->sport = ntohs(inet->inet_sport);
__entry->dport = ntohs(inet->inet_dport);
@@ -264,11 +270,14 @@ TRACE_EVENT(tcp_rcvbuf_grow,
),
TP_printk("time=%u rtt_us=%u copied=%u inq=%u space=%u ooo=%u scaling_ratio=%u rcvbuf=%u "
+ "rcv_ssthresh=%u window_clamp=%u rcv_wnd=%u "
"family=%s sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 "
"saddrv6=%pI6c daddrv6=%pI6c skaddr=%p sock_cookie=%llx",
__entry->time, __entry->rtt_us, __entry->copied,
__entry->inq, __entry->space, __entry->ooo_space,
__entry->scaling_ratio, __entry->rcvbuf,
+ __entry->rcv_ssthresh, __entry->window_clamp,
+ __entry->rcv_wnd,
show_family_name(__entry->family),
__entry->sport, __entry->dport,
__entry->saddr, __entry->daddr,
diff --git a/include/uapi/drm/xe_drm.h b/include/uapi/drm/xe_drm.h
index 40ff19f52a8d..517489a7ec60 100644
--- a/include/uapi/drm/xe_drm.h
+++ b/include/uapi/drm/xe_drm.h
@@ -1013,6 +1013,20 @@ struct drm_xe_vm_destroy {
* valid on VMs with DRM_XE_VM_CREATE_FLAG_FAULT_MODE set. The CPU address
* mirror flag are only valid for DRM_XE_VM_BIND_OP_MAP operations, the BO
* handle MBZ, and the BO offset MBZ.
+ * - %DRM_XE_VM_BIND_FLAG_MADVISE_AUTORESET - Can be used in combination with
+ * %DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR to reset madvises when the underlying
+ * CPU address space range is unmapped (typically with munmap(2) or brk(2)).
+ * The madvise values set with &DRM_IOCTL_XE_MADVISE are reset to the values
+ * that were present immediately after the &DRM_IOCTL_XE_VM_BIND.
+ * The reset GPU virtual address range is the intersection of the range bound
+ * using &DRM_IOCTL_XE_VM_BIND and the virtual CPU address space range
+ * unmapped.
+ * This functionality is present to mimic the behaviour of CPU address space
+ * madvises set using madvise(2), which are typically reset on unmap.
+ * Note: free(3) may or may not call munmap(2) and/or brk(2), and may thus
+ * not invoke autoreset. Neither will stack variables going out of scope.
+ * Therefore it's recommended to always explicitly reset the madvises when
+ * freeing the memory backing a region used in a &DRM_IOCTL_XE_MADVISE call.
*
* The @prefetch_mem_region_instance for %DRM_XE_VM_BIND_OP_PREFETCH can also be:
* - %DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC, which ensures prefetching occurs in
@@ -1119,6 +1133,7 @@ struct drm_xe_vm_bind_op {
#define DRM_XE_VM_BIND_FLAG_DUMPABLE (1 << 3)
#define DRM_XE_VM_BIND_FLAG_CHECK_PXP (1 << 4)
#define DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR (1 << 5)
+#define DRM_XE_VM_BIND_FLAG_MADVISE_AUTORESET (1 << 6)
/** @flags: Bind flags */
__u32 flags;
diff --git a/include/uapi/linux/fb.h b/include/uapi/linux/fb.h
index cde8f173f566..22acaaec7b1c 100644
--- a/include/uapi/linux/fb.h
+++ b/include/uapi/linux/fb.h
@@ -319,7 +319,7 @@ enum {
#define FB_VBLANK_HAVE_VCOUNT 0x020 /* the vcount field is valid */
#define FB_VBLANK_HAVE_HCOUNT 0x040 /* the hcount field is valid */
#define FB_VBLANK_VSYNCING 0x080 /* currently in a vsync */
-#define FB_VBLANK_HAVE_VSYNC 0x100 /* verical syncs can be detected */
+#define FB_VBLANK_HAVE_VSYNC 0x100 /* vertical syncs can be detected */
struct fb_vblank {
__u32 flags; /* FB_VBLANK flags */
diff --git a/io_uring/fdinfo.c b/io_uring/fdinfo.c
index ff3364531c77..294c75a8a3bd 100644
--- a/io_uring/fdinfo.c
+++ b/io_uring/fdinfo.c
@@ -59,7 +59,6 @@ static void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, struct seq_file *m)
{
struct io_overflow_cqe *ocqe;
struct io_rings *r = ctx->rings;
- struct rusage sq_usage;
unsigned int sq_mask = ctx->sq_entries - 1, cq_mask = ctx->cq_entries - 1;
unsigned int sq_head = READ_ONCE(r->sq.head);
unsigned int sq_tail = READ_ONCE(r->sq.tail);
@@ -152,14 +151,15 @@ static void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, struct seq_file *m)
* thread termination.
*/
if (tsk) {
+ u64 usec;
+
get_task_struct(tsk);
rcu_read_unlock();
- getrusage(tsk, RUSAGE_SELF, &sq_usage);
+ usec = io_sq_cpu_usec(tsk);
put_task_struct(tsk);
sq_pid = sq->task_pid;
sq_cpu = sq->sq_cpu;
- sq_total_time = (sq_usage.ru_stime.tv_sec * 1000000
- + sq_usage.ru_stime.tv_usec);
+ sq_total_time = usec;
sq_work_time = sq->work_time;
} else {
rcu_read_unlock();
diff --git a/io_uring/filetable.c b/io_uring/filetable.c
index a21660e3145a..794ef95df293 100644
--- a/io_uring/filetable.c
+++ b/io_uring/filetable.c
@@ -57,7 +57,7 @@ void io_free_file_tables(struct io_ring_ctx *ctx, struct io_file_table *table)
static int io_install_fixed_file(struct io_ring_ctx *ctx, struct file *file,
u32 slot_index)
- __must_hold(&req->ctx->uring_lock)
+ __must_hold(&ctx->uring_lock)
{
struct io_rsrc_node *node;
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 820ef0527666..296667ba712c 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -879,7 +879,7 @@ static inline struct io_cqe io_init_cqe(u64 user_data, s32 res, u32 cflags)
}
static __cold void io_cqe_overflow(struct io_ring_ctx *ctx, struct io_cqe *cqe,
- struct io_big_cqe *big_cqe)
+ struct io_big_cqe *big_cqe)
{
struct io_overflow_cqe *ocqe;
diff --git a/io_uring/kbuf.c b/io_uring/kbuf.c
index aad655e38672..a727e020fe03 100644
--- a/io_uring/kbuf.c
+++ b/io_uring/kbuf.c
@@ -155,6 +155,27 @@ static int io_provided_buffers_select(struct io_kiocb *req, size_t *len,
return 1;
}
+static bool io_should_commit(struct io_kiocb *req, unsigned int issue_flags)
+{
+ /*
+ * If we came in unlocked, we have no choice but to consume the
+ * buffer here, otherwise nothing ensures that the buffer won't
+ * get used by others. This does mean it'll be pinned until the
+ * IO completes, coming in unlocked means we're being called from
+ * io-wq context and there may be further retries in async hybrid
+ * mode. For the locked case, the caller must call commit when
+	 * the transfer completes (or if we get -EAGAIN and must poll or
+	 * retry).
+ */
+ if (issue_flags & IO_URING_F_UNLOCKED)
+ return true;
+
+ /* uring_cmd commits kbuf upfront, no need to auto-commit */
+ if (!io_file_can_poll(req) && req->opcode != IORING_OP_URING_CMD)
+ return true;
+ return false;
+}
+
static struct io_br_sel io_ring_buffer_select(struct io_kiocb *req, size_t *len,
struct io_buffer_list *bl,
unsigned int issue_flags)
@@ -181,17 +202,7 @@ static struct io_br_sel io_ring_buffer_select(struct io_kiocb *req, size_t *len,
sel.buf_list = bl;
sel.addr = u64_to_user_ptr(buf->addr);
- if (issue_flags & IO_URING_F_UNLOCKED || !io_file_can_poll(req)) {
- /*
- * If we came in unlocked, we have no choice but to consume the
- * buffer here, otherwise nothing ensures that the buffer won't
- * get used by others. This does mean it'll be pinned until the
- * IO completes, coming in unlocked means we're being called from
- * io-wq context and there may be further retries in async hybrid
- * mode. For the locked case, the caller must call commit when
- * the transfer completes (or if we get -EAGAIN and must poll of
- * retry).
- */
+ if (io_should_commit(req, issue_flags)) {
io_kbuf_commit(req, sel.buf_list, *len, 1);
sel.buf_list = NULL;
}
diff --git a/io_uring/net.c b/io_uring/net.c
index f99b90c762fc..a95cc9ca2a4d 100644
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -383,7 +383,7 @@ static int io_send_setup(struct io_kiocb *req, const struct io_uring_sqe *sqe)
return 0;
if (sr->flags & IORING_SEND_VECTORIZED)
- return io_net_import_vec(req, kmsg, sr->buf, sr->len, ITER_SOURCE);
+ return io_net_import_vec(req, kmsg, sr->buf, sr->len, ITER_SOURCE);
return import_ubuf(ITER_SOURCE, sr->buf, sr->len, &kmsg->msg.msg_iter);
}
diff --git a/io_uring/sqpoll.c b/io_uring/sqpoll.c
index a3f11349ce06..e22f072c7d5f 100644
--- a/io_uring/sqpoll.c
+++ b/io_uring/sqpoll.c
@@ -11,6 +11,7 @@
#include <linux/audit.h>
#include <linux/security.h>
#include <linux/cpuset.h>
+#include <linux/sched/cputime.h>
#include <linux/io_uring.h>
#include <uapi/linux/io_uring.h>
@@ -169,7 +170,38 @@ static inline bool io_sqd_events_pending(struct io_sq_data *sqd)
return READ_ONCE(sqd->state);
}
-static int __io_sq_thread(struct io_ring_ctx *ctx, bool cap_entries)
+struct io_sq_time {
+ bool started;
+ u64 usec;
+};
+
+u64 io_sq_cpu_usec(struct task_struct *tsk)
+{
+ u64 utime, stime;
+
+ task_cputime_adjusted(tsk, &utime, &stime);
+ do_div(stime, 1000);
+ return stime;
+}
+
+static void io_sq_update_worktime(struct io_sq_data *sqd, struct io_sq_time *ist)
+{
+ if (!ist->started)
+ return;
+ ist->started = false;
+ sqd->work_time += io_sq_cpu_usec(current) - ist->usec;
+}
+
+static void io_sq_start_worktime(struct io_sq_time *ist)
+{
+ if (ist->started)
+ return;
+ ist->started = true;
+ ist->usec = io_sq_cpu_usec(current);
+}
+
+static int __io_sq_thread(struct io_ring_ctx *ctx, struct io_sq_data *sqd,
+ bool cap_entries, struct io_sq_time *ist)
{
unsigned int to_submit;
int ret = 0;
@@ -182,6 +214,8 @@ static int __io_sq_thread(struct io_ring_ctx *ctx, bool cap_entries)
if (to_submit || !wq_list_empty(&ctx->iopoll_list)) {
const struct cred *creds = NULL;
+ io_sq_start_worktime(ist);
+
if (ctx->sq_creds != current_cred())
creds = override_creds(ctx->sq_creds);
@@ -255,23 +289,11 @@ static bool io_sq_tw_pending(struct llist_node *retry_list)
return retry_list || !llist_empty(&tctx->task_list);
}
-static void io_sq_update_worktime(struct io_sq_data *sqd, struct rusage *start)
-{
- struct rusage end;
-
- getrusage(current, RUSAGE_SELF, &end);
- end.ru_stime.tv_sec -= start->ru_stime.tv_sec;
- end.ru_stime.tv_usec -= start->ru_stime.tv_usec;
-
- sqd->work_time += end.ru_stime.tv_usec + end.ru_stime.tv_sec * 1000000;
-}
-
static int io_sq_thread(void *data)
{
struct llist_node *retry_list = NULL;
struct io_sq_data *sqd = data;
struct io_ring_ctx *ctx;
- struct rusage start;
unsigned long timeout = 0;
char buf[TASK_COMM_LEN] = {};
DEFINE_WAIT(wait);
@@ -309,6 +331,7 @@ static int io_sq_thread(void *data)
mutex_lock(&sqd->lock);
while (1) {
bool cap_entries, sqt_spin = false;
+ struct io_sq_time ist = { };
if (io_sqd_events_pending(sqd) || signal_pending(current)) {
if (io_sqd_handle_event(sqd))
@@ -317,9 +340,8 @@ static int io_sq_thread(void *data)
}
cap_entries = !list_is_singular(&sqd->ctx_list);
- getrusage(current, RUSAGE_SELF, &start);
list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
- int ret = __io_sq_thread(ctx, cap_entries);
+ int ret = __io_sq_thread(ctx, sqd, cap_entries, &ist);
if (!sqt_spin && (ret > 0 || !wq_list_empty(&ctx->iopoll_list)))
sqt_spin = true;
@@ -327,15 +349,18 @@ static int io_sq_thread(void *data)
if (io_sq_tw(&retry_list, IORING_TW_CAP_ENTRIES_VALUE))
sqt_spin = true;
- list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
- if (io_napi(ctx))
+ list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
+ if (io_napi(ctx)) {
+ io_sq_start_worktime(&ist);
io_napi_sqpoll_busy_poll(ctx);
+ }
+ }
+
+ io_sq_update_worktime(sqd, &ist);
if (sqt_spin || !time_after(jiffies, timeout)) {
- if (sqt_spin) {
- io_sq_update_worktime(sqd, &start);
+ if (sqt_spin)
timeout = jiffies + sqd->sq_thread_idle;
- }
if (unlikely(need_resched())) {
mutex_unlock(&sqd->lock);
cond_resched();
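The io_sq_time pattern starts the clock lazily, only once real work is found, and folds the elapsed CPU time into sqd->work_time at most once per loop iteration. A userspace analogue using the per-thread CPU clock (the kernel derives the value from task_cputime_adjusted() instead):

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <time.h>

	struct sq_time {
		bool started;
		uint64_t usec;
	};

	static uint64_t thread_cpu_usec(void)
	{
		struct timespec ts;

		clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts);
		return (uint64_t)ts.tv_sec * 1000000 + ts.tv_nsec / 1000;
	}

	static void start_worktime(struct sq_time *ist)
	{
		if (!ist->started) {
			ist->started = true;
			ist->usec = thread_cpu_usec();
		}
	}

	static void update_worktime(uint64_t *total, struct sq_time *ist)
	{
		if (ist->started) {
			ist->started = false;
			*total += thread_cpu_usec() - ist->usec;
		}
	}

	int main(void)
	{
		struct sq_time ist = { 0 };
		uint64_t total = 0;
		volatile unsigned long spin = 0;

		start_worktime(&ist);
		while (spin < 50000000)
			spin++;
		update_worktime(&total, &ist);
		printf("worked for %llu usec of CPU time\n",
		       (unsigned long long)total);
		return 0;
	}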
diff --git a/io_uring/sqpoll.h b/io_uring/sqpoll.h
index b83dcdec9765..fd2f6f29b516 100644
--- a/io_uring/sqpoll.h
+++ b/io_uring/sqpoll.h
@@ -29,6 +29,7 @@ void io_sq_thread_unpark(struct io_sq_data *sqd);
void io_put_sq_data(struct io_sq_data *sqd);
void io_sqpoll_wait_sq(struct io_ring_ctx *ctx);
int io_sqpoll_wq_cpu_affinity(struct io_ring_ctx *ctx, cpumask_var_t mask);
+u64 io_sq_cpu_usec(struct task_struct *tsk);
static inline struct task_struct *sqpoll_task_locked(struct io_sq_data *sqd)
{
diff --git a/io_uring/waitid.c b/io_uring/waitid.c
index f25110fb1b12..53532ae6256c 100644
--- a/io_uring/waitid.c
+++ b/io_uring/waitid.c
@@ -250,7 +250,7 @@ int io_waitid_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
return -EINVAL;
iwa = io_uring_alloc_async_data(NULL, req);
- if (!unlikely(iwa))
+ if (unlikely(!iwa))
return -ENOMEM;
iwa->req = req;
diff --git a/kernel/dma/debug.c b/kernel/dma/debug.c
index 1e5c64cb6a42..138ede653de4 100644
--- a/kernel/dma/debug.c
+++ b/kernel/dma/debug.c
@@ -23,6 +23,7 @@
#include <linux/ctype.h>
#include <linux/list.h>
#include <linux/slab.h>
+#include <linux/swiotlb.h>
#include <asm/sections.h>
#include "debug.h"
@@ -594,7 +595,9 @@ static void add_dma_entry(struct dma_debug_entry *entry, unsigned long attrs)
if (rc == -ENOMEM) {
pr_err_once("cacheline tracking ENOMEM, dma-debug disabled\n");
global_disable = true;
- } else if (rc == -EEXIST && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
+ } else if (rc == -EEXIST && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
+ !(IS_ENABLED(CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC) &&
+ is_swiotlb_active(entry->dev))) {
err_printk(entry->dev, entry,
"cacheline tracking EEXIST, overlapping mappings aren't supported\n");
}
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 3ffa0d80ddd1..d1917b28761a 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -1030,7 +1030,7 @@ __irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle,
void __irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
const char *name)
{
- scoped_irqdesc_get_and_lock(irq, 0)
+ scoped_irqdesc_get_and_buslock(irq, 0)
__irq_do_set_handler(scoped_irqdesc, handle, is_chained, name);
}
EXPORT_SYMBOL_GPL(__irq_set_handler);
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index c94837382037..400856abf672 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -659,7 +659,7 @@ void __disable_irq(struct irq_desc *desc)
static int __disable_irq_nosync(unsigned int irq)
{
- scoped_irqdesc_get_and_lock(irq, IRQ_GET_DESC_CHECK_GLOBAL) {
+ scoped_irqdesc_get_and_buslock(irq, IRQ_GET_DESC_CHECK_GLOBAL) {
__disable_irq(scoped_irqdesc);
return 0;
}
@@ -789,7 +789,7 @@ void __enable_irq(struct irq_desc *desc)
*/
void enable_irq(unsigned int irq)
{
- scoped_irqdesc_get_and_lock(irq, IRQ_GET_DESC_CHECK_GLOBAL) {
+ scoped_irqdesc_get_and_buslock(irq, IRQ_GET_DESC_CHECK_GLOBAL) {
struct irq_desc *desc = scoped_irqdesc;
if (WARN(!desc->irq_data.chip, "enable_irq before setup/request_irq: irq %u\n", irq))
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index 14e85ff23551..53166ef86ba4 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -706,7 +706,6 @@ static void power_down(void)
#ifdef CONFIG_SUSPEND
if (hibernation_mode == HIBERNATION_SUSPEND) {
- pm_restore_gfp_mask();
error = suspend_devices_and_enter(mem_sleep_current);
if (!error)
goto exit;
@@ -746,9 +745,6 @@ static void power_down(void)
cpu_relax();
exit:
- /* Match the pm_restore_gfp_mask() call in hibernate(). */
- pm_restrict_gfp_mask();
-
/* Restore swap signature. */
error = swsusp_unmark();
if (error)
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 3cf2d7e72567..549f51ca3a1e 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -31,23 +31,35 @@
* held, unless the suspend/hibernate code is guaranteed not to run in parallel
* with that modification).
*/
+static unsigned int saved_gfp_count;
static gfp_t saved_gfp_mask;
void pm_restore_gfp_mask(void)
{
WARN_ON(!mutex_is_locked(&system_transition_mutex));
- if (saved_gfp_mask) {
- gfp_allowed_mask = saved_gfp_mask;
- saved_gfp_mask = 0;
- }
+
+ if (WARN_ON(!saved_gfp_count) || --saved_gfp_count)
+ return;
+
+ gfp_allowed_mask = saved_gfp_mask;
+ saved_gfp_mask = 0;
+
+ pm_pr_dbg("GFP mask restored\n");
}
void pm_restrict_gfp_mask(void)
{
WARN_ON(!mutex_is_locked(&system_transition_mutex));
- WARN_ON(saved_gfp_mask);
+
+ if (saved_gfp_count++) {
+ WARN_ON((saved_gfp_mask & ~(__GFP_IO | __GFP_FS)) != gfp_allowed_mask);
+ return;
+ }
+
saved_gfp_mask = gfp_allowed_mask;
gfp_allowed_mask &= ~(__GFP_IO | __GFP_FS);
+
+ pm_pr_dbg("GFP mask restricted\n");
}
unsigned int lock_system_sleep(void)
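With the hibernation path below now free to run a full suspend cycle inside hibernate()'s own restrict/restore pair, the mask handling has to nest: only the outermost restrict saves the mask and only the matching outermost restore puts it back. A condensed sketch of the counted save/restore pattern (mask values are stand-ins):

	static unsigned int saved_count;
	static unsigned int saved_mask;
	static unsigned int allowed_mask = ~0u;

	#define GFP_IO_FS 0x3u	/* stand-in for __GFP_IO | __GFP_FS */

	static void restrict_mask(void)
	{
		if (saved_count++)	/* inner caller: already restricted */
			return;
		saved_mask = allowed_mask;
		allowed_mask &= ~GFP_IO_FS;
	}

	static void restore_mask(void)
	{
		if (!saved_count || --saved_count)	/* unbalanced or inner */
			return;
		allowed_mask = saved_mask;
		saved_mask = 0;
	}

	int main(void)
	{
		restrict_mask();	/* hibernate */
		restrict_mask();	/* nested suspend */
		restore_mask();
		restore_mask();
		return allowed_mask == ~0u ? 0 : 1;
	}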
diff --git a/kernel/power/process.c b/kernel/power/process.c
index 8ff68ebaa1e0..dc0dfc349f22 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -132,6 +132,7 @@ int freeze_processes(void)
if (!pm_freezing)
static_branch_inc(&freezer_active);
+ pm_wakeup_clear(0);
pm_freezing = true;
error = try_to_freeze_tasks(true);
if (!error)
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index 4bb4686c1c08..b4ca17c2fecf 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -595,7 +595,6 @@ static int enter_state(suspend_state_t state)
}
pm_pr_dbg("Preparing system for sleep (%s)\n", mem_sleep_labels[state]);
- pm_wakeup_clear(0);
pm_suspend_clear_flags();
error = suspend_prepare(state);
if (error)
diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
index 2b0e88206d07..ecb251e883ea 100644
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -67,8 +67,19 @@ static unsigned long scx_watchdog_timestamp = INITIAL_JIFFIES;
static struct delayed_work scx_watchdog_work;
-/* for %SCX_KICK_WAIT */
-static unsigned long __percpu *scx_kick_cpus_pnt_seqs;
+/*
+ * For %SCX_KICK_WAIT: Each CPU has a pointer to an array of pick_task sequence
+ * numbers. The arrays are allocated with kvzalloc() as size can exceed percpu
+ * allocator limits on large machines.  The total is O(nr_cpu_ids^2), so the
+ * arrays are allocated lazily when enabling and freed when disabling to
+ * avoid waste while sched_ext isn't active.
+ */
+struct scx_kick_pseqs {
+ struct rcu_head rcu;
+ unsigned long seqs[];
+};
+
+static DEFINE_PER_CPU(struct scx_kick_pseqs __rcu *, scx_kick_pseqs);
/*
* Direct dispatch marker.
@@ -780,13 +791,23 @@ static void schedule_deferred(struct rq *rq)
if (rq->scx.flags & SCX_RQ_IN_WAKEUP)
return;
+ /* Don't do anything if there already is a deferred operation. */
+ if (rq->scx.flags & SCX_RQ_BAL_CB_PENDING)
+ return;
+
/*
* If in balance, the balance callbacks will be called before rq lock is
* released. Schedule one.
+	 *
+	 * We can't directly insert the callback into the rq's list: the
+	 * balance call can drop the rq lock and make the pending balance
+	 * callback visible to unrelated code paths that call rq_pin_lock().
+ *
+ * Just let balance_one() know that it must do it itself.
*/
if (rq->scx.flags & SCX_RQ_IN_BALANCE) {
- queue_balance_callback(rq, &rq->scx.deferred_bal_cb,
- deferred_bal_cb_workfn);
+ rq->scx.flags |= SCX_RQ_BAL_CB_PENDING;
return;
}
@@ -2003,6 +2024,19 @@ static void flush_dispatch_buf(struct scx_sched *sch, struct rq *rq)
dspc->cursor = 0;
}
+static inline void maybe_queue_balance_callback(struct rq *rq)
+{
+ lockdep_assert_rq_held(rq);
+
+ if (!(rq->scx.flags & SCX_RQ_BAL_CB_PENDING))
+ return;
+
+ queue_balance_callback(rq, &rq->scx.deferred_bal_cb,
+ deferred_bal_cb_workfn);
+
+ rq->scx.flags &= ~SCX_RQ_BAL_CB_PENDING;
+}
+
static int balance_one(struct rq *rq, struct task_struct *prev)
{
struct scx_sched *sch = scx_root;
@@ -2150,6 +2184,8 @@ static int balance_scx(struct rq *rq, struct task_struct *prev,
#endif
rq_repin_lock(rq, rf);
+ maybe_queue_balance_callback(rq);
+
return ret;
}
@@ -3471,7 +3507,9 @@ static void scx_sched_free_rcu_work(struct work_struct *work)
struct scx_dispatch_q *dsq;
int node;
+ irq_work_sync(&sch->error_irq_work);
kthread_stop(sch->helper->task);
+
free_percpu(sch->pcpu);
for_each_node_state(node, N_POSSIBLE)
@@ -3850,6 +3888,27 @@ static const char *scx_exit_reason(enum scx_exit_kind kind)
}
}
+static void free_kick_pseqs_rcu(struct rcu_head *rcu)
+{
+ struct scx_kick_pseqs *pseqs = container_of(rcu, struct scx_kick_pseqs, rcu);
+
+ kvfree(pseqs);
+}
+
+static void free_kick_pseqs(void)
+{
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ struct scx_kick_pseqs **pseqs = per_cpu_ptr(&scx_kick_pseqs, cpu);
+ struct scx_kick_pseqs *to_free;
+
+ to_free = rcu_replace_pointer(*pseqs, NULL, true);
+ if (to_free)
+ call_rcu(&to_free->rcu, free_kick_pseqs_rcu);
+ }
+}
+
static void scx_disable_workfn(struct kthread_work *work)
{
struct scx_sched *sch = container_of(work, struct scx_sched, disable_work);
@@ -3986,6 +4045,7 @@ static void scx_disable_workfn(struct kthread_work *work)
free_percpu(scx_dsp_ctx);
scx_dsp_ctx = NULL;
scx_dsp_max_batch = 0;
+ free_kick_pseqs();
mutex_unlock(&scx_enable_mutex);
@@ -4348,6 +4408,33 @@ static void scx_vexit(struct scx_sched *sch,
irq_work_queue(&sch->error_irq_work);
}
+static int alloc_kick_pseqs(void)
+{
+ int cpu;
+
+ /*
+ * Allocate per-CPU arrays sized by nr_cpu_ids. Use kvzalloc as size
+ * can exceed percpu allocator limits on large machines.
+ */
+ for_each_possible_cpu(cpu) {
+ struct scx_kick_pseqs **pseqs = per_cpu_ptr(&scx_kick_pseqs, cpu);
+ struct scx_kick_pseqs *new_pseqs;
+
+ WARN_ON_ONCE(rcu_access_pointer(*pseqs));
+
+ new_pseqs = kvzalloc_node(struct_size(new_pseqs, seqs, nr_cpu_ids),
+ GFP_KERNEL, cpu_to_node(cpu));
+ if (!new_pseqs) {
+ free_kick_pseqs();
+ return -ENOMEM;
+ }
+
+ rcu_assign_pointer(*pseqs, new_pseqs);
+ }
+
+ return 0;
+}
+
static struct scx_sched *scx_alloc_and_add_sched(struct sched_ext_ops *ops)
{
struct scx_sched *sch;
@@ -4495,10 +4582,14 @@ static int scx_enable(struct sched_ext_ops *ops, struct bpf_link *link)
goto err_unlock;
}
+ ret = alloc_kick_pseqs();
+ if (ret)
+ goto err_unlock;
+
sch = scx_alloc_and_add_sched(ops);
if (IS_ERR(sch)) {
ret = PTR_ERR(sch);
- goto err_unlock;
+ goto err_free_pseqs;
}
/*
@@ -4701,6 +4792,8 @@ static int scx_enable(struct sched_ext_ops *ops, struct bpf_link *link)
return 0;
+err_free_pseqs:
+ free_kick_pseqs();
err_unlock:
mutex_unlock(&scx_enable_mutex);
return ret;
@@ -5082,10 +5175,18 @@ static void kick_cpus_irq_workfn(struct irq_work *irq_work)
{
struct rq *this_rq = this_rq();
struct scx_rq *this_scx = &this_rq->scx;
- unsigned long *pseqs = this_cpu_ptr(scx_kick_cpus_pnt_seqs);
+ struct scx_kick_pseqs __rcu *pseqs_pcpu = __this_cpu_read(scx_kick_pseqs);
bool should_wait = false;
+ unsigned long *pseqs;
s32 cpu;
+ if (unlikely(!pseqs_pcpu)) {
+ pr_warn_once("kick_cpus_irq_workfn() called with NULL scx_kick_pseqs");
+ return;
+ }
+
+ pseqs = rcu_dereference_bh(pseqs_pcpu)->seqs;
+
for_each_cpu(cpu, this_scx->cpus_to_kick) {
should_wait |= kick_one_cpu(cpu, this_rq, pseqs);
cpumask_clear_cpu(cpu, this_scx->cpus_to_kick);
@@ -5208,11 +5309,6 @@ void __init init_sched_ext_class(void)
scx_idle_init_masks();
- scx_kick_cpus_pnt_seqs =
- __alloc_percpu(sizeof(scx_kick_cpus_pnt_seqs[0]) * nr_cpu_ids,
- __alignof__(scx_kick_cpus_pnt_seqs[0]));
- BUG_ON(!scx_kick_cpus_pnt_seqs);
-
for_each_possible_cpu(cpu) {
struct rq *rq = cpu_rq(cpu);
int n = cpu_to_node(cpu);
@@ -5688,8 +5784,8 @@ BTF_KFUNCS_START(scx_kfunc_ids_dispatch)
BTF_ID_FLAGS(func, scx_bpf_dispatch_nr_slots)
BTF_ID_FLAGS(func, scx_bpf_dispatch_cancel)
BTF_ID_FLAGS(func, scx_bpf_dsq_move_to_local)
-BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_slice)
-BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_vtime)
+BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_slice, KF_RCU)
+BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_vtime, KF_RCU)
BTF_ID_FLAGS(func, scx_bpf_dsq_move, KF_RCU)
BTF_ID_FLAGS(func, scx_bpf_dsq_move_vtime, KF_RCU)
BTF_KFUNCS_END(scx_kfunc_ids_dispatch)
@@ -5820,8 +5916,8 @@ __bpf_kfunc_end_defs();
BTF_KFUNCS_START(scx_kfunc_ids_unlocked)
BTF_ID_FLAGS(func, scx_bpf_create_dsq, KF_SLEEPABLE)
-BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_slice)
-BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_vtime)
+BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_slice, KF_RCU)
+BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_vtime, KF_RCU)
BTF_ID_FLAGS(func, scx_bpf_dsq_move, KF_RCU)
BTF_ID_FLAGS(func, scx_bpf_dsq_move_vtime, KF_RCU)
BTF_KFUNCS_END(scx_kfunc_ids_unlocked)
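The percpu allocation is replaced by per-CPU pointers to kvzalloc()'d flexible-array blocks because nr_cpu_ids entries per CPU can exceed percpu allocator limits. A userspace sketch of the allocation shape (RCU lifetime handling omitted; the placeholder field stands in for the rcu_head):

	#include <stdlib.h>

	struct kick_pseqs_demo {
		void *rcu_placeholder;	/* stands in for struct rcu_head */
		unsigned long seqs[];	/* one entry per possible CPU */
	};

	/* One block per CPU, each holding nr_cpus sequence numbers: the
	 * O(nr_cpu_ids^2) total mentioned in the comment above. */
	static struct kick_pseqs_demo *alloc_pseqs_demo(unsigned int nr_cpus)
	{
		return calloc(1, sizeof(struct kick_pseqs_demo) +
				 nr_cpus * sizeof(unsigned long));
	}

	int main(void)
	{
		struct kick_pseqs_demo *p = alloc_pseqs_demo(4096);

		free(p);
		return 0;
	}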
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index cee1793e8277..25970dbbb279 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6437,6 +6437,16 @@ static void sync_throttle(struct task_group *tg, int cpu)
cfs_rq->throttle_count = pcfs_rq->throttle_count;
cfs_rq->throttled_clock_pelt = rq_clock_pelt(cpu_rq(cpu));
+
+ /*
+ * It is not enough to sync the "pelt_clock_throttled" indicator
+ * with the parent cfs_rq when the hierarchy is not queued.
+ * Always join a throttled hierarchy with PELT clock throttled
+	 * and leave it to the first enqueue, or distribution, to
+ * unthrottle the PELT clock.
+ */
+ if (cfs_rq->throttle_count)
+ cfs_rq->pelt_clock_throttled = 1;
}
/* conditionally throttle active cfs_rq's from put_prev_entity() */
@@ -13187,6 +13197,8 @@ static void propagate_entity_cfs_rq(struct sched_entity *se)
if (!cfs_rq_pelt_clock_throttled(cfs_rq))
list_add_leaf_cfs_rq(cfs_rq);
}
+
+ assert_list_leaf_cfs_rq(rq_of(cfs_rq));
}
#else /* !CONFIG_FAIR_GROUP_SCHED: */
static void propagate_entity_cfs_rq(struct sched_entity *se) { }
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 1f5d07067f60..adfb6e3409d7 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -784,6 +784,7 @@ enum scx_rq_flags {
SCX_RQ_BAL_KEEP = 1 << 3, /* balance decided to keep current */
SCX_RQ_BYPASSING = 1 << 4,
SCX_RQ_CLK_VALID = 1 << 5, /* RQ clock is fresh and valid */
+ SCX_RQ_BAL_CB_PENDING = 1 << 6, /* must queue a cb after dispatching */
SCX_RQ_IN_WAKEUP = 1 << 16,
SCX_RQ_IN_BALANCE = 1 << 17,
@@ -3740,11 +3741,9 @@ static inline int mm_cid_get(struct rq *rq, struct task_struct *t,
struct mm_struct *mm)
{
struct mm_cid __percpu *pcpu_cid = mm->pcpu_cid;
- struct cpumask *cpumask;
int cid;
lockdep_assert_rq_held(rq);
- cpumask = mm_cidmask(mm);
cid = __this_cpu_read(pcpu_cid->cid);
if (mm_cid_is_valid(cid)) {
mm_cid_snapshot_time(rq, mm);
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index b6974fce800c..3a4d3b2e3f74 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -3070,7 +3070,7 @@ static int __init tk_aux_sysfs_init(void)
return -ENOMEM;
}
- for (int i = 0; i <= MAX_AUX_CLOCKS; i++) {
+ for (int i = 0; i < MAX_AUX_CLOCKS; i++) {
char id[2] = { [0] = '0' + i, };
struct kobject *clk = kobject_create_and_add(id, auxo);
diff --git a/kernel/trace/rv/monitors/pagefault/Kconfig b/kernel/trace/rv/monitors/pagefault/Kconfig
index 5e16625f1653..0e013f00c33b 100644
--- a/kernel/trace/rv/monitors/pagefault/Kconfig
+++ b/kernel/trace/rv/monitors/pagefault/Kconfig
@@ -5,6 +5,7 @@ config RV_MON_PAGEFAULT
select RV_LTL_MONITOR
depends on RV_MON_RTAPP
depends on X86 || RISCV
+ depends on MMU
default y
select LTL_MON_EVENTS_ID
bool "pagefault monitor"
diff --git a/kernel/trace/rv/rv.c b/kernel/trace/rv/rv.c
index 48338520376f..43e9ea473cda 100644
--- a/kernel/trace/rv/rv.c
+++ b/kernel/trace/rv/rv.c
@@ -501,7 +501,7 @@ static void *enabled_monitors_next(struct seq_file *m, void *p, loff_t *pos)
list_for_each_entry_continue(mon, &rv_monitors_list, list) {
if (mon->enabled)
- return mon;
+ return &mon->list;
}
return NULL;
@@ -509,7 +509,7 @@ static void *enabled_monitors_next(struct seq_file *m, void *p, loff_t *pos)
static void *enabled_monitors_start(struct seq_file *m, loff_t *pos)
{
- struct rv_monitor *mon;
+ struct list_head *head;
loff_t l;
mutex_lock(&rv_interface_lock);
@@ -517,15 +517,15 @@ static void *enabled_monitors_start(struct seq_file *m, loff_t *pos)
if (list_empty(&rv_monitors_list))
return NULL;
- mon = list_entry(&rv_monitors_list, struct rv_monitor, list);
+ head = &rv_monitors_list;
for (l = 0; l <= *pos; ) {
- mon = enabled_monitors_next(m, mon, &l);
- if (!mon)
+ head = enabled_monitors_next(m, head, &l);
+ if (!head)
break;
}
- return mon;
+ return head;
}
/*
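The bug fixed here is a classic seq_file iterator mismatch: ->start() seeded the walk with the bare list head cast to a monitor while ->next() returned entries, so container_of() ended up computing pointers into the wrong object. A sketch of why the node/entry distinction matters (the list type and container_of are reimplemented for the demo):

	#include <stddef.h>

	struct list_node {
		struct list_node *next, *prev;
	};

	struct monitor {
		int enabled;
		struct list_node list;
	};

	#define container_of_demo(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	/* Safe only when 'node' really is the ->list member of a monitor;
	 * feeding it the list head itself yields a bogus pointer. */
	static struct monitor *monitor_from_node(struct list_node *node)
	{
		return container_of_demo(node, struct monitor, list);
	}

	int main(void)
	{
		struct monitor m = { .enabled = 1 };

		return monitor_from_node(&m.list) == &m ? 0 : 1;
	}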
diff --git a/lib/Kconfig.kmsan b/lib/Kconfig.kmsan
index 7251b6b59e69..cae1ddcc18e1 100644
--- a/lib/Kconfig.kmsan
+++ b/lib/Kconfig.kmsan
@@ -3,7 +3,7 @@ config HAVE_ARCH_KMSAN
bool
config HAVE_KMSAN_COMPILER
- def_bool CC_IS_CLANG
+ def_bool $(cc-option,-fsanitize=kernel-memory)
config KMSAN
bool "KMSAN: detector of uninitialized values use"
diff --git a/lib/crypto/Kconfig b/lib/crypto/Kconfig
index eea17e36a22b..8886055e938f 100644
--- a/lib/crypto/Kconfig
+++ b/lib/crypto/Kconfig
@@ -97,7 +97,7 @@ config CRYPTO_LIB_POLY1305
config CRYPTO_LIB_POLY1305_ARCH
bool
- depends on CRYPTO_LIB_POLY1305 && !UML
+ depends on CRYPTO_LIB_POLY1305 && !UML && !KMSAN
default y if ARM
default y if ARM64 && KERNEL_MODE_NEON
default y if MIPS
diff --git a/lib/kunit/kunit-test.c b/lib/kunit/kunit-test.c
index 8c01eabd4eaf..63130a48e237 100644
--- a/lib/kunit/kunit-test.c
+++ b/lib/kunit/kunit-test.c
@@ -739,7 +739,7 @@ static struct kunit_case kunit_current_test_cases[] = {
static void test_dev_action(void *priv)
{
- *(void **)priv = (void *)1;
+ *(long *)priv = 1;
}
static void kunit_device_test(struct kunit *test)
diff --git a/lib/kunit/test.c b/lib/kunit/test.c
index bb66ea1a3eac..62eb529824c6 100644
--- a/lib/kunit/test.c
+++ b/lib/kunit/test.c
@@ -745,7 +745,8 @@ int kunit_run_tests(struct kunit_suite *suite)
.param_index = ++test.param_index,
.parent = &test,
};
- kunit_init_test(&param_test, test_case->name, test_case->log);
+ kunit_init_test(&param_test, test_case->name, NULL);
+ param_test.log = test_case->log;
kunit_run_case_catch_errors(suite, test_case, &param_test);
if (param_desc[0] == '\0') {
diff --git a/mm/damon/core.c b/mm/damon/core.c
index 93848b4c6944..109b050c795a 100644
--- a/mm/damon/core.c
+++ b/mm/damon/core.c
@@ -452,6 +452,9 @@ void damon_destroy_scheme(struct damos *s)
damos_for_each_filter_safe(f, next, s)
damos_destroy_filter(f);
+ damos_for_each_ops_filter_safe(f, next, s)
+ damos_destroy_filter(f);
+
kfree(s->migrate_dests.node_id_arr);
kfree(s->migrate_dests.weight_arr);
damon_del_scheme(s);
@@ -832,7 +835,7 @@ int damos_commit_quota_goals(struct damos_quota *dst, struct damos_quota *src)
src_goal->metric, src_goal->target_value);
if (!new_goal)
return -ENOMEM;
- damos_commit_quota_goal_union(new_goal, src_goal);
+ damos_commit_quota_goal(new_goal, src_goal);
damos_add_quota_goal(dst, new_goal);
}
return 0;
@@ -1450,7 +1453,7 @@ int damon_call(struct damon_ctx *ctx, struct damon_call_control *control)
INIT_LIST_HEAD(&control->list);
mutex_lock(&ctx->call_controls_lock);
- list_add_tail(&ctx->call_controls, &control->list);
+ list_add_tail(&control->list, &ctx->call_controls);
mutex_unlock(&ctx->call_controls_lock);
if (!damon_is_running(ctx))
return -EINVAL;
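The one-line fix above swaps the arguments back into the kernel convention: list_add_tail(new, head) inserts the new node just before the head, i.e. at the tail; reversed arguments splice the list head into the new entry instead. A minimal reimplementation showing the expected ordering:

	#include <stdio.h>

	struct list_node {
		struct list_node *next, *prev;
	};

	static void list_init(struct list_node *head)
	{
		head->next = head->prev = head;
	}

	/* Matches the kernel's list_add_tail(new, head) semantics. */
	static void list_add_tail_demo(struct list_node *new,
				       struct list_node *head)
	{
		new->prev = head->prev;
		new->next = head;
		head->prev->next = new;
		head->prev = new;
	}

	int main(void)
	{
		struct list_node head, a, b;

		list_init(&head);
		list_add_tail_demo(&a, &head);
		list_add_tail_demo(&b, &head);
		printf("tail order ok: %d\n",
		       head.next == &a && head.prev == &b);
		return 0;
	}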
diff --git a/mm/damon/sysfs.c b/mm/damon/sysfs.c
index 2fc722f998f8..cd6815ecc04e 100644
--- a/mm/damon/sysfs.c
+++ b/mm/damon/sysfs.c
@@ -1473,13 +1473,14 @@ static int damon_sysfs_commit_input(void *data)
if (IS_ERR(param_ctx))
return PTR_ERR(param_ctx);
test_ctx = damon_new_ctx();
+ if (!test_ctx)
+ return -ENOMEM;
err = damon_commit_ctx(test_ctx, param_ctx);
- if (err) {
- damon_destroy_ctx(test_ctx);
+ if (err)
goto out;
- }
err = damon_commit_ctx(kdamond->damon_ctx, param_ctx);
out:
+ damon_destroy_ctx(test_ctx);
damon_destroy_ctx(param_ctx);
return err;
}
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 1b81680b4225..1d1b74950332 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -4109,6 +4109,9 @@ static bool thp_underused(struct folio *folio)
if (khugepaged_max_ptes_none == HPAGE_PMD_NR - 1)
return false;
+ if (folio_contain_hwpoisoned_page(folio))
+ return false;
+
for (i = 0; i < folio_nr_pages(folio); i++) {
if (pages_identical(folio_page(folio, i), ZERO_PAGE(0))) {
if (++num_zero_pages > khugepaged_max_ptes_none)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 795ee393eac0..0455119716ec 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -7614,13 +7614,12 @@ int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
p4d_t *p4d = p4d_offset(pgd, addr);
pud_t *pud = pud_offset(p4d, addr);
- i_mmap_assert_write_locked(vma->vm_file->f_mapping);
- hugetlb_vma_assert_locked(vma);
if (sz != PMD_SIZE)
return 0;
if (!ptdesc_pmd_is_shared(virt_to_ptdesc(ptep)))
return 0;
-
+ i_mmap_assert_write_locked(vma->vm_file->f_mapping);
+ hugetlb_vma_assert_locked(vma);
pud_clear(pud);
/*
* Once our caller drops the rmap lock, some other process might be
diff --git a/mm/migrate.c b/mm/migrate.c
index e3065c9edb55..c0e9f15be2a2 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -301,8 +301,9 @@ static bool try_to_map_unused_to_zeropage(struct page_vma_mapped_walk *pvmw,
struct page *page = folio_page(folio, idx);
pte_t newpte;
- if (PageCompound(page))
+ if (PageCompound(page) || PageHWPoison(page))
return false;
+
VM_BUG_ON_PAGE(!PageAnon(page), page);
VM_BUG_ON_PAGE(!PageLocked(page), page);
VM_BUG_ON_PAGE(pte_present(old_pte), page);
diff --git a/mm/mremap.c b/mm/mremap.c
index 35de0a7b910e..bd7314898ec5 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -1237,10 +1237,10 @@ static int copy_vma_and_data(struct vma_remap_struct *vrm,
}
/*
- * Perform final tasks for MADV_DONTUNMAP operation, clearing mlock() and
- * account flags on remaining VMA by convention (it cannot be mlock()'d any
- * longer, as pages in range are no longer mapped), and removing anon_vma_chain
- * links from it (if the entire VMA was copied over).
+ * Perform final tasks for MADV_DONTUNMAP operation, clearing mlock() flag on
+ * remaining VMA by convention (it cannot be mlock()'d any longer, as pages in
+ * range are no longer mapped), and removing anon_vma_chain links from it if the
+ * entire VMA was copied over.
*/
static void dontunmap_complete(struct vma_remap_struct *vrm,
struct vm_area_struct *new_vma)
@@ -1250,11 +1250,8 @@ static void dontunmap_complete(struct vma_remap_struct *vrm,
unsigned long old_start = vrm->vma->vm_start;
unsigned long old_end = vrm->vma->vm_end;
- /*
- * We always clear VM_LOCKED[ONFAULT] | VM_ACCOUNT on the old
- * vma.
- */
- vm_flags_clear(vrm->vma, VM_LOCKED_MASK | VM_ACCOUNT);
+ /* We always clear VM_LOCKED[ONFAULT] on the old VMA. */
+ vm_flags_clear(vrm->vma, VM_LOCKED_MASK);
/*
* anon_vma links of the old vma is no longer needed after its page
diff --git a/mm/page_owner.c b/mm/page_owner.c
index c3ca21132c2c..589ec37c94aa 100644
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -168,6 +168,9 @@ static void add_stack_record_to_list(struct stack_record *stack_record,
unsigned long flags;
struct stack *stack;
+ if (!gfpflags_allow_spinning(gfp_mask))
+ return;
+
set_current_in_page_owner();
stack = kmalloc(sizeof(*stack), gfp_nested_mask(gfp_mask));
if (!stack) {
diff --git a/mm/slub.c b/mm/slub.c
index a8fcc7e6f25a..d4367f25b20d 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2052,9 +2052,9 @@ static inline void mark_objexts_empty(struct slabobj_ext *obj_exts)
}
}
-static inline void mark_failed_objexts_alloc(struct slab *slab)
+static inline bool mark_failed_objexts_alloc(struct slab *slab)
{
- slab->obj_exts = OBJEXTS_ALLOC_FAIL;
+ return cmpxchg(&slab->obj_exts, 0, OBJEXTS_ALLOC_FAIL) == 0;
}
static inline void handle_failed_objexts_alloc(unsigned long obj_exts,
@@ -2076,7 +2076,7 @@ static inline void handle_failed_objexts_alloc(unsigned long obj_exts,
#else /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */
static inline void mark_objexts_empty(struct slabobj_ext *obj_exts) {}
-static inline void mark_failed_objexts_alloc(struct slab *slab) {}
+static inline bool mark_failed_objexts_alloc(struct slab *slab) { return false; }
static inline void handle_failed_objexts_alloc(unsigned long obj_exts,
struct slabobj_ext *vec, unsigned int objects) {}
@@ -2124,8 +2124,14 @@ int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
slab_nid(slab));
}
if (!vec) {
- /* Mark vectors which failed to allocate */
- mark_failed_objexts_alloc(slab);
+ /*
+ * Try to mark the slab as having failed the vector allocation.
+ * If the cmpxchg loses, a racing task may already have installed
+ * a valid vector, in which case the allocation is effectively done.
+ */
+ if (!mark_failed_objexts_alloc(slab) &&
+ slab_obj_exts(slab))
+ return 0;
return -ENOMEM;
}
@@ -2136,6 +2142,7 @@ int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
#ifdef CONFIG_MEMCG
new_exts |= MEMCG_DATA_OBJEXTS;
#endif
+retry:
old_exts = READ_ONCE(slab->obj_exts);
handle_failed_objexts_alloc(old_exts, vec, objects);
if (new_slab) {
@@ -2145,8 +2152,7 @@ int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
* be simply assigned.
*/
slab->obj_exts = new_exts;
- } else if ((old_exts & ~OBJEXTS_FLAGS_MASK) ||
- cmpxchg(&slab->obj_exts, old_exts, new_exts) != old_exts) {
+ } else if (old_exts & ~OBJEXTS_FLAGS_MASK) {
/*
* If the slab is already in use, somebody can allocate and
* assign slabobj_exts in parallel. In this case the existing
@@ -2158,6 +2164,9 @@ int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
else
kfree(vec);
return 0;
+ } else if (cmpxchg(&slab->obj_exts, old_exts, new_exts) != old_exts) {
+ /* Retry if a racing thread changed slab->obj_exts from under us. */
+ goto retry;
}
if (allow_spin)
@@ -3419,7 +3428,6 @@ static void *alloc_single_from_new_slab(struct kmem_cache *s, struct slab *slab,
if (!allow_spin && !spin_trylock_irqsave(&n->list_lock, flags)) {
/* Unlucky, discard newly allocated slab */
- slab->frozen = 1;
defer_deactivate_slab(slab, NULL);
return NULL;
}
@@ -6468,9 +6476,12 @@ static void free_deferred_objects(struct irq_work *work)
struct slab *slab = container_of(pos, struct slab, llnode);
#ifdef CONFIG_SLUB_TINY
- discard_slab(slab->slab_cache, slab);
+ free_slab(slab->slab_cache, slab);
#else
- deactivate_slab(slab->slab_cache, slab, slab->flush_freelist);
+ if (slab->frozen)
+ deactivate_slab(slab->slab_cache, slab, slab->flush_freelist);
+ else
+ free_slab(slab->slab_cache, slab);
#endif
}
}
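Two races are closed above: the failure marker is now installed with cmpxchg() so it can no longer clobber a vector another task just published, and the assignment path retries instead of treating a lost cmpxchg() as somebody else's success. A sketch of the publish-or-detect idiom, with made-up names:

/* Publish a value only if the slot is still empty; report who won. */
static bool publish_once(unsigned long *slot, unsigned long newval)
{
	/* returns true only for the thread that installed newval */
	return cmpxchg(slot, 0, newval) == 0;
}

/* Update a live slot, retrying while other writers race with us. */
static void update_flags(unsigned long *slot, unsigned long flags)
{
	unsigned long old;

	do {
		old = READ_ONCE(*slot);
		/* recompute the new value from the freshest snapshot */
	} while (cmpxchg(slot, old, old | flags) != old);
}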
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
index c84420cb410d..a662408ad867 100644
--- a/net/batman-adv/originator.c
+++ b/net/batman-adv/originator.c
@@ -763,11 +763,16 @@ int batadv_hardif_neigh_dump(struct sk_buff *msg, struct netlink_callback *cb)
bat_priv = netdev_priv(mesh_iface);
primary_if = batadv_primary_if_get_selected(bat_priv);
- if (!primary_if || primary_if->if_status != BATADV_IF_ACTIVE) {
+ if (!primary_if) {
ret = -ENOENT;
goto out_put_mesh_iface;
}
+ if (primary_if->if_status != BATADV_IF_ACTIVE) {
+ ret = -ENOENT;
+ goto out_put_primary_if;
+ }
+
hard_iface = batadv_netlink_get_hardif(bat_priv, cb);
if (IS_ERR(hard_iface) && PTR_ERR(hard_iface) != -ENONET) {
ret = PTR_ERR(hard_iface);
@@ -1327,11 +1332,16 @@ int batadv_orig_dump(struct sk_buff *msg, struct netlink_callback *cb)
bat_priv = netdev_priv(mesh_iface);
primary_if = batadv_primary_if_get_selected(bat_priv);
- if (!primary_if || primary_if->if_status != BATADV_IF_ACTIVE) {
+ if (!primary_if) {
ret = -ENOENT;
goto out_put_mesh_iface;
}
+ if (primary_if->if_status != BATADV_IF_ACTIVE) {
+ ret = -ENOENT;
+ goto out_put_primary_if;
+ }
+
hard_iface = batadv_netlink_get_hardif(bat_priv, cb);
if (IS_ERR(hard_iface) && PTR_ERR(hard_iface) != -ENONET) {
ret = PTR_ERR(hard_iface);
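The split above restores the standard unwind convention: one goto label per acquired reference, so each failed check releases exactly what is already held (the old combined check leaked primary_if when the interface was inactive). A sketch with illustrative names:

/* One unwind label per reference; all names here are illustrative. */
static int dump_example(void)
{
	struct iface *mesh, *primary;
	int ret = 0;

	mesh = iface_get_mesh();
	if (!mesh)
		return -ENOENT;		/* nothing held yet */

	primary = iface_get_primary(mesh);
	if (!primary) {
		ret = -ENOENT;
		goto out_put_mesh;	/* only mesh is held */
	}

	if (!iface_is_active(primary)) {
		ret = -ENOENT;
		goto out_put_primary;	/* both references held */
	}

	/* ... emit the dump ... */

out_put_primary:
	iface_put(primary);
out_put_mesh:
	iface_put(mesh);
	return ret;
}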
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 111f0e37b672..c5dedf39a129 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -843,6 +843,13 @@ static void bis_cleanup(struct hci_conn *conn)
if (bis)
return;
+ bis = hci_conn_hash_lookup_big_state(hdev,
+ conn->iso_qos.bcast.big,
+ BT_OPEN,
+ HCI_ROLE_MASTER);
+ if (bis)
+ return;
+
hci_le_terminate_big(hdev, conn);
} else {
hci_le_big_terminate(hdev, conn->iso_qos.bcast.big,
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index d790b0d4eb9a..d37db364acf7 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -1607,8 +1607,10 @@ static u8 hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev, void *data,
hci_dev_set_flag(hdev, HCI_LE_ADV);
- if (adv && !adv->periodic)
+ if (adv)
adv->enabled = true;
+ else if (!set->handle)
+ hci_dev_set_flag(hdev, HCI_LE_ADV_0);
conn = hci_lookup_le_connect(hdev);
if (conn)
@@ -1619,6 +1621,8 @@ static u8 hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev, void *data,
if (cp->num_of_sets) {
if (adv)
adv->enabled = false;
+ else if (!set->handle)
+ hci_dev_clear_flag(hdev, HCI_LE_ADV_0);
/* If just one instance was disabled check if there are
* any other instance enabled before clearing HCI_LE_ADV
@@ -3959,8 +3963,11 @@ static u8 hci_cc_le_set_per_adv_enable(struct hci_dev *hdev, void *data,
hci_dev_set_flag(hdev, HCI_LE_PER_ADV);
if (adv)
- adv->enabled = true;
+ adv->periodic_enabled = true;
} else {
+ if (adv)
+ adv->periodic_enabled = false;
+
/* If just one instance was disabled check if there are
* any other instance enabled before clearing HCI_LE_PER_ADV.
* The current periodic adv instance will be marked as
diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
index eefdb6134ca5..73fc41b68b68 100644
--- a/net/bluetooth/hci_sync.c
+++ b/net/bluetooth/hci_sync.c
@@ -863,11 +863,17 @@ bool hci_cmd_sync_dequeue_once(struct hci_dev *hdev,
{
struct hci_cmd_sync_work_entry *entry;
- entry = hci_cmd_sync_lookup_entry(hdev, func, data, destroy);
- if (!entry)
+ mutex_lock(&hdev->cmd_sync_work_lock);
+
+ entry = _hci_cmd_sync_lookup_entry(hdev, func, data, destroy);
+ if (!entry) {
+ mutex_unlock(&hdev->cmd_sync_work_lock);
return false;
+ }
- hci_cmd_sync_cancel_entry(hdev, entry);
+ _hci_cmd_sync_cancel_entry(hdev, entry, -ECANCELED);
+
+ mutex_unlock(&hdev->cmd_sync_work_lock);
return true;
}
@@ -1601,7 +1607,7 @@ int hci_disable_per_advertising_sync(struct hci_dev *hdev, u8 instance)
/* If periodic advertising already disabled there is nothing to do. */
adv = hci_find_adv_instance(hdev, instance);
- if (!adv || !adv->periodic || !adv->enabled)
+ if (!adv || !adv->periodic_enabled)
return 0;
memset(&cp, 0, sizeof(cp));
@@ -1666,7 +1672,7 @@ static int hci_enable_per_advertising_sync(struct hci_dev *hdev, u8 instance)
/* If periodic advertising already enabled there is nothing to do. */
adv = hci_find_adv_instance(hdev, instance);
- if (adv && adv->periodic && adv->enabled)
+ if (adv && adv->periodic_enabled)
return 0;
memset(&cp, 0, sizeof(cp));
@@ -2600,9 +2606,8 @@ static int hci_resume_advertising_sync(struct hci_dev *hdev)
/* If current advertising instance is set to instance 0x00
* then we need to re-enable it.
*/
- if (!hdev->cur_adv_instance)
- err = hci_enable_ext_advertising_sync(hdev,
- hdev->cur_adv_instance);
+ if (hci_dev_test_and_clear_flag(hdev, HCI_LE_ADV_0))
+ err = hci_enable_ext_advertising_sync(hdev, 0x00);
} else {
/* Schedule for most recent instance to be restarted and begin
* the software rotation loop
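The dequeue fix above widens the critical section: the entry is looked up and cancelled under a single hold of cmd_sync_work_lock, where the old lookup-unlock-cancel sequence allowed the entry to be freed in between. A generic sketch of the same lookup-and-consume pattern, with hypothetical types:

struct entry { struct list_head node; int id; };
struct queue { struct mutex lock; struct list_head list; };

/* Find and consume a matching entry without dropping the lock. */
static bool dequeue_once(struct queue *q, int id)
{
	struct entry *e, *tmp;
	bool found = false;

	mutex_lock(&q->lock);
	list_for_each_entry_safe(e, tmp, &q->list, node) {
		if (e->id == id) {
			list_del(&e->node);	/* still protected */
			kfree(e);
			found = true;
			break;
		}
	}
	mutex_unlock(&q->lock);
	return found;
}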
diff --git a/net/bluetooth/iso.c b/net/bluetooth/iso.c
index 9b263d061e05..3d98cb6291da 100644
--- a/net/bluetooth/iso.c
+++ b/net/bluetooth/iso.c
@@ -2032,7 +2032,7 @@ static void iso_conn_ready(struct iso_conn *conn)
*/
if (!bacmp(&hcon->dst, BDADDR_ANY)) {
bacpy(&hcon->dst, &iso_pi(parent)->dst);
- hcon->dst_type = iso_pi(parent)->dst_type;
+ hcon->dst_type = le_addr_type(iso_pi(parent)->dst_type);
}
if (test_bit(HCI_CONN_PA_SYNC, &hcon->flags)) {
@@ -2046,7 +2046,13 @@ static void iso_conn_ready(struct iso_conn *conn)
}
bacpy(&iso_pi(sk)->dst, &hcon->dst);
- iso_pi(sk)->dst_type = hcon->dst_type;
+
+ /* Convert from HCI to three-value type */
+ if (hcon->dst_type == ADDR_LE_DEV_PUBLIC)
+ iso_pi(sk)->dst_type = BDADDR_LE_PUBLIC;
+ else
+ iso_pi(sk)->dst_type = BDADDR_LE_RANDOM;
+
iso_pi(sk)->sync_handle = iso_pi(parent)->sync_handle;
memcpy(iso_pi(sk)->base, iso_pi(parent)->base, iso_pi(parent)->base_len);
iso_pi(sk)->base_len = iso_pi(parent)->base_len;
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 805c752ac0a9..d08320380ad6 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -282,7 +282,7 @@ static void __set_retrans_timer(struct l2cap_chan *chan)
if (!delayed_work_pending(&chan->monitor_timer) &&
chan->retrans_timeout) {
l2cap_set_timer(chan, &chan->retrans_timer,
- secs_to_jiffies(chan->retrans_timeout));
+ msecs_to_jiffies(chan->retrans_timeout));
}
}
@@ -291,7 +291,7 @@ static void __set_monitor_timer(struct l2cap_chan *chan)
__clear_retrans_timer(chan);
if (chan->monitor_timeout) {
l2cap_set_timer(chan, &chan->monitor_timer,
- secs_to_jiffies(chan->monitor_timeout));
+ msecs_to_jiffies(chan->monitor_timeout));
}
}
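The ERTM retransmission and monitor timeouts are carried in milliseconds, so converting them with secs_to_jiffies() armed the timers a thousand times too long. A worked example, assuming HZ == 1000:

/*
 * Worked example, assuming HZ == 1000 and a 2000 ms retransmission
 * timeout:
 *
 *   msecs_to_jiffies(2000) ==    2000 jiffies  (2 s, correct)
 *   secs_to_jiffies(2000)  == 2000000 jiffies  (~33 min, the old bug)
 */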
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index a3d16eece0d2..24e335e3a727 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -2175,19 +2175,24 @@ static void set_mesh_complete(struct hci_dev *hdev, void *data, int err)
sk = cmd->sk;
if (status) {
+ mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
+ status);
mgmt_pending_foreach(MGMT_OP_SET_MESH_RECEIVER, hdev, true,
cmd_status_rsp, &status);
- return;
+ goto done;
}
- mgmt_pending_remove(cmd);
mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, 0, NULL, 0);
+
+done:
+ mgmt_pending_free(cmd);
}
static int set_mesh_sync(struct hci_dev *hdev, void *data)
{
struct mgmt_pending_cmd *cmd = data;
- struct mgmt_cp_set_mesh cp;
+ DEFINE_FLEX(struct mgmt_cp_set_mesh, cp, ad_types, num_ad_types,
+ sizeof(hdev->mesh_ad_types));
size_t len;
mutex_lock(&hdev->mgmt_pending_lock);
@@ -2197,27 +2202,26 @@ static int set_mesh_sync(struct hci_dev *hdev, void *data)
return -ECANCELED;
}
- memcpy(&cp, cmd->param, sizeof(cp));
+ len = cmd->param_len;
+ memcpy(cp, cmd->param, min(__struct_size(cp), len));
mutex_unlock(&hdev->mgmt_pending_lock);
- len = cmd->param_len;
-
memset(hdev->mesh_ad_types, 0, sizeof(hdev->mesh_ad_types));
- if (cp.enable)
+ if (cp->enable)
hci_dev_set_flag(hdev, HCI_MESH);
else
hci_dev_clear_flag(hdev, HCI_MESH);
- hdev->le_scan_interval = __le16_to_cpu(cp.period);
- hdev->le_scan_window = __le16_to_cpu(cp.window);
+ hdev->le_scan_interval = __le16_to_cpu(cp->period);
+ hdev->le_scan_window = __le16_to_cpu(cp->window);
- len -= sizeof(cp);
+ len -= sizeof(struct mgmt_cp_set_mesh);
/* If filters don't fit, forward all adv pkts */
if (len <= sizeof(hdev->mesh_ad_types))
- memcpy(hdev->mesh_ad_types, cp.ad_types, len);
+ memcpy(hdev->mesh_ad_types, cp->ad_types, len);
hci_update_passive_scan_sync(hdev);
return 0;
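set_mesh_sync previously copied a variable-length command into a fixed struct mgmt_cp_set_mesh, losing the trailing ad_types array. DEFINE_FLEX() declares an on-stack instance with bounded storage for the flexible array, and __struct_size() provides the clamp for the copy. A sketch with a made-up message type:

/* Sketch of the DEFINE_FLEX pattern with a hypothetical flex struct. */
struct msg {
	u16 period;
	u8  num_types;
	u8  types[];		/* flexible array, bounded by num_types */
};

static void handle(const void *param, size_t param_len)
{
	/* on-stack msg with room for up to 16 trailing bytes */
	DEFINE_FLEX(struct msg, m, types, num_types, 16);

	/* never copy more than the declared on-stack storage */
	memcpy(m, param, min(__struct_size(m), param_len));
	/* ... use m->period, m->types[] ... */
}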
diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
index 376ce6de84be..b783526ab588 100644
--- a/net/bluetooth/rfcomm/tty.c
+++ b/net/bluetooth/rfcomm/tty.c
@@ -643,8 +643,8 @@ static void rfcomm_dev_modem_status(struct rfcomm_dlc *dlc, u8 v24_sig)
tty_port_tty_hangup(&dev->port, true);
dev->modem_status =
- ((v24_sig & RFCOMM_V24_RTC) ? (TIOCM_DSR | TIOCM_DTR) : 0) |
- ((v24_sig & RFCOMM_V24_RTR) ? (TIOCM_RTS | TIOCM_CTS) : 0) |
+ ((v24_sig & RFCOMM_V24_RTC) ? TIOCM_DSR : 0) |
+ ((v24_sig & RFCOMM_V24_RTR) ? TIOCM_CTS : 0) |
((v24_sig & RFCOMM_V24_IC) ? TIOCM_RI : 0) |
((v24_sig & RFCOMM_V24_DV) ? TIOCM_CD : 0);
}
@@ -1055,10 +1055,14 @@ static void rfcomm_tty_hangup(struct tty_struct *tty)
static int rfcomm_tty_tiocmget(struct tty_struct *tty)
{
struct rfcomm_dev *dev = tty->driver_data;
+ struct rfcomm_dlc *dlc = dev->dlc;
+ u8 v24_sig;
BT_DBG("tty %p dev %p", tty, dev);
- return dev->modem_status;
+ rfcomm_dlc_get_modem_status(dlc, &v24_sig);
+
+ return (v24_sig & (TIOCM_DTR | TIOCM_RTS)) | dev->modem_status;
}
static int rfcomm_tty_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear)
@@ -1071,23 +1075,15 @@ static int rfcomm_tty_tiocmset(struct tty_struct *tty, unsigned int set, unsigne
rfcomm_dlc_get_modem_status(dlc, &v24_sig);
- if (set & TIOCM_DSR || set & TIOCM_DTR)
+ if (set & TIOCM_DTR)
v24_sig |= RFCOMM_V24_RTC;
- if (set & TIOCM_RTS || set & TIOCM_CTS)
+ if (set & TIOCM_RTS)
v24_sig |= RFCOMM_V24_RTR;
- if (set & TIOCM_RI)
- v24_sig |= RFCOMM_V24_IC;
- if (set & TIOCM_CD)
- v24_sig |= RFCOMM_V24_DV;
- if (clear & TIOCM_DSR || clear & TIOCM_DTR)
+ if (clear & TIOCM_DTR)
v24_sig &= ~RFCOMM_V24_RTC;
- if (clear & TIOCM_RTS || clear & TIOCM_CTS)
+ if (clear & TIOCM_RTS)
v24_sig &= ~RFCOMM_V24_RTR;
- if (clear & TIOCM_RI)
- v24_sig &= ~RFCOMM_V24_IC;
- if (clear & TIOCM_CD)
- v24_sig &= ~RFCOMM_V24_DV;
rfcomm_dlc_set_modem_status(dlc, v24_sig);
diff --git a/net/core/datagram.c b/net/core/datagram.c
index cb4b9ef2e4e3..c285c6465923 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -920,21 +920,22 @@ fault:
EXPORT_SYMBOL(skb_copy_and_csum_datagram_msg);
/**
- * datagram_poll - generic datagram poll
+ * datagram_poll_queue - same as datagram_poll, but on a specific receive
+ * queue
* @file: file struct
* @sock: socket
* @wait: poll table
+ * @rcv_queue: receive queue to poll
*
- * Datagram poll: Again totally generic. This also handles
- * sequenced packet sockets providing the socket receive queue
- * is only ever holding data ready to receive.
+ * Performs polling on the given receive queue, handling shutdown, error,
+ * and connection state. This is useful for protocols that deliver
+ * userspace-bound packets through a custom queue instead of
+ * sk->sk_receive_queue.
*
- * Note: when you *don't* use this routine for this protocol,
- * and you use a different write policy from sock_writeable()
- * then please supply your own write_space callback.
+ * Return: poll bitmask indicating the socket's current state
*/
-__poll_t datagram_poll(struct file *file, struct socket *sock,
- poll_table *wait)
+__poll_t datagram_poll_queue(struct file *file, struct socket *sock,
+ poll_table *wait, struct sk_buff_head *rcv_queue)
{
struct sock *sk = sock->sk;
__poll_t mask;
@@ -956,7 +957,7 @@ __poll_t datagram_poll(struct file *file, struct socket *sock,
mask |= EPOLLHUP;
/* readable? */
- if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
+ if (!skb_queue_empty_lockless(rcv_queue))
mask |= EPOLLIN | EPOLLRDNORM;
/* Connection-based need to check for termination and startup */
@@ -978,4 +979,27 @@ __poll_t datagram_poll(struct file *file, struct socket *sock,
return mask;
}
+EXPORT_SYMBOL(datagram_poll_queue);
+
+/**
+ * datagram_poll - generic datagram poll
+ * @file: file struct
+ * @sock: socket
+ * @wait: poll table
+ *
+ * Datagram poll: Again totally generic. This also handles
+ * sequenced packet sockets providing the socket receive queue
+ * is only ever holding data ready to receive.
+ *
+ * Note: when you *don't* use this routine for this protocol,
+ * and you use a different write policy from sock_writeable()
+ * then please supply your own write_space callback.
+ *
+ * Return: poll bitmask indicating the socket's current state
+ */
+__poll_t datagram_poll(struct file *file, struct socket *sock, poll_table *wait)
+{
+ return datagram_poll_queue(file, sock, wait,
+ &sock->sk->sk_receive_queue);
+}
EXPORT_SYMBOL(datagram_poll);
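With the queue parameterized, any protocol that delivers userspace-bound skbs somewhere other than sk_receive_queue can reuse all of the generic shutdown/error/connect handling; datagram_poll() itself becomes a thin wrapper. A sketch of a consumer, with a hypothetical my_ctx holding the private queue (espintcp below is converted to exactly this shape, dropping its manual EPOLLIN check):

/* Sketch: a protocol with a private queue reuses the generic poll. */
static __poll_t my_proto_poll(struct file *file, struct socket *sock,
			      poll_table *wait)
{
	struct my_ctx *ctx = my_getctx(sock->sk);	/* hypothetical */

	/* shutdown, error and connection state handling come for free */
	return datagram_poll_queue(file, sock, wait, &ctx->my_queue);
}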
diff --git a/net/core/devmem.c b/net/core/devmem.c
index d9de31a6cc7f..1d04754bc756 100644
--- a/net/core/devmem.c
+++ b/net/core/devmem.c
@@ -17,6 +17,7 @@
#include <net/page_pool/helpers.h>
#include <net/page_pool/memory_provider.h>
#include <net/sock.h>
+#include <net/tcp.h>
#include <trace/events/page_pool.h>
#include "devmem.h"
@@ -357,7 +358,8 @@ struct net_devmem_dmabuf_binding *net_devmem_get_binding(struct sock *sk,
unsigned int dmabuf_id)
{
struct net_devmem_dmabuf_binding *binding;
- struct dst_entry *dst = __sk_dst_get(sk);
+ struct net_device *dst_dev;
+ struct dst_entry *dst;
int err = 0;
binding = net_devmem_lookup_dmabuf(dmabuf_id);
@@ -366,16 +368,35 @@ struct net_devmem_dmabuf_binding *net_devmem_get_binding(struct sock *sk,
goto out_err;
}
+ rcu_read_lock();
+ dst = __sk_dst_get(sk);
+ /* If dst is NULL (route expired), attempt to rebuild it. */
+ if (unlikely(!dst)) {
+ if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk)) {
+ err = -EHOSTUNREACH;
+ goto out_unlock;
+ }
+ dst = __sk_dst_get(sk);
+ if (unlikely(!dst)) {
+ err = -ENODEV;
+ goto out_unlock;
+ }
+ }
+
/* The dma-addrs in this binding are only reachable to the corresponding
* net_device.
*/
- if (!dst || !dst->dev || dst->dev->ifindex != binding->dev->ifindex) {
+ dst_dev = dst_dev_rcu(dst);
+ if (unlikely(!dst_dev) || unlikely(dst_dev != binding->dev)) {
err = -ENODEV;
- goto out_err;
+ goto out_unlock;
}
+ rcu_read_unlock();
return binding;
+out_unlock:
+ rcu_read_unlock();
out_err:
if (binding)
net_devmem_dmabuf_binding_put(binding);
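__sk_dst_get() can legitimately return NULL once the cached route expires, so the check above first rebuilds the header (which revalidates the route) and then compares the dst's device against the binding's device entirely under rcu_read_lock(), since the device pointer is only stable there. A condensed sketch of that pattern:

/* Sketch of the RCU-safe dst revalidation used above. */
static int check_dev(struct sock *sk, struct net_device *want)
{
	struct dst_entry *dst;
	int err = 0;

	rcu_read_lock();
	dst = __sk_dst_get(sk);
	if (!dst) {
		/* route expired: ask the af_ops to rebuild it */
		if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk)) {
			err = -EHOSTUNREACH;
			goto out;
		}
		dst = __sk_dst_get(sk);
		if (!dst) {
			err = -ENODEV;
			goto out;
		}
	}
	if (dst_dev_rcu(dst) != want)	/* dev only valid under RCU */
		err = -ENODEV;
out:
	rcu_read_unlock();
	return err;
}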
diff --git a/net/core/gro.c b/net/core/gro.c
index 5ba4504cfd28..76f9c3712422 100644
--- a/net/core/gro.c
+++ b/net/core/gro.c
@@ -639,6 +639,8 @@ EXPORT_SYMBOL(gro_receive_skb);
static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
{
+ struct skb_shared_info *shinfo;
+
if (unlikely(skb->pfmemalloc)) {
consume_skb(skb);
return;
@@ -655,8 +657,12 @@ static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
skb->encapsulation = 0;
skb->ip_summed = CHECKSUM_NONE;
- skb_shinfo(skb)->gso_type = 0;
- skb_shinfo(skb)->gso_size = 0;
+
+ shinfo = skb_shinfo(skb);
+ shinfo->gso_type = 0;
+ shinfo->gso_size = 0;
+ shinfo->hwtstamps.hwtstamp = 0;
+
if (unlikely(skb->slow_gro)) {
skb_orphan(skb);
skb_ext_reset(skb);
diff --git a/net/core/gro_cells.c b/net/core/gro_cells.c
index b43911562f4d..fd57b845de33 100644
--- a/net/core/gro_cells.c
+++ b/net/core/gro_cells.c
@@ -43,12 +43,11 @@ drop:
if (skb_queue_len(&cell->napi_skbs) == 1)
napi_schedule(&cell->napi);
- if (have_bh_lock)
- local_unlock_nested_bh(&gcells->cells->bh_lock);
-
res = NET_RX_SUCCESS;
unlock:
+ if (have_bh_lock)
+ local_unlock_nested_bh(&gcells->cells->bh_lock);
rcu_read_unlock();
return res;
}
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 8040ff7c356e..576d5ec3bb36 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -4715,9 +4715,6 @@ static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
int err;
u16 vid;
- if (!netlink_capable(skb, CAP_NET_ADMIN))
- return -EPERM;
-
if (!del_bulk) {
err = nlmsg_parse_deprecated(nlh, sizeof(*ndm), tb, NDA_MAX,
NULL, extack);
diff --git a/net/hsr/hsr_netlink.c b/net/hsr/hsr_netlink.c
index b120470246cc..c96b63adf96f 100644
--- a/net/hsr/hsr_netlink.c
+++ b/net/hsr/hsr_netlink.c
@@ -34,12 +34,18 @@ static int hsr_newlink(struct net_device *dev,
struct netlink_ext_ack *extack)
{
struct net *link_net = rtnl_newlink_link_net(params);
+ struct net_device *link[2], *interlink = NULL;
struct nlattr **data = params->data;
enum hsr_version proto_version;
unsigned char multicast_spec;
u8 proto = HSR_PROTOCOL_HSR;
- struct net_device *link[2], *interlink = NULL;
+ if (!net_eq(link_net, dev_net(dev))) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "HSR slaves/interlink must be on the same net namespace than HSR link");
+ return -EINVAL;
+ }
+
if (!data) {
NL_SET_ERR_MSG_MOD(extack, "No slave devices specified");
return -EINVAL;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 31ea5af49f2d..e4a979b75cc6 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -891,18 +891,27 @@ static inline void tcp_rcv_rtt_measure_ts(struct sock *sk,
}
}
-void tcp_rcvbuf_grow(struct sock *sk)
+void tcp_rcvbuf_grow(struct sock *sk, u32 newval)
{
const struct net *net = sock_net(sk);
struct tcp_sock *tp = tcp_sk(sk);
- int rcvwin, rcvbuf, cap;
+ u32 rcvwin, rcvbuf, cap, oldval;
+ u64 grow;
+
+ oldval = tp->rcvq_space.space;
+ tp->rcvq_space.space = newval;
if (!READ_ONCE(net->ipv4.sysctl_tcp_moderate_rcvbuf) ||
(sk->sk_userlocks & SOCK_RCVBUF_LOCK))
return;
+ /* DRS is always one RTT late. */
+ rcvwin = newval << 1;
+
/* slow start: allow the sender to double its rate. */
- rcvwin = tp->rcvq_space.space << 1;
+ grow = (u64)rcvwin * (newval - oldval);
+ do_div(grow, oldval);
+ rcvwin += grow << 1;
if (!RB_EMPTY_ROOT(&tp->out_of_order_queue))
rcvwin += TCP_SKB_CB(tp->ooo_last_skb)->end_seq - tp->rcv_nxt;
@@ -943,9 +952,7 @@ void tcp_rcv_space_adjust(struct sock *sk)
trace_tcp_rcvbuf_grow(sk, time);
- tp->rcvq_space.space = copied;
-
- tcp_rcvbuf_grow(sk);
+ tcp_rcvbuf_grow(sk, copied);
new_measure:
tp->rcvq_space.seq = tp->copied_seq;
@@ -5270,7 +5277,7 @@ end:
}
/* do not grow rcvbuf for not-yet-accepted or orphaned sockets. */
if (sk->sk_socket)
- tcp_rcvbuf_grow(sk);
+ tcp_rcvbuf_grow(sk, tp->rcvq_space.space);
}
static int __must_check tcp_queue_rcv(struct sock *sk, struct sk_buff *skb,
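The new helper folds the rcvq_space update into the grow decision: the window starts at twice the freshly measured value (DRS lags one RTT behind), then adds a slow-start term proportional to the relative growth since the last measurement. A worked example with illustrative numbers:

/*
 * Worked example: oldval = 64 KB measured last RTT, newval = 96 KB now.
 *
 *   rcvwin  = newval << 1                       = 192 KB
 *   grow    = rcvwin * (newval - oldval) / oldval
 *           = 192 KB * 32 / 64                  =  96 KB
 *   rcvwin += grow << 1                         = 384 KB
 *
 * i.e. a sender growing 50% per RTT is offered room to keep doubling.
 */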
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index d9aca1c3c097..c52b0456039d 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -1876,6 +1876,9 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev,
link_conf->nontransmitted = false;
link_conf->ema_ap = false;
link_conf->bssid_indicator = 0;
+ link_conf->fils_discovery.min_interval = 0;
+ link_conf->fils_discovery.max_interval = 0;
+ link_conf->unsol_bcast_probe_resp_interval = 0;
__sta_info_flush(sdata, true, link_id, NULL);
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index b14e9cd9713f..d5da7ccea66e 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -508,11 +508,16 @@ static int ieee80211_key_replace(struct ieee80211_sub_if_data *sdata,
ret = ieee80211_key_enable_hw_accel(new);
}
} else {
- if (!new->local->wowlan)
+ if (!new->local->wowlan) {
ret = ieee80211_key_enable_hw_accel(new);
- else if (link_id < 0 || !sdata->vif.active_links ||
- BIT(link_id) & sdata->vif.active_links)
+ } else if (link_id < 0 || !sdata->vif.active_links ||
+ BIT(link_id) & sdata->vif.active_links) {
new->flags |= KEY_FLAG_UPLOADED_TO_HARDWARE;
+ if (!(new->conf.flags & (IEEE80211_KEY_FLAG_GENERATE_MMIC |
+ IEEE80211_KEY_FLAG_PUT_MIC_SPACE |
+ IEEE80211_KEY_FLAG_RESERVE_TAILROOM)))
+ decrease_tailroom_need_count(sdata, 1);
+ }
}
if (ret)
diff --git a/net/mptcp/mib.c b/net/mptcp/mib.c
index 6003e47c770a..171643815076 100644
--- a/net/mptcp/mib.c
+++ b/net/mptcp/mib.c
@@ -85,6 +85,7 @@ static const struct snmp_mib mptcp_snmp_list[] = {
SNMP_MIB_ITEM("DssFallback", MPTCP_MIB_DSSFALLBACK),
SNMP_MIB_ITEM("SimultConnectFallback", MPTCP_MIB_SIMULTCONNFALLBACK),
SNMP_MIB_ITEM("FallbackFailed", MPTCP_MIB_FALLBACKFAILED),
+ SNMP_MIB_ITEM("WinProbe", MPTCP_MIB_WINPROBE),
};
/* mptcp_mib_alloc - allocate percpu mib counters
diff --git a/net/mptcp/mib.h b/net/mptcp/mib.h
index 309bac6fea32..a1d3e9369fbb 100644
--- a/net/mptcp/mib.h
+++ b/net/mptcp/mib.h
@@ -88,6 +88,7 @@ enum linux_mptcp_mib_field {
MPTCP_MIB_DSSFALLBACK, /* Bad or missing DSS */
MPTCP_MIB_SIMULTCONNFALLBACK, /* Simultaneous connect */
MPTCP_MIB_FALLBACKFAILED, /* Can't fallback due to msk status */
+ MPTCP_MIB_WINPROBE, /* MPTCP-level zero window probe */
__MPTCP_MIB_MAX
};
diff --git a/net/mptcp/pm_kernel.c b/net/mptcp/pm_kernel.c
index e0f44dc232aa..2ae95476dba3 100644
--- a/net/mptcp/pm_kernel.c
+++ b/net/mptcp/pm_kernel.c
@@ -370,6 +370,10 @@ static void mptcp_pm_create_subflow_or_signal_addr(struct mptcp_sock *msk)
}
subflow:
+ /* No need to try establishing subflows to remote id0 if not allowed */
+ if (mptcp_pm_add_addr_c_flag_case(msk))
+ goto exit;
+
/* check if should create a new subflow */
while (msk->pm.local_addr_used < endp_subflow_max &&
msk->pm.extra_subflows < limit_extra_subflows) {
@@ -401,6 +405,8 @@ subflow:
__mptcp_subflow_connect(sk, &local, &addrs[i]);
spin_lock_bh(&msk->pm.lock);
}
+
+exit:
mptcp_pm_nl_check_work_pending(msk);
}
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index 0292162a14ee..2d6b8de35c44 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -194,17 +194,26 @@ static bool mptcp_ooo_try_coalesce(struct mptcp_sock *msk, struct sk_buff *to,
* - mptcp does not maintain a msk-level window clamp
* - returns true when the receive buffer is actually updated
*/
-static bool mptcp_rcvbuf_grow(struct sock *sk)
+static bool mptcp_rcvbuf_grow(struct sock *sk, u32 newval)
{
struct mptcp_sock *msk = mptcp_sk(sk);
const struct net *net = sock_net(sk);
- int rcvwin, rcvbuf, cap;
+ u32 rcvwin, rcvbuf, cap, oldval;
+ u64 grow;
+ oldval = msk->rcvq_space.space;
+ msk->rcvq_space.space = newval;
if (!READ_ONCE(net->ipv4.sysctl_tcp_moderate_rcvbuf) ||
(sk->sk_userlocks & SOCK_RCVBUF_LOCK))
return false;
- rcvwin = msk->rcvq_space.space << 1;
+ /* DRS is always one RTT late. */
+ rcvwin = newval << 1;
+
+ /* slow start: allow the sender to double its rate. */
+ grow = (u64)rcvwin * (newval - oldval);
+ do_div(grow, oldval);
+ rcvwin += grow << 1;
if (!RB_EMPTY_ROOT(&msk->out_of_order_queue))
rcvwin += MPTCP_SKB_CB(msk->ooo_last_skb)->end_seq - msk->ack_seq;
@@ -334,7 +343,7 @@ end:
skb_set_owner_r(skb, sk);
/* do not grow rcvbuf for not-yet-accepted or orphaned sockets. */
if (sk->sk_socket)
- mptcp_rcvbuf_grow(sk);
+ mptcp_rcvbuf_grow(sk, msk->rcvq_space.space);
}
static void mptcp_init_skb(struct sock *ssk, struct sk_buff *skb, int offset,
@@ -998,7 +1007,7 @@ static void __mptcp_clean_una(struct sock *sk)
if (WARN_ON_ONCE(!msk->recovery))
break;
- WRITE_ONCE(msk->first_pending, mptcp_send_next(sk));
+ msk->first_pending = mptcp_send_next(sk);
}
dfrag_clear(sk, dfrag);
@@ -1290,7 +1299,12 @@ alloc_skb:
if (copy == 0) {
u64 snd_una = READ_ONCE(msk->snd_una);
- if (snd_una != msk->snd_nxt || tcp_write_queue_tail(ssk)) {
+ /* No need for a zero-window probe if any data is pending at
+ * either the msk or ssk level; skb is the current write queue
+ * tail and can be empty at this point.
+ */
+ if (snd_una != msk->snd_nxt || skb->len ||
+ skb != tcp_send_head(ssk)) {
tcp_remove_empty_skb(ssk);
return 0;
}
@@ -1341,6 +1355,7 @@ alloc_skb:
mpext->dsn64);
if (zero_window_probe) {
+ MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_WINPROBE);
mptcp_subflow_ctx(ssk)->rel_write_seq += copy;
mpext->frozen = 1;
if (READ_ONCE(msk->csum_enabled))
@@ -1543,7 +1558,7 @@ static int __subflow_push_pending(struct sock *sk, struct sock *ssk,
mptcp_update_post_push(msk, dfrag, ret);
}
- WRITE_ONCE(msk->first_pending, mptcp_send_next(sk));
+ msk->first_pending = mptcp_send_next(sk);
if (msk->snd_burst <= 0 ||
!sk_stream_memory_free(ssk) ||
@@ -1903,7 +1918,7 @@ static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
get_page(dfrag->page);
list_add_tail(&dfrag->list, &msk->rtx_queue);
if (!msk->first_pending)
- WRITE_ONCE(msk->first_pending, dfrag);
+ msk->first_pending = dfrag;
}
pr_debug("msk=%p dfrag at seq=%llu len=%u sent=%u new=%d\n", msk,
dfrag->data_seq, dfrag->data_len, dfrag->already_sent,
@@ -1936,22 +1951,36 @@ do_error:
static void mptcp_rcv_space_adjust(struct mptcp_sock *msk, int copied);
-static int __mptcp_recvmsg_mskq(struct sock *sk,
- struct msghdr *msg,
- size_t len, int flags,
+static int __mptcp_recvmsg_mskq(struct sock *sk, struct msghdr *msg,
+ size_t len, int flags, int copied_total,
struct scm_timestamping_internal *tss,
int *cmsg_flags)
{
struct mptcp_sock *msk = mptcp_sk(sk);
struct sk_buff *skb, *tmp;
+ int total_data_len = 0;
int copied = 0;
skb_queue_walk_safe(&sk->sk_receive_queue, skb, tmp) {
- u32 offset = MPTCP_SKB_CB(skb)->offset;
+ u32 delta, offset = MPTCP_SKB_CB(skb)->offset;
u32 data_len = skb->len - offset;
- u32 count = min_t(size_t, len - copied, data_len);
+ u32 count;
int err;
+ if (flags & MSG_PEEK) {
+ /* skip already peeked skbs */
+ if (total_data_len + data_len <= copied_total) {
+ total_data_len += data_len;
+ continue;
+ }
+
+ /* skip the already peeked data in the current skb */
+ delta = copied_total - total_data_len;
+ offset += delta;
+ data_len -= delta;
+ }
+
+ count = min_t(size_t, len - copied, data_len);
if (!(flags & MSG_TRUNC)) {
err = skb_copy_datagram_msg(skb, offset, msg, count);
if (unlikely(err < 0)) {
@@ -1968,16 +1997,14 @@ static int __mptcp_recvmsg_mskq(struct sock *sk,
copied += count;
- if (count < data_len) {
- if (!(flags & MSG_PEEK)) {
+ if (!(flags & MSG_PEEK)) {
+ msk->bytes_consumed += count;
+ if (count < data_len) {
MPTCP_SKB_CB(skb)->offset += count;
MPTCP_SKB_CB(skb)->map_seq += count;
- msk->bytes_consumed += count;
+ break;
}
- break;
- }
- if (!(flags & MSG_PEEK)) {
/* avoid the indirect call, we know the destructor is sock_rfree */
skb->destructor = NULL;
skb->sk = NULL;
@@ -1985,7 +2012,6 @@ static int __mptcp_recvmsg_mskq(struct sock *sk,
sk_mem_uncharge(sk, skb->truesize);
__skb_unlink(skb, &sk->sk_receive_queue);
skb_attempt_defer_free(skb);
- msk->bytes_consumed += count;
}
if (copied >= len)
@@ -2049,9 +2075,7 @@ static void mptcp_rcv_space_adjust(struct mptcp_sock *msk, int copied)
if (msk->rcvq_space.copied <= msk->rcvq_space.space)
goto new_measure;
- msk->rcvq_space.space = msk->rcvq_space.copied;
- if (mptcp_rcvbuf_grow(sk)) {
-
+ if (mptcp_rcvbuf_grow(sk, msk->rcvq_space.copied)) {
/* Make subflows follow along. If we do not do this, we
* get drops at subflow level if skbs can't be moved to
* the mptcp rx queue fast enough (announced rcv_win can
@@ -2063,8 +2087,9 @@ static void mptcp_rcv_space_adjust(struct mptcp_sock *msk, int copied)
ssk = mptcp_subflow_tcp_sock(subflow);
slow = lock_sock_fast(ssk);
- tcp_sk(ssk)->rcvq_space.space = msk->rcvq_space.copied;
- tcp_rcvbuf_grow(ssk);
+ /* subflows can be added before tcp_init_transfer() */
+ if (tcp_sk(ssk)->rcvq_space.space)
+ tcp_rcvbuf_grow(ssk, msk->rcvq_space.copied);
unlock_sock_fast(ssk, slow);
}
}
@@ -2183,7 +2208,8 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
while (copied < len) {
int err, bytes_read;
- bytes_read = __mptcp_recvmsg_mskq(sk, msg, len - copied, flags, &tss, &cmsg_flags);
+ bytes_read = __mptcp_recvmsg_mskq(sk, msg, len - copied, flags,
+ copied, &tss, &cmsg_flags);
if (unlikely(bytes_read < 0)) {
if (!copied)
copied = bytes_read;
@@ -2874,7 +2900,7 @@ static void __mptcp_clear_xmit(struct sock *sk)
struct mptcp_sock *msk = mptcp_sk(sk);
struct mptcp_data_frag *dtmp, *dfrag;
- WRITE_ONCE(msk->first_pending, NULL);
+ msk->first_pending = NULL;
list_for_each_entry_safe(dfrag, dtmp, &msk->rtx_queue, list)
dfrag_clear(sk, dfrag);
}
@@ -3414,9 +3440,6 @@ void __mptcp_data_acked(struct sock *sk)
void __mptcp_check_push(struct sock *sk, struct sock *ssk)
{
- if (!mptcp_send_head(sk))
- return;
-
if (!sock_owned_by_user(sk))
__mptcp_subflow_push_pending(sk, ssk, false);
else
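With MSG_PEEK nothing is dequeued, so a repeated peek must skip the bytes already reported to userspace; the rewritten walk tracks a running total and offsets into the first partially-peeked skb. A simplified sketch of that skip logic, as a hypothetical helper:

/* Walk a queue, skipping bytes already returned by a previous peek. */
static size_t peek_from(struct sk_buff_head *q, size_t already_peeked,
			void *buf, size_t len)
{
	struct sk_buff *skb;
	size_t seen = 0, copied = 0;

	skb_queue_walk(q, skb) {
		size_t off = 0, avail = skb->len;

		if (seen + avail <= already_peeked) {
			seen += avail;		/* fully peeked: skip skb */
			continue;
		}
		off = already_peeked - seen;	/* partial skip here */
		avail -= off;
		seen = already_peeked;		/* copy from now on */

		avail = min(avail, len - copied);
		skb_copy_bits(skb, off, buf + copied, avail);
		copied += avail;
		if (copied == len)
			break;
	}
	return copied;
}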
diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
index 52f9cfa4ce95..379a88e14e8d 100644
--- a/net/mptcp/protocol.h
+++ b/net/mptcp/protocol.h
@@ -414,7 +414,7 @@ static inline struct mptcp_data_frag *mptcp_send_head(const struct sock *sk)
{
const struct mptcp_sock *msk = mptcp_sk(sk);
- return READ_ONCE(msk->first_pending);
+ return msk->first_pending;
}
static inline struct mptcp_data_frag *mptcp_send_next(struct sock *sk)
diff --git a/net/netfilter/nft_connlimit.c b/net/netfilter/nft_connlimit.c
index 92b984fa8175..fc35a11cdca2 100644
--- a/net/netfilter/nft_connlimit.c
+++ b/net/netfilter/nft_connlimit.c
@@ -48,7 +48,7 @@ static inline void nft_connlimit_do_eval(struct nft_connlimit *priv,
return;
}
- count = priv->list->count;
+ count = READ_ONCE(priv->list->count);
if ((count > priv->limit) ^ priv->invert) {
regs->verdict.code = NFT_BREAK;
diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c
index d526e69a2a2b..6f2ae7cad731 100644
--- a/net/netfilter/nft_ct.c
+++ b/net/netfilter/nft_ct.c
@@ -22,6 +22,7 @@
#include <net/netfilter/nf_conntrack_timeout.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_expect.h>
+#include <net/netfilter/nf_conntrack_seqadj.h>
struct nft_ct_helper_obj {
struct nf_conntrack_helper *helper4;
@@ -379,6 +380,14 @@ static bool nft_ct_tmpl_alloc_pcpu(void)
}
#endif
+static void __nft_ct_get_destroy(const struct nft_ctx *ctx, struct nft_ct *priv)
+{
+#ifdef CONFIG_NF_CONNTRACK_LABELS
+ if (priv->key == NFT_CT_LABELS)
+ nf_connlabels_put(ctx->net);
+#endif
+}
+
static int nft_ct_get_init(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nlattr * const tb[])
@@ -413,6 +422,10 @@ static int nft_ct_get_init(const struct nft_ctx *ctx,
if (tb[NFTA_CT_DIRECTION] != NULL)
return -EINVAL;
len = NF_CT_LABELS_MAX_SIZE;
+
+ err = nf_connlabels_get(ctx->net, (len * BITS_PER_BYTE) - 1);
+ if (err)
+ return err;
break;
#endif
case NFT_CT_HELPER:
@@ -494,7 +507,8 @@ static int nft_ct_get_init(const struct nft_ctx *ctx,
case IP_CT_DIR_REPLY:
break;
default:
- return -EINVAL;
+ err = -EINVAL;
+ goto err;
}
}
@@ -502,11 +516,11 @@ static int nft_ct_get_init(const struct nft_ctx *ctx,
err = nft_parse_register_store(ctx, tb[NFTA_CT_DREG], &priv->dreg, NULL,
NFT_DATA_VALUE, len);
if (err < 0)
- return err;
+ goto err;
err = nf_ct_netns_get(ctx->net, ctx->family);
if (err < 0)
- return err;
+ goto err;
if (priv->key == NFT_CT_BYTES ||
priv->key == NFT_CT_PKTS ||
@@ -514,6 +528,9 @@ static int nft_ct_get_init(const struct nft_ctx *ctx,
nf_ct_set_acct(ctx->net, true);
return 0;
+err:
+ __nft_ct_get_destroy(ctx, priv);
+ return err;
}
static void __nft_ct_set_destroy(const struct nft_ctx *ctx, struct nft_ct *priv)
@@ -626,6 +643,9 @@ err1:
static void nft_ct_get_destroy(const struct nft_ctx *ctx,
const struct nft_expr *expr)
{
+ struct nft_ct *priv = nft_expr_priv(expr);
+
+ __nft_ct_get_destroy(ctx, priv);
nf_ct_netns_put(ctx->net, ctx->family);
}
@@ -1173,6 +1193,10 @@ static void nft_ct_helper_obj_eval(struct nft_object *obj,
if (help) {
rcu_assign_pointer(help->helper, to_assign);
set_bit(IPS_HELPER_BIT, &ct->status);
+
+ if ((ct->status & IPS_NAT_MASK) && !nfct_seqadj(ct))
+ if (!nfct_seqadj_ext_add(ct))
+ regs->verdict.code = NF_DROP;
}
}
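Because get_init now takes the connlabels reference early, every later failure has to drop it; factoring the put into __nft_ct_get_destroy() keeps the init error path and the destroy path in lockstep. A sketch of that symmetry, with hypothetical label_get/label_put/register_stuff helpers:

struct thing { bool have_label; };

/* Shared teardown: used by both the init error path and destroy. */
static void thing_teardown(struct thing *t)
{
	if (t->have_label)
		label_put(t);		/* undoes label_get() below */
}

static int thing_init(struct thing *t)
{
	int err;

	err = label_get(t);		/* first resource acquired */
	if (err)
		return err;
	t->have_label = true;

	err = register_stuff(t);	/* any later step may fail */
	if (err)
		goto err;
	return 0;
err:
	thing_teardown(t);		/* releases the label reference */
	return err;
}

static void thing_destroy(struct thing *t)
{
	thing_teardown(t);		/* same helper, same order */
}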
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 7e99894778d4..e119e460ccde 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -190,7 +190,7 @@ int sctp_rcv(struct sk_buff *skb)
goto discard_release;
nf_reset_ct(skb);
- if (sk_filter(sk, skb))
+ if (sk_filter(sk, skb) || skb->len < sizeof(struct sctp_chunkhdr))
goto discard_release;
/* Create an SCTP packet structure. */
diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c
index 5c1652181805..f5a7d5a38755 100644
--- a/net/sctp/inqueue.c
+++ b/net/sctp/inqueue.c
@@ -169,13 +169,14 @@ next_chunk:
chunk->head_skb = chunk->skb;
/* skbs with "cover letter" */
- if (chunk->head_skb && chunk->skb->data_len == chunk->skb->len)
+ if (chunk->head_skb && chunk->skb->data_len == chunk->skb->len) {
+ if (WARN_ON(!skb_shinfo(chunk->skb)->frag_list)) {
+ __SCTP_INC_STATS(dev_net(chunk->skb->dev),
+ SCTP_MIB_IN_PKT_DISCARDS);
+ sctp_chunk_free(chunk);
+ goto next_chunk;
+ }
chunk->skb = skb_shinfo(chunk->skb)->frag_list;
-
- if (WARN_ON(!chunk->skb)) {
- __SCTP_INC_STATS(dev_net(chunk->skb->dev), SCTP_MIB_IN_PKT_DISCARDS);
- sctp_chunk_free(chunk);
- goto next_chunk;
}
}
diff --git a/net/smc/smc_inet.c b/net/smc/smc_inet.c
index a944e7dcb8b9..a94084b4a498 100644
--- a/net/smc/smc_inet.c
+++ b/net/smc/smc_inet.c
@@ -56,7 +56,6 @@ static struct inet_protosw smc_inet_protosw = {
.protocol = IPPROTO_SMC,
.prot = &smc_inet_prot,
.ops = &smc_inet_stream_ops,
- .flags = INET_PROTOSW_ICSK,
};
#if IS_ENABLED(CONFIG_IPV6)
@@ -104,27 +103,15 @@ static struct inet_protosw smc_inet6_protosw = {
.protocol = IPPROTO_SMC,
.prot = &smc_inet6_prot,
.ops = &smc_inet6_stream_ops,
- .flags = INET_PROTOSW_ICSK,
};
#endif /* CONFIG_IPV6 */
-static unsigned int smc_sync_mss(struct sock *sk, u32 pmtu)
-{
- /* No need pass it through to clcsock, mss can always be set by
- * sock_create_kern or smc_setsockopt.
- */
- return 0;
-}
-
static int smc_inet_init_sock(struct sock *sk)
{
struct net *net = sock_net(sk);
/* init common smc sock */
smc_sk_init(net, sk, IPPROTO_SMC);
-
- inet_csk(sk)->icsk_sync_mss = smc_sync_mss;
-
/* create clcsock */
return smc_create_clcsk(net, sk, sk->sk_family);
}
diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
index a64ae15b1a60..71734411ff4c 100644
--- a/net/tls/tls_device.c
+++ b/net/tls/tls_device.c
@@ -723,8 +723,10 @@ tls_device_rx_resync_async(struct tls_offload_resync_async *resync_async,
/* shouldn't get to wraparound:
* too long in async stage, something bad happened
*/
- if (WARN_ON_ONCE(resync_async->rcd_delta == USHRT_MAX))
+ if (WARN_ON_ONCE(resync_async->rcd_delta == USHRT_MAX)) {
+ tls_offload_rx_resync_async_request_cancel(resync_async);
return false;
+ }
/* asynchronous stage: log all headers seq such that
* req_seq <= seq <= end_seq, and wait for real resync request
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index 4c2db6cca557..76763247a377 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -487,12 +487,26 @@ int vsock_assign_transport(struct vsock_sock *vsk, struct vsock_sock *psk)
goto err;
}
- if (vsk->transport) {
- if (vsk->transport == new_transport) {
- ret = 0;
- goto err;
- }
+ if (vsk->transport && vsk->transport == new_transport) {
+ ret = 0;
+ goto err;
+ }
+ /* We increase the module refcnt to prevent the transport unloading
+ * while there are open sockets assigned to it.
+ */
+ if (!new_transport || !try_module_get(new_transport->module)) {
+ ret = -ENODEV;
+ goto err;
+ }
+
+ /* It's safe to release the mutex after a successful try_module_get().
+ * Whichever transport `new_transport` points at, it won't go away until
+ * the last module_put() below or in vsock_deassign_transport().
+ */
+ mutex_unlock(&vsock_register_mutex);
+
+ if (vsk->transport) {
/* transport->release() must be called with sock lock acquired.
* This path can only be taken during vsock_connect(), where we
* have already held the sock lock. In the other cases, this
@@ -512,20 +526,6 @@ int vsock_assign_transport(struct vsock_sock *vsk, struct vsock_sock *psk)
vsk->peer_shutdown = 0;
}
- /* We increase the module refcnt to prevent the transport unloading
- * while there are open sockets assigned to it.
- */
- if (!new_transport || !try_module_get(new_transport->module)) {
- ret = -ENODEV;
- goto err;
- }
-
- /* It's safe to release the mutex after a successful try_module_get().
- * Whichever transport `new_transport` points at, it won't go away until
- * the last module_put() below or in vsock_deassign_transport().
- */
- mutex_unlock(&vsock_register_mutex);
-
if (sk->sk_type == SOCK_SEQPACKET) {
if (!new_transport->seqpacket_allow ||
!new_transport->seqpacket_allow(remote_cid)) {
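The reorder restores the locking rule: try_module_get() must run while vsock_register_mutex still guarantees the transport pointer is live, and only once the module is pinned is it safe to drop the mutex and call into release paths that may sleep. In sketch form, with a hypothetical lookup and lock:

/* Pin a registered object before leaving the lock that protects it. */
static int pin_transport(struct transport **out)
{
	struct transport *t;

	mutex_lock(&reg_mutex);
	t = lookup_transport();		/* valid only under the mutex */
	if (!t || !try_module_get(t->module)) {
		mutex_unlock(&reg_mutex);
		return -ENODEV;
	}
	/* t is pinned: the module cannot unload until module_put() */
	mutex_unlock(&reg_mutex);

	*out = t;
	return 0;
}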
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 346dfd2bd987..03d07b54359a 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -4136,8 +4136,7 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
rdev->wiphy.txq_quantum = old_txq_quantum;
}
- if (old_rts_threshold)
- kfree(old_radio_rts_threshold);
+ kfree(old_radio_rts_threshold);
return result;
}
diff --git a/net/xfrm/espintcp.c b/net/xfrm/espintcp.c
index fc7a603b04f1..bf744ac9d5a7 100644
--- a/net/xfrm/espintcp.c
+++ b/net/xfrm/espintcp.c
@@ -555,14 +555,10 @@ static void espintcp_close(struct sock *sk, long timeout)
static __poll_t espintcp_poll(struct file *file, struct socket *sock,
poll_table *wait)
{
- __poll_t mask = datagram_poll(file, sock, wait);
struct sock *sk = sock->sk;
struct espintcp_ctx *ctx = espintcp_getctx(sk);
- if (!skb_queue_empty(&ctx->ike_queue))
- mask |= EPOLLIN | EPOLLRDNORM;
-
- return mask;
+ return datagram_poll_queue(file, sock, wait, &ctx->ike_queue);
}
static void build_protos(struct proto *espintcp_prot,
diff --git a/rust/kernel/auxiliary.rs b/rust/kernel/auxiliary.rs
index e11848bbf206..7a3b0b9c418e 100644
--- a/rust/kernel/auxiliary.rs
+++ b/rust/kernel/auxiliary.rs
@@ -217,13 +217,7 @@ impl<Ctx: device::DeviceContext> Device<Ctx> {
/// Returns a reference to the parent [`device::Device`], if any.
pub fn parent(&self) -> Option<&device::Device> {
- let ptr: *const Self = self;
- // CAST: `Device<Ctx: DeviceContext>` types are transparent to each other.
- let ptr: *const Device = ptr.cast();
- // SAFETY: `ptr` was derived from `&self`.
- let this = unsafe { &*ptr };
-
- this.as_ref().parent()
+ self.as_ref().parent()
}
}
diff --git a/rust/kernel/device.rs b/rust/kernel/device.rs
index 1321e6f0b53c..a849b7dde2fd 100644
--- a/rust/kernel/device.rs
+++ b/rust/kernel/device.rs
@@ -251,7 +251,7 @@ impl<Ctx: DeviceContext> Device<Ctx> {
/// Returns a reference to the parent device, if any.
#[cfg_attr(not(CONFIG_AUXILIARY_BUS), expect(dead_code))]
- pub(crate) fn parent(&self) -> Option<&Self> {
+ pub(crate) fn parent(&self) -> Option<&Device> {
// SAFETY:
// - By the type invariant `self.as_raw()` is always valid.
// - The parent device is only ever set at device creation.
@@ -264,7 +264,7 @@ impl<Ctx: DeviceContext> Device<Ctx> {
// - Since `parent` is not NULL, it must be a valid pointer to a `struct device`.
// - `parent` is valid for the lifetime of `self`, since a `struct device` holds a
// reference count of its parent.
- Some(unsafe { Self::from_raw(parent) })
+ Some(unsafe { Device::from_raw(parent) })
}
}
diff --git a/scripts/kconfig/mconf.c b/scripts/kconfig/mconf.c
index 84ea9215c0a7..b8b7bba84a65 100644
--- a/scripts/kconfig/mconf.c
+++ b/scripts/kconfig/mconf.c
@@ -12,6 +12,7 @@
#include <errno.h>
#include <fcntl.h>
#include <limits.h>
+#include <locale.h>
#include <stdarg.h>
#include <stdlib.h>
#include <string.h>
@@ -931,6 +932,8 @@ int main(int ac, char **av)
signal(SIGINT, sig_handler);
+ setlocale(LC_ALL, "");
+
if (ac > 1 && strcmp(av[1], "-s") == 0) {
silent = 1;
/* Silence conf_read() until the real callback is set up */
diff --git a/scripts/kconfig/nconf.c b/scripts/kconfig/nconf.c
index ae1fe5f60327..521700ed7152 100644
--- a/scripts/kconfig/nconf.c
+++ b/scripts/kconfig/nconf.c
@@ -7,6 +7,7 @@
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
+#include <locale.h>
#include <string.h>
#include <strings.h>
#include <stdlib.h>
@@ -1478,6 +1479,8 @@ int main(int ac, char **av)
int lines, columns;
char *mode;
+ setlocale(LC_ALL, "");
+
if (ac > 1 && strcmp(av[1], "-s") == 0) {
/* Silence conf_read() until the real callback is set up */
conf_set_message_callback(NULL);
diff --git a/scripts/package/install-extmod-build b/scripts/package/install-extmod-build
index b96538787f3d..054fdf45cc37 100755
--- a/scripts/package/install-extmod-build
+++ b/scripts/package/install-extmod-build
@@ -63,7 +63,7 @@ if [ "${CC}" != "${HOSTCC}" ]; then
# Clear VPATH and srcroot because the source files reside in the output
# directory.
# shellcheck disable=SC2016 # $(MAKE) and $(build) will be expanded by Make
- "${MAKE}" run-command KBUILD_RUN_COMMAND='+$(MAKE) HOSTCC='"${CC}"' VPATH= srcroot=. $(build)='"$(realpath --relative-base=. "${destdir}")"/scripts
+ "${MAKE}" run-command KBUILD_RUN_COMMAND='+$(MAKE) HOSTCC='"${CC}"' VPATH= srcroot=. $(build)='"$(realpath --relative-to=. "${destdir}")"/scripts
rm -f "${destdir}/scripts/Kbuild"
fi
diff --git a/sound/hda/codecs/realtek/alc269.c b/sound/hda/codecs/realtek/alc269.c
index 8ad5febd822a..4aec5067c59d 100644
--- a/sound/hda/codecs/realtek/alc269.c
+++ b/sound/hda/codecs/realtek/alc269.c
@@ -3736,6 +3736,7 @@ enum {
ALC285_FIXUP_ASUS_GA605K_I2C_SPEAKER2_TO_DAC1,
ALC269_FIXUP_POSITIVO_P15X_HEADSET_MIC,
ALC289_FIXUP_ASUS_ZEPHYRUS_DUAL_SPK,
+ ALC256_FIXUP_VAIO_RPL_MIC_NO_PRESENCE,
};
/* A special fixup for Lenovo C940 and Yoga Duet 7;
@@ -6172,6 +6173,16 @@ static const struct hda_fixup alc269_fixups[] = {
{ 0x1e, 0x90170150 }, /* Internal Speaker */
{ }
},
+ },
+ [ALC256_FIXUP_VAIO_RPL_MIC_NO_PRESENCE] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+ { 0x19, 0x03a1113c }, /* use as headset mic, without its own jack detect */
+ { 0x1a, 0x22a190a0 }, /* dock mic */
+ { }
+ },
+ .chained = true,
+ .chain_id = ALC269_FIXUP_LIMIT_INT_MIC_BOOST
}
};
@@ -6578,6 +6589,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x8c16, "HP Spectre x360 2-in-1 Laptop 16-aa0xxx", ALC245_FIXUP_HP_SPECTRE_X360_16_AA0XXX),
SND_PCI_QUIRK(0x103c, 0x8c17, "HP Spectre 16", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x103c, 0x8c21, "HP Pavilion Plus Laptop 14-ey0XXX", ALC245_FIXUP_HP_X360_MUTE_LEDS),
+ SND_PCI_QUIRK(0x103c, 0x8c2d, "HP Victus 15-fa1xxx (MB 8C2D)", ALC245_FIXUP_HP_MUTE_LED_COEFBIT),
SND_PCI_QUIRK(0x103c, 0x8c30, "HP Victus 15-fb1xxx", ALC245_FIXUP_HP_MUTE_LED_COEFBIT),
SND_PCI_QUIRK(0x103c, 0x8c46, "HP EliteBook 830 G11", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8c47, "HP EliteBook 840 G11", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
@@ -6959,6 +6971,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1558, 0x971d, "Clevo N970T[CDF]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0xa500, "Clevo NL5[03]RU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0xa554, "VAIO VJFH52", ALC269_FIXUP_VAIO_VJFH52_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0xa559, "VAIO RPL", ALC256_FIXUP_VAIO_RPL_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0xa600, "Clevo NL50NU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0xa650, "Clevo NP[567]0SN[CD]", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0xa671, "Clevo NP70SN[CDE]", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
@@ -7080,6 +7093,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x17aa, 0x38a9, "Thinkbook 16P", ALC287_FIXUP_MG_RTKC_CSAMP_CS35L41_I2C_THINKPAD),
SND_PCI_QUIRK(0x17aa, 0x38ab, "Thinkbook 16P", ALC287_FIXUP_MG_RTKC_CSAMP_CS35L41_I2C_THINKPAD),
SND_PCI_QUIRK(0x17aa, 0x38b4, "Legion Slim 7 16IRH8", ALC287_FIXUP_CS35L41_I2C_2),
+ HDA_CODEC_QUIRK(0x17aa, 0x391c, "Lenovo Yoga 7 2-in-1 14AKP10", ALC287_FIXUP_YOGA9_14IAP7_BASS_SPK_PIN),
SND_PCI_QUIRK(0x17aa, 0x38b5, "Legion Slim 7 16IRH8", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x17aa, 0x38b6, "Legion Slim 7 16APH8", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x17aa, 0x38b7, "Legion Slim 7 16APH8", ALC287_FIXUP_CS35L41_I2C_2),
diff --git a/sound/soc/amd/acp/amd-acp70-acpi-match.c b/sound/soc/amd/acp/amd-acp70-acpi-match.c
index dcecac792e6d..871b4f054a84 100644
--- a/sound/soc/amd/acp/amd-acp70-acpi-match.c
+++ b/sound/soc/amd/acp/amd-acp70-acpi-match.c
@@ -30,6 +30,20 @@ static const struct snd_soc_acpi_endpoint spk_r_endpoint = {
.group_id = 1
};
+static const struct snd_soc_acpi_endpoint spk_2_endpoint = {
+ .num = 0,
+ .aggregated = 1,
+ .group_position = 2,
+ .group_id = 1
+};
+
+static const struct snd_soc_acpi_endpoint spk_3_endpoint = {
+ .num = 0,
+ .aggregated = 1,
+ .group_position = 3,
+ .group_id = 1
+};
+
static const struct snd_soc_acpi_adr_device rt711_rt1316_group_adr[] = {
{
.adr = 0x000030025D071101ull,
@@ -112,6 +126,134 @@ static const struct snd_soc_acpi_adr_device rt1320_1_single_adr[] = {
}
};
+static const struct snd_soc_acpi_endpoint cs42l43_endpoints[] = {
+ { /* Jack Playback Endpoint */
+ .num = 0,
+ .aggregated = 0,
+ .group_position = 0,
+ .group_id = 0,
+ },
+ { /* DMIC Capture Endpoint */
+ .num = 1,
+ .aggregated = 0,
+ .group_position = 0,
+ .group_id = 0,
+ },
+ { /* Jack Capture Endpoint */
+ .num = 2,
+ .aggregated = 0,
+ .group_position = 0,
+ .group_id = 0,
+ },
+ { /* Speaker Playback Endpoint */
+ .num = 3,
+ .aggregated = 0,
+ .group_position = 0,
+ .group_id = 0,
+ },
+};
+
+static const struct snd_soc_acpi_adr_device cs42l43_0_adr[] = {
+ {
+ .adr = 0x00003001FA424301ull,
+ .num_endpoints = ARRAY_SIZE(cs42l43_endpoints),
+ .endpoints = cs42l43_endpoints,
+ .name_prefix = "cs42l43"
+ }
+};
+
+static const struct snd_soc_acpi_adr_device cs42l43_1_cs35l56x4_1_adr[] = {
+ {
+ .adr = 0x00013001FA424301ull,
+ .num_endpoints = ARRAY_SIZE(cs42l43_endpoints),
+ .endpoints = cs42l43_endpoints,
+ .name_prefix = "cs42l43"
+ },
+ {
+ .adr = 0x00013001FA355601ull,
+ .num_endpoints = 1,
+ .endpoints = &spk_l_endpoint,
+ .name_prefix = "AMP1"
+ },
+ {
+ .adr = 0x00013101FA355601ull,
+ .num_endpoints = 1,
+ .endpoints = &spk_r_endpoint,
+ .name_prefix = "AMP2"
+ },
+ {
+ .adr = 0x00013201FA355601ull,
+ .num_endpoints = 1,
+ .endpoints = &spk_2_endpoint,
+ .name_prefix = "AMP3"
+ },
+ {
+ .adr = 0x00013301FA355601ull,
+ .num_endpoints = 1,
+ .endpoints = &spk_3_endpoint,
+ .name_prefix = "AMP4"
+ },
+};
+
+static const struct snd_soc_acpi_adr_device cs35l56x4_1_adr[] = {
+ {
+ .adr = 0x00013301FA355601ull,
+ .num_endpoints = 1,
+ .endpoints = &spk_l_endpoint,
+ .name_prefix = "AMP1"
+ },
+ {
+ .adr = 0x00013201FA355601ull,
+ .num_endpoints = 1,
+ .endpoints = &spk_r_endpoint,
+ .name_prefix = "AMP2"
+ },
+ {
+ .adr = 0x00013101FA355601ull,
+ .num_endpoints = 1,
+ .endpoints = &spk_2_endpoint,
+ .name_prefix = "AMP3"
+ },
+ {
+ .adr = 0x00013001FA355601ull,
+ .num_endpoints = 1,
+ .endpoints = &spk_3_endpoint,
+ .name_prefix = "AMP4"
+ },
+};
+
+static const struct snd_soc_acpi_link_adr acp70_cs42l43_l1_cs35l56x4_l1[] = {
+ {
+ .mask = BIT(1),
+ .num_adr = ARRAY_SIZE(cs42l43_1_cs35l56x4_1_adr),
+ .adr_d = cs42l43_1_cs35l56x4_1_adr,
+ },
+ {}
+};
+
+static const struct snd_soc_acpi_link_adr acp70_cs42l43_l0_cs35l56x4_l1[] = {
+ {
+ .mask = BIT(0),
+ .num_adr = ARRAY_SIZE(cs42l43_0_adr),
+ .adr_d = cs42l43_0_adr,
+ },
+ {
+ .mask = BIT(1),
+ .num_adr = ARRAY_SIZE(cs35l56x4_1_adr),
+ .adr_d = cs35l56x4_1_adr,
+ },
+ {}
+};
+
+static const struct snd_soc_acpi_link_adr acp70_cs35l56x4_l1[] = {
+ {
+ .mask = BIT(1),
+ .num_adr = ARRAY_SIZE(cs35l56x4_1_adr),
+ .adr_d = cs35l56x4_1_adr,
+ },
+ {}
+};
+
static const struct snd_soc_acpi_link_adr acp70_rt722_only[] = {
{
.mask = BIT(0),
@@ -151,6 +293,21 @@ struct snd_soc_acpi_mach snd_soc_acpi_amd_acp70_sdw_machines[] = {
.links = acp70_4_in_1_sdca,
.drv_name = "amd_sdw",
},
+ {
+ .link_mask = BIT(0) | BIT(1),
+ .links = acp70_cs42l43_l0_cs35l56x4_l1,
+ .drv_name = "amd_sdw",
+ },
+ {
+ .link_mask = BIT(1),
+ .links = acp70_cs42l43_l1_cs35l56x4_l1,
+ .drv_name = "amd_sdw",
+ },
+ {
+ .link_mask = BIT(1),
+ .links = acp70_cs35l56x4_l1,
+ .drv_name = "amd_sdw",
+ },
{},
};
EXPORT_SYMBOL(snd_soc_acpi_amd_acp70_sdw_machines);
diff --git a/sound/soc/codecs/cs-amp-lib-test.c b/sound/soc/codecs/cs-amp-lib-test.c
index 2fde84309338..3406887cdfa2 100644
--- a/sound/soc/codecs/cs-amp-lib-test.c
+++ b/sound/soc/codecs/cs-amp-lib-test.c
@@ -7,6 +7,7 @@
#include <kunit/resource.h>
#include <kunit/test.h>
+#include <kunit/test-bug.h>
#include <kunit/static_stub.h>
#include <linux/device/faux.h>
#include <linux/firmware/cirrus/cs_dsp.h>
diff --git a/sound/soc/codecs/cs530x.c b/sound/soc/codecs/cs530x.c
index b9eff240b929..535387cd7aa3 100644
--- a/sound/soc/codecs/cs530x.c
+++ b/sound/soc/codecs/cs530x.c
@@ -793,7 +793,7 @@ static int cs530x_set_sysclk(struct snd_soc_component *component, int clk_id,
case CS530X_SYSCLK_SRC_PLL:
break;
default:
- dev_err(component->dev, "Invalid clock id %d\n", clk_id);
+ dev_err(component->dev, "Invalid sysclk source: %d\n", source);
return -EINVAL;
}
diff --git a/sound/soc/codecs/max98090.c b/sound/soc/codecs/max98090.c
index cb1508fc99f8..5aff5a459a43 100644
--- a/sound/soc/codecs/max98090.c
+++ b/sound/soc/codecs/max98090.c
@@ -1239,6 +1239,8 @@ static const struct snd_soc_dapm_widget max98091_dapm_widgets[] = {
SND_SOC_DAPM_SUPPLY("DMIC4_ENA", M98090_REG_DIGITAL_MIC_ENABLE,
M98090_DIGMIC4_SHIFT, 0, max98090_shdn_event,
SND_SOC_DAPM_POST_PMU),
+ SND_SOC_DAPM_SUPPLY("DMIC34_HPF", M98090_REG_FILTER_CONFIG,
+ M98090_FLT_DMIC34HPF_SHIFT, 0, NULL, 0),
};
static const struct snd_soc_dapm_route max98090_dapm_routes[] = {
@@ -1427,8 +1429,8 @@ static const struct snd_soc_dapm_route max98091_dapm_routes[] = {
/* DMIC inputs */
{"DMIC3", NULL, "DMIC3_ENA"},
{"DMIC4", NULL, "DMIC4_ENA"},
- {"DMIC3", NULL, "AHPF"},
- {"DMIC4", NULL, "AHPF"},
+ {"DMIC3", NULL, "DMIC34_HPF"},
+ {"DMIC4", NULL, "DMIC34_HPF"},
};
static int max98090_add_widgets(struct snd_soc_component *component)
diff --git a/sound/soc/codecs/rt721-sdca.c b/sound/soc/codecs/rt721-sdca.c
index a4bd29d7220b..5f7b505d5414 100644
--- a/sound/soc/codecs/rt721-sdca.c
+++ b/sound/soc/codecs/rt721-sdca.c
@@ -281,6 +281,10 @@ static void rt721_sdca_jack_preset(struct rt721_sdca_priv *rt721)
rt_sdca_index_write(rt721->mbq_regmap, RT721_BOOST_CTRL,
RT721_BST_4CH_TOP_GATING_CTRL1, 0x002a);
regmap_write(rt721->regmap, 0x2f58, 0x07);
+
+ regmap_write(rt721->regmap, 0x2f51, 0x00);
+ rt_sdca_index_write(rt721->mbq_regmap, RT721_HDA_SDCA_FLOAT,
+ RT721_MISC_CTL, 0x0004);
}
static void rt721_sdca_jack_init(struct rt721_sdca_priv *rt721)
diff --git a/sound/soc/codecs/rt721-sdca.h b/sound/soc/codecs/rt721-sdca.h
index 71fac9cd8739..24ce188562ba 100644
--- a/sound/soc/codecs/rt721-sdca.h
+++ b/sound/soc/codecs/rt721-sdca.h
@@ -137,6 +137,7 @@ struct rt721_sdca_dmic_kctrl_priv {
#define RT721_HDA_LEGACY_UAJ_CTL 0x02
#define RT721_HDA_LEGACY_CTL1 0x05
#define RT721_HDA_LEGACY_RESET_CTL 0x06
+#define RT721_MISC_CTL 0x07
#define RT721_XU_REL_CTRL 0x0c
#define RT721_GE_REL_CTRL1 0x0d
#define RT721_HDA_LEGACY_GPIO_WAKE_EN_CTL 0x0e
diff --git a/sound/soc/fsl/fsl_micfil.c b/sound/soc/fsl/fsl_micfil.c
index aabd90a8b3ec..cac26ba0aa4b 100644
--- a/sound/soc/fsl/fsl_micfil.c
+++ b/sound/soc/fsl/fsl_micfil.c
@@ -131,7 +131,7 @@ static struct fsl_micfil_soc_data fsl_micfil_imx943 = {
.fifos = 8,
.fifo_depth = 32,
.dataline = 0xf,
- .formats = SNDRV_PCM_FMTBIT_S32_LE | SNDRV_PCM_FMTBIT_DSD_U32_BE,
+ .formats = SNDRV_PCM_FMTBIT_S32_LE | SNDRV_PCM_FMTBIT_DSD_U32_LE,
.use_edma = true,
.use_verid = true,
.volume_sx = false,
@@ -823,7 +823,7 @@ static int fsl_micfil_hw_params(struct snd_pcm_substream *substream,
break;
}
- if (format == SNDRV_PCM_FORMAT_DSD_U32_BE) {
+ if (format == SNDRV_PCM_FORMAT_DSD_U32_LE) {
micfil->dec_bypass = true;
/*
* According to equation 29 in RM:
diff --git a/sound/soc/fsl/fsl_sai.c b/sound/soc/fsl/fsl_sai.c
index 757e7868e322..72bfc91e21b9 100644
--- a/sound/soc/fsl/fsl_sai.c
+++ b/sound/soc/fsl/fsl_sai.c
@@ -353,7 +353,6 @@ static int fsl_sai_set_dai_fmt_tr(struct snd_soc_dai *cpu_dai,
break;
case SND_SOC_DAIFMT_PDM:
val_cr2 |= FSL_SAI_CR2_BCP;
- val_cr4 &= ~FSL_SAI_CR4_MF;
sai->is_pdm_mode = true;
break;
case SND_SOC_DAIFMT_RIGHT_J:
@@ -638,7 +637,7 @@ static int fsl_sai_hw_params(struct snd_pcm_substream *substream,
val_cr5 |= FSL_SAI_CR5_WNW(slot_width);
val_cr5 |= FSL_SAI_CR5_W0W(slot_width);
- if (sai->is_lsb_first || sai->is_pdm_mode)
+ if (sai->is_lsb_first)
val_cr5 |= FSL_SAI_CR5_FBT(0);
else
val_cr5 |= FSL_SAI_CR5_FBT(word_width - 1);
@@ -653,12 +652,12 @@ static int fsl_sai_hw_params(struct snd_pcm_substream *substream,
val_cr4 |= FSL_SAI_CR4_CHMOD;
/*
- * For SAI provider mode, when Tx(Rx) sync with Rx(Tx) clock, Rx(Tx) will
- * generate bclk and frame clock for Tx(Rx), we should set RCR4(TCR4),
- * RCR5(TCR5) for playback(capture), or there will be sync error.
+ * When Tx(Rx) sync with Rx(Tx) clock, Rx(Tx) will provide bclk and
+ * frame clock for Tx(Rx). We should set RCR4(TCR4), RCR5(TCR5)
+ * for playback(capture), or there will be sync error.
*/
- if (!sai->is_consumer_mode[tx] && fsl_sai_dir_is_synced(sai, adir)) {
+ if (fsl_sai_dir_is_synced(sai, adir)) {
regmap_update_bits(sai->regmap, FSL_SAI_xCR4(!tx, ofs),
FSL_SAI_CR4_SYWD_MASK | FSL_SAI_CR4_FRSZ_MASK |
FSL_SAI_CR4_CHMOD_MASK,
diff --git a/sound/soc/intel/avs/pcm.c b/sound/soc/intel/avs/pcm.c
index d31058e2de5b..80c001120cdd 100644
--- a/sound/soc/intel/avs/pcm.c
+++ b/sound/soc/intel/avs/pcm.c
@@ -651,6 +651,7 @@ static void avs_dai_fe_shutdown(struct snd_pcm_substream *substream, struct snd_
data = snd_soc_dai_get_dma_data(dai, substream);
+ disable_work_sync(&data->period_elapsed_work);
snd_hdac_ext_stream_release(data->host_stream, HDAC_EXT_STREAM_TYPE_HOST);
avs_dai_shutdown(substream, dai);
}
@@ -754,6 +755,8 @@ static int avs_dai_fe_prepare(struct snd_pcm_substream *substream, struct snd_so
data = snd_soc_dai_get_dma_data(dai, substream);
host_stream = data->host_stream;
+ if (runtime->state == SNDRV_PCM_STATE_XRUN)
+ hdac_stream(host_stream)->prepared = false;
if (hdac_stream(host_stream)->prepared)
return 0;
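
Two independent fixes here: shutdown now quiesces the deferred period-elapsed work before the host stream it references is released, and prepare drops the cached prepared flag when the stream stopped due to an XRUN, since ALSA re-invokes prepare() on recovery and the short-circuit would skip reprogramming the hardware. The XRUN guard, annotated:

    /* On XRUN recovery ALSA calls prepare() again; without this the
     * cached flag would return early and leave stale stream state. */
    if (runtime->state == SNDRV_PCM_STATE_XRUN)
        hdac_stream(host_stream)->prepared = false;

    if (hdac_stream(host_stream)->prepared)
        return 0;               /* still programmed: nothing to redo */
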
diff --git a/sound/soc/intel/avs/probes.c b/sound/soc/intel/avs/probes.c
index 693ecfe68fd0..74096236984a 100644
--- a/sound/soc/intel/avs/probes.c
+++ b/sound/soc/intel/avs/probes.c
@@ -14,8 +14,8 @@
#include "debug.h"
#include "messages.h"
-static int avs_dsp_init_probe(struct avs_dev *adev, union avs_connector_node_id node_id,
- size_t buffer_size)
+static int avs_dsp_init_probe(struct avs_dev *adev, struct snd_compr_params *params, int bps,
+ union avs_connector_node_id node_id, size_t buffer_size)
{
struct avs_probe_cfg cfg = {{0}};
struct avs_module_entry mentry;
@@ -27,12 +27,16 @@ static int avs_dsp_init_probe(struct avs_dev *adev, union avs_connector_node_id
return ret;
/*
- * Probe module uses no cycles, audio data format and input and output
- * frame sizes are unused. It is also not owned by any pipeline.
+ * Probe module uses no cycles, input and output frame sizes are unused.
+ * It is also not owned by any pipeline.
*/
cfg.base.ibs = 1;
/* BSS module descriptor is always segment of index=2. */
cfg.base.is_pages = mentry.segments[2].flags.length;
+ cfg.base.audio_fmt.sampling_freq = params->codec.sample_rate;
+ cfg.base.audio_fmt.bit_depth = bps;
+ cfg.base.audio_fmt.num_channels = params->codec.ch_out;
+ cfg.base.audio_fmt.valid_bit_depth = bps;
cfg.gtw_cfg.node_id = node_id;
cfg.gtw_cfg.dma_buffer_size = buffer_size;
@@ -128,8 +132,6 @@ static int avs_probe_compr_set_params(struct snd_compr_stream *cstream,
struct hdac_ext_stream *host_stream = avs_compr_get_host_stream(cstream);
struct snd_compr_runtime *rtd = cstream->runtime;
struct avs_dev *adev = to_avs_dev(dai->dev);
- /* compr params do not store bit depth, default to S32_LE. */
- snd_pcm_format_t format = SNDRV_PCM_FORMAT_S32_LE;
unsigned int format_val;
int bps, ret;
@@ -142,7 +144,7 @@ static int avs_probe_compr_set_params(struct snd_compr_stream *cstream,
ret = snd_compr_malloc_pages(cstream, rtd->buffer_size);
if (ret < 0)
return ret;
- bps = snd_pcm_format_physical_width(format);
+ bps = snd_pcm_format_physical_width(params->codec.format);
if (bps < 0)
return bps;
format_val = snd_hdac_stream_format(params->codec.ch_out, bps, params->codec.sample_rate);
@@ -166,7 +168,7 @@ static int avs_probe_compr_set_params(struct snd_compr_stream *cstream,
node_id.vindex = hdac_stream(host_stream)->stream_tag - 1;
node_id.dma_type = AVS_DMA_HDA_HOST_INPUT;
- ret = avs_dsp_init_probe(adev, node_id, rtd->dma_bytes);
+ ret = avs_dsp_init_probe(adev, params, bps, node_id, rtd->dma_bytes);
if (ret < 0) {
dev_err(dai->dev, "probe init failed: %d\n", ret);
avs_dsp_enable_d0ix(adev);
diff --git a/sound/soc/intel/common/soc-acpi-intel-ptl-match.c b/sound/soc/intel/common/soc-acpi-intel-ptl-match.c
index 3c8b10e21ceb..4853f4f31786 100644
--- a/sound/soc/intel/common/soc-acpi-intel-ptl-match.c
+++ b/sound/soc/intel/common/soc-acpi-intel-ptl-match.c
@@ -227,33 +227,6 @@ static const struct snd_soc_acpi_endpoint cs42l43_amp_spkagg_endpoints[] = {
},
};
-static const struct snd_soc_acpi_endpoint cs42l43_endpoints[] = {
- { /* Jack Playback Endpoint */
- .num = 0,
- .aggregated = 0,
- .group_position = 0,
- .group_id = 0,
- },
- { /* DMIC Capture Endpoint */
- .num = 1,
- .aggregated = 0,
- .group_position = 0,
- .group_id = 0,
- },
- { /* Jack Capture Endpoint */
- .num = 2,
- .aggregated = 0,
- .group_position = 0,
- .group_id = 0,
- },
- { /* Speaker Playback Endpoint */
- .num = 3,
- .aggregated = 0,
- .group_position = 0,
- .group_id = 0,
- },
-};
-
static const struct snd_soc_acpi_adr_device cs42l43_2_adr[] = {
{
.adr = 0x00023001fa424301ull,
@@ -305,15 +278,6 @@ static const struct snd_soc_acpi_adr_device cs35l56_3_3amp_adr[] = {
}
};
-static const struct snd_soc_acpi_adr_device cs42l43_3_adr[] = {
- {
- .adr = 0x00033001FA424301ull,
- .num_endpoints = ARRAY_SIZE(cs42l43_endpoints),
- .endpoints = cs42l43_endpoints,
- .name_prefix = "cs42l43"
- }
-};
-
static const struct snd_soc_acpi_adr_device rt711_sdca_0_adr[] = {
{
.adr = 0x000030025D071101ull,
@@ -486,15 +450,6 @@ static const struct snd_soc_acpi_link_adr ptl_cs42l43_l2_cs35l56x6_l13[] = {
{}
};
-static const struct snd_soc_acpi_link_adr ptl_cs42l43_l3[] = {
- {
- .mask = BIT(3),
- .num_adr = ARRAY_SIZE(cs42l43_3_adr),
- .adr_d = cs42l43_3_adr,
- },
- {}
-};
-
static const struct snd_soc_acpi_link_adr ptl_rt721_l0[] = {
{
.mask = BIT(0),
@@ -714,13 +669,6 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_ptl_sdw_machines[] = {
},
{
.link_mask = BIT(3),
- .links = ptl_cs42l43_l3,
- .drv_name = "sof_sdw",
- .sof_tplg_filename = "sof-ptl-cs42l43-l3.tplg",
- .get_function_tplg_files = sof_sdw_get_tplg_files,
- },
- {
- .link_mask = BIT(3),
.links = ptl_sdw_rt712_vb_l3_rt1320_l3,
.drv_name = "sof_sdw",
.machine_check = snd_soc_acpi_intel_sdca_is_device_rt712_vb,
diff --git a/sound/soc/mediatek/mt8195/mt8195-afe-pcm.c b/sound/soc/mediatek/mt8195/mt8195-afe-pcm.c
index 5d025ad72263..c63b3444bc17 100644
--- a/sound/soc/mediatek/mt8195/mt8195-afe-pcm.c
+++ b/sound/soc/mediatek/mt8195/mt8195-afe-pcm.c
@@ -3176,7 +3176,6 @@ err_pm_put:
static void mt8195_afe_pcm_dev_remove(struct platform_device *pdev)
{
- pm_runtime_disable(&pdev->dev);
if (!pm_runtime_status_suspended(&pdev->dev))
mt8195_afe_runtime_suspend(&pdev->dev);
}
diff --git a/sound/soc/mediatek/mt8365/mt8365-afe-pcm.c b/sound/soc/mediatek/mt8365/mt8365-afe-pcm.c
index 10793bbe9275..d48252cd96ac 100644
--- a/sound/soc/mediatek/mt8365/mt8365-afe-pcm.c
+++ b/sound/soc/mediatek/mt8365/mt8365-afe-pcm.c
@@ -2238,7 +2238,6 @@ static void mt8365_afe_pcm_dev_remove(struct platform_device *pdev)
mt8365_afe_disable_top_cg(afe, MT8365_TOP_CG_AFE);
- pm_runtime_disable(&pdev->dev);
if (!pm_runtime_status_suspended(&pdev->dev))
mt8365_afe_runtime_suspend(&pdev->dev);
}
diff --git a/sound/soc/qcom/qdsp6/q6asm.c b/sound/soc/qcom/qdsp6/q6asm.c
index 06a802f9dba5..67e9ca18883c 100644
--- a/sound/soc/qcom/qdsp6/q6asm.c
+++ b/sound/soc/qcom/qdsp6/q6asm.c
@@ -377,9 +377,9 @@ static void q6asm_audio_client_free_buf(struct audio_client *ac,
spin_lock_irqsave(&ac->lock, flags);
port->num_periods = 0;
+ spin_unlock_irqrestore(&ac->lock, flags);
kfree(port->buf);
port->buf = NULL;
- spin_unlock_irqrestore(&ac->lock, flags);
}
/**
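
The unlock now happens before the free, so the allocator is never entered while holding the port lock with interrupts disabled. A common hardening of the same pattern, sketched below rather than taken verbatim from the hunk, claims the pointer inside the critical section so no concurrent reader can observe a half-torn-down buffer:

    void *buf;

    spin_lock_irqsave(&ac->lock, flags);
    buf = port->buf;                    /* claim ownership under the lock */
    port->buf = NULL;
    port->num_periods = 0;
    spin_unlock_irqrestore(&ac->lock, flags);

    kfree(buf);                         /* free outside the critical section */
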
diff --git a/sound/soc/renesas/rz-ssi.c b/sound/soc/renesas/rz-ssi.c
index e00940814157..81b883e8ac92 100644
--- a/sound/soc/renesas/rz-ssi.c
+++ b/sound/soc/renesas/rz-ssi.c
@@ -85,6 +85,7 @@ struct rz_ssi_stream {
struct snd_pcm_substream *substream;
int fifo_sample_size; /* sample capacity of SSI FIFO */
int dma_buffer_pos; /* The address for the next DMA descriptor */
+ int completed_dma_buf_pos; /* The address of the last completed DMA descriptor. */
int period_counter; /* for keeping track of periods transferred */
int sample_width;
int buffer_pos; /* current frame position in the buffer */
@@ -215,6 +216,7 @@ static void rz_ssi_stream_init(struct rz_ssi_stream *strm,
rz_ssi_set_substream(strm, substream);
strm->sample_width = samples_to_bytes(runtime, 1);
strm->dma_buffer_pos = 0;
+ strm->completed_dma_buf_pos = 0;
strm->period_counter = 0;
strm->buffer_pos = 0;
@@ -437,6 +439,10 @@ static void rz_ssi_pointer_update(struct rz_ssi_stream *strm, int frames)
snd_pcm_period_elapsed(strm->substream);
strm->period_counter = current_period;
}
+
+ strm->completed_dma_buf_pos += runtime->period_size;
+ if (strm->completed_dma_buf_pos >= runtime->buffer_size)
+ strm->completed_dma_buf_pos = 0;
}
static int rz_ssi_pio_recv(struct rz_ssi_priv *ssi, struct rz_ssi_stream *strm)
@@ -778,10 +784,14 @@ no_dma:
return -ENODEV;
}
-static int rz_ssi_trigger_resume(struct rz_ssi_priv *ssi)
+static int rz_ssi_trigger_resume(struct rz_ssi_priv *ssi, struct rz_ssi_stream *strm)
{
+ struct snd_pcm_substream *substream = strm->substream;
+ struct snd_pcm_runtime *runtime = substream->runtime;
int ret;
+ strm->dma_buffer_pos = strm->completed_dma_buf_pos + runtime->period_size;
+
if (rz_ssi_is_stream_running(&ssi->playback) ||
rz_ssi_is_stream_running(&ssi->capture))
return 0;
@@ -794,16 +804,6 @@ static int rz_ssi_trigger_resume(struct rz_ssi_priv *ssi)
ssi->hw_params_cache.channels);
}
-static void rz_ssi_streams_suspend(struct rz_ssi_priv *ssi)
-{
- if (rz_ssi_is_stream_running(&ssi->playback) ||
- rz_ssi_is_stream_running(&ssi->capture))
- return;
-
- ssi->playback.dma_buffer_pos = 0;
- ssi->capture.dma_buffer_pos = 0;
-}
-
static int rz_ssi_dai_trigger(struct snd_pcm_substream *substream, int cmd,
struct snd_soc_dai *dai)
{
@@ -813,7 +813,7 @@ static int rz_ssi_dai_trigger(struct snd_pcm_substream *substream, int cmd,
switch (cmd) {
case SNDRV_PCM_TRIGGER_RESUME:
- ret = rz_ssi_trigger_resume(ssi);
+ ret = rz_ssi_trigger_resume(ssi, strm);
if (ret)
return ret;
@@ -852,7 +852,6 @@ static int rz_ssi_dai_trigger(struct snd_pcm_substream *substream, int cmd,
case SNDRV_PCM_TRIGGER_SUSPEND:
rz_ssi_stop(ssi, strm);
- rz_ssi_streams_suspend(ssi);
break;
case SNDRV_PCM_TRIGGER_STOP:
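
The driver previously zeroed dma_buffer_pos on suspend, so a resumed stream restarted DMA from the start of the ring regardless of where it had stopped. It now records the offset of the last completed period and resumes from the period after it. The wrap-around bookkeeping, reduced to a helper for clarity (names are illustrative):

    /* Advance a ring offset by one period, folding back at the end. */
    static unsigned int advance(unsigned int pos, unsigned int period,
                                unsigned int bufsz)
    {
        pos += period;
        return pos >= bufsz ? 0 : pos;
    }

    /* per period-complete: completed = advance(completed, period, bufsz);
     * on resume:           next_dma  = completed + period;  (as in the hunk) */
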
diff --git a/sound/soc/sdw_utils/soc_sdw_utils.c b/sound/soc/sdw_utils/soc_sdw_utils.c
index 270c66b90228..f7c8c16308de 100644
--- a/sound/soc/sdw_utils/soc_sdw_utils.c
+++ b/sound/soc/sdw_utils/soc_sdw_utils.c
@@ -638,7 +638,6 @@ struct asoc_sdw_codec_info codec_info_list[] = {
{
.direction = {true, false},
.dai_name = "cs42l43-dp6",
- .component_name = "cs42l43",
.dai_type = SOC_SDW_DAI_TYPE_AMP,
.dailink = {SOC_SDW_AMP_OUT_DAI_ID, SOC_SDW_UNUSED_DAI_ID},
.init = asoc_sdw_cs42l43_spk_init,
diff --git a/sound/usb/mixer_s1810c.c b/sound/usb/mixer_s1810c.c
index 15960d25e748..6e09e074c0e7 100644
--- a/sound/usb/mixer_s1810c.c
+++ b/sound/usb/mixer_s1810c.c
@@ -178,7 +178,7 @@ snd_sc1810c_get_status_field(struct usb_device *dev,
pkt_out.fields[SC1810C_STATE_F1_IDX] = SC1810C_SET_STATE_F1;
pkt_out.fields[SC1810C_STATE_F2_IDX] = SC1810C_SET_STATE_F2;
- ret = snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0),
+ ret = snd_usb_ctl_msg(dev, usb_sndctrlpipe(dev, 0),
SC1810C_SET_STATE_REQ,
SC1810C_SET_STATE_REQTYPE,
(*seqnum), 0, &pkt_out, sizeof(pkt_out));
@@ -597,15 +597,6 @@ int snd_sc1810_init_mixer(struct usb_mixer_interface *mixer)
if (!list_empty(&chip->mixer_list))
return 0;
- dev_info(&dev->dev,
- "Presonus Studio 1810c, device_setup: %u\n", chip->setup);
- if (chip->setup == 1)
- dev_info(&dev->dev, "(8out/18in @ 48kHz)\n");
- else if (chip->setup == 2)
- dev_info(&dev->dev, "(6out/8in @ 192kHz)\n");
- else
- dev_info(&dev->dev, "(8out/14in @ 96kHz)\n");
-
ret = snd_s1810c_init_mixer_maps(chip);
if (ret < 0)
return ret;
@@ -634,16 +625,28 @@ int snd_sc1810_init_mixer(struct usb_mixer_interface *mixer)
if (ret < 0)
return ret;
- // The 1824c has a Mono Main switch instead of a
- // A/B select switch.
- if (mixer->chip->usb_id == USB_ID(0x194f, 0x010d)) {
- ret = snd_s1810c_switch_init(mixer, &snd_s1824c_mono_sw);
+ switch (chip->usb_id) {
+ case USB_ID(0x194f, 0x010c): /* Presonus Studio 1810c */
+ dev_info(&dev->dev,
+ "Presonus Studio 1810c, device_setup: %u\n", chip->setup);
+ if (chip->setup == 1)
+ dev_info(&dev->dev, "(8out/18in @ 48kHz)\n");
+ else if (chip->setup == 2)
+ dev_info(&dev->dev, "(6out/8in @ 192kHz)\n");
+ else
+ dev_info(&dev->dev, "(8out/14in @ 96kHz)\n");
+
+ ret = snd_s1810c_switch_init(mixer, &snd_s1810c_ab_sw);
if (ret < 0)
return ret;
- } else if (mixer->chip->usb_id == USB_ID(0x194f, 0x010c)) {
- ret = snd_s1810c_switch_init(mixer, &snd_s1810c_ab_sw);
+
+ break;
+ case USB_ID(0x194f, 0x010d): /* Presonus Studio 1824c */
+ ret = snd_s1810c_switch_init(mixer, &snd_s1824c_mono_sw);
if (ret < 0)
return ret;
+
+ break;
}
return ret;
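
The pipe fix matters beyond tidiness: the request writes pkt_out to the device, i.e. it is a host-to-device (SET) transfer, and USB requires the pipe direction to agree with the direction bit in bmRequestType. A generic illustration with the core USB API (the request value and recipient below are placeholders, not this driver's constants):

    #include <linux/usb.h>

    static int send_set_request(struct usb_device *dev, void *pkt, u16 len)
    {
        /* USB_DIR_OUT in the request type pairs with usb_sndctrlpipe();
         * an IN request would instead pair with usb_rcvctrlpipe(). */
        return usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
                               0x01,    /* bRequest: hypothetical */
                               USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
                               0, 0, pkt, len, 1000);
    }
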
diff --git a/tools/net/ynl/lib/ynl-priv.h b/tools/net/ynl/lib/ynl-priv.h
index 29481989ea76..ced7dce44efb 100644
--- a/tools/net/ynl/lib/ynl-priv.h
+++ b/tools/net/ynl/lib/ynl-priv.h
@@ -313,7 +313,7 @@ ynl_attr_put_str(struct nlmsghdr *nlh, unsigned int attr_type, const char *str)
struct nlattr *attr;
size_t len;
- len = strlen(str);
+ len = strlen(str) + 1;
if (__ynl_attr_put_overflow(nlh, len))
return;
@@ -321,7 +321,7 @@ ynl_attr_put_str(struct nlmsghdr *nlh, unsigned int attr_type, const char *str)
attr->nla_type = attr_type;
strcpy((char *)ynl_attr_data(attr), str);
- attr->nla_len = NLA_HDRLEN + NLA_ALIGN(len);
+ attr->nla_len = NLA_HDRLEN + len;
nlh->nlmsg_len += NLMSG_ALIGN(attr->nla_len);
}
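
The two changes go together: a netlink string attribute carries its terminating NUL, so the payload length must be strlen() + 1, and nla_len records that exact payload size while padding is applied only when advancing the message cursor. Worked through for the string "eth0" (NLA_HDRLEN is 4):

    /* len           = strlen("eth0") + 1   = 5   terminator included
     * attr->nla_len = NLA_HDRLEN + len     = 9   exact, no padding
     * nlmsg_len    += NLMSG_ALIGN(9)       = 12  next attribute aligned
     *
     * Receivers use nla_len for the true payload size; the old code
     * folded the padding into nla_len, overstating the string length. */
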
diff --git a/tools/net/ynl/pyynl/ethtool.py b/tools/net/ynl/pyynl/ethtool.py
index 9b523cbb3568..fd0f6b8d54d1 100755
--- a/tools/net/ynl/pyynl/ethtool.py
+++ b/tools/net/ynl/pyynl/ethtool.py
@@ -44,6 +44,9 @@ def print_field(reply, *desc):
Pretty-print a set of fields from the reply. desc specifies the
fields and the optional type (bool/yn).
"""
+ if not reply:
+ return
+
if len(desc) == 0:
return print_field(reply, *zip(reply.keys(), reply.keys()))
diff --git a/tools/objtool/check.c b/tools/objtool/check.c
index a5770570b106..9004fbc06769 100644
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -217,6 +217,7 @@ static bool is_rust_noreturn(const struct symbol *func)
* these come from the Rust standard library).
*/
return str_ends_with(func->name, "_4core5sliceSp15copy_from_slice17len_mismatch_fail") ||
+ str_ends_with(func->name, "_4core6option13expect_failed") ||
str_ends_with(func->name, "_4core6option13unwrap_failed") ||
str_ends_with(func->name, "_4core6result13unwrap_failed") ||
str_ends_with(func->name, "_4core9panicking5panic") ||
@@ -3515,8 +3516,11 @@ static bool skip_alt_group(struct instruction *insn)
{
struct instruction *alt_insn = insn->alts ? insn->alts->insn : NULL;
+ if (!insn->alt_group)
+ return false;
+
/* ANNOTATE_IGNORE_ALTERNATIVE */
- if (insn->alt_group && insn->alt_group->ignore)
+ if (insn->alt_group->ignore)
return true;
/*
@@ -4710,8 +4714,8 @@ static int check_abs_references(struct objtool_file *file)
for_each_reloc(sec->rsec, reloc) {
if (arch_absolute_reloc(file->elf, reloc)) {
- WARN("section %s has absolute relocation at offset 0x%lx",
- sec->name, reloc_offset(reloc));
+ WARN("section %s has absolute relocation at offset 0x%llx",
+ sec->name, (unsigned long long)reloc_offset(reloc));
ret++;
}
}
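
Besides the new Rust no-return symbol and the NULL alt_group guard, the WARN change fixes a format-string mismatch: reloc_offset() is not unsigned long on every host and build configuration, so %lx is only sometimes correct. The portable idiom is a single cast to the widest standard unsigned type, shown as a standalone example:

    #include <stdio.h>
    #include <sys/types.h>

    int main(void)
    {
        off_t off = 0x1000;     /* width differs across platforms/builds */

        /* Cast once and print with %llx: well-defined everywhere. */
        printf("offset 0x%llx\n", (unsigned long long)off);
        return 0;
    }
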
diff --git a/tools/testing/selftests/cachestat/.gitignore b/tools/testing/selftests/cachestat/.gitignore
index d6c30b43a4bb..abbb13b6e96b 100644
--- a/tools/testing/selftests/cachestat/.gitignore
+++ b/tools/testing/selftests/cachestat/.gitignore
@@ -1,2 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
test_cachestat
+tmpshmcstat
diff --git a/tools/testing/selftests/cachestat/test_cachestat.c b/tools/testing/selftests/cachestat/test_cachestat.c
index c952640f163b..ab838bcb9ec5 100644
--- a/tools/testing/selftests/cachestat/test_cachestat.c
+++ b/tools/testing/selftests/cachestat/test_cachestat.c
@@ -226,7 +226,7 @@ bool run_cachestat_test(enum file_type type)
int syscall_ret;
size_t compute_len = PS * 512;
struct cachestat_range cs_range = { PS, compute_len };
- char *filename = "tmpshmcstat";
+ char *filename = "tmpshmcstat", *map;
struct cachestat cs;
bool ret = true;
int fd;
@@ -257,7 +257,7 @@ bool run_cachestat_test(enum file_type type)
}
break;
case FILE_MMAP:
- char *map = mmap(NULL, filesize, PROT_READ | PROT_WRITE,
+ map = mmap(NULL, filesize, PROT_READ | PROT_WRITE,
MAP_SHARED, fd, 0);
if (map == MAP_FAILED) {
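
The FILE_MMAP hunk is a build fix: before C23, a case label must be attached to a statement, and a declaration is not a statement, so "case FILE_MMAP: char *map = ..." fails to compile. Hoisting the declaration (as the patch does) or opening a block after the label both work; a self-contained sketch of the two accepted forms:

    char *get(void);
    void use(char *p);

    void hoisted(int type)
    {
        char *map;                /* fix 1: hoist, as the patch above does */

        switch (type) {
        case 1:
            map = get();
            use(map);
            break;
        }
    }

    void scoped(int type)
    {
        switch (type) {
        case 1: {                 /* fix 2: open a scope after the label */
            char *map = get();
            use(map);
            break;
        }
        }
    }
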
diff --git a/tools/testing/selftests/net/bareudp.sh b/tools/testing/selftests/net/bareudp.sh
index 4046131e7888..d9e5b967f815 100755
--- a/tools/testing/selftests/net/bareudp.sh
+++ b/tools/testing/selftests/net/bareudp.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
# Test various bareudp tunnel configurations.
diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh
index c90d8e8b95cb..78a1aa4ecff2 100755
--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
+++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
@@ -2324,7 +2324,7 @@ laminar_endp_tests()
{
# no laminar endpoints: routing rules are used
if reset_with_tcp_filter "without a laminar endpoint" ns1 10.0.2.2 REJECT &&
- mptcp_lib_kallsyms_has "mptcp_pm_get_endp_laminar_max$"; then
+ continue_if mptcp_lib_kallsyms_has "mptcp_pm_get_endp_laminar_max$"; then
pm_nl_set_limits $ns1 0 2
pm_nl_set_limits $ns2 2 2
pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
@@ -2336,7 +2336,7 @@ laminar_endp_tests()
# laminar endpoints: this endpoint is used
if reset_with_tcp_filter "with a laminar endpoint" ns1 10.0.2.2 REJECT &&
- mptcp_lib_kallsyms_has "mptcp_pm_get_endp_laminar_max$"; then
+ continue_if mptcp_lib_kallsyms_has "mptcp_pm_get_endp_laminar_max$"; then
pm_nl_set_limits $ns1 0 2
pm_nl_set_limits $ns2 2 2
pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
@@ -2348,7 +2348,7 @@ laminar_endp_tests()
# laminar endpoints: these endpoints are used
if reset_with_tcp_filter "with multiple laminar endpoints" ns1 10.0.2.2 REJECT &&
- mptcp_lib_kallsyms_has "mptcp_pm_get_endp_laminar_max$"; then
+ continue_if mptcp_lib_kallsyms_has "mptcp_pm_get_endp_laminar_max$"; then
pm_nl_set_limits $ns1 0 2
pm_nl_set_limits $ns2 2 2
pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
@@ -2363,7 +2363,7 @@ laminar_endp_tests()
# laminar endpoints: only one endpoint is used
if reset_with_tcp_filter "single laminar endpoint" ns1 10.0.2.2 REJECT &&
- mptcp_lib_kallsyms_has "mptcp_pm_get_endp_laminar_max$"; then
+ continue_if mptcp_lib_kallsyms_has "mptcp_pm_get_endp_laminar_max$"; then
pm_nl_set_limits $ns1 0 2
pm_nl_set_limits $ns2 2 2
pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
@@ -2376,7 +2376,7 @@ laminar_endp_tests()
# laminar endpoints: subflow and laminar flags
if reset_with_tcp_filter "sublow + laminar endpoints" ns1 10.0.2.2 REJECT &&
- mptcp_lib_kallsyms_has "mptcp_pm_get_endp_laminar_max$"; then
+ continue_if mptcp_lib_kallsyms_has "mptcp_pm_get_endp_laminar_max$"; then
pm_nl_set_limits $ns1 0 4
pm_nl_set_limits $ns2 2 4
pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
@@ -3939,7 +3939,7 @@ endpoint_tests()
# subflow_rebuild_header is needed to support the implicit flag
# userspace pm type prevents add_addr
if reset "implicit EP" &&
- mptcp_lib_kallsyms_has "subflow_rebuild_header$"; then
+ continue_if mptcp_lib_kallsyms_has "subflow_rebuild_header$"; then
pm_nl_set_limits $ns1 2 2
pm_nl_set_limits $ns2 2 2
pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
@@ -3964,7 +3964,7 @@ endpoint_tests()
fi
if reset_with_tcp_filter "delete and re-add" ns2 10.0.3.2 REJECT OUTPUT &&
- mptcp_lib_kallsyms_has "subflow_rebuild_header$"; then
+ continue_if mptcp_lib_kallsyms_has "subflow_rebuild_header$"; then
start_events
pm_nl_set_limits $ns1 0 3
pm_nl_set_limits $ns2 0 3
@@ -4040,7 +4040,7 @@ endpoint_tests()
# remove and re-add
if reset_with_events "delete re-add signal" &&
- mptcp_lib_kallsyms_has "subflow_rebuild_header$"; then
+ continue_if mptcp_lib_kallsyms_has "subflow_rebuild_header$"; then
ip netns exec $ns1 sysctl -q net.mptcp.add_addr_timeout=0
pm_nl_set_limits $ns1 0 3
pm_nl_set_limits $ns2 3 3
@@ -4115,7 +4115,7 @@ endpoint_tests()
# flush and re-add
if reset_with_tcp_filter "flush re-add" ns2 10.0.3.2 REJECT OUTPUT &&
- mptcp_lib_kallsyms_has "subflow_rebuild_header$"; then
+ continue_if mptcp_lib_kallsyms_has "subflow_rebuild_header$"; then
pm_nl_set_limits $ns1 0 2
pm_nl_set_limits $ns2 1 2
# broadcast IP: no packet for this address will be received on ns1
diff --git a/tools/testing/selftests/net/sctp_hello.c b/tools/testing/selftests/net/sctp_hello.c
index f02f1f95d227..a04dac0b8027 100644
--- a/tools/testing/selftests/net/sctp_hello.c
+++ b/tools/testing/selftests/net/sctp_hello.c
@@ -29,7 +29,6 @@ static void set_addr(struct sockaddr_storage *ss, char *ip, char *port, int *len
static int do_client(int argc, char *argv[])
{
struct sockaddr_storage ss;
- char buf[] = "hello";
int csk, ret, len;
if (argc < 5) {
@@ -56,16 +55,10 @@ static int do_client(int argc, char *argv[])
set_addr(&ss, argv[3], argv[4], &len);
ret = connect(csk, (struct sockaddr *)&ss, len);
- if (ret < 0) {
- printf("failed to connect to peer\n");
+ if (ret < 0)
return -1;
- }
- ret = send(csk, buf, strlen(buf) + 1, 0);
- if (ret < 0) {
- printf("failed to send msg %d\n", ret);
- return -1;
- }
+ recv(csk, NULL, 0, 0);
close(csk);
return 0;
@@ -75,7 +68,6 @@ int main(int argc, char *argv[])
{
struct sockaddr_storage ss;
int lsk, csk, ret, len;
- char buf[20];
if (argc < 2 || (strcmp(argv[1], "server") && strcmp(argv[1], "client"))) {
printf("%s server|client ...\n", argv[0]);
@@ -125,11 +117,6 @@ int main(int argc, char *argv[])
return -1;
}
- ret = recv(csk, buf, sizeof(buf), 0);
- if (ret <= 0) {
- printf("failed to recv msg %d\n", ret);
- return -1;
- }
close(csk);
close(lsk);
diff --git a/tools/testing/selftests/net/sctp_vrf.sh b/tools/testing/selftests/net/sctp_vrf.sh
index c854034b6aa1..667b211aa8a1 100755
--- a/tools/testing/selftests/net/sctp_vrf.sh
+++ b/tools/testing/selftests/net/sctp_vrf.sh
@@ -20,9 +20,9 @@ setup() {
modprobe sctp_diag
setup_ns CLIENT_NS1 CLIENT_NS2 SERVER_NS
- ip net exec $CLIENT_NS1 sysctl -w net.ipv6.conf.default.accept_dad=0 2>&1 >/dev/null
- ip net exec $CLIENT_NS2 sysctl -w net.ipv6.conf.default.accept_dad=0 2>&1 >/dev/null
- ip net exec $SERVER_NS sysctl -w net.ipv6.conf.default.accept_dad=0 2>&1 >/dev/null
+ ip net exec $CLIENT_NS1 sysctl -wq net.ipv6.conf.default.accept_dad=0
+ ip net exec $CLIENT_NS2 sysctl -wq net.ipv6.conf.default.accept_dad=0
+ ip net exec $SERVER_NS sysctl -wq net.ipv6.conf.default.accept_dad=0
ip -n $SERVER_NS link add veth1 type veth peer name veth1 netns $CLIENT_NS1
ip -n $SERVER_NS link add veth2 type veth peer name veth1 netns $CLIENT_NS2
@@ -62,17 +62,40 @@ setup() {
}
cleanup() {
- ip netns exec $SERVER_NS pkill sctp_hello 2>&1 >/dev/null
+ wait_client $CLIENT_NS1
+ wait_client $CLIENT_NS2
+ stop_server
cleanup_ns $CLIENT_NS1 $CLIENT_NS2 $SERVER_NS
}
-wait_server() {
+start_server() {
local IFACE=$1
local CNT=0
- until ip netns exec $SERVER_NS ss -lS src $SERVER_IP:$SERVER_PORT | \
- grep LISTEN | grep "$IFACE" 2>&1 >/dev/null; do
- [ $((CNT++)) = "20" ] && { RET=3; return $RET; }
+ ip netns exec $SERVER_NS ./sctp_hello server $AF $SERVER_IP $SERVER_PORT $IFACE &
+ disown
+ until ip netns exec $SERVER_NS ss -SlH | grep -q "$IFACE"; do
+ [ $((CNT++)) -eq 30 ] && { RET=3; return $RET; }
+ sleep 0.1
+ done
+}
+
+stop_server() {
+ local CNT=0
+
+ ip netns exec $SERVER_NS pkill sctp_hello
+ while ip netns exec $SERVER_NS ss -SaH | grep -q .; do
+ [ $((CNT++)) -eq 30 ] && break
+ sleep 0.1
+ done
+}
+
+wait_client() {
+ local CLIENT_NS=$1
+ local CNT=0
+
+ while ip netns exec $CLIENT_NS ss -SaH | grep -q .; do
+ [ $((CNT++)) -eq 30 ] && break
sleep 0.1
done
}
@@ -81,14 +104,12 @@ do_test() {
local CLIENT_NS=$1
local IFACE=$2
- ip netns exec $SERVER_NS pkill sctp_hello 2>&1 >/dev/null
- ip netns exec $SERVER_NS ./sctp_hello server $AF $SERVER_IP \
- $SERVER_PORT $IFACE 2>&1 >/dev/null &
- disown
- wait_server $IFACE || return $RET
+ start_server $IFACE || return $RET
timeout 3 ip netns exec $CLIENT_NS ./sctp_hello client $AF \
- $SERVER_IP $SERVER_PORT $CLIENT_IP $CLIENT_PORT 2>&1 >/dev/null
+ $SERVER_IP $SERVER_PORT $CLIENT_IP $CLIENT_PORT
RET=$?
+ wait_client $CLIENT_NS
+ stop_server
return $RET
}
@@ -96,25 +117,21 @@ do_testx() {
local IFACE1=$1
local IFACE2=$2
- ip netns exec $SERVER_NS pkill sctp_hello 2>&1 >/dev/null
- ip netns exec $SERVER_NS ./sctp_hello server $AF $SERVER_IP \
- $SERVER_PORT $IFACE1 2>&1 >/dev/null &
- disown
- wait_server $IFACE1 || return $RET
- ip netns exec $SERVER_NS ./sctp_hello server $AF $SERVER_IP \
- $SERVER_PORT $IFACE2 2>&1 >/dev/null &
- disown
- wait_server $IFACE2 || return $RET
+ start_server $IFACE1 || return $RET
+ start_server $IFACE2 || return $RET
timeout 3 ip netns exec $CLIENT_NS1 ./sctp_hello client $AF \
- $SERVER_IP $SERVER_PORT $CLIENT_IP $CLIENT_PORT 2>&1 >/dev/null && \
+ $SERVER_IP $SERVER_PORT $CLIENT_IP $CLIENT_PORT && \
timeout 3 ip netns exec $CLIENT_NS2 ./sctp_hello client $AF \
- $SERVER_IP $SERVER_PORT $CLIENT_IP $CLIENT_PORT 2>&1 >/dev/null
+ $SERVER_IP $SERVER_PORT $CLIENT_IP $CLIENT_PORT
RET=$?
+ wait_client $CLIENT_NS1
+ wait_client $CLIENT_NS2
+ stop_server
return $RET
}
testup() {
- ip netns exec $SERVER_NS sysctl -w net.sctp.l3mdev_accept=1 2>&1 >/dev/null
+ ip netns exec $SERVER_NS sysctl -wq net.sctp.l3mdev_accept=1
echo -n "TEST 01: nobind, connect from client 1, l3mdev_accept=1, Y "
do_test $CLIENT_NS1 || { echo "[FAIL]"; return $RET; }
echo "[PASS]"
@@ -123,7 +140,7 @@ testup() {
do_test $CLIENT_NS2 && { echo "[FAIL]"; return $RET; }
echo "[PASS]"
- ip netns exec $SERVER_NS sysctl -w net.sctp.l3mdev_accept=0 2>&1 >/dev/null
+ ip netns exec $SERVER_NS sysctl -wq net.sctp.l3mdev_accept=0
echo -n "TEST 03: nobind, connect from client 1, l3mdev_accept=0, N "
do_test $CLIENT_NS1 && { echo "[FAIL]"; return $RET; }
echo "[PASS]"
@@ -160,7 +177,7 @@ testup() {
do_testx vrf-1 vrf-2 || { echo "[FAIL]"; return $RET; }
echo "[PASS]"
- echo -n "TEST 12: bind vrf-2 & 1 in server, connect from client 1 & 2, N "
+ echo -n "TEST 12: bind vrf-2 & 1 in server, connect from client 1 & 2, Y "
do_testx vrf-2 vrf-1 || { echo "[FAIL]"; return $RET; }
echo "[PASS]"
}
diff --git a/tools/testing/selftests/vfio/lib/include/vfio_util.h b/tools/testing/selftests/vfio/lib/include/vfio_util.h
index ed31606e01b7..240409bf5f8a 100644
--- a/tools/testing/selftests/vfio/lib/include/vfio_util.h
+++ b/tools/testing/selftests/vfio/lib/include/vfio_util.h
@@ -206,10 +206,29 @@ struct vfio_pci_device *vfio_pci_device_init(const char *bdf, const char *iommu_
void vfio_pci_device_cleanup(struct vfio_pci_device *device);
void vfio_pci_device_reset(struct vfio_pci_device *device);
-void vfio_pci_dma_map(struct vfio_pci_device *device,
- struct vfio_dma_region *region);
-void vfio_pci_dma_unmap(struct vfio_pci_device *device,
- struct vfio_dma_region *region);
+int __vfio_pci_dma_map(struct vfio_pci_device *device,
+ struct vfio_dma_region *region);
+int __vfio_pci_dma_unmap(struct vfio_pci_device *device,
+ struct vfio_dma_region *region,
+ u64 *unmapped);
+int __vfio_pci_dma_unmap_all(struct vfio_pci_device *device, u64 *unmapped);
+
+static inline void vfio_pci_dma_map(struct vfio_pci_device *device,
+ struct vfio_dma_region *region)
+{
+ VFIO_ASSERT_EQ(__vfio_pci_dma_map(device, region), 0);
+}
+
+static inline void vfio_pci_dma_unmap(struct vfio_pci_device *device,
+ struct vfio_dma_region *region)
+{
+ VFIO_ASSERT_EQ(__vfio_pci_dma_unmap(device, region, NULL), 0);
+}
+
+static inline void vfio_pci_dma_unmap_all(struct vfio_pci_device *device)
+{
+ VFIO_ASSERT_EQ(__vfio_pci_dma_unmap_all(device, NULL), 0);
+}
void vfio_pci_config_access(struct vfio_pci_device *device, bool write,
size_t config, size_t size, void *data);
diff --git a/tools/testing/selftests/vfio/lib/vfio_pci_device.c b/tools/testing/selftests/vfio/lib/vfio_pci_device.c
index 0921b2451ba5..a381fd253aa7 100644
--- a/tools/testing/selftests/vfio/lib/vfio_pci_device.c
+++ b/tools/testing/selftests/vfio/lib/vfio_pci_device.c
@@ -2,6 +2,7 @@
#include <dirent.h>
#include <fcntl.h>
#include <libgen.h>
+#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
@@ -141,7 +142,7 @@ static void vfio_pci_irq_get(struct vfio_pci_device *device, u32 index,
ioctl_assert(device->fd, VFIO_DEVICE_GET_IRQ_INFO, irq_info);
}
-static void vfio_iommu_dma_map(struct vfio_pci_device *device,
+static int vfio_iommu_dma_map(struct vfio_pci_device *device,
struct vfio_dma_region *region)
{
struct vfio_iommu_type1_dma_map args = {
@@ -152,10 +153,13 @@ static void vfio_iommu_dma_map(struct vfio_pci_device *device,
.size = region->size,
};
- ioctl_assert(device->container_fd, VFIO_IOMMU_MAP_DMA, &args);
+ if (ioctl(device->container_fd, VFIO_IOMMU_MAP_DMA, &args))
+ return -errno;
+
+ return 0;
}
-static void iommufd_dma_map(struct vfio_pci_device *device,
+static int iommufd_dma_map(struct vfio_pci_device *device,
struct vfio_dma_region *region)
{
struct iommu_ioas_map args = {
@@ -169,54 +173,108 @@ static void iommufd_dma_map(struct vfio_pci_device *device,
.ioas_id = device->ioas_id,
};
- ioctl_assert(device->iommufd, IOMMU_IOAS_MAP, &args);
+ if (ioctl(device->iommufd, IOMMU_IOAS_MAP, &args))
+ return -errno;
+
+ return 0;
}
-void vfio_pci_dma_map(struct vfio_pci_device *device,
+int __vfio_pci_dma_map(struct vfio_pci_device *device,
struct vfio_dma_region *region)
{
+ int ret;
+
if (device->iommufd)
- iommufd_dma_map(device, region);
+ ret = iommufd_dma_map(device, region);
else
- vfio_iommu_dma_map(device, region);
+ ret = vfio_iommu_dma_map(device, region);
+
+ if (ret)
+ return ret;
list_add(&region->link, &device->dma_regions);
+
+ return 0;
}
-static void vfio_iommu_dma_unmap(struct vfio_pci_device *device,
- struct vfio_dma_region *region)
+static int vfio_iommu_dma_unmap(int fd, u64 iova, u64 size, u32 flags,
+ u64 *unmapped)
{
struct vfio_iommu_type1_dma_unmap args = {
.argsz = sizeof(args),
- .iova = region->iova,
- .size = region->size,
+ .iova = iova,
+ .size = size,
+ .flags = flags,
};
- ioctl_assert(device->container_fd, VFIO_IOMMU_UNMAP_DMA, &args);
+ if (ioctl(fd, VFIO_IOMMU_UNMAP_DMA, &args))
+ return -errno;
+
+ if (unmapped)
+ *unmapped = args.size;
+
+ return 0;
}
-static void iommufd_dma_unmap(struct vfio_pci_device *device,
- struct vfio_dma_region *region)
+static int iommufd_dma_unmap(int fd, u64 iova, u64 length, u32 ioas_id,
+ u64 *unmapped)
{
struct iommu_ioas_unmap args = {
.size = sizeof(args),
- .iova = region->iova,
- .length = region->size,
- .ioas_id = device->ioas_id,
+ .iova = iova,
+ .length = length,
+ .ioas_id = ioas_id,
};
- ioctl_assert(device->iommufd, IOMMU_IOAS_UNMAP, &args);
+ if (ioctl(fd, IOMMU_IOAS_UNMAP, &args))
+ return -errno;
+
+ if (unmapped)
+ *unmapped = args.length;
+
+ return 0;
}
-void vfio_pci_dma_unmap(struct vfio_pci_device *device,
- struct vfio_dma_region *region)
+int __vfio_pci_dma_unmap(struct vfio_pci_device *device,
+ struct vfio_dma_region *region, u64 *unmapped)
{
+ int ret;
+
+ if (device->iommufd)
+ ret = iommufd_dma_unmap(device->iommufd, region->iova,
+ region->size, device->ioas_id,
+ unmapped);
+ else
+ ret = vfio_iommu_dma_unmap(device->container_fd, region->iova,
+ region->size, 0, unmapped);
+
+ if (ret)
+ return ret;
+
+ list_del_init(&region->link);
+
+ return 0;
+}
+
+int __vfio_pci_dma_unmap_all(struct vfio_pci_device *device, u64 *unmapped)
+{
+ int ret;
+ struct vfio_dma_region *curr, *next;
+
if (device->iommufd)
- iommufd_dma_unmap(device, region);
+ ret = iommufd_dma_unmap(device->iommufd, 0, UINT64_MAX,
+ device->ioas_id, unmapped);
else
- vfio_iommu_dma_unmap(device, region);
+ ret = vfio_iommu_dma_unmap(device->container_fd, 0, 0,
+ VFIO_DMA_UNMAP_FLAG_ALL, unmapped);
+
+ if (ret)
+ return ret;
+
+ list_for_each_entry_safe(curr, next, &device->dma_regions, link)
+ list_del_init(&curr->link);
- list_del(&region->link);
+ return 0;
}
static void vfio_pci_region_get(struct vfio_pci_device *device, int index,
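
The refactor converts each DMA helper into a __-prefixed variant that returns 0 or -errno, keeping the original names as static inline wrappers that assert success. Existing callers keep their crash-on-failure semantics, while new tests can exercise expected failures (such as the -EOVERFLOW cases below) without tripping an assert. The convention in miniature:

    #include <assert.h>

    int __do_map(int fd);             /* returns 0 or -errno */

    /* Old name, old contract: success or abort. */
    static inline void do_map(int fd)
    {
        assert(__do_map(fd) == 0);    /* the library uses VFIO_ASSERT_EQ() */
    }
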
diff --git a/tools/testing/selftests/vfio/vfio_dma_mapping_test.c b/tools/testing/selftests/vfio/vfio_dma_mapping_test.c
index ab19c54a774d..4f1ea79a200c 100644
--- a/tools/testing/selftests/vfio/vfio_dma_mapping_test.c
+++ b/tools/testing/selftests/vfio/vfio_dma_mapping_test.c
@@ -112,6 +112,8 @@ FIXTURE_VARIANT_ADD_ALL_IOMMU_MODES(anonymous, 0, 0);
FIXTURE_VARIANT_ADD_ALL_IOMMU_MODES(anonymous_hugetlb_2mb, SZ_2M, MAP_HUGETLB | MAP_HUGE_2MB);
FIXTURE_VARIANT_ADD_ALL_IOMMU_MODES(anonymous_hugetlb_1gb, SZ_1G, MAP_HUGETLB | MAP_HUGE_1GB);
+#undef FIXTURE_VARIANT_ADD_IOMMU_MODE
+
FIXTURE_SETUP(vfio_dma_mapping_test)
{
self->device = vfio_pci_device_init(device_bdf, variant->iommu_mode);
@@ -129,6 +131,7 @@ TEST_F(vfio_dma_mapping_test, dma_map_unmap)
struct vfio_dma_region region;
struct iommu_mapping mapping;
u64 mapping_size = size;
+ u64 unmapped;
int rc;
region.vaddr = mmap(NULL, size, PROT_READ | PROT_WRITE, flags, -1, 0);
@@ -184,7 +187,9 @@ TEST_F(vfio_dma_mapping_test, dma_map_unmap)
}
unmap:
- vfio_pci_dma_unmap(self->device, &region);
+ rc = __vfio_pci_dma_unmap(self->device, &region, &unmapped);
+ ASSERT_EQ(rc, 0);
+ ASSERT_EQ(unmapped, region.size);
printf("Unmapped IOVA 0x%lx\n", region.iova);
ASSERT_EQ(INVALID_IOVA, __to_iova(self->device, region.vaddr));
ASSERT_NE(0, iommu_mapping_get(device_bdf, region.iova, &mapping));
@@ -192,6 +197,94 @@ unmap:
ASSERT_TRUE(!munmap(region.vaddr, size));
}
+FIXTURE(vfio_dma_map_limit_test) {
+ struct vfio_pci_device *device;
+ struct vfio_dma_region region;
+ size_t mmap_size;
+};
+
+FIXTURE_VARIANT(vfio_dma_map_limit_test) {
+ const char *iommu_mode;
+};
+
+#define FIXTURE_VARIANT_ADD_IOMMU_MODE(_iommu_mode) \
+FIXTURE_VARIANT_ADD(vfio_dma_map_limit_test, _iommu_mode) { \
+ .iommu_mode = #_iommu_mode, \
+}
+
+FIXTURE_VARIANT_ADD_ALL_IOMMU_MODES();
+
+#undef FIXTURE_VARIANT_ADD_IOMMU_MODE
+
+FIXTURE_SETUP(vfio_dma_map_limit_test)
+{
+ struct vfio_dma_region *region = &self->region;
+ u64 region_size = getpagesize();
+
+ /*
+ * Over-allocate mmap by double the size to provide enough backing vaddr
+ * for overflow tests
+ */
+ self->mmap_size = 2 * region_size;
+
+ self->device = vfio_pci_device_init(device_bdf, variant->iommu_mode);
+ region->vaddr = mmap(NULL, self->mmap_size, PROT_READ | PROT_WRITE,
+ MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+ ASSERT_NE(region->vaddr, MAP_FAILED);
+
+ /* One page prior to the end of address space */
+ region->iova = ~(iova_t)0 & ~(region_size - 1);
+ region->size = region_size;
+}
+
+FIXTURE_TEARDOWN(vfio_dma_map_limit_test)
+{
+ vfio_pci_device_cleanup(self->device);
+ ASSERT_EQ(munmap(self->region.vaddr, self->mmap_size), 0);
+}
+
+TEST_F(vfio_dma_map_limit_test, unmap_range)
+{
+ struct vfio_dma_region *region = &self->region;
+ u64 unmapped;
+ int rc;
+
+ vfio_pci_dma_map(self->device, region);
+ ASSERT_EQ(region->iova, to_iova(self->device, region->vaddr));
+
+ rc = __vfio_pci_dma_unmap(self->device, region, &unmapped);
+ ASSERT_EQ(rc, 0);
+ ASSERT_EQ(unmapped, region->size);
+}
+
+TEST_F(vfio_dma_map_limit_test, unmap_all)
+{
+ struct vfio_dma_region *region = &self->region;
+ u64 unmapped;
+ int rc;
+
+ vfio_pci_dma_map(self->device, region);
+ ASSERT_EQ(region->iova, to_iova(self->device, region->vaddr));
+
+ rc = __vfio_pci_dma_unmap_all(self->device, &unmapped);
+ ASSERT_EQ(rc, 0);
+ ASSERT_EQ(unmapped, region->size);
+}
+
+TEST_F(vfio_dma_map_limit_test, overflow)
+{
+ struct vfio_dma_region *region = &self->region;
+ int rc;
+
+ region->size = self->mmap_size;
+
+ rc = __vfio_pci_dma_map(self->device, region);
+ ASSERT_EQ(rc, -EOVERFLOW);
+
+ rc = __vfio_pci_dma_unmap(self->device, region, NULL);
+ ASSERT_EQ(rc, -EOVERFLOW);
+}
+
int main(int argc, char *argv[])
{
device_bdf = vfio_selftests_get_bdf(&argc, argv);