-rw-r--r--Documentation/ABI/testing/sysfs-devices-system-cpu1
-rw-r--r--Documentation/admin-guide/hw-vuln/index.rst1
-rw-r--r--Documentation/admin-guide/hw-vuln/vmscape.rst110
-rw-r--r--Documentation/admin-guide/kernel-parameters.txt11
-rw-r--r--Documentation/devicetree/bindings/net/apm,xgene-enet.yaml115
-rw-r--r--Documentation/devicetree/bindings/net/apm,xgene-mdio-rgmii.yaml54
-rw-r--r--Documentation/devicetree/bindings/net/apm-xgene-enet.txt91
-rw-r--r--Documentation/devicetree/bindings/net/apm-xgene-mdio.txt37
-rw-r--r--Documentation/devicetree/bindings/net/brcm,bcm7445-switch-v4.0.txt50
-rw-r--r--Documentation/devicetree/bindings/spi/spi-fsl-lpspi.yaml5
-rw-r--r--Documentation/netlink/specs/mptcp_pm.yaml2
-rw-r--r--Documentation/networking/can.rst2
-rw-r--r--Documentation/networking/mptcp.rst8
-rw-r--r--MAINTAINERS10
-rw-r--r--Makefile2
-rw-r--r--arch/arm64/include/asm/module.h1
-rw-r--r--arch/arm64/include/asm/module.lds.h1
-rw-r--r--arch/arm64/include/uapi/asm/bitsperlong.h5
-rw-r--r--arch/arm64/kernel/ftrace.c13
-rw-r--r--arch/arm64/kernel/machine_kexec_file.c2
-rw-r--r--arch/arm64/kernel/module-plts.c12
-rw-r--r--arch/arm64/kernel/module.c11
-rw-r--r--arch/riscv/Kconfig2
-rw-r--r--arch/riscv/include/asm/asm.h2
-rw-r--r--arch/riscv/include/asm/uaccess.h8
-rw-r--r--arch/riscv/kernel/entry.S2
-rw-r--r--arch/riscv/kernel/kexec_elf.c4
-rw-r--r--arch/riscv/kernel/kexec_image.c2
-rw-r--r--arch/riscv/kernel/machine_kexec_file.c2
-rw-r--r--arch/riscv/net/bpf_jit_comp64.c4
-rw-r--r--arch/s390/kernel/kexec_elf.c2
-rw-r--r--arch/s390/kernel/kexec_image.c2
-rw-r--r--arch/s390/kernel/machine_kexec_file.c6
-rw-r--r--arch/s390/kernel/perf_cpum_cf.c4
-rw-r--r--arch/s390/kernel/perf_pai_crypto.c4
-rw-r--r--arch/s390/kernel/perf_pai_ext.c2
-rw-r--r--arch/s390/mm/pgtable.c2
-rw-r--r--arch/x86/Kconfig9
-rw-r--r--arch/x86/include/asm/cpufeatures.h2
-rw-r--r--arch/x86/include/asm/entry-common.h7
-rw-r--r--arch/x86/include/asm/nospec-branch.h2
-rw-r--r--arch/x86/kernel/cpu/bugs.c285
-rw-r--r--arch/x86/kernel/cpu/common.c86
-rw-r--r--arch/x86/kvm/x86.c9
-rw-r--r--block/fops.c13
-rw-r--r--crypto/sha1.c39
-rw-r--r--crypto/sha256.c71
-rw-r--r--crypto/sha512.c71
-rw-r--r--drivers/accel/ivpu/ivpu_drv.c2
-rw-r--r--drivers/accel/ivpu/ivpu_pm.c4
-rw-r--r--drivers/accel/ivpu/ivpu_pm.h2
-rw-r--r--drivers/acpi/arm64/iort.c4
-rw-r--r--drivers/acpi/riscv/cppc.c4
-rw-r--r--drivers/base/cpu.c3
-rw-r--r--drivers/cpufreq/amd-pstate.c19
-rw-r--r--drivers/cpufreq/intel_pstate.c4
-rw-r--r--drivers/edac/altera_edac.c1
-rw-r--r--drivers/gpio/Kconfig6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v10_0.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v11_0.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v6_0.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v8_0.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mes_v11_0.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c6
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c72
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h3
-rw-r--r--drivers/gpu/drm/bridge/ti-sn65dsi86.c11
-rw-r--r--drivers/gpu/drm/nouveau/gv100_fence.c7
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/class/clc36f.h85
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga100.c23
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga102.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fifo.c1
-rw-r--r--drivers/gpu/drm/scheduler/sched_entity.c11
-rw-r--r--drivers/gpu/drm/xe/xe_bo.c3
-rw-r--r--drivers/hwmon/ina238.c9
-rw-r--r--drivers/hwmon/mlxreg-fan.c5
-rw-r--r--drivers/i2c/busses/i2c-i801.c2
-rw-r--r--drivers/i2c/busses/i2c-rtl9300.c22
-rw-r--r--drivers/md/md.c5
-rw-r--r--drivers/md/raid1.c2
-rw-r--r--drivers/net/can/rcar/rcar_can.c8
-rw-r--r--drivers/net/can/xilinx_can.c16
-rw-r--r--drivers/net/dsa/b53/b53_common.c17
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c3
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_common.c34
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c18
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_prototype.h2
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c5
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c3
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c8
-rw-r--r--drivers/net/ethernet/realtek/Kconfig2
-rw-r--r--drivers/net/ethernet/renesas/rcar_gen4_ptp.c76
-rw-r--r--drivers/net/ethernet/renesas/rcar_gen4_ptp.h33
-rw-r--r--drivers/net/ethernet/renesas/rswitch_main.c3
-rw-r--r--drivers/net/ethernet/renesas/rtsn.c3
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_prueth.c20
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_hw.c4
-rw-r--r--drivers/net/geneve.c4
-rw-r--r--drivers/net/macsec.c1
-rw-r--r--drivers/net/phy/Kconfig2
-rw-r--r--drivers/net/phy/phy.c12
-rw-r--r--drivers/net/phy/phy_device.c5
-rw-r--r--drivers/net/phy/phylink.c28
-rw-r--r--drivers/net/wireguard/queueing.h13
-rw-r--r--drivers/net/wireless/ath/ath12k/mac.c122
-rw-r--r--drivers/net/wireless/ath/ath12k/wmi.c2
-rw-r--r--drivers/net/wireless/ath/carl9170/rx.c2
-rw-r--r--drivers/net/wireless/ath/wil6210/cfg80211.c1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c23
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c14
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c8
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h1
-rw-r--r--drivers/net/wireless/intel/iwlegacy/iwl-spectrum.h24
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/bz.c18
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/dr.c13
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/rf-gf.c22
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/rf-hr.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/sc.c18
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/eeprom.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/power.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/acpi.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/acpi.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/d3.h113
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/offload.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/power.h34
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/rs.h35
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.c43
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dump.c54
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/error-dump.h3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/pnvm.c81
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/regulatory.c53
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/regulatory.h1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/runtime.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/uefi.c7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-config.h40
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.c47
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-io.c95
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-io.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c80
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h74
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.c71
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.h81
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mei/sap.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/d3.c553
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/debugfs.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/iface.c39
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/iface.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/key.c38
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/key.h7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/link.c26
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/link.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/mac80211.c19
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/mld.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/mlo.c34
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/notif.c1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/regulatory.c28
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/roc.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/rx.c26
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/scan.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/sta.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/stats.c11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tlc.c75
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/coex.c131
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/constants.h20
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/d3.c384
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c94
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/link.c809
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c38
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c124
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c138
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h136
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c53
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rx.c133
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c23
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/scan.c101
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.c89
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.h24
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tests/Makefile2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tests/links.c433
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/time-event.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/utils.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/drv.c35
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/internal.h53
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans-gen2.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans.c237
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/tx.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/tests/Makefile2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/tests/nvm_parse.c72
-rw-r--r--drivers/net/wireless/intersil/p54/txrx.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfg80211.c7
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.c5
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.h3
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_cmd.c113
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_ioctl.c58
-rw-r--r--drivers/net/wireless/microchip/wilc1000/cfg80211.c7
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00dev.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/ps.c2
-rw-r--r--drivers/net/wireless/virtual/virt_wifi.c4
-rw-r--r--drivers/nvme/host/core.c18
-rw-r--r--drivers/pcmcia/Kconfig3
-rw-r--r--drivers/pcmcia/Makefile1
-rw-r--r--drivers/pcmcia/cs.c17
-rw-r--r--drivers/pcmcia/cs_internal.h1
-rw-r--r--drivers/pcmcia/ds.c2
-rw-r--r--drivers/pcmcia/omap_cf.c10
-rw-r--r--drivers/pcmcia/rsrc_iodyn.c168
-rw-r--r--drivers/pcmcia/rsrc_nonstatic.c4
-rw-r--r--drivers/pcmcia/socket_sysfs.c5
-rw-r--r--drivers/platform/x86/acer-wmi.c71
-rw-r--r--drivers/platform/x86/amd/hfi/hfi.c14
-rw-r--r--drivers/platform/x86/amd/pmc/pmc-quirks.c14
-rw-r--r--drivers/platform/x86/asus-nb-wmi.c28
-rw-r--r--drivers/platform/x86/asus-wmi.c9
-rw-r--r--drivers/platform/x86/asus-wmi.h3
-rw-r--r--drivers/platform/x86/hp/hp-wmi.c4
-rw-r--r--drivers/platform/x86/intel/pmc/core.c1
-rw-r--r--drivers/platform/x86/intel/tpmi_power_domains.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_nvmet.c10
-rw-r--r--drivers/scsi/sr.c16
-rw-r--r--drivers/spi/spi-cadence-quadspi.c33
-rw-r--r--drivers/spi/spi-fsl-lpspi.c47
-rw-r--r--drivers/spi/spi-microchip-core-qspi.c12
-rw-r--r--drivers/spi/spi-qpic-snand.c6
-rw-r--r--drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c8
-rw-r--r--fs/btrfs/extent_io.c40
-rw-r--r--fs/btrfs/inode.c12
-rw-r--r--fs/btrfs/qgroup.c6
-rw-r--r--fs/btrfs/super.c9
-rw-r--r--fs/btrfs/volumes.c5
-rw-r--r--fs/coredump.c4
-rw-r--r--fs/exec.c2
-rw-r--r--fs/fhandle.c8
-rw-r--r--fs/fuse/dev.c2
-rw-r--r--fs/fuse/dir.c3
-rw-r--r--fs/fuse/file.c5
-rw-r--r--fs/fuse/fuse_i.h14
-rw-r--r--fs/fuse/inode.c16
-rw-r--r--fs/fuse/passthrough.c5
-rw-r--r--fs/fuse/virtio_fs.c2
-rw-r--r--fs/namespace.c2
-rw-r--r--fs/nfs/client.c2
-rw-r--r--fs/nfs/file.c40
-rw-r--r--fs/nfs/flexfilelayout/flexfilelayout.c21
-rw-r--r--fs/nfs/inode.c13
-rw-r--r--fs/nfs/internal.h12
-rw-r--r--fs/nfs/io.c13
-rw-r--r--fs/nfs/localio.c21
-rw-r--r--fs/nfs/nfs42proc.c35
-rw-r--r--fs/nfs/nfs4file.c2
-rw-r--r--fs/nfs/nfs4proc.c7
-rw-r--r--fs/nfs/nfstrace.h1
-rw-r--r--fs/nfs/write.c53
-rw-r--r--fs/ocfs2/extent_map.c10
-rw-r--r--fs/proc/generic.c3
-rw-r--r--fs/smb/client/cifs_debug.c31
-rw-r--r--fs/smb/client/cifs_unicode.c3
-rw-r--r--fs/smb/client/reparse.c2
-rw-r--r--fs/smb/client/smb1ops.c4
-rw-r--r--fs/smb/client/smb2misc.c19
-rw-r--r--fs/smb/client/smb2pdu.c4
-rw-r--r--fs/smb/client/trace.h52
-rw-r--r--include/linux/compiler-clang.h29
-rw-r--r--include/linux/cpu.h1
-rw-r--r--include/linux/energy_model.h10
-rw-r--r--include/linux/ethtool.h4
-rw-r--r--include/linux/fs.h3
-rw-r--r--include/linux/ieee80211.h265
-rw-r--r--include/linux/kasan.h6
-rw-r--r--include/linux/mmc/sdio_ids.h2
-rw-r--r--include/linux/phy_fixed.h10
-rw-r--r--include/linux/timekeeper_internal.h9
-rw-r--r--include/net/cfg80211.h34
-rw-r--r--include/net/netfilter/nf_tables.h1
-rw-r--r--include/net/netfilter/nf_tables_core.h10
-rw-r--r--include/net/netns/nftables.h1
-rw-r--r--include/net/tcp.h4
-rw-r--r--include/net/tcp_ao.h1
-rw-r--r--include/pcmcia/ss.h8
-rw-r--r--include/uapi/linux/if_bridge.h3
-rw-r--r--include/uapi/linux/nl80211.h51
-rw-r--r--init/Kconfig3
-rw-r--r--init/main.c2
-rw-r--r--io_uring/rw.c3
-rw-r--r--kernel/auditfilter.c2
-rw-r--r--kernel/bpf/Makefile1
-rw-r--r--kernel/bpf/core.c21
-rw-r--r--kernel/bpf/cpumap.c4
-rw-r--r--kernel/bpf/crypto.c2
-rw-r--r--kernel/bpf/helpers.c16
-rw-r--r--kernel/bpf/rqspinlock.c2
-rw-r--r--kernel/bpf/verifier.c6
-rw-r--r--kernel/dma/debug.c48
-rw-r--r--kernel/dma/debug.h20
-rw-r--r--kernel/dma/mapping.c4
-rw-r--r--kernel/events/core.c1
-rw-r--r--kernel/fork.c2
-rw-r--r--kernel/futex/core.c16
-rw-r--r--kernel/power/energy_model.c29
-rw-r--r--kernel/power/hibernate.c1
-rw-r--r--kernel/time/timekeeping.c10
-rw-r--r--kernel/time/vsyscall.c4
-rw-r--r--kernel/trace/fgraph.c3
-rw-r--r--kernel/trace/trace.c10
-rw-r--r--kernel/trace/trace_events_user.c2
-rw-r--r--kernel/trace/trace_osnoise.c3
-rw-r--r--mm/damon/core.c4
-rw-r--r--mm/damon/lru_sort.c5
-rw-r--r--mm/damon/reclaim.c5
-rw-r--r--mm/damon/sysfs.c14
-rw-r--r--mm/hugetlb.c9
-rw-r--r--mm/kasan/shadow.c31
-rw-r--r--mm/khugepaged.c4
-rw-r--r--mm/memory-failure.c20
-rw-r--r--mm/memory_hotplug.c10
-rw-r--r--mm/mremap.c9
-rw-r--r--mm/percpu.c20
-rw-r--r--mm/vmalloc.c8
-rw-r--r--net/bridge/br.c29
-rw-r--r--net/bridge/br_fdb.c114
-rw-r--r--net/bridge/br_input.c8
-rw-r--r--net/bridge/br_private.h3
-rw-r--r--net/bridge/br_vlan.c10
-rw-r--r--net/can/j1939/bus.c5
-rw-r--r--net/can/j1939/j1939-priv.h1
-rw-r--r--net/can/j1939/main.c3
-rw-r--r--net/can/j1939/socket.c52
-rw-r--r--net/core/dev_ioctl.c22
-rw-r--r--net/hsr/hsr_device.c28
-rw-r--r--net/hsr/hsr_main.c4
-rw-r--r--net/hsr/hsr_main.h3
-rw-r--r--net/ipv4/ip_tunnel_core.c6
-rw-r--r--net/ipv4/tcp.c20
-rw-r--r--net/ipv4/tcp_ao.c5
-rw-r--r--net/ipv4/tcp_bpf.c5
-rw-r--r--net/ipv4/tcp_ipv4.c37
-rw-r--r--net/ipv4/tcp_minisocks.c19
-rw-r--r--net/ipv4/udp_tunnel_nic.c2
-rw-r--r--net/ipv6/tcp_ipv6.c8
-rw-r--r--net/ipv6/udp.c6
-rw-r--r--net/mac80211/cfg.c44
-rw-r--r--net/mac80211/chan.c11
-rw-r--r--net/mac80211/ethtool.c6
-rw-r--r--net/mac80211/ieee80211_i.h9
-rw-r--r--net/mac80211/main.c11
-rw-r--r--net/mac80211/mesh.c3
-rw-r--r--net/mac80211/mesh_ps.c2
-rw-r--r--net/mac80211/mlme.c38
-rw-r--r--net/mac80211/sta_info.c13
-rw-r--r--net/mac80211/tests/Makefile2
-rw-r--r--net/mac80211/tests/s1g_tim.c356
-rw-r--r--net/mac80211/tx.c173
-rw-r--r--net/mac80211/util.c20
-rw-r--r--net/mptcp/sockopt.c11
-rw-r--r--net/netfilter/nf_tables_api.c66
-rw-r--r--net/netfilter/nft_lookup.c46
-rw-r--r--net/netfilter/nft_set_bitmap.c3
-rw-r--r--net/netfilter/nft_set_pipapo.c20
-rw-r--r--net/netfilter/nft_set_pipapo_avx2.c3
-rw-r--r--net/netfilter/nft_set_rbtree.c6
-rw-r--r--net/netlink/genetlink.c3
-rw-r--r--net/packet/af_packet.c132
-rw-r--r--net/packet/diag.c2
-rw-r--r--net/packet/internal.h14
-rw-r--r--net/sunrpc/sched.c2
-rw-r--r--net/sunrpc/xprtsock.c6
-rw-r--r--net/wireless/core.c9
-rw-r--r--net/wireless/ethtool.c2
-rw-r--r--net/wireless/nl80211.c331
-rw-r--r--net/wireless/scan.c9
-rw-r--r--net/wireless/trace.h56
-rw-r--r--net/wireless/util.c2
-rw-r--r--net/xdp/xsk.c113
-rw-r--r--net/xdp/xsk_queue.h12
-rw-r--r--rust/kernel/lib.rs15
-rw-r--r--samples/ftrace/ftrace-direct-modify.c2
-rw-r--r--scripts/generate_rust_target.rs12
-rw-r--r--tools/gpio/Makefile2
-rw-r--r--tools/perf/tests/pe-file-parsing.c4
-rwxr-xr-xtools/perf/tests/shell/test_bpf_metadata.sh2
-rw-r--r--tools/perf/util/bpf-event.c39
-rw-r--r--tools/perf/util/bpf-utils.c61
-rw-r--r--tools/perf/util/symbol-elf.c10
-rw-r--r--tools/testing/selftests/bpf/prog_tests/free_timer.c4
-rw-r--r--tools/testing/selftests/bpf/prog_tests/timer.c4
-rw-r--r--tools/testing/selftests/bpf/prog_tests/timer_crash.c4
-rw-r--r--tools/testing/selftests/bpf/prog_tests/timer_lockup.c4
-rw-r--r--tools/testing/selftests/bpf/prog_tests/timer_mim.c4
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_arena_spin_lock.h4
-rw-r--r--tools/testing/selftests/bpf/progs/crypto_sanity.c46
-rw-r--r--tools/testing/selftests/bpf/progs/linked_list_fail.c5
-rw-r--r--tools/testing/selftests/bpf/progs/string_kfuncs_success.c8
-rw-r--r--tools/testing/selftests/filesystems/mount-notify/mount-notify_test.c17
-rw-r--r--tools/testing/selftests/filesystems/mount-notify/mount-notify_test_ns.c18
-rw-r--r--tools/testing/selftests/net/Makefile1
-rwxr-xr-xtools/testing/selftests/net/broadcast_ether_dst.sh83
-rw-r--r--tools/testing/selftests/net/can/config3
-rwxr-xr-xtools/testing/selftests/net/fcnal-test.sh4
-rw-r--r--tools/testing/selftests/net/forwarding/Makefile1
-rwxr-xr-xtools/testing/selftests/net/forwarding/bridge_fdb_local_vlan_0.sh374
-rw-r--r--tools/testing/selftests/net/lib.sh32
-rw-r--r--tools/testing/selftests/net/lib/sh/defer.sh20
-rwxr-xr-xtools/testing/selftests/net/mptcp/diag.sh2
-rwxr-xr-xtools/testing/selftests/net/mptcp/mptcp_connect.sh2
-rwxr-xr-xtools/testing/selftests/net/mptcp/mptcp_join.sh2
-rwxr-xr-xtools/testing/selftests/net/mptcp/mptcp_sockopt.sh2
-rwxr-xr-xtools/testing/selftests/net/mptcp/pm_netlink.sh5
-rwxr-xr-xtools/testing/selftests/net/mptcp/simult_flows.sh2
-rwxr-xr-xtools/testing/selftests/net/mptcp/userspace_pm.sh2
-rw-r--r--tools/testing/selftests/wireguard/qemu/kernel.config8
427 files changed, 6427 insertions, 5464 deletions
diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu
index ab8cd337f43a..8aed6d94c4cd 100644
--- a/Documentation/ABI/testing/sysfs-devices-system-cpu
+++ b/Documentation/ABI/testing/sysfs-devices-system-cpu
@@ -586,6 +586,7 @@ What: /sys/devices/system/cpu/vulnerabilities
/sys/devices/system/cpu/vulnerabilities/srbds
/sys/devices/system/cpu/vulnerabilities/tsa
/sys/devices/system/cpu/vulnerabilities/tsx_async_abort
+ /sys/devices/system/cpu/vulnerabilities/vmscape
Date: January 2018
Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org>
Description: Information about CPU vulnerabilities
diff --git a/Documentation/admin-guide/hw-vuln/index.rst b/Documentation/admin-guide/hw-vuln/index.rst
index 89ca636081b7..55d747511f83 100644
--- a/Documentation/admin-guide/hw-vuln/index.rst
+++ b/Documentation/admin-guide/hw-vuln/index.rst
@@ -26,3 +26,4 @@ are configurable at compile, boot or run time.
rsb
old_microcode
indirect-target-selection
+ vmscape
diff --git a/Documentation/admin-guide/hw-vuln/vmscape.rst b/Documentation/admin-guide/hw-vuln/vmscape.rst
new file mode 100644
index 000000000000..d9b9a2b6c114
--- /dev/null
+++ b/Documentation/admin-guide/hw-vuln/vmscape.rst
@@ -0,0 +1,110 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+VMSCAPE
+=======
+
+VMSCAPE is a vulnerability that may allow a guest to influence the branch
+prediction in host userspace. It particularly affects hypervisors like QEMU.
+
+Even if a hypervisor does not hold sensitive data such as disk encryption keys,
+guest userspace may be able to attack the guest kernel by using the hypervisor
+as a confused deputy.
+
+Affected processors
+-------------------
+
+The following CPU families are affected by VMSCAPE:
+
+**Intel processors:**
+ - Skylake generation (Parts without Enhanced-IBRS)
+ - Cascade Lake generation (Parts affected by ITS guest/host separation)
+ - Alder Lake and newer (Parts affected by BHI)
+
+Note that BHI-affected parts that use the BHB-clearing software mitigation, e.g.
+Ice Lake, are not vulnerable to VMSCAPE.
+
+**AMD processors:**
+ - Zen series (families 0x17, 0x19, 0x1a)
+
+**Hygon processors:**
+ - Family 0x18
+
+Mitigation
+----------
+
+Conditional IBPB
+----------------
+
+The kernel tracks when a CPU has run a potentially malicious guest and issues
+an IBPB before the first exit to userspace after VM-exit. If userspace did not
+run between VM-exit and the next VM-entry, no IBPB is issued.
+
+Note that the existing userspace mitigations against Spectre-v2 remain
+effective, but they are insufficient to protect userspace VMMs from a
+malicious guest. This is because Spectre-v2 mitigations are applied at
+context-switch time, while the userspace VMM can run after a VM-exit without
+a context switch.
+
+Vulnerability enumeration and mitigation are not applied inside a guest. This is
+because nested hypervisors should already be deploying IBPB to isolate
+themselves from nested guests.
+
+SMT considerations
+------------------
+
+When Simultaneous Multi-Threading (SMT) is enabled, hypervisors can be
+vulnerable to cross-thread attacks. For complete protection against VMSCAPE
+attacks in SMT environments, STIBP should be enabled.
+
+The kernel will issue a warning if SMT is enabled without adequate STIBP
+protection. The warning is not issued when:
+
+- SMT is disabled
+- STIBP is enabled system-wide
+- Intel eIBRS is enabled (which implies STIBP protection)
+
+System information and options
+------------------------------
+
+The sysfs file showing VMSCAPE mitigation status is:
+
+ /sys/devices/system/cpu/vulnerabilities/vmscape
+
+The possible values in this file are:
+
+ * 'Not affected':
+
+ The processor is not vulnerable to VMSCAPE attacks.
+
+ * 'Vulnerable':
+
+ The processor is vulnerable and no mitigation has been applied.
+
+ * 'Mitigation: IBPB before exit to userspace':
+
+ Conditional IBPB mitigation is enabled. The kernel tracks when a CPU has
+ run a potentially malicious guest and issues an IBPB before the first
+ exit to userspace after VM-exit.
+
+ * 'Mitigation: IBPB on VMEXIT':
+
+ IBPB is issued on every VM-exit. This occurs when other mitigations like
+ RETBLEED or SRSO are already issuing IBPB on VM-exit.
+
+Mitigation control on the kernel command line
+----------------------------------------------
+
+The mitigation can be controlled via the ``vmscape=`` command line parameter:
+
+ * ``vmscape=off``:
+
+ Disable the VMSCAPE mitigation.
+
+ * ``vmscape=ibpb``:
+
+ Enable conditional IBPB mitigation (default when CONFIG_MITIGATION_VMSCAPE=y).
+
+ * ``vmscape=force``:
+
+ Force vulnerability detection and mitigation even on processors that are
+ not known to be affected.
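For reference, the mitigation state documented above can be checked at run time by reading the sysfs file. A minimal userspace sketch (illustration only, not part of this patch):

/* Print the VMSCAPE status the kernel reports via sysfs.
 * Minimal sketch for illustration; not part of this patch. */
#include <stdio.h>

int main(void)
{
	char buf[256];
	FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/vmscape", "r");

	if (!f) {
		perror("vmscape");
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		fputs(buf, stdout);	/* e.g. "Mitigation: IBPB before exit to userspace" */
	fclose(f);
	return 0;
}

On unaffected systems the same file reads 'Not affected'.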
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 747a55abf494..5a7a83c411e9 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -3829,6 +3829,7 @@
srbds=off [X86,INTEL]
ssbd=force-off [ARM64]
tsx_async_abort=off [X86]
+ vmscape=off [X86]
Exceptions:
This does not have any effect on
@@ -8041,6 +8042,16 @@
vmpoff= [KNL,S390] Perform z/VM CP command after power off.
Format: <command>
+ vmscape= [X86] Controls mitigation for VMscape attacks.
+ VMscape attacks can leak information from a userspace
+ hypervisor to a guest via speculative side-channels.
+
+ off - disable the mitigation
+ ibpb - use Indirect Branch Prediction Barrier
+ (IBPB) mitigation (default)
+ force - force vulnerability detection even on
+ unaffected processors
+
vsyscall= [X86-64,EARLY]
Controls the behavior of vsyscalls (i.e. calls to
fixed addresses of 0xffffffffff600x00 from legacy
diff --git a/Documentation/devicetree/bindings/net/apm,xgene-enet.yaml b/Documentation/devicetree/bindings/net/apm,xgene-enet.yaml
new file mode 100644
index 000000000000..1c767ef8fcc5
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/apm,xgene-enet.yaml
@@ -0,0 +1,115 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/apm,xgene-enet.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: APM X-Gene SoC Ethernet
+
+maintainers:
+ - Iyappan Subramanian <iyappan@os.amperecomputing.com>
+ - Keyur Chudgar <keyur@os.amperecomputing.com>
+ - Quan Nguyen <quan@os.amperecomputing.com>
+
+allOf:
+ - $ref: ethernet-controller.yaml#
+
+properties:
+ compatible:
+ enum:
+ - apm,xgene-enet
+ - apm,xgene1-sgenet
+ - apm,xgene1-xgenet
+ - apm,xgene2-sgenet
+ - apm,xgene2-xgenet
+
+ reg:
+ maxItems: 3
+
+ reg-names:
+ items:
+ - const: enet_csr
+ - const: ring_csr
+ - const: ring_cmd
+
+ clocks:
+ maxItems: 1
+
+ dma-coherent: true
+
+ interrupts:
+ description: An rx and tx completion interrupt pair per queue
+ minItems: 1
+ maxItems: 16
+
+ channel:
+ description: Ethernet to CPU start channel number
+ $ref: /schemas/types.yaml#/definitions/uint32
+
+ port-id:
+ description: Port number
+ $ref: /schemas/types.yaml#/definitions/uint32
+ maximum: 1
+
+ tx-delay:
+ description: Delay value for RGMII bridge TX clock
+ $ref: /schemas/types.yaml#/definitions/uint32
+ maximum: 7
+ default: 4
+
+ rx-delay:
+ description: Delay value for RGMII bridge RX clock
+ $ref: /schemas/types.yaml#/definitions/uint32
+ maximum: 7
+ default: 2
+
+ rxlos-gpios:
+ description: Input GPIO from SFP+ module indicating incoming signal
+ maxItems: 1
+
+ mdio:
+ description: MDIO bus subnode
+ $ref: mdio.yaml#
+ unevaluatedProperties: false
+
+ properties:
+ compatible:
+ const: apm,xgene-mdio
+
+ required:
+ - compatible
+
+required:
+ - compatible
+ - reg
+ - interrupts
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ ethernet@17020000 {
+ compatible = "apm,xgene-enet";
+ reg = <0x17020000 0xd100>,
+ <0x17030000 0x400>,
+ <0x10000000 0x200>;
+ reg-names = "enet_csr", "ring_csr", "ring_cmd";
+ interrupts = <0x0 0x3c 0x4>;
+ channel = <0>;
+ port-id = <0>;
+ clocks = <&menetclk 0>;
+ local-mac-address = [00 01 73 00 00 01];
+ phy-connection-type = "rgmii";
+ phy-handle = <&menetphy>;
+
+ mdio {
+ compatible = "apm,xgene-mdio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ menetphy: ethernet-phy@3 {
+ compatible = "ethernet-phy-id001c.c915";
+ reg = <3>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/net/apm,xgene-mdio-rgmii.yaml b/Documentation/devicetree/bindings/net/apm,xgene-mdio-rgmii.yaml
new file mode 100644
index 000000000000..470fb5f7f7b5
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/apm,xgene-mdio-rgmii.yaml
@@ -0,0 +1,54 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/apm,xgene-mdio-rgmii.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: APM X-Gene SoC MDIO
+
+maintainers:
+ - Iyappan Subramanian <iyappan@os.amperecomputing.com>
+ - Keyur Chudgar <keyur@os.amperecomputing.com>
+ - Quan Nguyen <quan@os.amperecomputing.com>
+
+allOf:
+ - $ref: mdio.yaml#
+
+properties:
+ compatible:
+ enum:
+ - apm,xgene-mdio-rgmii
+ - apm,xgene-mdio-xfi
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+unevaluatedProperties: false
+
+required:
+ - compatible
+ - reg
+ - clocks
+
+examples:
+ - |
+ mdio@17020000 {
+ compatible = "apm,xgene-mdio-rgmii";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x17020000 0xd100>;
+ clocks = <&menetclk 0>;
+
+ phy@3 {
+ reg = <0x3>;
+ };
+ phy@4 {
+ reg = <0x4>;
+ };
+ phy@5 {
+ reg = <0x5>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/net/apm-xgene-enet.txt b/Documentation/devicetree/bindings/net/apm-xgene-enet.txt
deleted file mode 100644
index f591ab782dbc..000000000000
--- a/Documentation/devicetree/bindings/net/apm-xgene-enet.txt
+++ /dev/null
@@ -1,91 +0,0 @@
-APM X-Gene SoC Ethernet nodes
-
-Ethernet nodes are defined to describe on-chip ethernet interfaces in
-APM X-Gene SoC.
-
-Required properties for all the ethernet interfaces:
-- compatible: Should state binding information from the following list,
- - "apm,xgene-enet": RGMII based 1G interface
- - "apm,xgene1-sgenet": SGMII based 1G interface
- - "apm,xgene1-xgenet": XFI based 10G interface
-- reg: Address and length of the register set for the device. It contains the
- information of registers in the same order as described by reg-names
-- reg-names: Should contain the register set names
- - "enet_csr": Ethernet control and status register address space
- - "ring_csr": Descriptor ring control and status register address space
- - "ring_cmd": Descriptor ring command register address space
-- interrupts: Two interrupt specifiers can be specified.
- - First is the Rx interrupt. This irq is mandatory.
- - Second is the Tx completion interrupt.
- This is supported only on SGMII based 1GbE and 10GbE interfaces.
-- channel: Ethernet to CPU, start channel (prefetch buffer) number
- - Must map to the first irq and irqs must be sequential
-- port-id: Port number (0 or 1)
-- clocks: Reference to the clock entry.
-- local-mac-address: MAC address assigned to this device
-- phy-connection-type: Interface type between ethernet device and PHY device
-
-Required properties for ethernet interfaces that have external PHY:
-- phy-handle: Reference to a PHY node connected to this device
-
-- mdio: Device tree subnode with the following required properties:
- - compatible: Must be "apm,xgene-mdio".
- - #address-cells: Must be <1>.
- - #size-cells: Must be <0>.
-
- For the phy on the mdio bus, there must be a node with the following fields:
- - compatible: PHY identifier. Please refer ./phy.txt for the format.
- - reg: The ID number for the phy.
-
-Optional properties:
-- status: Should be "ok" or "disabled" for enabled/disabled. Default is "ok".
-- tx-delay: Delay value for RGMII bridge TX clock.
- Valid values are between 0 to 7, that maps to
- 417, 717, 1020, 1321, 1611, 1913, 2215, 2514 ps
- Default value is 4, which corresponds to 1611 ps
-- rx-delay: Delay value for RGMII bridge RX clock.
- Valid values are between 0 to 7, that maps to
- 273, 589, 899, 1222, 1480, 1806, 2147, 2464 ps
- Default value is 2, which corresponds to 899 ps
-- rxlos-gpios: Input gpio from SFP+ module to indicate availability of
- incoming signal.
-
-
-Example:
- menetclk: menetclk {
- compatible = "apm,xgene-device-clock";
- clock-output-names = "menetclk";
- status = "ok";
- };
-
- menet: ethernet@17020000 {
- compatible = "apm,xgene-enet";
- status = "disabled";
- reg = <0x0 0x17020000 0x0 0xd100>,
- <0x0 0x17030000 0x0 0x400>,
- <0x0 0x10000000 0x0 0x200>;
- reg-names = "enet_csr", "ring_csr", "ring_cmd";
- interrupts = <0x0 0x3c 0x4>;
- port-id = <0>;
- clocks = <&menetclk 0>;
- local-mac-address = [00 01 73 00 00 01];
- phy-connection-type = "rgmii";
- phy-handle = <&menetphy>;
- mdio {
- compatible = "apm,xgene-mdio";
- #address-cells = <1>;
- #size-cells = <0>;
- menetphy: menetphy@3 {
- compatible = "ethernet-phy-id001c.c915";
- reg = <0x3>;
- };
-
- };
- };
-
-/* Board-specific peripheral configurations */
-&menet {
- tx-delay = <4>;
- rx-delay = <2>;
- status = "ok";
-};
diff --git a/Documentation/devicetree/bindings/net/apm-xgene-mdio.txt b/Documentation/devicetree/bindings/net/apm-xgene-mdio.txt
deleted file mode 100644
index 78722d74cea8..000000000000
--- a/Documentation/devicetree/bindings/net/apm-xgene-mdio.txt
+++ /dev/null
@@ -1,37 +0,0 @@
-APM X-Gene SoC MDIO node
-
-MDIO node is defined to describe on-chip MDIO controller.
-
-Required properties:
- - compatible: Must be "apm,xgene-mdio-rgmii" or "apm,xgene-mdio-xfi"
- - #address-cells: Must be <1>.
- - #size-cells: Must be <0>.
- - reg: Address and length of the register set
- - clocks: Reference to the clock entry
-
-For the phys on the mdio bus, there must be a node with the following fields:
- - compatible: PHY identifier. Please refer ./phy.txt for the format.
- - reg: The ID number for the phy.
-
-Example:
-
- mdio: mdio@17020000 {
- compatible = "apm,xgene-mdio-rgmii";
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0x0 0x17020000 0x0 0xd100>;
- clocks = <&menetclk 0>;
- };
-
- /* Board-specific peripheral configurations */
- &mdio {
- menetphy: phy@3 {
- reg = <0x3>;
- };
- sgenet0phy: phy@4 {
- reg = <0x4>;
- };
- sgenet1phy: phy@5 {
- reg = <0x5>;
- };
- };
diff --git a/Documentation/devicetree/bindings/net/brcm,bcm7445-switch-v4.0.txt b/Documentation/devicetree/bindings/net/brcm,bcm7445-switch-v4.0.txt
deleted file mode 100644
index 284cddb3118e..000000000000
--- a/Documentation/devicetree/bindings/net/brcm,bcm7445-switch-v4.0.txt
+++ /dev/null
@@ -1,50 +0,0 @@
-* Broadcom Starfighter 2 integrated switch
-
-See dsa/brcm,bcm7445-switch-v4.0.yaml for the documentation.
-
-*Deprecated* binding required properties:
-
-- dsa,mii-bus: phandle to the MDIO bus controller, see dsa/dsa.txt
-- dsa,ethernet: phandle to the CPU network interface controller, see dsa/dsa.txt
-- #address-cells: must be 2, see dsa/dsa.txt
-
-Example using the old DSA DeviceTree binding:
-
-switch_top@f0b00000 {
- compatible = "simple-bus";
- #size-cells = <1>;
- #address-cells = <1>;
- ranges = <0 0xf0b00000 0x40804>;
-
- ethernet_switch@0 {
- compatible = "brcm,bcm7445-switch-v4.0";
- #size-cells = <0>;
- #address-cells = <2>;
- reg = <0x0 0x40000
- 0x40000 0x110
- 0x40340 0x30
- 0x40380 0x30
- 0x40400 0x34
- 0x40600 0x208>;
- interrupts = <0 0x18 0
- 0 0x19 0>;
- brcm,num-gphy = <1>;
- brcm,num-rgmii-ports = <2>;
- brcm,fcb-pause-override;
- brcm,acb-packets-inflight;
-
- ...
- switch@0 {
- reg = <0 0>;
- #size-cells = <0>;
- #address-cells = <1>;
-
- port@0 {
- label = "gphy";
- reg = <0>;
- brcm,use-bcm-hdr;
- };
- ...
- };
- };
-};
diff --git a/Documentation/devicetree/bindings/spi/spi-fsl-lpspi.yaml b/Documentation/devicetree/bindings/spi/spi-fsl-lpspi.yaml
index a65a42ccaafe..a82360bed188 100644
--- a/Documentation/devicetree/bindings/spi/spi-fsl-lpspi.yaml
+++ b/Documentation/devicetree/bindings/spi/spi-fsl-lpspi.yaml
@@ -20,6 +20,7 @@ properties:
- enum:
- fsl,imx7ulp-spi
- fsl,imx8qxp-spi
+ - nxp,s32g2-lpspi
- items:
- enum:
- fsl,imx8ulp-spi
@@ -27,6 +28,10 @@ properties:
- fsl,imx94-spi
- fsl,imx95-spi
- const: fsl,imx7ulp-spi
+ - items:
+ - const: nxp,s32g3-lpspi
+ - const: nxp,s32g2-lpspi
+
reg:
maxItems: 1
diff --git a/Documentation/netlink/specs/mptcp_pm.yaml b/Documentation/netlink/specs/mptcp_pm.yaml
index 02f1ddcfbf1c..d15335684ec3 100644
--- a/Documentation/netlink/specs/mptcp_pm.yaml
+++ b/Documentation/netlink/specs/mptcp_pm.yaml
@@ -256,7 +256,7 @@ attribute-sets:
type: u32
-
name: if-idx
- type: u32
+ type: s32
-
name: reset-reason
type: u32
diff --git a/Documentation/networking/can.rst b/Documentation/networking/can.rst
index bc1b585355f7..7650c4b5be5f 100644
--- a/Documentation/networking/can.rst
+++ b/Documentation/networking/can.rst
@@ -742,7 +742,7 @@ The broadcast manager sends responses to user space in the same form:
struct timeval ival1, ival2; /* count and subsequent interval */
canid_t can_id; /* unique can_id for task */
__u32 nframes; /* number of can_frames following */
- struct can_frame frames[0];
+ struct can_frame frames[];
};
The aligned payload 'frames' uses the same basic CAN frame structure defined
diff --git a/Documentation/networking/mptcp.rst b/Documentation/networking/mptcp.rst
index fdc7bfd5d5c5..b6753ffb9c9a 100644
--- a/Documentation/networking/mptcp.rst
+++ b/Documentation/networking/mptcp.rst
@@ -60,10 +60,10 @@ address announcements. Typically, it is the client side that initiates subflows,
and the server side that announces additional addresses via the ``ADD_ADDR`` and
``REMOVE_ADDR`` options.
-Path managers are controlled by the ``net.mptcp.pm_type`` sysctl knob -- see
-mptcp-sysctl.rst. There are two types: the in-kernel one (type ``0``) where the
-same rules are applied for all the connections (see: ``ip mptcp``) ; and the
-userspace one (type ``1``), controlled by a userspace daemon (i.e. `mptcpd
+Path managers are controlled by the ``net.mptcp.path_manager`` sysctl knob --
+see mptcp-sysctl.rst. There are two types: the in-kernel one (``kernel``) where
+the same rules are applied for all the connections (see: ``ip mptcp``) ; and the
+userspace one (``userspace``), controlled by a userspace daemon (i.e. `mptcpd
<https://mptcpd.mptcp.dev/>`_) where different rules can be applied for each
connection. The path managers can be controlled via a Netlink API; see
../netlink/specs/mptcp_pm.rst.
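As an illustration of the renamed knob, the active path manager can also be read from the corresponding procfs entry. The sketch below assumes the usual sysctl-to-/proc/sys mapping and is not part of this patch:

/* Read the active MPTCP path manager ("kernel" or "userspace").
 * Assumes net.mptcp.path_manager maps to /proc/sys/net/mptcp/path_manager;
 * illustration only, not part of this patch. */
#include <stdio.h>

int main(void)
{
	char pm[32];
	FILE *f = fopen("/proc/sys/net/mptcp/path_manager", "r");

	if (!f) {
		perror("net.mptcp.path_manager");
		return 1;
	}
	if (fscanf(f, "%31s", pm) == 1)
		printf("MPTCP path manager: %s\n", pm);
	fclose(f);
	return 0;
}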
diff --git a/MAINTAINERS b/MAINTAINERS
index e3907f0c1243..c930a961435e 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1886,8 +1886,8 @@ M: Iyappan Subramanian <iyappan@os.amperecomputing.com>
M: Keyur Chudgar <keyur@os.amperecomputing.com>
M: Quan Nguyen <quan@os.amperecomputing.com>
S: Maintained
-F: Documentation/devicetree/bindings/net/apm-xgene-enet.txt
-F: Documentation/devicetree/bindings/net/apm-xgene-mdio.txt
+F: Documentation/devicetree/bindings/net/apm,xgene-enet.yaml
+F: Documentation/devicetree/bindings/net/apm,xgene-mdio-rgmii.yaml
F: drivers/net/ethernet/apm/xgene/
F: drivers/net/mdio/mdio-xgene.c
@@ -4683,7 +4683,6 @@ F: security/bpf/
BPF [SELFTESTS] (Test Runners & Infrastructure)
M: Andrii Nakryiko <andrii@kernel.org>
M: Eduard Zingerman <eddyz87@gmail.com>
-R: Mykola Lysenko <mykolal@fb.com>
L: bpf@vger.kernel.org
S: Maintained
F: tools/testing/selftests/bpf/
@@ -5259,7 +5258,6 @@ F: drivers/gpio/gpio-bt8xx.c
BTRFS FILE SYSTEM
M: Chris Mason <clm@fb.com>
-M: Josef Bacik <josef@toxicpanda.com>
M: David Sterba <dsterba@suse.com>
L: linux-btrfs@vger.kernel.org
S: Maintained
@@ -7822,7 +7820,7 @@ Q: https://patchwork.freedesktop.org/project/nouveau/
Q: https://gitlab.freedesktop.org/drm/nouveau/-/merge_requests
B: https://gitlab.freedesktop.org/drm/nouveau/-/issues
C: irc://irc.oftc.net/nouveau
-T: git https://gitlab.freedesktop.org/drm/nouveau.git
+T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: drivers/gpu/drm/nouveau/
F: include/uapi/drm/nouveau_drm.h
@@ -16127,6 +16125,7 @@ M: Andrew Morton <akpm@linux-foundation.org>
M: Mike Rapoport <rppt@kernel.org>
L: linux-mm@kvack.org
S: Maintained
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/rppt/memblock.git
F: include/linux/numa_memblks.h
F: mm/numa.c
F: mm/numa_emulation.c
@@ -17479,6 +17478,7 @@ NETFILTER
M: Pablo Neira Ayuso <pablo@netfilter.org>
M: Jozsef Kadlecsik <kadlec@netfilter.org>
M: Florian Westphal <fw@strlen.de>
+R: Phil Sutter <phil@nwl.cc>
L: netfilter-devel@vger.kernel.org
L: coreteam@netfilter.org
S: Maintained
diff --git a/Makefile b/Makefile
index b9c661913250..cf37b9407821 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 17
SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc5
NAME = Baby Opossum Posse
# *DOCUMENTATION*
diff --git a/arch/arm64/include/asm/module.h b/arch/arm64/include/asm/module.h
index 79550b22ba19..fb9b88eebeb1 100644
--- a/arch/arm64/include/asm/module.h
+++ b/arch/arm64/include/asm/module.h
@@ -19,6 +19,7 @@ struct mod_arch_specific {
/* for CONFIG_DYNAMIC_FTRACE */
struct plt_entry *ftrace_trampolines;
+ struct plt_entry *init_ftrace_trampolines;
};
u64 module_emit_plt_entry(struct module *mod, Elf64_Shdr *sechdrs,
diff --git a/arch/arm64/include/asm/module.lds.h b/arch/arm64/include/asm/module.lds.h
index b9ae8349e35d..fb944b46846d 100644
--- a/arch/arm64/include/asm/module.lds.h
+++ b/arch/arm64/include/asm/module.lds.h
@@ -2,6 +2,7 @@ SECTIONS {
.plt 0 : { BYTE(0) }
.init.plt 0 : { BYTE(0) }
.text.ftrace_trampoline 0 : { BYTE(0) }
+ .init.text.ftrace_trampoline 0 : { BYTE(0) }
#ifdef CONFIG_KASAN_SW_TAGS
/*
diff --git a/arch/arm64/include/uapi/asm/bitsperlong.h b/arch/arm64/include/uapi/asm/bitsperlong.h
index 485d60bee26c..d59730975f30 100644
--- a/arch/arm64/include/uapi/asm/bitsperlong.h
+++ b/arch/arm64/include/uapi/asm/bitsperlong.h
@@ -17,7 +17,12 @@
#ifndef __ASM_BITSPERLONG_H
#define __ASM_BITSPERLONG_H
+#if defined(__KERNEL__) && !defined(__aarch64__)
+/* Used by the compat vDSO */
+#define __BITS_PER_LONG 32
+#else
#define __BITS_PER_LONG 64
+#endif
#include <asm-generic/bitsperlong.h>
diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c
index 5a890714ee2e..5adad37ab4fa 100644
--- a/arch/arm64/kernel/ftrace.c
+++ b/arch/arm64/kernel/ftrace.c
@@ -258,10 +258,17 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
return ftrace_modify_code(pc, 0, new, false);
}
-static struct plt_entry *get_ftrace_plt(struct module *mod)
+static struct plt_entry *get_ftrace_plt(struct module *mod, unsigned long addr)
{
#ifdef CONFIG_MODULES
- struct plt_entry *plt = mod->arch.ftrace_trampolines;
+ struct plt_entry *plt = NULL;
+
+ if (within_module_mem_type(addr, mod, MOD_INIT_TEXT))
+ plt = mod->arch.init_ftrace_trampolines;
+ else if (within_module_mem_type(addr, mod, MOD_TEXT))
+ plt = mod->arch.ftrace_trampolines;
+ else
+ return NULL;
return &plt[FTRACE_PLT_IDX];
#else
@@ -332,7 +339,7 @@ static bool ftrace_find_callable_addr(struct dyn_ftrace *rec,
if (WARN_ON(!mod))
return false;
- plt = get_ftrace_plt(mod);
+ plt = get_ftrace_plt(mod, pc);
if (!plt) {
pr_err("ftrace: no module PLT for %ps\n", (void *)*addr);
return false;
diff --git a/arch/arm64/kernel/machine_kexec_file.c b/arch/arm64/kernel/machine_kexec_file.c
index af1ca875c52c..410060ebd86d 100644
--- a/arch/arm64/kernel/machine_kexec_file.c
+++ b/arch/arm64/kernel/machine_kexec_file.c
@@ -94,7 +94,7 @@ int load_other_segments(struct kimage *image,
char *initrd, unsigned long initrd_len,
char *cmdline)
{
- struct kexec_buf kbuf;
+ struct kexec_buf kbuf = {};
void *dtb = NULL;
unsigned long initrd_load_addr = 0, dtb_len,
orig_segments = image->nr_segments;
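The '= {}' change above (repeated for the riscv and s390 loaders later in this diff) zero-initializes the on-stack kexec_buf, presumably so that any fields the individual loaders never assign start out as zero rather than stack garbage. A standalone sketch of the difference, using a hypothetical stand-in structure rather than the real kexec_buf:

/* Stand-in illustration of why the loaders now use "= {}": with an empty
 * initializer every member that is not assigned explicitly is zeroed, so a
 * newly added optional field gets a sane default instead of indeterminate
 * stack contents. "fake_buf" is hypothetical, not the real kexec_buf. */
#include <stdio.h>

struct fake_buf {
	void *image;
	unsigned long mem;
	int new_flag;	/* imagined later addition the callers do not set */
};

static void use_buf(const struct fake_buf *b)
{
	printf("new_flag = %d\n", b->new_flag);
}

int main(void)
{
	struct fake_buf b = {};	/* all members zeroed, including new_flag */

	b.image = NULL;		/* only the "old" members are set explicitly */
	b.mem = 0;
	use_buf(&b);		/* prints 0; without "= {}" this read would be undefined */
	return 0;
}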
diff --git a/arch/arm64/kernel/module-plts.c b/arch/arm64/kernel/module-plts.c
index bde32979c06a..7afd370da9f4 100644
--- a/arch/arm64/kernel/module-plts.c
+++ b/arch/arm64/kernel/module-plts.c
@@ -283,7 +283,7 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
unsigned long core_plts = 0;
unsigned long init_plts = 0;
Elf64_Sym *syms = NULL;
- Elf_Shdr *pltsec, *tramp = NULL;
+ Elf_Shdr *pltsec, *tramp = NULL, *init_tramp = NULL;
int i;
/*
@@ -298,6 +298,9 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
else if (!strcmp(secstrings + sechdrs[i].sh_name,
".text.ftrace_trampoline"))
tramp = sechdrs + i;
+ else if (!strcmp(secstrings + sechdrs[i].sh_name,
+ ".init.text.ftrace_trampoline"))
+ init_tramp = sechdrs + i;
else if (sechdrs[i].sh_type == SHT_SYMTAB)
syms = (Elf64_Sym *)sechdrs[i].sh_addr;
}
@@ -363,5 +366,12 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
tramp->sh_size = NR_FTRACE_PLTS * sizeof(struct plt_entry);
}
+ if (init_tramp) {
+ init_tramp->sh_type = SHT_NOBITS;
+ init_tramp->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
+ init_tramp->sh_addralign = __alignof__(struct plt_entry);
+ init_tramp->sh_size = NR_FTRACE_PLTS * sizeof(struct plt_entry);
+ }
+
return 0;
}
diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
index 40148d2725ce..d6d443c4a01a 100644
--- a/arch/arm64/kernel/module.c
+++ b/arch/arm64/kernel/module.c
@@ -466,6 +466,17 @@ static int module_init_ftrace_plt(const Elf_Ehdr *hdr,
__init_plt(&plts[FTRACE_PLT_IDX], FTRACE_ADDR);
mod->arch.ftrace_trampolines = plts;
+
+ s = find_section(hdr, sechdrs, ".init.text.ftrace_trampoline");
+ if (!s)
+ return -ENOEXEC;
+
+ plts = (void *)s->sh_addr;
+
+ __init_plt(&plts[FTRACE_PLT_IDX], FTRACE_ADDR);
+
+ mod->arch.init_ftrace_trampolines = plts;
+
#endif
return 0;
}
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index a4b233a0659e..51dcd8eaa243 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -65,7 +65,7 @@ config RISCV
select ARCH_SUPPORTS_HUGE_PFNMAP if TRANSPARENT_HUGEPAGE
select ARCH_SUPPORTS_HUGETLBFS if MMU
# LLD >= 14: https://github.com/llvm/llvm-project/issues/50505
- select ARCH_SUPPORTS_LTO_CLANG if LLD_VERSION >= 140000
+ select ARCH_SUPPORTS_LTO_CLANG if LLD_VERSION >= 140000 && CMODEL_MEDANY
select ARCH_SUPPORTS_LTO_CLANG_THIN if LLD_VERSION >= 140000
select ARCH_SUPPORTS_MSEAL_SYSTEM_MAPPINGS if 64BIT && MMU
select ARCH_SUPPORTS_PAGE_TABLE_CHECK if MMU
diff --git a/arch/riscv/include/asm/asm.h b/arch/riscv/include/asm/asm.h
index a8a2af6dfe9d..2a16e88e13de 100644
--- a/arch/riscv/include/asm/asm.h
+++ b/arch/riscv/include/asm/asm.h
@@ -91,7 +91,7 @@
#endif
.macro asm_per_cpu dst sym tmp
- REG_L \tmp, TASK_TI_CPU_NUM(tp)
+ lw \tmp, TASK_TI_CPU_NUM(tp)
slli \tmp, \tmp, PER_CPU_OFFSET_SHIFT
la \dst, __per_cpu_offset
add \dst, \dst, \tmp
diff --git a/arch/riscv/include/asm/uaccess.h b/arch/riscv/include/asm/uaccess.h
index b88a6218b7f2..f5f4f7f85543 100644
--- a/arch/riscv/include/asm/uaccess.h
+++ b/arch/riscv/include/asm/uaccess.h
@@ -209,7 +209,7 @@ do { \
err = 0; \
break; \
__gu_failed: \
- x = 0; \
+ x = (__typeof__(x))0; \
err = -EFAULT; \
} while (0)
@@ -311,7 +311,7 @@ do { \
do { \
if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && \
!IS_ALIGNED((uintptr_t)__gu_ptr, sizeof(*__gu_ptr))) { \
- __inttype(x) ___val = (__inttype(x))x; \
+ __typeof__(*(__gu_ptr)) ___val = (x); \
if (__asm_copy_to_user_sum_enabled(__gu_ptr, &(___val), sizeof(*__gu_ptr))) \
goto label; \
break; \
@@ -438,10 +438,10 @@ unsigned long __must_check clear_user(void __user *to, unsigned long n)
}
#define __get_kernel_nofault(dst, src, type, err_label) \
- __get_user_nocheck(*((type *)(dst)), (type *)(src), err_label)
+ __get_user_nocheck(*((type *)(dst)), (__force __user type *)(src), err_label)
#define __put_kernel_nofault(dst, src, type, err_label) \
- __put_user_nocheck(*((type *)(src)), (type *)(dst), err_label)
+ __put_user_nocheck(*((type *)(src)), (__force __user type *)(dst), err_label)
static __must_check __always_inline bool user_access_begin(const void __user *ptr, size_t len)
{
diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
index 3a0ec6fd5956..d0ded2438533 100644
--- a/arch/riscv/kernel/entry.S
+++ b/arch/riscv/kernel/entry.S
@@ -46,7 +46,7 @@
* a0 = &new_vmalloc[BIT_WORD(cpu)]
* a1 = BIT_MASK(cpu)
*/
- REG_L a2, TASK_TI_CPU(tp)
+ lw a2, TASK_TI_CPU(tp)
/*
* Compute the new_vmalloc element position:
* (cpu / 64) * 8 = (cpu >> 6) << 3
diff --git a/arch/riscv/kernel/kexec_elf.c b/arch/riscv/kernel/kexec_elf.c
index 56444c7bd34e..531d348db84d 100644
--- a/arch/riscv/kernel/kexec_elf.c
+++ b/arch/riscv/kernel/kexec_elf.c
@@ -28,7 +28,7 @@ static int riscv_kexec_elf_load(struct kimage *image, struct elfhdr *ehdr,
int i;
int ret = 0;
size_t size;
- struct kexec_buf kbuf;
+ struct kexec_buf kbuf = {};
const struct elf_phdr *phdr;
kbuf.image = image;
@@ -66,7 +66,7 @@ static int elf_find_pbase(struct kimage *image, unsigned long kernel_len,
{
int i;
int ret;
- struct kexec_buf kbuf;
+ struct kexec_buf kbuf = {};
const struct elf_phdr *phdr;
unsigned long lowest_paddr = ULONG_MAX;
unsigned long lowest_vaddr = ULONG_MAX;
diff --git a/arch/riscv/kernel/kexec_image.c b/arch/riscv/kernel/kexec_image.c
index 26a81774a78a..8f2eb900910b 100644
--- a/arch/riscv/kernel/kexec_image.c
+++ b/arch/riscv/kernel/kexec_image.c
@@ -41,7 +41,7 @@ static void *image_load(struct kimage *image,
struct riscv_image_header *h;
u64 flags;
bool be_image, be_kernel;
- struct kexec_buf kbuf;
+ struct kexec_buf kbuf = {};
int ret;
/* Check Image header */
diff --git a/arch/riscv/kernel/machine_kexec_file.c b/arch/riscv/kernel/machine_kexec_file.c
index e36104af2e24..b9eb41b0a975 100644
--- a/arch/riscv/kernel/machine_kexec_file.c
+++ b/arch/riscv/kernel/machine_kexec_file.c
@@ -261,7 +261,7 @@ int load_extra_segments(struct kimage *image, unsigned long kernel_start,
int ret;
void *fdt;
unsigned long initrd_pbase = 0UL;
- struct kexec_buf kbuf;
+ struct kexec_buf kbuf = {};
char *modified_cmdline = NULL;
kbuf.image = image;
diff --git a/arch/riscv/net/bpf_jit_comp64.c b/arch/riscv/net/bpf_jit_comp64.c
index 10e01ff06312..9883a55d61b5 100644
--- a/arch/riscv/net/bpf_jit_comp64.c
+++ b/arch/riscv/net/bpf_jit_comp64.c
@@ -1356,7 +1356,7 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
emit_mv(rd, rs, ctx);
#ifdef CONFIG_SMP
/* Load current CPU number in T1 */
- emit_ld(RV_REG_T1, offsetof(struct thread_info, cpu),
+ emit_lw(RV_REG_T1, offsetof(struct thread_info, cpu),
RV_REG_TP, ctx);
/* Load address of __per_cpu_offset array in T2 */
emit_addr(RV_REG_T2, (u64)&__per_cpu_offset, extra_pass, ctx);
@@ -1763,7 +1763,7 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
*/
if (insn->src_reg == 0 && insn->imm == BPF_FUNC_get_smp_processor_id) {
/* Load current CPU number in R0 */
- emit_ld(bpf_to_rv_reg(BPF_REG_0, ctx), offsetof(struct thread_info, cpu),
+ emit_lw(bpf_to_rv_reg(BPF_REG_0, ctx), offsetof(struct thread_info, cpu),
RV_REG_TP, ctx);
break;
}
diff --git a/arch/s390/kernel/kexec_elf.c b/arch/s390/kernel/kexec_elf.c
index 4d364de43799..143e34a4eca5 100644
--- a/arch/s390/kernel/kexec_elf.c
+++ b/arch/s390/kernel/kexec_elf.c
@@ -16,7 +16,7 @@
static int kexec_file_add_kernel_elf(struct kimage *image,
struct s390_load_data *data)
{
- struct kexec_buf buf;
+ struct kexec_buf buf = {};
const Elf_Ehdr *ehdr;
const Elf_Phdr *phdr;
Elf_Addr entry;
diff --git a/arch/s390/kernel/kexec_image.c b/arch/s390/kernel/kexec_image.c
index a32ce8bea745..9a439175723c 100644
--- a/arch/s390/kernel/kexec_image.c
+++ b/arch/s390/kernel/kexec_image.c
@@ -16,7 +16,7 @@
static int kexec_file_add_kernel_image(struct kimage *image,
struct s390_load_data *data)
{
- struct kexec_buf buf;
+ struct kexec_buf buf = {};
buf.image = image;
diff --git a/arch/s390/kernel/machine_kexec_file.c b/arch/s390/kernel/machine_kexec_file.c
index c2bac14dd668..a36d7311c668 100644
--- a/arch/s390/kernel/machine_kexec_file.c
+++ b/arch/s390/kernel/machine_kexec_file.c
@@ -129,7 +129,7 @@ static int kexec_file_update_purgatory(struct kimage *image,
static int kexec_file_add_purgatory(struct kimage *image,
struct s390_load_data *data)
{
- struct kexec_buf buf;
+ struct kexec_buf buf = {};
int ret;
buf.image = image;
@@ -152,7 +152,7 @@ static int kexec_file_add_purgatory(struct kimage *image,
static int kexec_file_add_initrd(struct kimage *image,
struct s390_load_data *data)
{
- struct kexec_buf buf;
+ struct kexec_buf buf = {};
int ret;
buf.image = image;
@@ -184,7 +184,7 @@ static int kexec_file_add_ipl_report(struct kimage *image,
{
__u32 *lc_ipl_parmblock_ptr;
unsigned int len, ncerts;
- struct kexec_buf buf;
+ struct kexec_buf buf = {};
unsigned long addr;
void *ptr, *end;
int ret;
diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c
index 4d09954ebf49..04457d88e589 100644
--- a/arch/s390/kernel/perf_cpum_cf.c
+++ b/arch/s390/kernel/perf_cpum_cf.c
@@ -760,8 +760,6 @@ static int __hw_perf_event_init(struct perf_event *event, unsigned int type)
break;
case PERF_TYPE_HARDWARE:
- if (is_sampling_event(event)) /* No sampling support */
- return -ENOENT;
ev = attr->config;
if (!attr->exclude_user && attr->exclude_kernel) {
/*
@@ -859,6 +857,8 @@ static int cpumf_pmu_event_init(struct perf_event *event)
unsigned int type = event->attr.type;
int err = -ENOENT;
+ if (is_sampling_event(event)) /* No sampling support */
+ return err;
if (type == PERF_TYPE_HARDWARE || type == PERF_TYPE_RAW)
err = __hw_perf_event_init(event, type);
else if (event->pmu->type == type)
diff --git a/arch/s390/kernel/perf_pai_crypto.c b/arch/s390/kernel/perf_pai_crypto.c
index f373a1009c45..9455f213dc20 100644
--- a/arch/s390/kernel/perf_pai_crypto.c
+++ b/arch/s390/kernel/perf_pai_crypto.c
@@ -285,10 +285,10 @@ static int paicrypt_event_init(struct perf_event *event)
/* PAI crypto PMU registered as PERF_TYPE_RAW, check event type */
if (a->type != PERF_TYPE_RAW && event->pmu->type != a->type)
return -ENOENT;
- /* PAI crypto event must be in valid range */
+ /* PAI crypto event must be in valid range, try others if not */
if (a->config < PAI_CRYPTO_BASE ||
a->config > PAI_CRYPTO_BASE + paicrypt_cnt)
- return -EINVAL;
+ return -ENOENT;
/* Allow only CRYPTO_ALL for sampling */
if (a->sample_period && a->config != PAI_CRYPTO_BASE)
return -EINVAL;
diff --git a/arch/s390/kernel/perf_pai_ext.c b/arch/s390/kernel/perf_pai_ext.c
index d827473e7f87..7b32935273ce 100644
--- a/arch/s390/kernel/perf_pai_ext.c
+++ b/arch/s390/kernel/perf_pai_ext.c
@@ -265,7 +265,7 @@ static int paiext_event_valid(struct perf_event *event)
event->hw.config_base = offsetof(struct paiext_cb, acc);
return 0;
}
- return -EINVAL;
+ return -ENOENT;
}
/* Might be called on different CPU than the one the event is intended for. */
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 60688be4e876..50eb57c976bc 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -335,7 +335,6 @@ pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr,
int nodat;
struct mm_struct *mm = vma->vm_mm;
- preempt_disable();
pgste = ptep_xchg_start(mm, addr, ptep);
nodat = !!(pgste_val(pgste) & _PGSTE_GPS_NODAT);
old = ptep_flush_lazy(mm, addr, ptep, nodat);
@@ -360,7 +359,6 @@ void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr,
} else {
set_pte(ptep, pte);
}
- preempt_enable();
}
static inline void pmdp_idte_local(struct mm_struct *mm,
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 58d890fe2100..52c8910ba2ef 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -2701,6 +2701,15 @@ config MITIGATION_TSA
security vulnerability on AMD CPUs which can lead to forwarding of
invalid info to subsequent instructions and thus can affect their
timing and thereby cause a leakage.
+
+config MITIGATION_VMSCAPE
+ bool "Mitigate VMSCAPE"
+ depends on KVM
+ default y
+ help
+ Enable mitigation for VMSCAPE attacks. VMSCAPE is a hardware security
+ vulnerability on Intel and AMD CPUs that may allow a guest to do
+ Spectre-v2 style attacks on the userspace hypervisor.
endif
config ARCH_HAS_ADD_PAGES
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 06fc0479a23f..751ca35386b0 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -495,6 +495,7 @@
#define X86_FEATURE_TSA_SQ_NO (21*32+11) /* AMD CPU not vulnerable to TSA-SQ */
#define X86_FEATURE_TSA_L1_NO (21*32+12) /* AMD CPU not vulnerable to TSA-L1 */
#define X86_FEATURE_CLEAR_CPU_BUF_VM (21*32+13) /* Clear CPU buffers using VERW before VMRUN */
+#define X86_FEATURE_IBPB_EXIT_TO_USER (21*32+14) /* Use IBPB on exit-to-userspace, see VMSCAPE bug */
/*
* BUG word(s)
@@ -551,4 +552,5 @@
#define X86_BUG_ITS X86_BUG( 1*32+ 7) /* "its" CPU is affected by Indirect Target Selection */
#define X86_BUG_ITS_NATIVE_ONLY X86_BUG( 1*32+ 8) /* "its_native_only" CPU is affected by ITS, VMX is not affected */
#define X86_BUG_TSA X86_BUG( 1*32+ 9) /* "tsa" CPU is affected by Transient Scheduler Attacks */
+#define X86_BUG_VMSCAPE X86_BUG( 1*32+10) /* "vmscape" CPU is affected by VMSCAPE attacks from guests */
#endif /* _ASM_X86_CPUFEATURES_H */
diff --git a/arch/x86/include/asm/entry-common.h b/arch/x86/include/asm/entry-common.h
index d535a97c7284..ce3eb6d5fdf9 100644
--- a/arch/x86/include/asm/entry-common.h
+++ b/arch/x86/include/asm/entry-common.h
@@ -93,6 +93,13 @@ static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
* 8 (ia32) bits.
*/
choose_random_kstack_offset(rdtsc());
+
+ /* Avoid unnecessary reads of 'x86_ibpb_exit_to_user' */
+ if (cpu_feature_enabled(X86_FEATURE_IBPB_EXIT_TO_USER) &&
+ this_cpu_read(x86_ibpb_exit_to_user)) {
+ indirect_branch_prediction_barrier();
+ this_cpu_write(x86_ibpb_exit_to_user, false);
+ }
}
#define arch_exit_to_user_mode_prepare arch_exit_to_user_mode_prepare
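Taken together with the KVM change later in this series, the per-CPU flag works as a deferred-IBPB handshake between VM-exit and the return to userspace. The sketch below is illustrative only: the wrapper function names are invented for this note, but the bodies mirror the actual hunks.

	/* KVM vcpu run loop, after the guest has run on this CPU */
	static void vmscape_mark_cpu_after_vmexit(void)
	{
		/* set before preemption is re-enabled, so the flag tags the
		 * CPU that actually ran the guest */
		if (cpu_feature_enabled(X86_FEATURE_IBPB_EXIT_TO_USER))
			this_cpu_write(x86_ibpb_exit_to_user, true);
	}

	/* arch_exit_to_user_mode_prepare(), just before returning to user */
	static void vmscape_flush_before_user(void)
	{
		/* one IBPB per "dirty" CPU, then the flag is cleared */
		if (cpu_feature_enabled(X86_FEATURE_IBPB_EXIT_TO_USER) &&
		    this_cpu_read(x86_ibpb_exit_to_user)) {
			indirect_branch_prediction_barrier();
			this_cpu_write(x86_ibpb_exit_to_user, false);
		}
	}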
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index 10f261678749..e29f82466f43 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -530,6 +530,8 @@ void alternative_msr_write(unsigned int msr, u64 val, unsigned int feature)
: "memory");
}
+DECLARE_PER_CPU(bool, x86_ibpb_exit_to_user);
+
static inline void indirect_branch_prediction_barrier(void)
{
asm_inline volatile(ALTERNATIVE("", "call write_ibpb", X86_FEATURE_IBPB)
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index af838b8d845c..36dcfc5105be 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -96,6 +96,9 @@ static void __init its_update_mitigation(void);
static void __init its_apply_mitigation(void);
static void __init tsa_select_mitigation(void);
static void __init tsa_apply_mitigation(void);
+static void __init vmscape_select_mitigation(void);
+static void __init vmscape_update_mitigation(void);
+static void __init vmscape_apply_mitigation(void);
/* The base value of the SPEC_CTRL MSR without task-specific bits set */
u64 x86_spec_ctrl_base;
@@ -105,6 +108,14 @@ EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);
DEFINE_PER_CPU(u64, x86_spec_ctrl_current);
EXPORT_PER_CPU_SYMBOL_GPL(x86_spec_ctrl_current);
+/*
+ * Set when the CPU has run a potentially malicious guest. An IBPB will
+ * be needed before running userspace. That IBPB will flush the branch
+ * predictor content.
+ */
+DEFINE_PER_CPU(bool, x86_ibpb_exit_to_user);
+EXPORT_PER_CPU_SYMBOL_GPL(x86_ibpb_exit_to_user);
+
u64 x86_pred_cmd __ro_after_init = PRED_CMD_IBPB;
static u64 __ro_after_init x86_arch_cap_msr;
@@ -262,6 +273,7 @@ void __init cpu_select_mitigations(void)
its_select_mitigation();
bhi_select_mitigation();
tsa_select_mitigation();
+ vmscape_select_mitigation();
/*
* After mitigations are selected, some may need to update their
@@ -293,6 +305,7 @@ void __init cpu_select_mitigations(void)
bhi_update_mitigation();
/* srso_update_mitigation() depends on retbleed_update_mitigation(). */
srso_update_mitigation();
+ vmscape_update_mitigation();
spectre_v1_apply_mitigation();
spectre_v2_apply_mitigation();
@@ -310,6 +323,7 @@ void __init cpu_select_mitigations(void)
its_apply_mitigation();
bhi_apply_mitigation();
tsa_apply_mitigation();
+ vmscape_apply_mitigation();
}
/*
@@ -2538,88 +2552,6 @@ static void update_mds_branch_idle(void)
}
}
-#define MDS_MSG_SMT "MDS CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html for more details.\n"
-#define TAA_MSG_SMT "TAA CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/tsx_async_abort.html for more details.\n"
-#define MMIO_MSG_SMT "MMIO Stale Data CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/processor_mmio_stale_data.html for more details.\n"
-
-void cpu_bugs_smt_update(void)
-{
- mutex_lock(&spec_ctrl_mutex);
-
- if (sched_smt_active() && unprivileged_ebpf_enabled() &&
- spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE)
- pr_warn_once(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG);
-
- switch (spectre_v2_user_stibp) {
- case SPECTRE_V2_USER_NONE:
- break;
- case SPECTRE_V2_USER_STRICT:
- case SPECTRE_V2_USER_STRICT_PREFERRED:
- update_stibp_strict();
- break;
- case SPECTRE_V2_USER_PRCTL:
- case SPECTRE_V2_USER_SECCOMP:
- update_indir_branch_cond();
- break;
- }
-
- switch (mds_mitigation) {
- case MDS_MITIGATION_FULL:
- case MDS_MITIGATION_AUTO:
- case MDS_MITIGATION_VMWERV:
- if (sched_smt_active() && !boot_cpu_has(X86_BUG_MSBDS_ONLY))
- pr_warn_once(MDS_MSG_SMT);
- update_mds_branch_idle();
- break;
- case MDS_MITIGATION_OFF:
- break;
- }
-
- switch (taa_mitigation) {
- case TAA_MITIGATION_VERW:
- case TAA_MITIGATION_AUTO:
- case TAA_MITIGATION_UCODE_NEEDED:
- if (sched_smt_active())
- pr_warn_once(TAA_MSG_SMT);
- break;
- case TAA_MITIGATION_TSX_DISABLED:
- case TAA_MITIGATION_OFF:
- break;
- }
-
- switch (mmio_mitigation) {
- case MMIO_MITIGATION_VERW:
- case MMIO_MITIGATION_AUTO:
- case MMIO_MITIGATION_UCODE_NEEDED:
- if (sched_smt_active())
- pr_warn_once(MMIO_MSG_SMT);
- break;
- case MMIO_MITIGATION_OFF:
- break;
- }
-
- switch (tsa_mitigation) {
- case TSA_MITIGATION_USER_KERNEL:
- case TSA_MITIGATION_VM:
- case TSA_MITIGATION_AUTO:
- case TSA_MITIGATION_FULL:
- /*
- * TSA-SQ can potentially lead to info leakage between
- * SMT threads.
- */
- if (sched_smt_active())
- static_branch_enable(&cpu_buf_idle_clear);
- else
- static_branch_disable(&cpu_buf_idle_clear);
- break;
- case TSA_MITIGATION_NONE:
- case TSA_MITIGATION_UCODE_NEEDED:
- break;
- }
-
- mutex_unlock(&spec_ctrl_mutex);
-}
-
#undef pr_fmt
#define pr_fmt(fmt) "Speculative Store Bypass: " fmt
@@ -3331,8 +3263,184 @@ static void __init srso_apply_mitigation(void)
}
#undef pr_fmt
+#define pr_fmt(fmt) "VMSCAPE: " fmt
+
+enum vmscape_mitigations {
+ VMSCAPE_MITIGATION_NONE,
+ VMSCAPE_MITIGATION_AUTO,
+ VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER,
+ VMSCAPE_MITIGATION_IBPB_ON_VMEXIT,
+};
+
+static const char * const vmscape_strings[] = {
+ [VMSCAPE_MITIGATION_NONE] = "Vulnerable",
+ /* [VMSCAPE_MITIGATION_AUTO] */
+ [VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER] = "Mitigation: IBPB before exit to userspace",
+ [VMSCAPE_MITIGATION_IBPB_ON_VMEXIT] = "Mitigation: IBPB on VMEXIT",
+};
+
+static enum vmscape_mitigations vmscape_mitigation __ro_after_init =
+ IS_ENABLED(CONFIG_MITIGATION_VMSCAPE) ? VMSCAPE_MITIGATION_AUTO : VMSCAPE_MITIGATION_NONE;
+
+static int __init vmscape_parse_cmdline(char *str)
+{
+ if (!str)
+ return -EINVAL;
+
+ if (!strcmp(str, "off")) {
+ vmscape_mitigation = VMSCAPE_MITIGATION_NONE;
+ } else if (!strcmp(str, "ibpb")) {
+ vmscape_mitigation = VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER;
+ } else if (!strcmp(str, "force")) {
+ setup_force_cpu_bug(X86_BUG_VMSCAPE);
+ vmscape_mitigation = VMSCAPE_MITIGATION_AUTO;
+ } else {
+ pr_err("Ignoring unknown vmscape=%s option.\n", str);
+ }
+
+ return 0;
+}
+early_param("vmscape", vmscape_parse_cmdline);
+
+static void __init vmscape_select_mitigation(void)
+{
+ if (cpu_mitigations_off() ||
+ !boot_cpu_has_bug(X86_BUG_VMSCAPE) ||
+ !boot_cpu_has(X86_FEATURE_IBPB)) {
+ vmscape_mitigation = VMSCAPE_MITIGATION_NONE;
+ return;
+ }
+
+ if (vmscape_mitigation == VMSCAPE_MITIGATION_AUTO)
+ vmscape_mitigation = VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER;
+}
+
+static void __init vmscape_update_mitigation(void)
+{
+ if (!boot_cpu_has_bug(X86_BUG_VMSCAPE))
+ return;
+
+ if (retbleed_mitigation == RETBLEED_MITIGATION_IBPB ||
+ srso_mitigation == SRSO_MITIGATION_IBPB_ON_VMEXIT)
+ vmscape_mitigation = VMSCAPE_MITIGATION_IBPB_ON_VMEXIT;
+
+ pr_info("%s\n", vmscape_strings[vmscape_mitigation]);
+}
+
+static void __init vmscape_apply_mitigation(void)
+{
+ if (vmscape_mitigation == VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER)
+ setup_force_cpu_cap(X86_FEATURE_IBPB_EXIT_TO_USER);
+}
+
+#undef pr_fmt
#define pr_fmt(fmt) fmt
+#define MDS_MSG_SMT "MDS CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html for more details.\n"
+#define TAA_MSG_SMT "TAA CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/tsx_async_abort.html for more details.\n"
+#define MMIO_MSG_SMT "MMIO Stale Data CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/processor_mmio_stale_data.html for more details.\n"
+#define VMSCAPE_MSG_SMT "VMSCAPE: SMT on, STIBP is required for full protection. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/vmscape.html for more details.\n"
+
+void cpu_bugs_smt_update(void)
+{
+ mutex_lock(&spec_ctrl_mutex);
+
+ if (sched_smt_active() && unprivileged_ebpf_enabled() &&
+ spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE)
+ pr_warn_once(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG);
+
+ switch (spectre_v2_user_stibp) {
+ case SPECTRE_V2_USER_NONE:
+ break;
+ case SPECTRE_V2_USER_STRICT:
+ case SPECTRE_V2_USER_STRICT_PREFERRED:
+ update_stibp_strict();
+ break;
+ case SPECTRE_V2_USER_PRCTL:
+ case SPECTRE_V2_USER_SECCOMP:
+ update_indir_branch_cond();
+ break;
+ }
+
+ switch (mds_mitigation) {
+ case MDS_MITIGATION_FULL:
+ case MDS_MITIGATION_AUTO:
+ case MDS_MITIGATION_VMWERV:
+ if (sched_smt_active() && !boot_cpu_has(X86_BUG_MSBDS_ONLY))
+ pr_warn_once(MDS_MSG_SMT);
+ update_mds_branch_idle();
+ break;
+ case MDS_MITIGATION_OFF:
+ break;
+ }
+
+ switch (taa_mitigation) {
+ case TAA_MITIGATION_VERW:
+ case TAA_MITIGATION_AUTO:
+ case TAA_MITIGATION_UCODE_NEEDED:
+ if (sched_smt_active())
+ pr_warn_once(TAA_MSG_SMT);
+ break;
+ case TAA_MITIGATION_TSX_DISABLED:
+ case TAA_MITIGATION_OFF:
+ break;
+ }
+
+ switch (mmio_mitigation) {
+ case MMIO_MITIGATION_VERW:
+ case MMIO_MITIGATION_AUTO:
+ case MMIO_MITIGATION_UCODE_NEEDED:
+ if (sched_smt_active())
+ pr_warn_once(MMIO_MSG_SMT);
+ break;
+ case MMIO_MITIGATION_OFF:
+ break;
+ }
+
+ switch (tsa_mitigation) {
+ case TSA_MITIGATION_USER_KERNEL:
+ case TSA_MITIGATION_VM:
+ case TSA_MITIGATION_AUTO:
+ case TSA_MITIGATION_FULL:
+ /*
+ * TSA-SQ can potentially lead to info leakage between
+ * SMT threads.
+ */
+ if (sched_smt_active())
+ static_branch_enable(&cpu_buf_idle_clear);
+ else
+ static_branch_disable(&cpu_buf_idle_clear);
+ break;
+ case TSA_MITIGATION_NONE:
+ case TSA_MITIGATION_UCODE_NEEDED:
+ break;
+ }
+
+ switch (vmscape_mitigation) {
+ case VMSCAPE_MITIGATION_NONE:
+ case VMSCAPE_MITIGATION_AUTO:
+ break;
+ case VMSCAPE_MITIGATION_IBPB_ON_VMEXIT:
+ case VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER:
+ /*
+ * Hypervisors can be attacked across SMT threads; warn when SMT is on
+ * and STIBP is not already enabled system-wide.
+ *
+ * Intel eIBRS (!AUTOIBRS) implies STIBP on.
+ */
+ if (!sched_smt_active() ||
+ spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
+ spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED ||
+ (spectre_v2_in_eibrs_mode(spectre_v2_enabled) &&
+ !boot_cpu_has(X86_FEATURE_AUTOIBRS)))
+ break;
+ pr_warn_once(VMSCAPE_MSG_SMT);
+ break;
+ }
+
+ mutex_unlock(&spec_ctrl_mutex);
+}
+
#ifdef CONFIG_SYSFS
#define L1TF_DEFAULT_MSG "Mitigation: PTE Inversion"
@@ -3578,6 +3686,11 @@ static ssize_t tsa_show_state(char *buf)
return sysfs_emit(buf, "%s\n", tsa_strings[tsa_mitigation]);
}
+static ssize_t vmscape_show_state(char *buf)
+{
+ return sysfs_emit(buf, "%s\n", vmscape_strings[vmscape_mitigation]);
+}
+
static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
char *buf, unsigned int bug)
{
@@ -3644,6 +3757,9 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
case X86_BUG_TSA:
return tsa_show_state(buf);
+ case X86_BUG_VMSCAPE:
+ return vmscape_show_state(buf);
+
default:
break;
}
@@ -3735,6 +3851,11 @@ ssize_t cpu_show_tsa(struct device *dev, struct device_attribute *attr, char *bu
{
return cpu_show_common(dev, attr, buf, X86_BUG_TSA);
}
+
+ssize_t cpu_show_vmscape(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return cpu_show_common(dev, attr, buf, X86_BUG_VMSCAPE);
+}
#endif
void __warn_thunk(void)
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 34a054181c4d..f98ec9c7fc07 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1236,55 +1236,71 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
#define ITS_NATIVE_ONLY BIT(9)
/* CPU is affected by Transient Scheduler Attacks */
#define TSA BIT(10)
+/* CPU is affected by VMSCAPE */
+#define VMSCAPE BIT(11)
static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
- VULNBL_INTEL_STEPS(INTEL_IVYBRIDGE, X86_STEP_MAX, SRBDS),
- VULNBL_INTEL_STEPS(INTEL_HASWELL, X86_STEP_MAX, SRBDS),
- VULNBL_INTEL_STEPS(INTEL_HASWELL_L, X86_STEP_MAX, SRBDS),
- VULNBL_INTEL_STEPS(INTEL_HASWELL_G, X86_STEP_MAX, SRBDS),
- VULNBL_INTEL_STEPS(INTEL_HASWELL_X, X86_STEP_MAX, MMIO),
- VULNBL_INTEL_STEPS(INTEL_BROADWELL_D, X86_STEP_MAX, MMIO),
- VULNBL_INTEL_STEPS(INTEL_BROADWELL_G, X86_STEP_MAX, SRBDS),
- VULNBL_INTEL_STEPS(INTEL_BROADWELL_X, X86_STEP_MAX, MMIO),
- VULNBL_INTEL_STEPS(INTEL_BROADWELL, X86_STEP_MAX, SRBDS),
- VULNBL_INTEL_STEPS(INTEL_SKYLAKE_X, 0x5, MMIO | RETBLEED | GDS),
- VULNBL_INTEL_STEPS(INTEL_SKYLAKE_X, X86_STEP_MAX, MMIO | RETBLEED | GDS | ITS),
- VULNBL_INTEL_STEPS(INTEL_SKYLAKE_L, X86_STEP_MAX, MMIO | RETBLEED | GDS | SRBDS),
- VULNBL_INTEL_STEPS(INTEL_SKYLAKE, X86_STEP_MAX, MMIO | RETBLEED | GDS | SRBDS),
- VULNBL_INTEL_STEPS(INTEL_KABYLAKE_L, 0xb, MMIO | RETBLEED | GDS | SRBDS),
- VULNBL_INTEL_STEPS(INTEL_KABYLAKE_L, X86_STEP_MAX, MMIO | RETBLEED | GDS | SRBDS | ITS),
- VULNBL_INTEL_STEPS(INTEL_KABYLAKE, 0xc, MMIO | RETBLEED | GDS | SRBDS),
- VULNBL_INTEL_STEPS(INTEL_KABYLAKE, X86_STEP_MAX, MMIO | RETBLEED | GDS | SRBDS | ITS),
- VULNBL_INTEL_STEPS(INTEL_CANNONLAKE_L, X86_STEP_MAX, RETBLEED),
+ VULNBL_INTEL_STEPS(INTEL_SANDYBRIDGE_X, X86_STEP_MAX, VMSCAPE),
+ VULNBL_INTEL_STEPS(INTEL_SANDYBRIDGE, X86_STEP_MAX, VMSCAPE),
+ VULNBL_INTEL_STEPS(INTEL_IVYBRIDGE_X, X86_STEP_MAX, VMSCAPE),
+ VULNBL_INTEL_STEPS(INTEL_IVYBRIDGE, X86_STEP_MAX, SRBDS | VMSCAPE),
+ VULNBL_INTEL_STEPS(INTEL_HASWELL, X86_STEP_MAX, SRBDS | VMSCAPE),
+ VULNBL_INTEL_STEPS(INTEL_HASWELL_L, X86_STEP_MAX, SRBDS | VMSCAPE),
+ VULNBL_INTEL_STEPS(INTEL_HASWELL_G, X86_STEP_MAX, SRBDS | VMSCAPE),
+ VULNBL_INTEL_STEPS(INTEL_HASWELL_X, X86_STEP_MAX, MMIO | VMSCAPE),
+ VULNBL_INTEL_STEPS(INTEL_BROADWELL_D, X86_STEP_MAX, MMIO | VMSCAPE),
+ VULNBL_INTEL_STEPS(INTEL_BROADWELL_X, X86_STEP_MAX, MMIO | VMSCAPE),
+ VULNBL_INTEL_STEPS(INTEL_BROADWELL_G, X86_STEP_MAX, SRBDS | VMSCAPE),
+ VULNBL_INTEL_STEPS(INTEL_BROADWELL, X86_STEP_MAX, SRBDS | VMSCAPE),
+ VULNBL_INTEL_STEPS(INTEL_SKYLAKE_X, 0x5, MMIO | RETBLEED | GDS | VMSCAPE),
+ VULNBL_INTEL_STEPS(INTEL_SKYLAKE_X, X86_STEP_MAX, MMIO | RETBLEED | GDS | ITS | VMSCAPE),
+ VULNBL_INTEL_STEPS(INTEL_SKYLAKE_L, X86_STEP_MAX, MMIO | RETBLEED | GDS | SRBDS | VMSCAPE),
+ VULNBL_INTEL_STEPS(INTEL_SKYLAKE, X86_STEP_MAX, MMIO | RETBLEED | GDS | SRBDS | VMSCAPE),
+ VULNBL_INTEL_STEPS(INTEL_KABYLAKE_L, 0xb, MMIO | RETBLEED | GDS | SRBDS | VMSCAPE),
+ VULNBL_INTEL_STEPS(INTEL_KABYLAKE_L, X86_STEP_MAX, MMIO | RETBLEED | GDS | SRBDS | ITS | VMSCAPE),
+ VULNBL_INTEL_STEPS(INTEL_KABYLAKE, 0xc, MMIO | RETBLEED | GDS | SRBDS | VMSCAPE),
+ VULNBL_INTEL_STEPS(INTEL_KABYLAKE, X86_STEP_MAX, MMIO | RETBLEED | GDS | SRBDS | ITS | VMSCAPE),
+ VULNBL_INTEL_STEPS(INTEL_CANNONLAKE_L, X86_STEP_MAX, RETBLEED | VMSCAPE),
VULNBL_INTEL_STEPS(INTEL_ICELAKE_L, X86_STEP_MAX, MMIO | MMIO_SBDS | RETBLEED | GDS | ITS | ITS_NATIVE_ONLY),
VULNBL_INTEL_STEPS(INTEL_ICELAKE_D, X86_STEP_MAX, MMIO | GDS | ITS | ITS_NATIVE_ONLY),
VULNBL_INTEL_STEPS(INTEL_ICELAKE_X, X86_STEP_MAX, MMIO | GDS | ITS | ITS_NATIVE_ONLY),
- VULNBL_INTEL_STEPS(INTEL_COMETLAKE, X86_STEP_MAX, MMIO | MMIO_SBDS | RETBLEED | GDS | ITS),
- VULNBL_INTEL_STEPS(INTEL_COMETLAKE_L, 0x0, MMIO | RETBLEED | ITS),
- VULNBL_INTEL_STEPS(INTEL_COMETLAKE_L, X86_STEP_MAX, MMIO | MMIO_SBDS | RETBLEED | GDS | ITS),
+ VULNBL_INTEL_STEPS(INTEL_COMETLAKE, X86_STEP_MAX, MMIO | MMIO_SBDS | RETBLEED | GDS | ITS | VMSCAPE),
+ VULNBL_INTEL_STEPS(INTEL_COMETLAKE_L, 0x0, MMIO | RETBLEED | ITS | VMSCAPE),
+ VULNBL_INTEL_STEPS(INTEL_COMETLAKE_L, X86_STEP_MAX, MMIO | MMIO_SBDS | RETBLEED | GDS | ITS | VMSCAPE),
VULNBL_INTEL_STEPS(INTEL_TIGERLAKE_L, X86_STEP_MAX, GDS | ITS | ITS_NATIVE_ONLY),
VULNBL_INTEL_STEPS(INTEL_TIGERLAKE, X86_STEP_MAX, GDS | ITS | ITS_NATIVE_ONLY),
VULNBL_INTEL_STEPS(INTEL_LAKEFIELD, X86_STEP_MAX, MMIO | MMIO_SBDS | RETBLEED),
VULNBL_INTEL_STEPS(INTEL_ROCKETLAKE, X86_STEP_MAX, MMIO | RETBLEED | GDS | ITS | ITS_NATIVE_ONLY),
- VULNBL_INTEL_TYPE(INTEL_ALDERLAKE, ATOM, RFDS),
- VULNBL_INTEL_STEPS(INTEL_ALDERLAKE_L, X86_STEP_MAX, RFDS),
- VULNBL_INTEL_TYPE(INTEL_RAPTORLAKE, ATOM, RFDS),
- VULNBL_INTEL_STEPS(INTEL_RAPTORLAKE_P, X86_STEP_MAX, RFDS),
- VULNBL_INTEL_STEPS(INTEL_RAPTORLAKE_S, X86_STEP_MAX, RFDS),
- VULNBL_INTEL_STEPS(INTEL_ATOM_GRACEMONT, X86_STEP_MAX, RFDS),
+ VULNBL_INTEL_TYPE(INTEL_ALDERLAKE, ATOM, RFDS | VMSCAPE),
+ VULNBL_INTEL_STEPS(INTEL_ALDERLAKE, X86_STEP_MAX, VMSCAPE),
+ VULNBL_INTEL_STEPS(INTEL_ALDERLAKE_L, X86_STEP_MAX, RFDS | VMSCAPE),
+ VULNBL_INTEL_TYPE(INTEL_RAPTORLAKE, ATOM, RFDS | VMSCAPE),
+ VULNBL_INTEL_STEPS(INTEL_RAPTORLAKE, X86_STEP_MAX, VMSCAPE),
+ VULNBL_INTEL_STEPS(INTEL_RAPTORLAKE_P, X86_STEP_MAX, RFDS | VMSCAPE),
+ VULNBL_INTEL_STEPS(INTEL_RAPTORLAKE_S, X86_STEP_MAX, RFDS | VMSCAPE),
+ VULNBL_INTEL_STEPS(INTEL_METEORLAKE_L, X86_STEP_MAX, VMSCAPE),
+ VULNBL_INTEL_STEPS(INTEL_ARROWLAKE_H, X86_STEP_MAX, VMSCAPE),
+ VULNBL_INTEL_STEPS(INTEL_ARROWLAKE, X86_STEP_MAX, VMSCAPE),
+ VULNBL_INTEL_STEPS(INTEL_ARROWLAKE_U, X86_STEP_MAX, VMSCAPE),
+ VULNBL_INTEL_STEPS(INTEL_LUNARLAKE_M, X86_STEP_MAX, VMSCAPE),
+ VULNBL_INTEL_STEPS(INTEL_SAPPHIRERAPIDS_X, X86_STEP_MAX, VMSCAPE),
+ VULNBL_INTEL_STEPS(INTEL_GRANITERAPIDS_X, X86_STEP_MAX, VMSCAPE),
+ VULNBL_INTEL_STEPS(INTEL_EMERALDRAPIDS_X, X86_STEP_MAX, VMSCAPE),
+ VULNBL_INTEL_STEPS(INTEL_ATOM_GRACEMONT, X86_STEP_MAX, RFDS | VMSCAPE),
VULNBL_INTEL_STEPS(INTEL_ATOM_TREMONT, X86_STEP_MAX, MMIO | MMIO_SBDS | RFDS),
VULNBL_INTEL_STEPS(INTEL_ATOM_TREMONT_D, X86_STEP_MAX, MMIO | RFDS),
VULNBL_INTEL_STEPS(INTEL_ATOM_TREMONT_L, X86_STEP_MAX, MMIO | MMIO_SBDS | RFDS),
VULNBL_INTEL_STEPS(INTEL_ATOM_GOLDMONT, X86_STEP_MAX, RFDS),
VULNBL_INTEL_STEPS(INTEL_ATOM_GOLDMONT_D, X86_STEP_MAX, RFDS),
VULNBL_INTEL_STEPS(INTEL_ATOM_GOLDMONT_PLUS, X86_STEP_MAX, RFDS),
+ VULNBL_INTEL_STEPS(INTEL_ATOM_CRESTMONT_X, X86_STEP_MAX, VMSCAPE),
VULNBL_AMD(0x15, RETBLEED),
VULNBL_AMD(0x16, RETBLEED),
- VULNBL_AMD(0x17, RETBLEED | SMT_RSB | SRSO),
- VULNBL_HYGON(0x18, RETBLEED | SMT_RSB | SRSO),
- VULNBL_AMD(0x19, SRSO | TSA),
- VULNBL_AMD(0x1a, SRSO),
+ VULNBL_AMD(0x17, RETBLEED | SMT_RSB | SRSO | VMSCAPE),
+ VULNBL_HYGON(0x18, RETBLEED | SMT_RSB | SRSO | VMSCAPE),
+ VULNBL_AMD(0x19, SRSO | TSA | VMSCAPE),
+ VULNBL_AMD(0x1a, SRSO | VMSCAPE),
{}
};
@@ -1543,6 +1559,14 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
}
}
+ /*
+ * Set the bug only on bare-metal. A nested hypervisor should already be
+ * deploying IBPB to isolate itself from nested guests.
+ */
+ if (cpu_matches(cpu_vuln_blacklist, VMSCAPE) &&
+ !boot_cpu_has(X86_FEATURE_HYPERVISOR))
+ setup_force_cpu_bug(X86_BUG_VMSCAPE);
+
if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN))
return;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 604490b1cb19..706b6fd56d3c 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -11011,6 +11011,15 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
wrmsrq(MSR_IA32_XFD_ERR, 0);
/*
+ * Mark this CPU as needing a branch predictor flush before running
+ * userspace. Must be done before enabling preemption to ensure it gets
+ * set for the CPU that actually ran the guest, and not the CPU that it
+ * may migrate to.
+ */
+ if (cpu_feature_enabled(X86_FEATURE_IBPB_EXIT_TO_USER))
+ this_cpu_write(x86_ibpb_exit_to_user, true);
+
+ /*
* Consume any pending interrupts, including the possible source of
* VM-Exit on SVM and any ticks that occur between VM-Exit and now.
* An instruction is required after local_irq_enable() to fully unblock
diff --git a/block/fops.c b/block/fops.c
index 82451ac8ff25..ddbc69c0922b 100644
--- a/block/fops.c
+++ b/block/fops.c
@@ -7,6 +7,7 @@
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/blkdev.h>
+#include <linux/blk-integrity.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/uio.h>
@@ -54,7 +55,6 @@ static ssize_t __blkdev_direct_IO_simple(struct kiocb *iocb,
struct bio bio;
ssize_t ret;
- WARN_ON_ONCE(iocb->ki_flags & IOCB_HAS_METADATA);
if (nr_pages <= DIO_INLINE_BIO_VECS)
vecs = inline_vecs;
else {
@@ -131,7 +131,7 @@ static void blkdev_bio_end_io(struct bio *bio)
if (bio->bi_status && !dio->bio.bi_status)
dio->bio.bi_status = bio->bi_status;
- if (!is_sync && (dio->iocb->ki_flags & IOCB_HAS_METADATA))
+ if (bio_integrity(bio))
bio_integrity_unmap_user(bio);
if (atomic_dec_and_test(&dio->ref)) {
@@ -233,7 +233,7 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
}
bio->bi_opf |= REQ_NOWAIT;
}
- if (!is_sync && (iocb->ki_flags & IOCB_HAS_METADATA)) {
+ if (iocb->ki_flags & IOCB_HAS_METADATA) {
ret = bio_integrity_map_iter(bio, iocb->private);
if (unlikely(ret))
goto fail;
@@ -301,7 +301,7 @@ static void blkdev_bio_end_io_async(struct bio *bio)
ret = blk_status_to_errno(bio->bi_status);
}
- if (iocb->ki_flags & IOCB_HAS_METADATA)
+ if (bio_integrity(bio))
bio_integrity_unmap_user(bio);
iocb->ki_complete(iocb, ret);
@@ -422,7 +422,8 @@ static ssize_t blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
}
nr_pages = bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS + 1);
- if (likely(nr_pages <= BIO_MAX_VECS)) {
+ if (likely(nr_pages <= BIO_MAX_VECS &&
+ !(iocb->ki_flags & IOCB_HAS_METADATA))) {
if (is_sync_kiocb(iocb))
return __blkdev_direct_IO_simple(iocb, iter, bdev,
nr_pages);
@@ -687,6 +688,8 @@ static int blkdev_open(struct inode *inode, struct file *filp)
if (bdev_can_atomic_write(bdev))
filp->f_mode |= FMODE_CAN_ATOMIC_WRITE;
+ if (blk_get_integrity(bdev->bd_disk))
+ filp->f_mode |= FMODE_HAS_METADATA;
ret = bdev_open(bdev, mode, filp->private_data, NULL, filp);
if (ret)
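Summary of the fops.c changes: direct I/O that carries IOCB_HAS_METADATA no longer takes the "simple" inline-bio path, since that path never maps user integrity buffers; the completion handlers now test bio_integrity(bio) directly, so the integrity unmap runs for both sync and async bios; and blkdev_open() marks files on integrity-capable disks with FMODE_HAS_METADATA.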
diff --git a/crypto/sha1.c b/crypto/sha1.c
index ecef4bf2d9c0..4fbf61cf0370 100644
--- a/crypto/sha1.c
+++ b/crypto/sha1.c
@@ -49,6 +49,18 @@ static int __crypto_sha1_import(struct sha1_ctx *ctx, const void *in)
return 0;
}
+static int __crypto_sha1_export_core(const struct sha1_ctx *ctx, void *out)
+{
+ memcpy(out, ctx, offsetof(struct sha1_ctx, buf));
+ return 0;
+}
+
+static int __crypto_sha1_import_core(struct sha1_ctx *ctx, const void *in)
+{
+ memcpy(ctx, in, offsetof(struct sha1_ctx, buf));
+ return 0;
+}
+
const u8 sha1_zero_message_hash[SHA1_DIGEST_SIZE] = {
0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d,
0x32, 0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90,
@@ -94,6 +106,16 @@ static int crypto_sha1_import(struct shash_desc *desc, const void *in)
return __crypto_sha1_import(SHA1_CTX(desc), in);
}
+static int crypto_sha1_export_core(struct shash_desc *desc, void *out)
+{
+ return __crypto_sha1_export_core(SHA1_CTX(desc), out);
+}
+
+static int crypto_sha1_import_core(struct shash_desc *desc, const void *in)
+{
+ return __crypto_sha1_import_core(SHA1_CTX(desc), in);
+}
+
#define HMAC_SHA1_KEY(tfm) ((struct hmac_sha1_key *)crypto_shash_ctx(tfm))
#define HMAC_SHA1_CTX(desc) ((struct hmac_sha1_ctx *)shash_desc_ctx(desc))
@@ -143,6 +165,19 @@ static int crypto_hmac_sha1_import(struct shash_desc *desc, const void *in)
return __crypto_sha1_import(&ctx->sha_ctx, in);
}
+static int crypto_hmac_sha1_export_core(struct shash_desc *desc, void *out)
+{
+ return __crypto_sha1_export_core(&HMAC_SHA1_CTX(desc)->sha_ctx, out);
+}
+
+static int crypto_hmac_sha1_import_core(struct shash_desc *desc, const void *in)
+{
+ struct hmac_sha1_ctx *ctx = HMAC_SHA1_CTX(desc);
+
+ ctx->ostate = HMAC_SHA1_KEY(desc->tfm)->ostate;
+ return __crypto_sha1_import_core(&ctx->sha_ctx, in);
+}
+
static struct shash_alg algs[] = {
{
.base.cra_name = "sha1",
@@ -157,6 +192,8 @@ static struct shash_alg algs[] = {
.digest = crypto_sha1_digest,
.export = crypto_sha1_export,
.import = crypto_sha1_import,
+ .export_core = crypto_sha1_export_core,
+ .import_core = crypto_sha1_import_core,
.descsize = sizeof(struct sha1_ctx),
.statesize = SHA1_SHASH_STATE_SIZE,
},
@@ -175,6 +212,8 @@ static struct shash_alg algs[] = {
.digest = crypto_hmac_sha1_digest,
.export = crypto_hmac_sha1_export,
.import = crypto_hmac_sha1_import,
+ .export_core = crypto_hmac_sha1_export_core,
+ .import_core = crypto_hmac_sha1_import_core,
.descsize = sizeof(struct hmac_sha1_ctx),
.statesize = SHA1_SHASH_STATE_SIZE,
},
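The export_core/import_core helpers added above lean on the context layout: copying up to offsetof(struct sha1_ctx, buf) serializes every field that precedes the partial-block buffer, while deliberately excluding the buffered partial block itself from the "core" state. The SHA-2 families below repeat the same pattern.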
diff --git a/crypto/sha256.c b/crypto/sha256.c
index 052806559f06..fb81defe084c 100644
--- a/crypto/sha256.c
+++ b/crypto/sha256.c
@@ -50,6 +50,19 @@ static int __crypto_sha256_import(struct __sha256_ctx *ctx, const void *in)
return 0;
}
+static int __crypto_sha256_export_core(const struct __sha256_ctx *ctx,
+ void *out)
+{
+ memcpy(out, ctx, offsetof(struct __sha256_ctx, buf));
+ return 0;
+}
+
+static int __crypto_sha256_import_core(struct __sha256_ctx *ctx, const void *in)
+{
+ memcpy(ctx, in, offsetof(struct __sha256_ctx, buf));
+ return 0;
+}
+
/* SHA-224 */
const u8 sha224_zero_message_hash[SHA224_DIGEST_SIZE] = {
@@ -98,6 +111,16 @@ static int crypto_sha224_import(struct shash_desc *desc, const void *in)
return __crypto_sha256_import(&SHA224_CTX(desc)->ctx, in);
}
+static int crypto_sha224_export_core(struct shash_desc *desc, void *out)
+{
+ return __crypto_sha256_export_core(&SHA224_CTX(desc)->ctx, out);
+}
+
+static int crypto_sha224_import_core(struct shash_desc *desc, const void *in)
+{
+ return __crypto_sha256_import_core(&SHA224_CTX(desc)->ctx, in);
+}
+
/* SHA-256 */
const u8 sha256_zero_message_hash[SHA256_DIGEST_SIZE] = {
@@ -146,6 +169,16 @@ static int crypto_sha256_import(struct shash_desc *desc, const void *in)
return __crypto_sha256_import(&SHA256_CTX(desc)->ctx, in);
}
+static int crypto_sha256_export_core(struct shash_desc *desc, void *out)
+{
+ return __crypto_sha256_export_core(&SHA256_CTX(desc)->ctx, out);
+}
+
+static int crypto_sha256_import_core(struct shash_desc *desc, const void *in)
+{
+ return __crypto_sha256_import_core(&SHA256_CTX(desc)->ctx, in);
+}
+
/* HMAC-SHA224 */
#define HMAC_SHA224_KEY(tfm) ((struct hmac_sha224_key *)crypto_shash_ctx(tfm))
@@ -198,6 +231,21 @@ static int crypto_hmac_sha224_import(struct shash_desc *desc, const void *in)
return __crypto_sha256_import(&ctx->ctx.sha_ctx, in);
}
+static int crypto_hmac_sha224_export_core(struct shash_desc *desc, void *out)
+{
+ return __crypto_sha256_export_core(&HMAC_SHA224_CTX(desc)->ctx.sha_ctx,
+ out);
+}
+
+static int crypto_hmac_sha224_import_core(struct shash_desc *desc,
+ const void *in)
+{
+ struct hmac_sha224_ctx *ctx = HMAC_SHA224_CTX(desc);
+
+ ctx->ctx.ostate = HMAC_SHA224_KEY(desc->tfm)->key.ostate;
+ return __crypto_sha256_import_core(&ctx->ctx.sha_ctx, in);
+}
+
/* HMAC-SHA256 */
#define HMAC_SHA256_KEY(tfm) ((struct hmac_sha256_key *)crypto_shash_ctx(tfm))
@@ -250,6 +298,21 @@ static int crypto_hmac_sha256_import(struct shash_desc *desc, const void *in)
return __crypto_sha256_import(&ctx->ctx.sha_ctx, in);
}
+static int crypto_hmac_sha256_export_core(struct shash_desc *desc, void *out)
+{
+ return __crypto_sha256_export_core(&HMAC_SHA256_CTX(desc)->ctx.sha_ctx,
+ out);
+}
+
+static int crypto_hmac_sha256_import_core(struct shash_desc *desc,
+ const void *in)
+{
+ struct hmac_sha256_ctx *ctx = HMAC_SHA256_CTX(desc);
+
+ ctx->ctx.ostate = HMAC_SHA256_KEY(desc->tfm)->key.ostate;
+ return __crypto_sha256_import_core(&ctx->ctx.sha_ctx, in);
+}
+
/* Algorithm definitions */
static struct shash_alg algs[] = {
@@ -266,6 +329,8 @@ static struct shash_alg algs[] = {
.digest = crypto_sha224_digest,
.export = crypto_sha224_export,
.import = crypto_sha224_import,
+ .export_core = crypto_sha224_export_core,
+ .import_core = crypto_sha224_import_core,
.descsize = sizeof(struct sha224_ctx),
.statesize = SHA256_SHASH_STATE_SIZE,
},
@@ -282,6 +347,8 @@ static struct shash_alg algs[] = {
.digest = crypto_sha256_digest,
.export = crypto_sha256_export,
.import = crypto_sha256_import,
+ .export_core = crypto_sha256_export_core,
+ .import_core = crypto_sha256_import_core,
.descsize = sizeof(struct sha256_ctx),
.statesize = SHA256_SHASH_STATE_SIZE,
},
@@ -300,6 +367,8 @@ static struct shash_alg algs[] = {
.digest = crypto_hmac_sha224_digest,
.export = crypto_hmac_sha224_export,
.import = crypto_hmac_sha224_import,
+ .export_core = crypto_hmac_sha224_export_core,
+ .import_core = crypto_hmac_sha224_import_core,
.descsize = sizeof(struct hmac_sha224_ctx),
.statesize = SHA256_SHASH_STATE_SIZE,
},
@@ -318,6 +387,8 @@ static struct shash_alg algs[] = {
.digest = crypto_hmac_sha256_digest,
.export = crypto_hmac_sha256_export,
.import = crypto_hmac_sha256_import,
+ .export_core = crypto_hmac_sha256_export_core,
+ .import_core = crypto_hmac_sha256_import_core,
.descsize = sizeof(struct hmac_sha256_ctx),
.statesize = SHA256_SHASH_STATE_SIZE,
},
diff --git a/crypto/sha512.c b/crypto/sha512.c
index fb1c520978ef..d320fe53913f 100644
--- a/crypto/sha512.c
+++ b/crypto/sha512.c
@@ -50,6 +50,19 @@ static int __crypto_sha512_import(struct __sha512_ctx *ctx, const void *in)
return 0;
}
+static int __crypto_sha512_export_core(const struct __sha512_ctx *ctx,
+ void *out)
+{
+ memcpy(out, ctx, offsetof(struct __sha512_ctx, buf));
+ return 0;
+}
+
+static int __crypto_sha512_import_core(struct __sha512_ctx *ctx, const void *in)
+{
+ memcpy(ctx, in, offsetof(struct __sha512_ctx, buf));
+ return 0;
+}
+
/* SHA-384 */
const u8 sha384_zero_message_hash[SHA384_DIGEST_SIZE] = {
@@ -100,6 +113,16 @@ static int crypto_sha384_import(struct shash_desc *desc, const void *in)
return __crypto_sha512_import(&SHA384_CTX(desc)->ctx, in);
}
+static int crypto_sha384_export_core(struct shash_desc *desc, void *out)
+{
+ return __crypto_sha512_export_core(&SHA384_CTX(desc)->ctx, out);
+}
+
+static int crypto_sha384_import_core(struct shash_desc *desc, const void *in)
+{
+ return __crypto_sha512_import_core(&SHA384_CTX(desc)->ctx, in);
+}
+
/* SHA-512 */
const u8 sha512_zero_message_hash[SHA512_DIGEST_SIZE] = {
@@ -152,6 +175,16 @@ static int crypto_sha512_import(struct shash_desc *desc, const void *in)
return __crypto_sha512_import(&SHA512_CTX(desc)->ctx, in);
}
+static int crypto_sha512_export_core(struct shash_desc *desc, void *out)
+{
+ return __crypto_sha512_export_core(&SHA512_CTX(desc)->ctx, out);
+}
+
+static int crypto_sha512_import_core(struct shash_desc *desc, const void *in)
+{
+ return __crypto_sha512_import_core(&SHA512_CTX(desc)->ctx, in);
+}
+
/* HMAC-SHA384 */
#define HMAC_SHA384_KEY(tfm) ((struct hmac_sha384_key *)crypto_shash_ctx(tfm))
@@ -204,6 +237,21 @@ static int crypto_hmac_sha384_import(struct shash_desc *desc, const void *in)
return __crypto_sha512_import(&ctx->ctx.sha_ctx, in);
}
+static int crypto_hmac_sha384_export_core(struct shash_desc *desc, void *out)
+{
+ return __crypto_sha512_export_core(&HMAC_SHA384_CTX(desc)->ctx.sha_ctx,
+ out);
+}
+
+static int crypto_hmac_sha384_import_core(struct shash_desc *desc,
+ const void *in)
+{
+ struct hmac_sha384_ctx *ctx = HMAC_SHA384_CTX(desc);
+
+ ctx->ctx.ostate = HMAC_SHA384_KEY(desc->tfm)->key.ostate;
+ return __crypto_sha512_import_core(&ctx->ctx.sha_ctx, in);
+}
+
/* HMAC-SHA512 */
#define HMAC_SHA512_KEY(tfm) ((struct hmac_sha512_key *)crypto_shash_ctx(tfm))
@@ -256,6 +304,21 @@ static int crypto_hmac_sha512_import(struct shash_desc *desc, const void *in)
return __crypto_sha512_import(&ctx->ctx.sha_ctx, in);
}
+static int crypto_hmac_sha512_export_core(struct shash_desc *desc, void *out)
+{
+ return __crypto_sha512_export_core(&HMAC_SHA512_CTX(desc)->ctx.sha_ctx,
+ out);
+}
+
+static int crypto_hmac_sha512_import_core(struct shash_desc *desc,
+ const void *in)
+{
+ struct hmac_sha512_ctx *ctx = HMAC_SHA512_CTX(desc);
+
+ ctx->ctx.ostate = HMAC_SHA512_KEY(desc->tfm)->key.ostate;
+ return __crypto_sha512_import_core(&ctx->ctx.sha_ctx, in);
+}
+
/* Algorithm definitions */
static struct shash_alg algs[] = {
@@ -272,6 +335,8 @@ static struct shash_alg algs[] = {
.digest = crypto_sha384_digest,
.export = crypto_sha384_export,
.import = crypto_sha384_import,
+ .export_core = crypto_sha384_export_core,
+ .import_core = crypto_sha384_import_core,
.descsize = sizeof(struct sha384_ctx),
.statesize = SHA512_SHASH_STATE_SIZE,
},
@@ -288,6 +353,8 @@ static struct shash_alg algs[] = {
.digest = crypto_sha512_digest,
.export = crypto_sha512_export,
.import = crypto_sha512_import,
+ .export_core = crypto_sha512_export_core,
+ .import_core = crypto_sha512_import_core,
.descsize = sizeof(struct sha512_ctx),
.statesize = SHA512_SHASH_STATE_SIZE,
},
@@ -306,6 +373,8 @@ static struct shash_alg algs[] = {
.digest = crypto_hmac_sha384_digest,
.export = crypto_hmac_sha384_export,
.import = crypto_hmac_sha384_import,
+ .export_core = crypto_hmac_sha384_export_core,
+ .import_core = crypto_hmac_sha384_import_core,
.descsize = sizeof(struct hmac_sha384_ctx),
.statesize = SHA512_SHASH_STATE_SIZE,
},
@@ -324,6 +393,8 @@ static struct shash_alg algs[] = {
.digest = crypto_hmac_sha512_digest,
.export = crypto_hmac_sha512_export,
.import = crypto_hmac_sha512_import,
+ .export_core = crypto_hmac_sha512_export_core,
+ .import_core = crypto_hmac_sha512_import_core,
.descsize = sizeof(struct hmac_sha512_ctx),
.statesize = SHA512_SHASH_STATE_SIZE,
},
diff --git a/drivers/accel/ivpu/ivpu_drv.c b/drivers/accel/ivpu/ivpu_drv.c
index 3d6d52492536..3289751b4757 100644
--- a/drivers/accel/ivpu/ivpu_drv.c
+++ b/drivers/accel/ivpu/ivpu_drv.c
@@ -677,7 +677,7 @@ static void ivpu_bo_unbind_all_user_contexts(struct ivpu_device *vdev)
static void ivpu_dev_fini(struct ivpu_device *vdev)
{
ivpu_jobs_abort_all(vdev);
- ivpu_pm_cancel_recovery(vdev);
+ ivpu_pm_disable_recovery(vdev);
ivpu_pm_disable(vdev);
ivpu_prepare_for_reset(vdev);
ivpu_shutdown(vdev);
diff --git a/drivers/accel/ivpu/ivpu_pm.c b/drivers/accel/ivpu/ivpu_pm.c
index eacda1dbe840..475ddc94f1cf 100644
--- a/drivers/accel/ivpu/ivpu_pm.c
+++ b/drivers/accel/ivpu/ivpu_pm.c
@@ -417,10 +417,10 @@ void ivpu_pm_init(struct ivpu_device *vdev)
ivpu_dbg(vdev, PM, "Autosuspend delay = %d\n", delay);
}
-void ivpu_pm_cancel_recovery(struct ivpu_device *vdev)
+void ivpu_pm_disable_recovery(struct ivpu_device *vdev)
{
drm_WARN_ON(&vdev->drm, delayed_work_pending(&vdev->pm->job_timeout_work));
- cancel_work_sync(&vdev->pm->recovery_work);
+ disable_work_sync(&vdev->pm->recovery_work);
}
void ivpu_pm_enable(struct ivpu_device *vdev)
diff --git a/drivers/accel/ivpu/ivpu_pm.h b/drivers/accel/ivpu/ivpu_pm.h
index 89b264cc0e3e..a2aa7a27f32e 100644
--- a/drivers/accel/ivpu/ivpu_pm.h
+++ b/drivers/accel/ivpu/ivpu_pm.h
@@ -25,7 +25,7 @@ struct ivpu_pm_info {
void ivpu_pm_init(struct ivpu_device *vdev);
void ivpu_pm_enable(struct ivpu_device *vdev);
void ivpu_pm_disable(struct ivpu_device *vdev);
-void ivpu_pm_cancel_recovery(struct ivpu_device *vdev);
+void ivpu_pm_disable_recovery(struct ivpu_device *vdev);
int ivpu_pm_suspend_cb(struct device *dev);
int ivpu_pm_resume_cb(struct device *dev);
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
index 98759d6199d3..65f0f56ad753 100644
--- a/drivers/acpi/arm64/iort.c
+++ b/drivers/acpi/arm64/iort.c
@@ -937,8 +937,10 @@ static u32 *iort_rmr_alloc_sids(u32 *sids, u32 count, u32 id_start,
new_sids = krealloc_array(sids, count + new_count,
sizeof(*new_sids), GFP_KERNEL);
- if (!new_sids)
+ if (!new_sids) {
+ kfree(sids);
return NULL;
+ }
for (i = count; i < total_count; i++)
new_sids[i] = id_start++;
diff --git a/drivers/acpi/riscv/cppc.c b/drivers/acpi/riscv/cppc.c
index 440cf9fb91aa..42c1a9052470 100644
--- a/drivers/acpi/riscv/cppc.c
+++ b/drivers/acpi/riscv/cppc.c
@@ -119,7 +119,7 @@ int cpc_read_ffh(int cpu, struct cpc_reg *reg, u64 *val)
*val = data.ret.value;
- return (data.ret.error) ? sbi_err_map_linux_errno(data.ret.error) : 0;
+ return data.ret.error;
}
return -EINVAL;
@@ -148,7 +148,7 @@ int cpc_write_ffh(int cpu, struct cpc_reg *reg, u64 val)
smp_call_function_single(cpu, cppc_ffh_csr_write, &data, 1);
- return (data.ret.error) ? sbi_err_map_linux_errno(data.ret.error) : 0;
+ return data.ret.error;
}
return -EINVAL;
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index efc575a00edd..008da0354fba 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -603,6 +603,7 @@ CPU_SHOW_VULN_FALLBACK(ghostwrite);
CPU_SHOW_VULN_FALLBACK(old_microcode);
CPU_SHOW_VULN_FALLBACK(indirect_target_selection);
CPU_SHOW_VULN_FALLBACK(tsa);
+CPU_SHOW_VULN_FALLBACK(vmscape);
static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
@@ -622,6 +623,7 @@ static DEVICE_ATTR(ghostwrite, 0444, cpu_show_ghostwrite, NULL);
static DEVICE_ATTR(old_microcode, 0444, cpu_show_old_microcode, NULL);
static DEVICE_ATTR(indirect_target_selection, 0444, cpu_show_indirect_target_selection, NULL);
static DEVICE_ATTR(tsa, 0444, cpu_show_tsa, NULL);
+static DEVICE_ATTR(vmscape, 0444, cpu_show_vmscape, NULL);
static struct attribute *cpu_root_vulnerabilities_attrs[] = {
&dev_attr_meltdown.attr,
@@ -642,6 +644,7 @@ static struct attribute *cpu_root_vulnerabilities_attrs[] = {
&dev_attr_old_microcode.attr,
&dev_attr_indirect_target_selection.attr,
&dev_attr_tsa.attr,
+ &dev_attr_vmscape.attr,
NULL
};
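Once the attribute is registered, the mitigation status is visible from userspace; assuming the usual vulnerabilities directory, reading it looks like:

	$ cat /sys/devices/system/cpu/vulnerabilities/vmscape
	Mitigation: IBPB before exit to userspace

where the string is one of the vmscape_strings from bugs.c, and unaffected systems report the generic "Not affected" fallback.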
diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
index bbc27ef9edf7..b4c79fde1979 100644
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -1554,13 +1554,15 @@ static void amd_pstate_epp_cpu_exit(struct cpufreq_policy *policy)
pr_debug("CPU %d exiting\n", policy->cpu);
}
-static int amd_pstate_epp_update_limit(struct cpufreq_policy *policy)
+static int amd_pstate_epp_update_limit(struct cpufreq_policy *policy, bool policy_change)
{
struct amd_cpudata *cpudata = policy->driver_data;
union perf_cached perf;
u8 epp;
- if (policy->min != cpudata->min_limit_freq || policy->max != cpudata->max_limit_freq)
+ if (policy_change ||
+ policy->min != cpudata->min_limit_freq ||
+ policy->max != cpudata->max_limit_freq)
amd_pstate_update_min_max_limit(policy);
if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
@@ -1584,7 +1586,7 @@ static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy)
cpudata->policy = policy->policy;
- ret = amd_pstate_epp_update_limit(policy);
+ ret = amd_pstate_epp_update_limit(policy, true);
if (ret)
return ret;
@@ -1626,13 +1628,14 @@ static int amd_pstate_suspend(struct cpufreq_policy *policy)
* min_perf value across kexec reboots. If this CPU is just resumed back without kexec,
* the limits, epp and desired perf will get reset to the cached values in cpudata struct
*/
- ret = amd_pstate_update_perf(policy, perf.bios_min_perf, 0U, 0U, 0U, false);
+ ret = amd_pstate_update_perf(policy, perf.bios_min_perf,
+ FIELD_GET(AMD_CPPC_DES_PERF_MASK, cpudata->cppc_req_cached),
+ FIELD_GET(AMD_CPPC_MAX_PERF_MASK, cpudata->cppc_req_cached),
+ FIELD_GET(AMD_CPPC_EPP_PERF_MASK, cpudata->cppc_req_cached),
+ false);
if (ret)
return ret;
- /* invalidate to ensure it's rewritten during resume */
- cpudata->cppc_req_cached = 0;
-
/* set this flag to avoid setting core offline*/
cpudata->suspended = true;
@@ -1658,7 +1661,7 @@ static int amd_pstate_epp_resume(struct cpufreq_policy *policy)
int ret;
/* enable amd pstate from suspend state*/
- ret = amd_pstate_epp_update_limit(policy);
+ ret = amd_pstate_epp_update_limit(policy, false);
if (ret)
return ret;
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index f366d35c5840..0d5d283a5429 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -1034,8 +1034,8 @@ static bool hybrid_register_perf_domain(unsigned int cpu)
if (!cpu_dev)
return false;
- if (em_dev_register_perf_domain(cpu_dev, HYBRID_EM_STATE_COUNT, &cb,
- cpumask_of(cpu), false))
+ if (em_dev_register_pd_no_update(cpu_dev, HYBRID_EM_STATE_COUNT, &cb,
+ cpumask_of(cpu), false))
return false;
cpudata->pd_registered = true;
diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c
index cae52c654a15..7685a8550d4b 100644
--- a/drivers/edac/altera_edac.c
+++ b/drivers/edac/altera_edac.c
@@ -128,7 +128,6 @@ static ssize_t altr_sdr_mc_err_inject_write(struct file *file,
ptemp = dma_alloc_coherent(mci->pdev, 16, &dma_handle, GFP_KERNEL);
if (!ptemp) {
- dma_free_coherent(mci->pdev, 16, ptemp, dma_handle);
edac_printk(KERN_ERR, EDAC_MC,
"Inject: Buffer Allocation error\n");
return -ENOMEM;
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index e43abb322fa6..d8ac40d0eb6f 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -3,6 +3,9 @@
# GPIO infrastructure and drivers
#
+config GPIOLIB_LEGACY
+ def_bool y
+
menuconfig GPIOLIB
bool "GPIO Support"
help
@@ -12,9 +15,6 @@ menuconfig GPIOLIB
If unsure, say N.
-config GPIOLIB_LEGACY
- def_bool y
-
if GPIOLIB
config GPIOLIB_FASTPATH_LIMIT
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 23484317a5fa..693357caa9a8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -448,7 +448,7 @@ static int psp_sw_init(struct amdgpu_ip_block *ip_block)
psp->cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
if (!psp->cmd) {
dev_err(adev->dev, "Failed to allocate memory to command buffer!\n");
- ret = -ENOMEM;
+ return -ENOMEM;
}
adev->psp.xgmi_context.supports_extended_data =
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index bf7c22f81cda..ba73518f5cdf 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -1462,17 +1462,12 @@ static int dce_v10_0_audio_init(struct amdgpu_device *adev)
static void dce_v10_0_audio_fini(struct amdgpu_device *adev)
{
- int i;
-
if (!amdgpu_audio)
return;
if (!adev->mode_info.audio.enabled)
return;
- for (i = 0; i < adev->mode_info.audio.num_pins; i++)
- dce_v10_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
-
adev->mode_info.audio.enabled = false;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index 47e05783c4a0..b01d88d078fa 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -1511,17 +1511,12 @@ static int dce_v11_0_audio_init(struct amdgpu_device *adev)
static void dce_v11_0_audio_fini(struct amdgpu_device *adev)
{
- int i;
-
if (!amdgpu_audio)
return;
if (!adev->mode_info.audio.enabled)
return;
- for (i = 0; i < adev->mode_info.audio.num_pins; i++)
- dce_v11_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
-
adev->mode_info.audio.enabled = false;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index 276c025c4c03..81760a26f2ff 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -1451,17 +1451,12 @@ static int dce_v6_0_audio_init(struct amdgpu_device *adev)
static void dce_v6_0_audio_fini(struct amdgpu_device *adev)
{
- int i;
-
if (!amdgpu_audio)
return;
if (!adev->mode_info.audio.enabled)
return;
- for (i = 0; i < adev->mode_info.audio.num_pins; i++)
- dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
-
adev->mode_info.audio.enabled = false;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index e62ccf9eb73d..19a265bd4d19 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -1443,17 +1443,12 @@ static int dce_v8_0_audio_init(struct amdgpu_device *adev)
static void dce_v8_0_audio_fini(struct amdgpu_device *adev)
{
- int i;
-
if (!amdgpu_audio)
return;
if (!adev->mode_info.audio.enabled)
return;
- for (i = 0; i < adev->mode_info.audio.num_pins; i++)
- dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
-
adev->mode_info.audio.enabled = false;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
index 28eb846280dd..3f6a828cad8a 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
@@ -641,8 +641,9 @@ static int mes_v11_0_misc_op(struct amdgpu_mes *mes,
break;
case MES_MISC_OP_CHANGE_CONFIG:
if ((mes->adev->mes.sched_version & AMDGPU_MES_VERSION_MASK) < 0x63) {
- dev_err(mes->adev->dev, "MES FW version must be larger than 0x63 to support limit single process feature.\n");
- return -EINVAL;
+ dev_warn_once(mes->adev->dev,
+ "MES FW version must be larger than 0x63 to support limit single process feature.\n");
+ return 0;
}
misc_pkt.opcode = MESAPI_MISC__CHANGE_CONFIG;
misc_pkt.change_config.opcode =
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
index e6d8eddda2bf..db6e41967f12 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
@@ -1377,7 +1377,7 @@ static int sdma_v6_0_sw_init(struct amdgpu_ip_block *ip_block)
switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {
case IP_VERSION(6, 0, 0):
- if ((adev->sdma.instance[0].fw_version >= 24) && !adev->sdma.disable_uq)
+ if ((adev->sdma.instance[0].fw_version >= 27) && !adev->sdma.disable_uq)
adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs;
break;
case IP_VERSION(6, 0, 1):
@@ -1385,11 +1385,11 @@ static int sdma_v6_0_sw_init(struct amdgpu_ip_block *ip_block)
adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs;
break;
case IP_VERSION(6, 0, 2):
- if ((adev->sdma.instance[0].fw_version >= 21) && !adev->sdma.disable_uq)
+ if ((adev->sdma.instance[0].fw_version >= 23) && !adev->sdma.disable_uq)
adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs;
break;
case IP_VERSION(6, 0, 3):
- if ((adev->sdma.instance[0].fw_version >= 25) && !adev->sdma.disable_uq)
+ if ((adev->sdma.instance[0].fw_version >= 27) && !adev->sdma.disable_uq)
adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs;
break;
case IP_VERSION(6, 1, 0):
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index a0ca3b2c6bd8..7808a647a306 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -8381,8 +8381,7 @@ static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
drm_add_modes_noedid(connector, 1920, 1080);
} else {
amdgpu_dm_connector_ddc_get_modes(connector, drm_edid);
- if (encoder && (connector->connector_type != DRM_MODE_CONNECTOR_eDP) &&
- (connector->connector_type != DRM_MODE_CONNECTOR_LVDS))
+ if (encoder)
amdgpu_dm_connector_add_common_modes(encoder, connector);
amdgpu_dm_connector_add_freesync_modes(connector, drm_edid);
}
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c
index 75fb77bca83b..01480a04f85e 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c
@@ -520,6 +520,15 @@ void dpp1_dppclk_control(
REG_UPDATE(DPP_CONTROL, DPP_CLOCK_ENABLE, 0);
}
+void dpp_force_disable_cursor(struct dpp *dpp_base)
+{
+ struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+
+ /* Force disable cursor */
+ REG_UPDATE(CURSOR0_CONTROL, CUR0_ENABLE, 0);
+ dpp_base->pos.cur0_ctl.bits.cur0_enable = 0;
+}
+
static const struct dpp_funcs dcn10_dpp_funcs = {
.dpp_read_state = dpp_read_state,
.dpp_reset = dpp_reset,
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.h b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.h
index c48139bed11f..f466182963f7 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.h
@@ -1525,4 +1525,6 @@ void dpp1_construct(struct dcn10_dpp *dpp1,
void dpp1_cm_get_gamut_remap(struct dpp *dpp_base,
struct dpp_grph_csc_adjustment *adjust);
+void dpp_force_disable_cursor(struct dpp *dpp_base);
+
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c
index 2d70586cef40..09be2a90cc79 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c
@@ -1494,6 +1494,7 @@ static struct dpp_funcs dcn30_dpp_funcs = {
.dpp_dppclk_control = dpp1_dppclk_control,
.dpp_set_hdr_multiplier = dpp3_set_hdr_multiplier,
.dpp_get_gamut_remap = dpp3_cm_get_gamut_remap,
+ .dpp_force_disable_cursor = dpp_force_disable_cursor,
};
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c
index e68f21fd5f0f..560984533950 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c
@@ -528,3 +528,75 @@ void dcn314_disable_link_output(struct dc_link *link,
apply_symclk_on_tx_off_wa(link);
}
+
+/**
+ * dcn314_dpp_pg_control - DPP power gate control.
+ *
+ * @hws: dce_hwseq reference.
+ * @dpp_inst: DPP instance reference.
+ * @power_on: true to power the DPP instance on (ungate it), false to power gate it.
+ *
+ * Enable or disable the power gate for the given DPP instance.
+ * If DPP power gating is disabled via the debug option, the cursor in that
+ * DPP instance is force-disabled instead when it is powered down.
+ */
+void dcn314_dpp_pg_control(
+ struct dce_hwseq *hws,
+ unsigned int dpp_inst,
+ bool power_on)
+{
+ uint32_t power_gate = power_on ? 0 : 1;
+ uint32_t pwr_status = power_on ? 0 : 2;
+
+ if (hws->ctx->dc->debug.disable_dpp_power_gate) {
+ /* Workaround for DCN314 with disabled power gating */
+ if (!power_on) {
+
+ /* Force disable cursor if power gating is disabled */
+ struct dpp *dpp = hws->ctx->dc->res_pool->dpps[dpp_inst];
+ if (dpp && dpp->funcs->dpp_force_disable_cursor)
+ dpp->funcs->dpp_force_disable_cursor(dpp);
+ }
+ return;
+ }
+ if (REG(DOMAIN1_PG_CONFIG) == 0)
+ return;
+
+ switch (dpp_inst) {
+ case 0: /* DPP0 */
+ REG_UPDATE(DOMAIN1_PG_CONFIG,
+ DOMAIN1_POWER_GATE, power_gate);
+
+ REG_WAIT(DOMAIN1_PG_STATUS,
+ DOMAIN1_PGFSM_PWR_STATUS, pwr_status,
+ 1, 1000);
+ break;
+ case 1: /* DPP1 */
+ REG_UPDATE(DOMAIN3_PG_CONFIG,
+ DOMAIN3_POWER_GATE, power_gate);
+
+ REG_WAIT(DOMAIN3_PG_STATUS,
+ DOMAIN3_PGFSM_PWR_STATUS, pwr_status,
+ 1, 1000);
+ break;
+ case 2: /* DPP2 */
+ REG_UPDATE(DOMAIN5_PG_CONFIG,
+ DOMAIN5_POWER_GATE, power_gate);
+
+ REG_WAIT(DOMAIN5_PG_STATUS,
+ DOMAIN5_PGFSM_PWR_STATUS, pwr_status,
+ 1, 1000);
+ break;
+ case 3: /* DPP3 */
+ REG_UPDATE(DOMAIN7_PG_CONFIG,
+ DOMAIN7_POWER_GATE, power_gate);
+
+ REG_WAIT(DOMAIN7_PG_STATUS,
+ DOMAIN7_PGFSM_PWR_STATUS, pwr_status,
+ 1, 1000);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ break;
+ }
+}
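As the switch statement shows, DPP instance n is gated by domain 2*n + 1 (DPP0 -> DOMAIN1, DPP1 -> DOMAIN3, DPP2 -> DOMAIN5, DPP3 -> DOMAIN7). When DPP power gating is disabled through the debug option, the function leaves the domain registers alone and only force-disables the cursor on the power-down path.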
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.h
index 2305ad282f21..6c072d0274ea 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.h
@@ -47,4 +47,6 @@ void dcn314_dpp_root_clock_control(struct dce_hwseq *hws, unsigned int dpp_inst,
void dcn314_disable_link_output(struct dc_link *link, const struct link_resource *link_res, enum signal_type signal);
+void dcn314_dpp_pg_control(struct dce_hwseq *hws, unsigned int dpp_inst, bool power_on);
+
#endif /* __DC_HWSS_DCN314_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c
index f5112742edf9..9f454fa90e65 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c
@@ -141,6 +141,7 @@ static const struct hwseq_private_funcs dcn314_private_funcs = {
.enable_power_gating_plane = dcn314_enable_power_gating_plane,
.dpp_root_clock_control = dcn314_dpp_root_clock_control,
.hubp_pg_control = dcn31_hubp_pg_control,
+ .dpp_pg_control = dcn314_dpp_pg_control,
.program_all_writeback_pipes_in_tree = dcn30_program_all_writeback_pipes_in_tree,
.update_odm = dcn314_update_odm,
.dsc_pg_control = dcn314_dsc_pg_control,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
index 0c5675d1c593..1b7c085dc2cc 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
@@ -349,6 +349,9 @@ struct dpp_funcs {
struct dpp *dpp_base,
enum dc_color_space color_space,
struct dc_csc_transform cursor_csc_color_matrix);
+
+ void (*dpp_force_disable_cursor)(struct dpp *dpp_base);
+
};
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
index 464390372b34..ae0d08e5e960 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
@@ -393,6 +393,17 @@ static int __maybe_unused ti_sn65dsi86_resume(struct device *dev)
gpiod_set_value_cansleep(pdata->enable_gpio, 1);
/*
+ * After EN is deasserted and an external clock is detected, the bridge
+ * will sample GPIO3:1 to determine its frequency. The driver will
+ * overwrite this setting in ti_sn_bridge_set_refclk_freq(). But this is
+ * racy. Thus we have to wait a few microseconds. According to the
+ * datasheet the GPIO lines have to be stable for at least 5 us (td5),
+ * but that seems not to be enough and the refclk frequency value is
+ * still lost or overwritten by the bridge itself. Waiting for 20 us
+ * seems to work.
+ */
+ usleep_range(20, 30);
+
+ /*
* If we have a reference clock we can enable communication w/ the
* panel (including the aux channel) w/out any need for an input clock
* so we can do it in resume which lets us read the EDID before
diff --git a/drivers/gpu/drm/nouveau/gv100_fence.c b/drivers/gpu/drm/nouveau/gv100_fence.c
index cccdeca72002..317e516c4ec7 100644
--- a/drivers/gpu/drm/nouveau/gv100_fence.c
+++ b/drivers/gpu/drm/nouveau/gv100_fence.c
@@ -18,7 +18,7 @@ gv100_fence_emit32(struct nouveau_channel *chan, u64 virtual, u32 sequence)
struct nvif_push *push = &chan->chan.push;
int ret;
- ret = PUSH_WAIT(push, 8);
+ ret = PUSH_WAIT(push, 13);
if (ret)
return ret;
@@ -32,6 +32,11 @@ gv100_fence_emit32(struct nouveau_channel *chan, u64 virtual, u32 sequence)
NVDEF(NVC36F, SEM_EXECUTE, PAYLOAD_SIZE, 32BIT) |
NVDEF(NVC36F, SEM_EXECUTE, RELEASE_TIMESTAMP, DIS));
+ PUSH_MTHD(push, NVC36F, MEM_OP_A, 0,
+ MEM_OP_B, 0,
+ MEM_OP_C, NVDEF(NVC36F, MEM_OP_C, MEMBAR_TYPE, SYS_MEMBAR),
+ MEM_OP_D, NVDEF(NVC36F, MEM_OP_D, OPERATION, MEMBAR));
+
PUSH_MTHD(push, NVC36F, NON_STALL_INTERRUPT, 0);
PUSH_KICK(push);
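The PUSH_WAIT bump is accounted for by the new method: a PUSH_MTHD carrying four data words (MEM_OP_A through MEM_OP_D) emits one header dword plus four payload dwords, so the fence emit now needs 8 + 5 = 13 dwords of pushbuffer space.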
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/class/clc36f.h b/drivers/gpu/drm/nouveau/include/nvhw/class/clc36f.h
index 8735dda4c8a7..338f74b9f501 100644
--- a/drivers/gpu/drm/nouveau/include/nvhw/class/clc36f.h
+++ b/drivers/gpu/drm/nouveau/include/nvhw/class/clc36f.h
@@ -7,6 +7,91 @@
#define NVC36F_NON_STALL_INTERRUPT (0x00000020)
#define NVC36F_NON_STALL_INTERRUPT_HANDLE 31:0
+// NOTE - MEM_OP_A and MEM_OP_B have been replaced in gp100 with methods for
+// specifying the page address for a targeted TLB invalidate and the uTLB for
+// a targeted REPLAY_CANCEL for UVM.
+// The previous MEM_OP_A/B functionality is in MEM_OP_C/D, with slightly
+// rearranged fields.
+#define NVC36F_MEM_OP_A (0x00000028)
+#define NVC36F_MEM_OP_A_TLB_INVALIDATE_CANCEL_TARGET_CLIENT_UNIT_ID 5:0 // only relevant for REPLAY_CANCEL_TARGETED
+#define NVC36F_MEM_OP_A_TLB_INVALIDATE_INVALIDATION_SIZE 5:0 // Used to specify size of invalidate, used for invalidates which are not of the REPLAY_CANCEL_TARGETED type
+#define NVC36F_MEM_OP_A_TLB_INVALIDATE_CANCEL_TARGET_GPC_ID 10:6 // only relevant for REPLAY_CANCEL_TARGETED
+#define NVC36F_MEM_OP_A_TLB_INVALIDATE_CANCEL_MMU_ENGINE_ID 6:0 // only relevant for REPLAY_CANCEL_VA_GLOBAL
+#define NVC36F_MEM_OP_A_TLB_INVALIDATE_SYSMEMBAR 11:11
+#define NVC36F_MEM_OP_A_TLB_INVALIDATE_SYSMEMBAR_EN 0x00000001
+#define NVC36F_MEM_OP_A_TLB_INVALIDATE_SYSMEMBAR_DIS 0x00000000
+#define NVC36F_MEM_OP_A_TLB_INVALIDATE_TARGET_ADDR_LO 31:12
+#define NVC36F_MEM_OP_B (0x0000002c)
+#define NVC36F_MEM_OP_B_TLB_INVALIDATE_TARGET_ADDR_HI 31:0
+#define NVC36F_MEM_OP_C (0x00000030)
+#define NVC36F_MEM_OP_C_MEMBAR_TYPE 2:0
+#define NVC36F_MEM_OP_C_MEMBAR_TYPE_SYS_MEMBAR 0x00000000
+#define NVC36F_MEM_OP_C_MEMBAR_TYPE_MEMBAR 0x00000001
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_PDB 0:0
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_PDB_ONE 0x00000000
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_PDB_ALL 0x00000001 // Probably nonsensical for MMU_TLB_INVALIDATE_TARGETED
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_GPC 1:1
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_GPC_ENABLE 0x00000000
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_GPC_DISABLE 0x00000001
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_REPLAY 4:2 // only relevant if GPC ENABLE
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_REPLAY_NONE 0x00000000
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_REPLAY_START 0x00000001
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_REPLAY_START_ACK_ALL 0x00000002
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_REPLAY_CANCEL_TARGETED 0x00000003
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_REPLAY_CANCEL_GLOBAL 0x00000004
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_REPLAY_CANCEL_VA_GLOBAL 0x00000005
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_ACK_TYPE 6:5 // only relevant if GPC ENABLE
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_ACK_TYPE_NONE 0x00000000
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_ACK_TYPE_GLOBALLY 0x00000001
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_ACK_TYPE_INTRANODE 0x00000002
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE 9:7 //only relevant for REPLAY_CANCEL_VA_GLOBAL
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_READ 0
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_WRITE 1
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_ATOMIC_STRONG 2
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_RSVRVD 3
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_ATOMIC_WEAK 4
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_ATOMIC_ALL 5
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_WRITE_AND_ATOMIC 6
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_ALL 7
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL 9:7 // Invalidate affects this level and all below
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_ALL 0x00000000 // Invalidate tlb caches at all levels of the page table
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_PTE_ONLY 0x00000001
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE0 0x00000002
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE1 0x00000003
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE2 0x00000004
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE3 0x00000005
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE4 0x00000006
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE5 0x00000007
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_PDB_APERTURE 11:10 // only relevant if PDB_ONE
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_PDB_APERTURE_VID_MEM 0x00000000
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_PDB_APERTURE_SYS_MEM_COHERENT 0x00000002
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_PDB_APERTURE_SYS_MEM_NONCOHERENT 0x00000003
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_PDB_ADDR_LO 31:12 // only relevant if PDB_ONE
+#define NVC36F_MEM_OP_C_ACCESS_COUNTER_CLR_TARGETED_NOTIFY_TAG 19:0
+// MEM_OP_D MUST be preceded by MEM_OPs A-C.
+#define NVC36F_MEM_OP_D (0x00000034)
+#define NVC36F_MEM_OP_D_TLB_INVALIDATE_PDB_ADDR_HI 26:0 // only relevant if PDB_ONE
+#define NVC36F_MEM_OP_D_OPERATION 31:27
+#define NVC36F_MEM_OP_D_OPERATION_MEMBAR 0x00000005
+#define NVC36F_MEM_OP_D_OPERATION_MMU_TLB_INVALIDATE 0x00000009
+#define NVC36F_MEM_OP_D_OPERATION_MMU_TLB_INVALIDATE_TARGETED 0x0000000a
+#define NVC36F_MEM_OP_D_OPERATION_L2_PEERMEM_INVALIDATE 0x0000000d
+#define NVC36F_MEM_OP_D_OPERATION_L2_SYSMEM_INVALIDATE 0x0000000e
+// CLEAN_LINES is an alias for Tegra/GPU IP usage
+#define NVC36F_MEM_OP_B_OPERATION_L2_INVALIDATE_CLEAN_LINES 0x0000000e
+#define NVC36F_MEM_OP_D_OPERATION_L2_CLEAN_COMPTAGS 0x0000000f
+#define NVC36F_MEM_OP_D_OPERATION_L2_FLUSH_DIRTY 0x00000010
+#define NVC36F_MEM_OP_D_OPERATION_L2_WAIT_FOR_SYS_PENDING_READS 0x00000015
+#define NVC36F_MEM_OP_D_OPERATION_ACCESS_COUNTER_CLR 0x00000016
+#define NVC36F_MEM_OP_D_ACCESS_COUNTER_CLR_TYPE 1:0
+#define NVC36F_MEM_OP_D_ACCESS_COUNTER_CLR_TYPE_MIMC 0x00000000
+#define NVC36F_MEM_OP_D_ACCESS_COUNTER_CLR_TYPE_MOMC 0x00000001
+#define NVC36F_MEM_OP_D_ACCESS_COUNTER_CLR_TYPE_ALL 0x00000002
+#define NVC36F_MEM_OP_D_ACCESS_COUNTER_CLR_TYPE_TARGETED 0x00000003
+#define NVC36F_MEM_OP_D_ACCESS_COUNTER_CLR_TARGETED_TYPE 2:2
+#define NVC36F_MEM_OP_D_ACCESS_COUNTER_CLR_TARGETED_TYPE_MIMC 0x00000000
+#define NVC36F_MEM_OP_D_ACCESS_COUNTER_CLR_TARGETED_TYPE_MOMC 0x00000001
+#define NVC36F_MEM_OP_D_ACCESS_COUNTER_CLR_TARGETED_BANK 6:3
#define NVC36F_SEM_ADDR_LO (0x0000005c)
#define NVC36F_SEM_ADDR_LO_OFFSET 31:2
#define NVC36F_SEM_ADDR_HI (0x00000060)
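For reference, the fence hunk above builds its membar by packing enumerants into the bit ranges this header describes in high:low form: MEMBAR_TYPE occupies bits 2:0 of MEM_OP_C (SYS_MEMBAR = 0) and OPERATION occupies bits 31:27 of MEM_OP_D (MEMBAR = 0x5). A minimal standalone sketch of that packing, using an illustrative helper rather than nouveau's NVDEF macro:

#include <stdint.h>
#include <stdio.h>

/* Pack "val" into the inclusive bit range hi:lo, mirroring the high:low
 * notation of the defines above. Illustrative only; not the NVDEF macro. */
static uint32_t pack_field(unsigned int hi, unsigned int lo, uint32_t val)
{
	uint32_t width = hi - lo + 1;
	uint32_t mask = width >= 32 ? 0xffffffffu : ((1u << width) - 1u);

	return (val & mask) << lo;
}

int main(void)
{
	uint32_t mem_op_a = 0;                       /* no payload for a plain membar */
	uint32_t mem_op_b = 0;
	uint32_t mem_op_c = pack_field(2, 0, 0x0);   /* MEMBAR_TYPE = SYS_MEMBAR */
	uint32_t mem_op_d = pack_field(31, 27, 0x5); /* OPERATION = MEMBAR */

	printf("MEM_OP A=0x%08x B=0x%08x C=0x%08x D=0x%08x\n",
	       mem_op_a, mem_op_b, mem_op_c, mem_op_d);
	return 0;
}

The extra method header plus the four MEM_OP payloads come to five dwords, which is presumably why PUSH_WAIT grows from 8 to 13 in the fence hunk.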
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
index fdffa0391b31..6fd4e60634fb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
@@ -350,6 +350,8 @@ nvkm_fifo_dtor(struct nvkm_engine *engine)
nvkm_chid_unref(&fifo->chid);
nvkm_event_fini(&fifo->nonstall.event);
+ if (fifo->func->nonstall_dtor)
+ fifo->func->nonstall_dtor(fifo);
mutex_destroy(&fifo->mutex);
if (fifo->func->dtor)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga100.c
index e74493a4569e..6848a56f20c0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga100.c
@@ -517,19 +517,11 @@ ga100_fifo_nonstall_intr(struct nvkm_inth *inth)
static void
ga100_fifo_nonstall_block(struct nvkm_event *event, int type, int index)
{
- struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), nonstall.event);
- struct nvkm_runl *runl = nvkm_runl_get(fifo, index, 0);
-
- nvkm_inth_block(&runl->nonstall.inth);
}
static void
ga100_fifo_nonstall_allow(struct nvkm_event *event, int type, int index)
{
- struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), nonstall.event);
- struct nvkm_runl *runl = nvkm_runl_get(fifo, index, 0);
-
- nvkm_inth_allow(&runl->nonstall.inth);
}
const struct nvkm_event_func
@@ -564,12 +556,26 @@ ga100_fifo_nonstall_ctor(struct nvkm_fifo *fifo)
if (ret)
return ret;
+ nvkm_inth_allow(&runl->nonstall.inth);
+
nr = max(nr, runl->id + 1);
}
return nr;
}
+void
+ga100_fifo_nonstall_dtor(struct nvkm_fifo *fifo)
+{
+ struct nvkm_runl *runl;
+
+ nvkm_runl_foreach(runl, fifo) {
+ if (runl->nonstall.vector < 0)
+ continue;
+ nvkm_inth_block(&runl->nonstall.inth);
+ }
+}
+
int
ga100_fifo_runl_ctor(struct nvkm_fifo *fifo)
{
@@ -599,6 +605,7 @@ ga100_fifo = {
.runl_ctor = ga100_fifo_runl_ctor,
.mmu_fault = &tu102_fifo_mmu_fault,
.nonstall_ctor = ga100_fifo_nonstall_ctor,
+ .nonstall_dtor = ga100_fifo_nonstall_dtor,
.nonstall = &ga100_fifo_nonstall,
.runl = &ga100_runl,
.runq = &ga100_runq,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga102.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga102.c
index 755235f55b3a..18a0b1f4eab7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga102.c
@@ -30,6 +30,7 @@ ga102_fifo = {
.runl_ctor = ga100_fifo_runl_ctor,
.mmu_fault = &tu102_fifo_mmu_fault,
.nonstall_ctor = ga100_fifo_nonstall_ctor,
+ .nonstall_dtor = ga100_fifo_nonstall_dtor,
.nonstall = &ga100_fifo_nonstall,
.runl = &ga100_runl,
.runq = &ga100_runq,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h
index 5e81ae195329..fff1428ef267 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h
@@ -41,6 +41,7 @@ struct nvkm_fifo_func {
void (*start)(struct nvkm_fifo *, unsigned long *);
int (*nonstall_ctor)(struct nvkm_fifo *);
+ void (*nonstall_dtor)(struct nvkm_fifo *);
const struct nvkm_event_func *nonstall;
const struct nvkm_runl_func *runl;
@@ -200,6 +201,7 @@ u32 tu102_chan_doorbell_handle(struct nvkm_chan *);
int ga100_fifo_runl_ctor(struct nvkm_fifo *);
int ga100_fifo_nonstall_ctor(struct nvkm_fifo *);
+void ga100_fifo_nonstall_dtor(struct nvkm_fifo *);
extern const struct nvkm_event_func ga100_fifo_nonstall;
extern const struct nvkm_runl_func ga100_runl;
extern const struct nvkm_runq_func ga100_runq;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fifo.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fifo.c
index 1ac5628c5140..4ed54b386a60 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fifo.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fifo.c
@@ -601,6 +601,7 @@ r535_fifo_new(const struct nvkm_fifo_func *hw, struct nvkm_device *device,
rm->chan.func = &r535_chan;
rm->nonstall = &ga100_fifo_nonstall;
rm->nonstall_ctor = ga100_fifo_nonstall_ctor;
+ rm->nonstall_dtor = ga100_fifo_nonstall_dtor;
return nvkm_fifo_new_(rm, device, type, inst, pfifo);
}
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index 8867b95ab089..3d06f72531ba 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -391,7 +391,8 @@ EXPORT_SYMBOL(drm_sched_entity_set_priority);
* Add a callback to the current dependency of the entity to wake up the
* scheduler when the entity becomes available.
*/
-static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
+static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity,
+ struct drm_sched_job *sched_job)
{
struct drm_gpu_scheduler *sched = entity->rq->sched;
struct dma_fence *fence = entity->dependency;
@@ -421,6 +422,10 @@ static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
entity->dependency = fence;
}
+ if (trace_drm_sched_job_unschedulable_enabled() &&
+ !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &entity->dependency->flags))
+ trace_drm_sched_job_unschedulable(sched_job, entity->dependency);
+
if (!dma_fence_add_callback(entity->dependency, &entity->cb,
drm_sched_entity_wakeup))
return true;
@@ -461,10 +466,8 @@ struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
while ((entity->dependency =
drm_sched_job_dependency(sched_job, entity))) {
- if (drm_sched_entity_add_dependency_cb(entity)) {
- trace_drm_sched_job_unschedulable(sched_job, entity->dependency);
+ if (drm_sched_entity_add_dependency_cb(entity, sched_job))
return NULL;
- }
}
/* skip jobs from entity that marked guilty */
diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c
index 1be2415966df..9954bb458ce1 100644
--- a/drivers/gpu/drm/xe/xe_bo.c
+++ b/drivers/gpu/drm/xe/xe_bo.c
@@ -819,8 +819,7 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
return ret;
}
- tt_has_data = ttm && (ttm_tt_is_populated(ttm) ||
- (ttm->page_flags & TTM_TT_FLAG_SWAPPED));
+ tt_has_data = ttm && (ttm_tt_is_populated(ttm) || ttm_tt_is_swapped(ttm));
move_lacks_source = !old_mem || (handle_system_ccs ? (!bo->ccs_cleared) :
(!mem_type_is_vram(old_mem_type) && !tt_has_data));
diff --git a/drivers/hwmon/ina238.c b/drivers/hwmon/ina238.c
index 5a394eeff676..59a2c8889fa2 100644
--- a/drivers/hwmon/ina238.c
+++ b/drivers/hwmon/ina238.c
@@ -379,7 +379,7 @@ static int ina238_write_in(struct device *dev, u32 attr, int channel,
regval = clamp_val(val, -163, 163);
regval = (regval * 1000 * 4) /
(INA238_SHUNT_VOLTAGE_LSB * data->gain);
- regval = clamp_val(regval, S16_MIN, S16_MAX);
+ regval = clamp_val(regval, S16_MIN, S16_MAX) & 0xffff;
switch (attr) {
case hwmon_in_max:
@@ -517,9 +517,10 @@ static int ina238_write_power(struct device *dev, u32 attr, long val)
* Unsigned positive values. Compared against the 24-bit power register,
* lower 8-bits are truncated. Same conversion to/from uW as POWER
* register.
+ * The first clamp_val() is to establish a baseline to avoid overflows.
*/
- regval = clamp_val(val, 0, LONG_MAX);
- regval = div_u64(val * 4 * 100 * data->rshunt, data->config->power_calculate_factor *
+ regval = clamp_val(val, 0, LONG_MAX / 2);
+ regval = div_u64(regval * 4 * 100 * data->rshunt, data->config->power_calculate_factor *
1000ULL * INA238_FIXED_SHUNT * data->gain);
regval = clamp_val(regval >> 8, 0, U16_MAX);
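The write_power fix above follows a clamp -> scale -> clamp pattern: bound the requested value before the wide multiplication (the new clamp_val(val, 0, LONG_MAX / 2)), convert microwatts to register units, then drop the low 8 bits and clamp to the 16-bit register. A standalone sketch of that shape, with placeholder scale constants rather than the driver's real INA238 values:

#include <stdint.h>
#include <stdio.h>

/* Placeholder constants for the sketch only; not the INA238 datasheet values. */
#define FIXED_SHUNT         20000ULL
#define POWER_CALC_FACTOR   200ULL

static uint64_t clamp_u64(uint64_t v, uint64_t lo, uint64_t hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

/* Requested power limit in microwatts -> 16-bit limit register. */
static uint16_t power_uw_to_reg(long val_uw, uint64_t rshunt_uohm, uint64_t gain)
{
	/* Bound the input first, as the new clamp_val() above does, before
	 * the wide multiplication. */
	uint64_t v = clamp_u64(val_uw < 0 ? 0 : (uint64_t)val_uw, 0, INT64_MAX / 2);
	uint64_t reg = v * 4 * 100 * rshunt_uohm /
		       (POWER_CALC_FACTOR * 1000ULL * FIXED_SHUNT * gain);

	/* The limit register drops the low 8 bits of the 24-bit power value. */
	return (uint16_t)clamp_u64(reg >> 8, 0, 0xffff);
}

int main(void)
{
	printf("limit register for 5 W: 0x%04x\n", power_uw_to_reg(5000000, 20000, 1));
	return 0;
}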
@@ -572,7 +573,7 @@ static int ina238_write_temp(struct device *dev, u32 attr, long val)
return -EOPNOTSUPP;
/* Signed */
- regval = clamp_val(val, -40000, 125000);
+ val = clamp_val(val, -40000, 125000);
regval = div_s64(val * 10000, data->config->temp_lsb) << data->config->temp_shift;
regval = clamp_val(regval, S16_MIN, S16_MAX) & (0xffff << data->config->temp_shift);
diff --git a/drivers/hwmon/mlxreg-fan.c b/drivers/hwmon/mlxreg-fan.c
index a5f89aab3fb4..c25a54d5b39a 100644
--- a/drivers/hwmon/mlxreg-fan.c
+++ b/drivers/hwmon/mlxreg-fan.c
@@ -561,15 +561,14 @@ static int mlxreg_fan_cooling_config(struct device *dev, struct mlxreg_fan *fan)
if (!pwm->connected)
continue;
pwm->fan = fan;
+ /* Set minimal PWM speed. */
+ pwm->last_hwmon_state = MLXREG_FAN_PWM_DUTY2STATE(MLXREG_FAN_MIN_DUTY);
pwm->cdev = devm_thermal_of_cooling_device_register(dev, NULL, mlxreg_fan_name[i],
pwm, &mlxreg_fan_cooling_ops);
if (IS_ERR(pwm->cdev)) {
dev_err(dev, "Failed to register cooling device\n");
return PTR_ERR(pwm->cdev);
}
-
- /* Set minimal PWM speed. */
- pwm->last_hwmon_state = MLXREG_FAN_PWM_DUTY2STATE(MLXREG_FAN_MIN_DUTY);
}
return 0;
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index a7f89946dad4..e94ac746a741 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -1052,7 +1052,7 @@ static const struct pci_device_id i801_ids[] = {
{ PCI_DEVICE_DATA(INTEL, METEOR_LAKE_P_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
{ PCI_DEVICE_DATA(INTEL, METEOR_LAKE_SOC_S_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
{ PCI_DEVICE_DATA(INTEL, METEOR_LAKE_PCH_S_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
- { PCI_DEVICE_DATA(INTEL, BIRCH_STREAM_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
+ { PCI_DEVICE_DATA(INTEL, BIRCH_STREAM_SMBUS, FEATURES_ICH5) },
{ PCI_DEVICE_DATA(INTEL, ARROW_LAKE_H_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
{ PCI_DEVICE_DATA(INTEL, PANTHER_LAKE_H_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
{ PCI_DEVICE_DATA(INTEL, PANTHER_LAKE_P_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
diff --git a/drivers/i2c/busses/i2c-rtl9300.c b/drivers/i2c/busses/i2c-rtl9300.c
index cfafe089102a..9e1f71fed0fe 100644
--- a/drivers/i2c/busses/i2c-rtl9300.c
+++ b/drivers/i2c/busses/i2c-rtl9300.c
@@ -99,6 +99,9 @@ static int rtl9300_i2c_config_xfer(struct rtl9300_i2c *i2c, struct rtl9300_i2c_c
{
u32 val, mask;
+ if (len < 1 || len > 16)
+ return -EINVAL;
+
val = chan->bus_freq << RTL9300_I2C_MST_CTRL2_SCL_FREQ_OFS;
mask = RTL9300_I2C_MST_CTRL2_SCL_FREQ_MASK;
@@ -222,15 +225,6 @@ static int rtl9300_i2c_smbus_xfer(struct i2c_adapter *adap, u16 addr, unsigned s
}
switch (size) {
- case I2C_SMBUS_QUICK:
- ret = rtl9300_i2c_config_xfer(i2c, chan, addr, 0);
- if (ret)
- goto out_unlock;
- ret = rtl9300_i2c_reg_addr_set(i2c, 0, 0);
- if (ret)
- goto out_unlock;
- break;
-
case I2C_SMBUS_BYTE:
if (read_write == I2C_SMBUS_WRITE) {
ret = rtl9300_i2c_config_xfer(i2c, chan, addr, 0);
@@ -312,9 +306,9 @@ out_unlock:
static u32 rtl9300_i2c_func(struct i2c_adapter *a)
{
- return I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE |
- I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA |
- I2C_FUNC_SMBUS_BLOCK_DATA;
+ return I2C_FUNC_SMBUS_BYTE | I2C_FUNC_SMBUS_BYTE_DATA |
+ I2C_FUNC_SMBUS_WORD_DATA | I2C_FUNC_SMBUS_BLOCK_DATA |
+ I2C_FUNC_SMBUS_I2C_BLOCK;
}
static const struct i2c_algorithm rtl9300_i2c_algo = {
@@ -323,7 +317,7 @@ static const struct i2c_algorithm rtl9300_i2c_algo = {
};
static struct i2c_adapter_quirks rtl9300_i2c_quirks = {
- .flags = I2C_AQ_NO_CLK_STRETCH,
+ .flags = I2C_AQ_NO_CLK_STRETCH | I2C_AQ_NO_ZERO_LEN,
.max_read_len = 16,
.max_write_len = 16,
};
@@ -353,7 +347,7 @@ static int rtl9300_i2c_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, i2c);
- if (device_get_child_node_count(dev) >= RTL9300_I2C_MUX_NCHAN)
+ if (device_get_child_node_count(dev) > RTL9300_I2C_MUX_NCHAN)
return dev_err_probe(dev, -EINVAL, "Too many channels\n");
device_for_each_child_node(dev, child) {
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 1baaf52c603c..4e033c26fdd4 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -9125,6 +9125,11 @@ void md_do_sync(struct md_thread *thread)
}
action = md_sync_action(mddev);
+ if (action == ACTION_FROZEN || action == ACTION_IDLE) {
+ set_bit(MD_RECOVERY_INTR, &mddev->recovery);
+ goto skip;
+ }
+
desc = md_sync_action_name(action);
mddev->last_sync_action = action;
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 408c26398321..bf44878ec640 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1225,7 +1225,7 @@ static void alloc_behind_master_bio(struct r1bio *r1_bio,
int i = 0;
struct bio *behind_bio = NULL;
- behind_bio = bio_alloc_bioset(NULL, vcnt, 0, GFP_NOIO,
+ behind_bio = bio_alloc_bioset(NULL, vcnt, bio->bi_opf, GFP_NOIO,
&r1_bio->mddev->bio_set);
/* discard op, we don't support writezero/writesame yet */
diff --git a/drivers/net/can/rcar/rcar_can.c b/drivers/net/can/rcar/rcar_can.c
index 64e664f5adcc..87c134bcd48d 100644
--- a/drivers/net/can/rcar/rcar_can.c
+++ b/drivers/net/can/rcar/rcar_can.c
@@ -861,7 +861,6 @@ static int rcar_can_resume(struct device *dev)
{
struct net_device *ndev = dev_get_drvdata(dev);
struct rcar_can_priv *priv = netdev_priv(ndev);
- u16 ctlr;
int err;
if (!netif_running(ndev))
@@ -873,12 +872,7 @@ static int rcar_can_resume(struct device *dev)
return err;
}
- ctlr = readw(&priv->regs->ctlr);
- ctlr &= ~RCAR_CAN_CTLR_SLPM;
- writew(ctlr, &priv->regs->ctlr);
- ctlr &= ~RCAR_CAN_CTLR_CANM;
- writew(ctlr, &priv->regs->ctlr);
- priv->can.state = CAN_STATE_ERROR_ACTIVE;
+ rcar_can_start(ndev);
netif_device_attach(ndev);
netif_start_queue(ndev);
diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c
index 81baec8eb1e5..a25a3ca62c12 100644
--- a/drivers/net/can/xilinx_can.c
+++ b/drivers/net/can/xilinx_can.c
@@ -690,14 +690,6 @@ static void xcan_write_frame(struct net_device *ndev, struct sk_buff *skb,
dlc |= XCAN_DLCR_EDL_MASK;
}
- if (!(priv->devtype.flags & XCAN_FLAG_TX_MAILBOXES) &&
- (priv->devtype.flags & XCAN_FLAG_TXFEMP))
- can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max, 0);
- else
- can_put_echo_skb(skb, ndev, 0, 0);
-
- priv->tx_head++;
-
priv->write_reg(priv, XCAN_FRAME_ID_OFFSET(frame_offset), id);
/* If the CAN frame is RTR frame this write triggers transmission
* (not on CAN FD)
@@ -730,6 +722,14 @@ static void xcan_write_frame(struct net_device *ndev, struct sk_buff *skb,
data[1]);
}
}
+
+ if (!(priv->devtype.flags & XCAN_FLAG_TX_MAILBOXES) &&
+ (priv->devtype.flags & XCAN_FLAG_TXFEMP))
+ can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max, 0);
+ else
+ can_put_echo_skb(skb, ndev, 0, 0);
+
+ priv->tx_head++;
}
/**
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index 829b1f087e9e..2f846381d5a7 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -1273,9 +1273,15 @@ static int b53_setup(struct dsa_switch *ds)
*/
ds->untag_vlan_aware_bridge_pvid = true;
- /* Ageing time is set in seconds */
- ds->ageing_time_min = 1 * 1000;
- ds->ageing_time_max = AGE_TIME_MAX * 1000;
+ if (dev->chip_id == BCM53101_DEVICE_ID) {
+ /* BCM53101 uses 0.5 second increments */
+ ds->ageing_time_min = 1 * 500;
+ ds->ageing_time_max = AGE_TIME_MAX * 500;
+ } else {
+ /* Everything else uses 1 second increments */
+ ds->ageing_time_min = 1 * 1000;
+ ds->ageing_time_max = AGE_TIME_MAX * 1000;
+ }
ret = b53_reset_switch(dev);
if (ret) {
@@ -2559,7 +2565,10 @@ int b53_set_ageing_time(struct dsa_switch *ds, unsigned int msecs)
else
reg = B53_AGING_TIME_CONTROL;
- atc = DIV_ROUND_CLOSEST(msecs, 1000);
+ if (dev->chip_id == BCM53101_DEVICE_ID)
+ atc = DIV_ROUND_CLOSEST(msecs, 500);
+ else
+ atc = DIV_ROUND_CLOSEST(msecs, 1000);
if (!is5325(dev) && !is5365(dev))
atc |= AGE_CHANGE;
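The two hunks above change the same conversion in both directions: the advertised min/max ageing range and the register value are now derived from a chip-dependent tick, 500 ms on BCM53101 and 1000 ms elsewhere. A small standalone sketch of the msecs-to-register conversion (the AGE_CHANGE latch bit handled above is left out):

#include <stdio.h>

/* Round-to-nearest integer division, matching the kernel macro's behaviour
 * for non-negative operands. */
#define DIV_ROUND_CLOSEST(x, d)	(((x) + (d) / 2) / (d))

/* BCM53101 counts age time in 0.5 s ticks; the other switches use 1 s ticks. */
static unsigned int ageing_msecs_to_atc(unsigned int msecs, int is_bcm53101)
{
	return DIV_ROUND_CLOSEST(msecs, is_bcm53101 ? 500u : 1000u);
}

int main(void)
{
	/* 300 s bridge default: 300 ticks on most chips, 600 on BCM53101. */
	printf("%u %u\n", ageing_msecs_to_atc(300000, 0), ageing_msecs_to_atc(300000, 1));
	return 0;
}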
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 1383918f8a3f..adf1f2bbcbb1 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -2363,7 +2363,8 @@ static void fec_enet_phy_reset_after_clk_enable(struct net_device *ndev)
*/
phy_dev = of_phy_find_device(fep->phy_node);
phy_reset_after_clk_enable(phy_dev);
- put_device(&phy_dev->mdio.dev);
+ if (phy_dev)
+ put_device(&phy_dev->mdio.dev);
}
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
index 76d872b91a38..cc02a85ad42b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -1561,6 +1561,7 @@ I40E_CHECK_CMD_LENGTH(i40e_aq_set_phy_config);
struct i40e_aq_set_mac_config {
__le16 max_frame_size;
u8 params;
+#define I40E_AQ_SET_MAC_CONFIG_CRC_EN BIT(2)
u8 tx_timer_priority; /* bitmap */
__le16 tx_timer_value;
__le16 fc_refresh_threshold;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 270e7e8cf9cf..59f5c1e810eb 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -1190,6 +1190,40 @@ int i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
}
/**
+ * i40e_aq_set_mac_config - Configure MAC settings
+ * @hw: pointer to the hw struct
+ * @max_frame_size: Maximum Frame Size to be supported by the port
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Set MAC configuration (0x0603). Note that max_frame_size must be greater
+ * than zero.
+ *
+ * Return: 0 on success, or a negative error code on failure.
+ */
+int i40e_aq_set_mac_config(struct i40e_hw *hw, u16 max_frame_size,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_set_mac_config *cmd;
+ struct libie_aq_desc desc;
+
+ cmd = libie_aq_raw(&desc);
+
+ if (max_frame_size == 0)
+ return -EINVAL;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_mac_config);
+
+ cmd->max_frame_size = cpu_to_le16(max_frame_size);
+ cmd->params = I40E_AQ_SET_MAC_CONFIG_CRC_EN;
+
+#define I40E_AQ_SET_MAC_CONFIG_FC_DEFAULT_THRESHOLD 0x7FFF
+ cmd->fc_refresh_threshold =
+ cpu_to_le16(I40E_AQ_SET_MAC_CONFIG_FC_DEFAULT_THRESHOLD);
+
+ return i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+}
+
+/**
* i40e_aq_clear_pxe_mode
* @hw: pointer to the hw struct
* @cmd_details: pointer to command details structure or NULL
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index b83f823e4917..b14019d44b58 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -4156,7 +4156,7 @@ free_queue_irqs:
irq_num = pf->msix_entries[base + vector].vector;
irq_set_affinity_notifier(irq_num, NULL);
irq_update_affinity_hint(irq_num, NULL);
- free_irq(irq_num, &vsi->q_vectors[vector]);
+ free_irq(irq_num, vsi->q_vectors[vector]);
}
return err;
}
@@ -16045,13 +16045,17 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_dbg(&pf->pdev->dev, "get supported phy types ret = %pe last_status = %s\n",
ERR_PTR(err), libie_aq_str(pf->hw.aq.asq_last_status));
- /* make sure the MFS hasn't been set lower than the default */
#define MAX_FRAME_SIZE_DEFAULT 0x2600
- val = FIELD_GET(I40E_PRTGL_SAH_MFS_MASK,
- rd32(&pf->hw, I40E_PRTGL_SAH));
- if (val < MAX_FRAME_SIZE_DEFAULT)
- dev_warn(&pdev->dev, "MFS for port %x (%d) has been set below the default (%d)\n",
- pf->hw.port, val, MAX_FRAME_SIZE_DEFAULT);
+
+ err = i40e_aq_set_mac_config(hw, MAX_FRAME_SIZE_DEFAULT, NULL);
+ if (err)
+ dev_warn(&pdev->dev, "set mac config ret = %pe last_status = %s\n",
+ ERR_PTR(err), libie_aq_str(pf->hw.aq.asq_last_status));
+
+ /* Make sure the MFS is set to the expected value */
+ val = rd32(hw, I40E_PRTGL_SAH);
+ FIELD_MODIFY(I40E_PRTGL_SAH_MFS_MASK, &val, MAX_FRAME_SIZE_DEFAULT);
+ wr32(hw, I40E_PRTGL_SAH, val);
/* Add a filter to drop all Flow control frames from any VSI from being
* transmitted. By doing so we stop a malicious VF from sending out
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index aef5de53ce3b..26bb7bffe361 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -98,6 +98,8 @@ int i40e_aq_set_mac_loopback(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
int i40e_aq_set_phy_int_mask(struct i40e_hw *hw, u16 mask,
struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_set_mac_config(struct i40e_hw *hw, u16 max_frame_size,
+ struct i40e_asq_cmd_details *cmd_details);
int i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
int i40e_aq_set_link_restart_an(struct i40e_hw *hw,
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 51d5cb6599ed..f8a208c84f15 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -2081,11 +2081,8 @@ static void igb_diag_test(struct net_device *netdev,
} else {
dev_info(&adapter->pdev->dev, "online testing starting\n");
- /* PHY is powered down when interface is down */
- if (if_running && igb_link_test(adapter, &data[TEST_LINK]))
+ if (igb_link_test(adapter, &data[TEST_LINK]))
eth_test->flags |= ETH_TEST_FL_FAILED;
- else
- data[TEST_LINK] = 0;
/* Online tests aren't run; pass by default */
data[TEST_REG] = 0;
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 5e63d7f6a568..85f9589cc568 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -4452,8 +4452,7 @@ int igb_setup_rx_resources(struct igb_ring *rx_ring)
if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
res = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
- rx_ring->queue_index,
- rx_ring->q_vector->napi.napi_id);
+ rx_ring->queue_index, 0);
if (res < 0) {
dev_err(dev, "Failed to register xdp_rxq index %u\n",
rx_ring->queue_index);
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 7351e98d73f4..89ccb8eb82c7 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -2984,6 +2984,13 @@ out:
if (txq->count >= txq->tx_stop_threshold)
netif_tx_stop_queue(nq);
+ /* This is not really the true transmit point, since we batch
+ * up several before hitting the hardware, but is the best we
+ * can do without more complexity to walk the packets in the
+ * pending section of the transmit queue.
+ */
+ skb_tx_timestamp(skb);
+
if (!netdev_xmit_more() || netif_xmit_stopped(nq) ||
txq->pending + frags > MVNETA_TXQ_DEC_SENT_MASK)
mvneta_txq_pend_desc_add(pp, txq, frags);
@@ -5356,6 +5363,7 @@ static const struct ethtool_ops mvneta_eth_tool_ops = {
.set_link_ksettings = mvneta_ethtool_set_link_ksettings,
.get_wol = mvneta_ethtool_get_wol,
.set_wol = mvneta_ethtool_set_wol,
+ .get_ts_info = ethtool_op_get_ts_info,
.get_eee = mvneta_ethtool_get_eee,
.set_eee = mvneta_ethtool_set_eee,
};
diff --git a/drivers/net/ethernet/realtek/Kconfig b/drivers/net/ethernet/realtek/Kconfig
index fe136f61586f..272c83bfdc6c 100644
--- a/drivers/net/ethernet/realtek/Kconfig
+++ b/drivers/net/ethernet/realtek/Kconfig
@@ -58,7 +58,7 @@ config 8139TOO
config 8139TOO_PIO
bool "Use PIO instead of MMIO"
default y
- depends on 8139TOO
+ depends on 8139TOO && !NO_IOPORT_MAP
help
This instructs the driver to use programmed I/O ports (PIO) instead
of PCI shared memory (MMIO). This can possibly solve some problems
diff --git a/drivers/net/ethernet/renesas/rcar_gen4_ptp.c b/drivers/net/ethernet/renesas/rcar_gen4_ptp.c
index 4c3e8cc5046f..d0979abd36de 100644
--- a/drivers/net/ethernet/renesas/rcar_gen4_ptp.c
+++ b/drivers/net/ethernet/renesas/rcar_gen4_ptp.c
@@ -12,19 +12,18 @@
#include <linux/slab.h>
#include "rcar_gen4_ptp.h"
-#define ptp_to_priv(ptp) container_of(ptp, struct rcar_gen4_ptp_private, info)
-static const struct rcar_gen4_ptp_reg_offset gen4_offs = {
- .enable = PTPTMEC,
- .disable = PTPTMDC,
- .increment = PTPTIVC0,
- .config_t0 = PTPTOVC00,
- .config_t1 = PTPTOVC10,
- .config_t2 = PTPTOVC20,
- .monitor_t0 = PTPGPTPTM00,
- .monitor_t1 = PTPGPTPTM10,
- .monitor_t2 = PTPGPTPTM20,
-};
+#define PTPTMEC_REG 0x0010
+#define PTPTMDC_REG 0x0014
+#define PTPTIVC0_REG 0x0020
+#define PTPTOVC00_REG 0x0030
+#define PTPTOVC10_REG 0x0034
+#define PTPTOVC20_REG 0x0038
+#define PTPGPTPTM00_REG 0x0050
+#define PTPGPTPTM10_REG 0x0054
+#define PTPGPTPTM20_REG 0x0058
+
+#define ptp_to_priv(ptp) container_of(ptp, struct rcar_gen4_ptp_private, info)
static int rcar_gen4_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
@@ -38,20 +37,21 @@ static int rcar_gen4_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
diff = div_s64(addend * scaled_ppm_to_ppb(scaled_ppm), NSEC_PER_SEC);
addend = neg_adj ? addend - diff : addend + diff;
- iowrite32(addend, ptp_priv->addr + ptp_priv->offs->increment);
+ iowrite32(addend, ptp_priv->addr + PTPTIVC0_REG);
return 0;
}
-/* Caller must hold the lock */
static void _rcar_gen4_ptp_gettime(struct ptp_clock_info *ptp,
struct timespec64 *ts)
{
struct rcar_gen4_ptp_private *ptp_priv = ptp_to_priv(ptp);
- ts->tv_nsec = ioread32(ptp_priv->addr + ptp_priv->offs->monitor_t0);
- ts->tv_sec = ioread32(ptp_priv->addr + ptp_priv->offs->monitor_t1) |
- ((s64)ioread32(ptp_priv->addr + ptp_priv->offs->monitor_t2) << 32);
+ lockdep_assert_held(&ptp_priv->lock);
+
+ ts->tv_nsec = ioread32(ptp_priv->addr + PTPGPTPTM00_REG);
+ ts->tv_sec = ioread32(ptp_priv->addr + PTPGPTPTM10_REG) |
+ ((s64)ioread32(ptp_priv->addr + PTPGPTPTM20_REG) << 32);
}
static int rcar_gen4_ptp_gettime(struct ptp_clock_info *ptp,
@@ -73,14 +73,14 @@ static void _rcar_gen4_ptp_settime(struct ptp_clock_info *ptp,
{
struct rcar_gen4_ptp_private *ptp_priv = ptp_to_priv(ptp);
- iowrite32(1, ptp_priv->addr + ptp_priv->offs->disable);
- iowrite32(0, ptp_priv->addr + ptp_priv->offs->config_t2);
- iowrite32(0, ptp_priv->addr + ptp_priv->offs->config_t1);
- iowrite32(0, ptp_priv->addr + ptp_priv->offs->config_t0);
- iowrite32(1, ptp_priv->addr + ptp_priv->offs->enable);
- iowrite32(ts->tv_sec >> 32, ptp_priv->addr + ptp_priv->offs->config_t2);
- iowrite32(ts->tv_sec, ptp_priv->addr + ptp_priv->offs->config_t1);
- iowrite32(ts->tv_nsec, ptp_priv->addr + ptp_priv->offs->config_t0);
+ iowrite32(1, ptp_priv->addr + PTPTMDC_REG);
+ iowrite32(0, ptp_priv->addr + PTPTOVC20_REG);
+ iowrite32(0, ptp_priv->addr + PTPTOVC10_REG);
+ iowrite32(0, ptp_priv->addr + PTPTOVC00_REG);
+ iowrite32(1, ptp_priv->addr + PTPTMEC_REG);
+ iowrite32(ts->tv_sec >> 32, ptp_priv->addr + PTPTOVC20_REG);
+ iowrite32(ts->tv_sec, ptp_priv->addr + PTPTOVC10_REG);
+ iowrite32(ts->tv_nsec, ptp_priv->addr + PTPTOVC00_REG);
}
static int rcar_gen4_ptp_settime(struct ptp_clock_info *ptp,
@@ -130,17 +130,6 @@ static struct ptp_clock_info rcar_gen4_ptp_info = {
.enable = rcar_gen4_ptp_enable,
};
-static int rcar_gen4_ptp_set_offs(struct rcar_gen4_ptp_private *ptp_priv,
- enum rcar_gen4_ptp_reg_layout layout)
-{
- if (layout != RCAR_GEN4_PTP_REG_LAYOUT)
- return -EINVAL;
-
- ptp_priv->offs = &gen4_offs;
-
- return 0;
-}
-
static s64 rcar_gen4_ptp_rate_to_increment(u32 rate)
{
/* Timer increment in ns.
@@ -151,27 +140,20 @@ static s64 rcar_gen4_ptp_rate_to_increment(u32 rate)
return div_s64(1000000000LL << 27, rate);
}
-int rcar_gen4_ptp_register(struct rcar_gen4_ptp_private *ptp_priv,
- enum rcar_gen4_ptp_reg_layout layout, u32 rate)
+int rcar_gen4_ptp_register(struct rcar_gen4_ptp_private *ptp_priv, u32 rate)
{
- int ret;
-
if (ptp_priv->initialized)
return 0;
spin_lock_init(&ptp_priv->lock);
- ret = rcar_gen4_ptp_set_offs(ptp_priv, layout);
- if (ret)
- return ret;
-
ptp_priv->default_addend = rcar_gen4_ptp_rate_to_increment(rate);
- iowrite32(ptp_priv->default_addend, ptp_priv->addr + ptp_priv->offs->increment);
+ iowrite32(ptp_priv->default_addend, ptp_priv->addr + PTPTIVC0_REG);
ptp_priv->clock = ptp_clock_register(&ptp_priv->info, NULL);
if (IS_ERR(ptp_priv->clock))
return PTR_ERR(ptp_priv->clock);
- iowrite32(0x01, ptp_priv->addr + ptp_priv->offs->enable);
+ iowrite32(0x01, ptp_priv->addr + PTPTMEC_REG);
ptp_priv->initialized = true;
return 0;
@@ -180,7 +162,7 @@ EXPORT_SYMBOL_GPL(rcar_gen4_ptp_register);
int rcar_gen4_ptp_unregister(struct rcar_gen4_ptp_private *ptp_priv)
{
- iowrite32(1, ptp_priv->addr + ptp_priv->offs->disable);
+ iowrite32(1, ptp_priv->addr + PTPTMDC_REG);
return ptp_clock_unregister(ptp_priv->clock);
}
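The register-offset table removed above does not change the timer arithmetic this driver relies on: the increment written to PTPTIVC0 is the clock period in nanoseconds with 27 fractional bits, and adjfine scales it by a parts-per-billion correction. A standalone sketch of both calculations, using a 200 MHz gPTP clock purely as an example value:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000LL

/* Timer increment per clock tick: nanoseconds in fixed point with 27
 * fractional bits, as in rcar_gen4_ptp_rate_to_increment() above. */
static int64_t rate_to_increment(uint32_t rate_hz)
{
	return (NSEC_PER_SEC << 27) / rate_hz;
}

/* Apply a frequency correction given in parts per billion, mirroring the
 * addend +/- diff computation in rcar_gen4_ptp_adjfine(). */
static int64_t adjust_addend(int64_t addend, long ppb)
{
	int64_t diff = addend * (ppb < 0 ? -ppb : ppb) / NSEC_PER_SEC;

	return ppb < 0 ? addend - diff : addend + diff;
}

int main(void)
{
	int64_t addend = rate_to_increment(200000000);	/* example 200 MHz clock */

	printf("default addend: %lld\n", (long long)addend);
	printf("+100 ppb:       %lld\n", (long long)adjust_addend(addend, 100));
	return 0;
}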
diff --git a/drivers/net/ethernet/renesas/rcar_gen4_ptp.h b/drivers/net/ethernet/renesas/rcar_gen4_ptp.h
index e22da5acd53d..f77e79e47357 100644
--- a/drivers/net/ethernet/renesas/rcar_gen4_ptp.h
+++ b/drivers/net/ethernet/renesas/rcar_gen4_ptp.h
@@ -11,10 +11,6 @@
#define RCAR_GEN4_GPTP_OFFSET_S4 0x00018000
-enum rcar_gen4_ptp_reg_layout {
- RCAR_GEN4_PTP_REG_LAYOUT
-};
-
/* driver's definitions */
#define RCAR_GEN4_RXTSTAMP_ENABLED BIT(0)
#define RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT BIT(1)
@@ -23,37 +19,11 @@ enum rcar_gen4_ptp_reg_layout {
#define RCAR_GEN4_TXTSTAMP_ENABLED BIT(0)
-#define PTPRO 0
-
-enum rcar_gen4_ptp_reg {
- PTPTMEC = PTPRO + 0x0010,
- PTPTMDC = PTPRO + 0x0014,
- PTPTIVC0 = PTPRO + 0x0020,
- PTPTOVC00 = PTPRO + 0x0030,
- PTPTOVC10 = PTPRO + 0x0034,
- PTPTOVC20 = PTPRO + 0x0038,
- PTPGPTPTM00 = PTPRO + 0x0050,
- PTPGPTPTM10 = PTPRO + 0x0054,
- PTPGPTPTM20 = PTPRO + 0x0058,
-};
-
-struct rcar_gen4_ptp_reg_offset {
- u16 enable;
- u16 disable;
- u16 increment;
- u16 config_t0;
- u16 config_t1;
- u16 config_t2;
- u16 monitor_t0;
- u16 monitor_t1;
- u16 monitor_t2;
-};
struct rcar_gen4_ptp_private {
void __iomem *addr;
struct ptp_clock *clock;
struct ptp_clock_info info;
- const struct rcar_gen4_ptp_reg_offset *offs;
spinlock_t lock; /* For multiple registers access */
u32 tstamp_tx_ctrl;
u32 tstamp_rx_ctrl;
@@ -61,8 +31,7 @@ struct rcar_gen4_ptp_private {
bool initialized;
};
-int rcar_gen4_ptp_register(struct rcar_gen4_ptp_private *ptp_priv,
- enum rcar_gen4_ptp_reg_layout layout, u32 rate);
+int rcar_gen4_ptp_register(struct rcar_gen4_ptp_private *ptp_priv, u32 rate);
int rcar_gen4_ptp_unregister(struct rcar_gen4_ptp_private *ptp_priv);
struct rcar_gen4_ptp_private *rcar_gen4_ptp_alloc(struct platform_device *pdev);
diff --git a/drivers/net/ethernet/renesas/rswitch_main.c b/drivers/net/ethernet/renesas/rswitch_main.c
index 59dceb81607c..ff5f966c98a9 100644
--- a/drivers/net/ethernet/renesas/rswitch_main.c
+++ b/drivers/net/ethernet/renesas/rswitch_main.c
@@ -2090,8 +2090,7 @@ static int rswitch_init(struct rswitch_private *priv)
if (err < 0)
goto err_fwd_init;
- err = rcar_gen4_ptp_register(priv->ptp_priv, RCAR_GEN4_PTP_REG_LAYOUT,
- clk_get_rate(priv->clk));
+ err = rcar_gen4_ptp_register(priv->ptp_priv, clk_get_rate(priv->clk));
if (err < 0)
goto err_ptp_register;
diff --git a/drivers/net/ethernet/renesas/rtsn.c b/drivers/net/ethernet/renesas/rtsn.c
index 05c4b6c8c9c3..15a043e85431 100644
--- a/drivers/net/ethernet/renesas/rtsn.c
+++ b/drivers/net/ethernet/renesas/rtsn.c
@@ -1330,8 +1330,7 @@ static int rtsn_probe(struct platform_device *pdev)
device_set_wakeup_capable(&pdev->dev, 1);
- ret = rcar_gen4_ptp_register(priv->ptp_priv, RCAR_GEN4_PTP_REG_LAYOUT,
- clk_get_rate(priv->clk));
+ ret = rcar_gen4_ptp_register(priv->ptp_priv, clk_get_rate(priv->clk));
if (ret)
goto error_pm;
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.c b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
index dadce6009791..e42d0fdefee1 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
@@ -654,7 +654,7 @@ static void icssg_prueth_hsr_fdb_add_del(struct prueth_emac *emac,
static int icssg_prueth_hsr_add_mcast(struct net_device *ndev, const u8 *addr)
{
- struct net_device *real_dev;
+ struct net_device *real_dev, *port_dev;
struct prueth_emac *emac;
u8 vlan_id, i;
@@ -663,11 +663,15 @@ static int icssg_prueth_hsr_add_mcast(struct net_device *ndev, const u8 *addr)
if (is_hsr_master(real_dev)) {
for (i = HSR_PT_SLAVE_A; i < HSR_PT_INTERLINK; i++) {
- emac = netdev_priv(hsr_get_port_ndev(real_dev, i));
- if (!emac)
+ port_dev = hsr_get_port_ndev(real_dev, i);
+ emac = netdev_priv(port_dev);
+ if (!emac) {
+ dev_put(port_dev);
return -EINVAL;
+ }
icssg_prueth_hsr_fdb_add_del(emac, addr, vlan_id,
true);
+ dev_put(port_dev);
}
} else {
emac = netdev_priv(real_dev);
@@ -679,7 +683,7 @@ static int icssg_prueth_hsr_add_mcast(struct net_device *ndev, const u8 *addr)
static int icssg_prueth_hsr_del_mcast(struct net_device *ndev, const u8 *addr)
{
- struct net_device *real_dev;
+ struct net_device *real_dev, *port_dev;
struct prueth_emac *emac;
u8 vlan_id, i;
@@ -688,11 +692,15 @@ static int icssg_prueth_hsr_del_mcast(struct net_device *ndev, const u8 *addr)
if (is_hsr_master(real_dev)) {
for (i = HSR_PT_SLAVE_A; i < HSR_PT_INTERLINK; i++) {
- emac = netdev_priv(hsr_get_port_ndev(real_dev, i));
- if (!emac)
+ port_dev = hsr_get_port_ndev(real_dev, i);
+ emac = netdev_priv(port_dev);
+ if (!emac) {
+ dev_put(port_dev);
return -EINVAL;
+ }
icssg_prueth_hsr_fdb_add_del(emac, addr, vlan_id,
false);
+ dev_put(port_dev);
}
} else {
emac = netdev_priv(real_dev);
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.c b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
index bcd07a715752..5cb353a97d6d 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_hw.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
@@ -2078,10 +2078,6 @@ static void wx_setup_mrqc(struct wx *wx)
{
u32 rss_field = 0;
- /* VT, and RSS do not coexist at the same time */
- if (test_bit(WX_FLAG_VMDQ_ENABLED, wx->flags))
- return;
-
/* Disable indicating checksum in descriptor, enables RSS hash */
wr32m(wx, WX_PSR_CTL, WX_PSR_CTL_PCSD, WX_PSR_CTL_PCSD);
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 54384f9b3872..77b0c3d52041 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -53,7 +53,6 @@ struct geneve_dev_node {
};
struct geneve_config {
- struct ip_tunnel_info info;
bool collect_md;
bool use_udp6_rx_checksums;
bool ttl_inherit;
@@ -61,6 +60,9 @@ struct geneve_config {
bool inner_proto_inherit;
u16 port_min;
u16 port_max;
+
+ /* Must be last --ends in a flexible-array member. */
+ struct ip_tunnel_info info;
};
/* Pseudo network device */
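The reordering above is needed because, per the new comment, struct ip_tunnel_info ends in a flexible-array member, and a structure that ends in a flexible array may only be embedded as the final member of its container (a GNU extension the kernel relies on; strict ISO C does not allow embedding it at all). A minimal illustration with stand-in types, not the kernel's own:

#include <stddef.h>
#include <stdio.h>

/* Stand-in for a type whose last member is a flexible array. */
struct tunnel_info_like {
	int mode;
	unsigned char options[];	/* flexible-array member */
};

/* Valid (as a GNU extension) only because "info" is the last member; adding
 * another field after it is exactly what newer compilers warn about
 * ("flexible array member not at end"). */
struct config_like {
	int collect_md;
	unsigned short port_min;
	unsigned short port_max;
	struct tunnel_info_like info;	/* must be last */
};

int main(void)
{
	printf("offsetof(info) = %zu, sizeof(struct config_like) = %zu\n",
	       offsetof(struct config_like, info), sizeof(struct config_like));
	return 0;
}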
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 0b21818e4925..5200fd5a10e5 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -4211,6 +4211,7 @@ static int macsec_newlink(struct net_device *dev,
if (err < 0)
goto del_dev;
+ netdev_update_features(dev);
netif_stacked_transfer_operstate(real_dev, dev);
linkwatch_fire_event(dev);
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index a7fb1d7cae94..0e7dcdb0a666 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -361,7 +361,7 @@ config NXP_TJA11XX_PHY
tristate "NXP TJA11xx PHYs support"
depends on HWMON
help
- Currently supports the NXP TJA1100 and TJA1101 PHY.
+ Currently supports the NXP TJA1100, TJA1101 and TJA1102 PHYs.
config NCN26000_PHY
tristate "Onsemi 10BASE-T1S Ethernet PHY"
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 13df28445f02..c02da57a4da5 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -1065,23 +1065,19 @@ EXPORT_SYMBOL_GPL(phy_inband_caps);
*/
int phy_config_inband(struct phy_device *phydev, unsigned int modes)
{
- int err;
+ lockdep_assert_held(&phydev->lock);
if (!!(modes & LINK_INBAND_DISABLE) +
!!(modes & LINK_INBAND_ENABLE) +
!!(modes & LINK_INBAND_BYPASS) != 1)
return -EINVAL;
- mutex_lock(&phydev->lock);
if (!phydev->drv)
- err = -EIO;
+ return -EIO;
else if (!phydev->drv->config_inband)
- err = -EOPNOTSUPP;
- else
- err = phydev->drv->config_inband(phydev, modes);
- mutex_unlock(&phydev->lock);
+ return -EOPNOTSUPP;
- return err;
+ return phydev->drv->config_inband(phydev, modes);
}
EXPORT_SYMBOL(phy_config_inband);
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 7556aa3dd7ee..c82c1997147b 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -287,8 +287,7 @@ static bool phy_uses_state_machine(struct phy_device *phydev)
if (phydev->phy_link_change == phy_link_change)
return phydev->attached_dev && phydev->adjust_link;
- /* phydev->phy_link_change is implicitly phylink_phy_change() */
- return true;
+ return !!phydev->phy_link_change;
}
static bool mdio_bus_phy_may_suspend(struct phy_device *phydev)
@@ -1864,6 +1863,8 @@ void phy_detach(struct phy_device *phydev)
phydev->attached_dev = NULL;
phy_link_topo_del_phy(dev, phydev);
}
+
+ phydev->phy_link_change = NULL;
phydev->phylink = NULL;
if (!phydev->is_on_sfp_module)
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index c7cb95aa8007..1988b7d2089a 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -67,6 +67,8 @@ struct phylink {
struct timer_list link_poll;
struct mutex state_mutex;
+ /* Serialize updates to pl->phydev with phylink_resolve() */
+ struct mutex phydev_mutex;
struct phylink_link_state phy_state;
unsigned int phy_ib_mode;
struct work_struct resolve;
@@ -1432,6 +1434,7 @@ static void phylink_get_fixed_state(struct phylink *pl,
static void phylink_mac_initial_config(struct phylink *pl, bool force_restart)
{
struct phylink_link_state link_state;
+ struct phy_device *phy = pl->phydev;
switch (pl->req_link_an_mode) {
case MLO_AN_PHY:
@@ -1455,7 +1458,11 @@ static void phylink_mac_initial_config(struct phylink *pl, bool force_restart)
link_state.link = false;
phylink_apply_manual_flow(pl, &link_state);
+ if (phy)
+ mutex_lock(&phy->lock);
phylink_major_config(pl, force_restart, &link_state);
+ if (phy)
+ mutex_unlock(&phy->lock);
}
static const char *phylink_pause_to_str(int pause)
@@ -1591,8 +1598,13 @@ static void phylink_resolve(struct work_struct *w)
struct phylink_link_state link_state;
bool mac_config = false;
bool retrigger = false;
+ struct phy_device *phy;
bool cur_link_state;
+ mutex_lock(&pl->phydev_mutex);
+ phy = pl->phydev;
+ if (phy)
+ mutex_lock(&phy->lock);
mutex_lock(&pl->state_mutex);
cur_link_state = phylink_link_is_up(pl);
@@ -1626,11 +1638,11 @@ static void phylink_resolve(struct work_struct *w)
/* If we have a phy, the "up" state is the union of both the
* PHY and the MAC
*/
- if (pl->phydev)
+ if (phy)
link_state.link &= pl->phy_state.link;
/* Only update if the PHY link is up */
- if (pl->phydev && pl->phy_state.link) {
+ if (phy && pl->phy_state.link) {
/* If the interface has changed, force a link down
* event if the link isn't already down, and re-resolve.
*/
@@ -1694,6 +1706,9 @@ static void phylink_resolve(struct work_struct *w)
queue_work(system_power_efficient_wq, &pl->resolve);
}
mutex_unlock(&pl->state_mutex);
+ if (phy)
+ mutex_unlock(&phy->lock);
+ mutex_unlock(&pl->phydev_mutex);
}
static void phylink_run_resolve(struct phylink *pl)
@@ -1829,6 +1844,7 @@ struct phylink *phylink_create(struct phylink_config *config,
if (!pl)
return ERR_PTR(-ENOMEM);
+ mutex_init(&pl->phydev_mutex);
mutex_init(&pl->state_mutex);
INIT_WORK(&pl->resolve, phylink_resolve);
@@ -2089,6 +2105,7 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy,
dev_name(&phy->mdio.dev), phy->drv->name, irq_str);
kfree(irq_str);
+ mutex_lock(&pl->phydev_mutex);
mutex_lock(&phy->lock);
mutex_lock(&pl->state_mutex);
pl->phydev = phy;
@@ -2134,6 +2151,7 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy,
mutex_unlock(&pl->state_mutex);
mutex_unlock(&phy->lock);
+ mutex_unlock(&pl->phydev_mutex);
phylink_dbg(pl,
"phy: %s setting supported %*pb advertising %*pb\n",
@@ -2312,6 +2330,7 @@ void phylink_disconnect_phy(struct phylink *pl)
ASSERT_RTNL();
+ mutex_lock(&pl->phydev_mutex);
phy = pl->phydev;
if (phy) {
mutex_lock(&phy->lock);
@@ -2321,8 +2340,11 @@ void phylink_disconnect_phy(struct phylink *pl)
pl->mac_tx_clk_stop = false;
mutex_unlock(&pl->state_mutex);
mutex_unlock(&phy->lock);
- flush_work(&pl->resolve);
+ }
+ mutex_unlock(&pl->phydev_mutex);
+ if (phy) {
+ flush_work(&pl->resolve);
phy_disconnect(phy);
}
}
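The new phydev_mutex serialises updates to pl->phydev with phylink_resolve(), which now takes it first, snapshots the PHY pointer, and only then takes the PHY's own lock and state_mutex, so attach/detach cannot race with a resolve that is still using the old PHY. A minimal pthread sketch of that outer-lock-then-snapshot ordering (the names are stand-ins, not phylink's API):

#include <pthread.h>
#include <stdio.h>

struct phy_like {
	pthread_mutex_t lock;
	int link_up;
};

static pthread_mutex_t attach_mutex = PTHREAD_MUTEX_INITIALIZER;	/* like pl->phydev_mutex */
static pthread_mutex_t state_mutex  = PTHREAD_MUTEX_INITIALIZER;	/* like pl->state_mutex */
static struct phy_like *attached;					/* like pl->phydev */

/* Resolve-style worker: snapshot the attached device under the outer lock,
 * then take its lock (if any) and finally the shared-state lock. */
static void resolve(void)
{
	struct phy_like *phy;

	pthread_mutex_lock(&attach_mutex);
	phy = attached;
	if (phy)
		pthread_mutex_lock(&phy->lock);
	pthread_mutex_lock(&state_mutex);

	/* ... consume phy (may be NULL) and the shared state here ... */

	pthread_mutex_unlock(&state_mutex);
	if (phy)
		pthread_mutex_unlock(&phy->lock);
	pthread_mutex_unlock(&attach_mutex);
}

/* Attach/detach take the same outer lock, so resolve() never sees a pointer
 * that is being torn down underneath it. */
static void set_attached(struct phy_like *phy)
{
	pthread_mutex_lock(&attach_mutex);
	attached = phy;
	pthread_mutex_unlock(&attach_mutex);
}

int main(void)
{
	struct phy_like phy = { .lock = PTHREAD_MUTEX_INITIALIZER };

	set_attached(&phy);
	resolve();
	set_attached(NULL);
	resolve();
	puts("ok");
	return 0;
}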
diff --git a/drivers/net/wireguard/queueing.h b/drivers/net/wireguard/queueing.h
index 7eb76724b3ed..79b6d70de236 100644
--- a/drivers/net/wireguard/queueing.h
+++ b/drivers/net/wireguard/queueing.h
@@ -104,16 +104,11 @@ static inline void wg_reset_packet(struct sk_buff *skb, bool encapsulating)
static inline int wg_cpumask_choose_online(int *stored_cpu, unsigned int id)
{
- unsigned int cpu = *stored_cpu, cpu_index, i;
+ unsigned int cpu = *stored_cpu;
+
+ while (unlikely(cpu >= nr_cpu_ids || !cpu_online(cpu)))
+ cpu = *stored_cpu = cpumask_nth(id % num_online_cpus(), cpu_online_mask);
- if (unlikely(cpu >= nr_cpu_ids ||
- !cpumask_test_cpu(cpu, cpu_online_mask))) {
- cpu_index = id % cpumask_weight(cpu_online_mask);
- cpu = cpumask_first(cpu_online_mask);
- for (i = 0; i < cpu_index; ++i)
- cpu = cpumask_next(cpu, cpu_online_mask);
- *stored_cpu = cpu;
- }
return cpu;
}
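The rewrite above keeps the old behaviour, a sticky per-object CPU that is only re-picked when the cached choice goes offline, but replaces the open-coded walk with cpumask_nth(). A standalone sketch of the same selection over a plain bitmask (MAX_CPUS and the masks are arbitrary example values):

#include <stdio.h>

#define MAX_CPUS 8

/* Index of the n-th set bit (0-based) in mask, or -1 if there is none. */
static int mask_nth(unsigned int mask, unsigned int n)
{
	for (int cpu = 0; cpu < MAX_CPUS; cpu++) {
		if (mask & (1u << cpu) && n-- == 0)
			return cpu;
	}
	return -1;
}

static unsigned int popcount(unsigned int mask)
{
	unsigned int n = 0;

	for (; mask; mask &= mask - 1)
		n++;
	return n;
}

/* Sticky choice: reuse the cached CPU while it stays online, otherwise pick
 * the (id mod online-count)-th online CPU, like the loop above. Assumes at
 * least one CPU is online, as the kernel helper does. */
static int choose_online(int *stored_cpu, unsigned int id, unsigned int online_mask)
{
	int cpu = *stored_cpu;

	while (cpu < 0 || cpu >= MAX_CPUS || !(online_mask & (1u << cpu)))
		cpu = *stored_cpu = mask_nth(online_mask, id % popcount(online_mask));
	return cpu;
}

int main(void)
{
	int stored = -1;

	printf("%d\n", choose_online(&stored, 5, 0x0D));	/* CPUs 0,2,3 online -> 3 */
	printf("%d\n", choose_online(&stored, 5, 0x05));	/* CPU 3 went offline -> 2 */
	return 0;
}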
diff --git a/drivers/net/wireless/ath/ath12k/mac.c b/drivers/net/wireless/ath/ath12k/mac.c
index bd1ec3b2c084..3a3965b79942 100644
--- a/drivers/net/wireless/ath/ath12k/mac.c
+++ b/drivers/net/wireless/ath/ath12k/mac.c
@@ -4078,12 +4078,68 @@ static int ath12k_mac_fils_discovery(struct ath12k_link_vif *arvif,
return ret;
}
+static void ath12k_mac_vif_setup_ps(struct ath12k_link_vif *arvif)
+{
+ struct ath12k *ar = arvif->ar;
+ struct ieee80211_vif *vif = arvif->ahvif->vif;
+ struct ieee80211_conf *conf = &ath12k_ar_to_hw(ar)->conf;
+ enum wmi_sta_powersave_param param;
+ struct ieee80211_bss_conf *info;
+ enum wmi_sta_ps_mode psmode;
+ int ret;
+ int timeout;
+ bool enable_ps;
+
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
+ if (vif->type != NL80211_IFTYPE_STATION)
+ return;
+
+ enable_ps = arvif->ahvif->ps;
+ if (enable_ps) {
+ psmode = WMI_STA_PS_MODE_ENABLED;
+ param = WMI_STA_PS_PARAM_INACTIVITY_TIME;
+
+ timeout = conf->dynamic_ps_timeout;
+ if (timeout == 0) {
+ info = ath12k_mac_get_link_bss_conf(arvif);
+ if (!info) {
+ ath12k_warn(ar->ab, "unable to access bss link conf in setup ps for vif %pM link %u\n",
+ vif->addr, arvif->link_id);
+ return;
+ }
+
+ /* firmware doesn't like 0 */
+ timeout = ieee80211_tu_to_usec(info->beacon_int) / 1000;
+ }
+
+ ret = ath12k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param,
+ timeout);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to set inactivity time for vdev %d: %i\n",
+ arvif->vdev_id, ret);
+ return;
+ }
+ } else {
+ psmode = WMI_STA_PS_MODE_DISABLED;
+ }
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac vdev %d psmode %s\n",
+ arvif->vdev_id, psmode ? "enable" : "disable");
+
+ ret = ath12k_wmi_pdev_set_ps_mode(ar, arvif->vdev_id, psmode);
+ if (ret)
+ ath12k_warn(ar->ab, "failed to set sta power save mode %d for vdev %d: %d\n",
+ psmode, arvif->vdev_id, ret);
+}
+
static void ath12k_mac_op_vif_cfg_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
u64 changed)
{
struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
unsigned long links = ahvif->links_map;
+ struct ieee80211_vif_cfg *vif_cfg;
struct ieee80211_bss_conf *info;
struct ath12k_link_vif *arvif;
struct ieee80211_sta *sta;
@@ -4147,61 +4203,24 @@ static void ath12k_mac_op_vif_cfg_changed(struct ieee80211_hw *hw,
}
}
}
-}
-
-static void ath12k_mac_vif_setup_ps(struct ath12k_link_vif *arvif)
-{
- struct ath12k *ar = arvif->ar;
- struct ieee80211_vif *vif = arvif->ahvif->vif;
- struct ieee80211_conf *conf = &ath12k_ar_to_hw(ar)->conf;
- enum wmi_sta_powersave_param param;
- struct ieee80211_bss_conf *info;
- enum wmi_sta_ps_mode psmode;
- int ret;
- int timeout;
- bool enable_ps;
- lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+ if (changed & BSS_CHANGED_PS) {
+ links = ahvif->links_map;
+ vif_cfg = &vif->cfg;
- if (vif->type != NL80211_IFTYPE_STATION)
- return;
+ for_each_set_bit(link_id, &links, IEEE80211_MLD_MAX_NUM_LINKS) {
+ arvif = wiphy_dereference(hw->wiphy, ahvif->link[link_id]);
+ if (!arvif || !arvif->ar)
+ continue;
- enable_ps = arvif->ahvif->ps;
- if (enable_ps) {
- psmode = WMI_STA_PS_MODE_ENABLED;
- param = WMI_STA_PS_PARAM_INACTIVITY_TIME;
+ ar = arvif->ar;
- timeout = conf->dynamic_ps_timeout;
- if (timeout == 0) {
- info = ath12k_mac_get_link_bss_conf(arvif);
- if (!info) {
- ath12k_warn(ar->ab, "unable to access bss link conf in setup ps for vif %pM link %u\n",
- vif->addr, arvif->link_id);
- return;
+ if (ar->ab->hw_params->supports_sta_ps) {
+ ahvif->ps = vif_cfg->ps;
+ ath12k_mac_vif_setup_ps(arvif);
}
-
- /* firmware doesn't like 0 */
- timeout = ieee80211_tu_to_usec(info->beacon_int) / 1000;
}
-
- ret = ath12k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param,
- timeout);
- if (ret) {
- ath12k_warn(ar->ab, "failed to set inactivity time for vdev %d: %i\n",
- arvif->vdev_id, ret);
- return;
- }
- } else {
- psmode = WMI_STA_PS_MODE_DISABLED;
}
-
- ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac vdev %d psmode %s\n",
- arvif->vdev_id, psmode ? "enable" : "disable");
-
- ret = ath12k_wmi_pdev_set_ps_mode(ar, arvif->vdev_id, psmode);
- if (ret)
- ath12k_warn(ar->ab, "failed to set sta power save mode %d for vdev %d: %d\n",
- psmode, arvif->vdev_id, ret);
}
static bool ath12k_mac_supports_tpc(struct ath12k *ar, struct ath12k_vif *ahvif,
@@ -4223,7 +4242,6 @@ static void ath12k_mac_bss_info_changed(struct ath12k *ar,
{
struct ath12k_vif *ahvif = arvif->ahvif;
struct ieee80211_vif *vif = ath12k_ahvif_to_vif(ahvif);
- struct ieee80211_vif_cfg *vif_cfg = &vif->cfg;
struct cfg80211_chan_def def;
u32 param_id, param_value;
enum nl80211_band band;
@@ -4510,12 +4528,6 @@ static void ath12k_mac_bss_info_changed(struct ath12k *ar,
}
ath12k_mac_fils_discovery(arvif, info);
-
- if (changed & BSS_CHANGED_PS &&
- ar->ab->hw_params->supports_sta_ps) {
- ahvif->ps = vif_cfg->ps;
- ath12k_mac_vif_setup_ps(arvif);
- }
}
static struct ath12k_vif_cache *ath12k_ahvif_get_link_cache(struct ath12k_vif *ahvif,
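For the moved ath12k_mac_vif_setup_ps() above, the only arithmetic is the inactivity-timeout fallback: when dynamic_ps_timeout is unset, one beacon interval is converted from 802.11 time units to milliseconds because the firmware rejects a timeout of zero. A standalone sketch of that fallback (the helper names here are hypothetical):

#include <stdio.h>

/* 802.11 expresses beacon intervals in time units: 1 TU = 1024 microseconds. */
static unsigned int tu_to_usec(unsigned int tu)
{
	return tu * 1024;
}

/* Inactivity timeout (ms) handed to firmware: the configured dynamic PS
 * timeout if non-zero, otherwise one beacon interval, since zero is rejected. */
static unsigned int ps_inactivity_timeout_ms(unsigned int dynamic_ps_timeout_ms,
					     unsigned int beacon_int_tu)
{
	if (dynamic_ps_timeout_ms)
		return dynamic_ps_timeout_ms;
	return tu_to_usec(beacon_int_tu) / 1000;
}

int main(void)
{
	printf("%u\n", ps_inactivity_timeout_ms(0, 100));	/* 100 TU beacon -> 102 ms */
	printf("%u\n", ps_inactivity_timeout_ms(200, 100));	/* explicit 200 ms wins */
	return 0;
}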
diff --git a/drivers/net/wireless/ath/ath12k/wmi.c b/drivers/net/wireless/ath/ath12k/wmi.c
index 742ffeb48bce..29dadedefdd2 100644
--- a/drivers/net/wireless/ath/ath12k/wmi.c
+++ b/drivers/net/wireless/ath/ath12k/wmi.c
@@ -843,7 +843,7 @@ int ath12k_wmi_mgmt_send(struct ath12k_link_vif *arvif, u32 buf_id,
cmd->tx_params_valid = 0;
frame_tlv = (struct wmi_tlv *)(skb->data + sizeof(*cmd));
- frame_tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, buf_len);
+ frame_tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, buf_len_aligned);
memcpy(frame_tlv->value, frame->data, buf_len);
diff --git a/drivers/net/wireless/ath/carl9170/rx.c b/drivers/net/wireless/ath/carl9170/rx.c
index 908c4c8b7f82..6833430130f4 100644
--- a/drivers/net/wireless/ath/carl9170/rx.c
+++ b/drivers/net/wireless/ath/carl9170/rx.c
@@ -555,7 +555,7 @@ static void carl9170_ps_beacon(struct ar9170 *ar, void *data, unsigned int len)
/* Check whether the PHY can be turned off again. */
/* 1. What about buffered unicast traffic for our AID? */
- cam = ieee80211_check_tim(tim_ie, tim_len, ar->common.curaid);
+ cam = ieee80211_check_tim(tim_ie, tim_len, ar->common.curaid, false);
/* 2. Maybe the AP wants to send multicast/broadcast data? */
cam |= !!(tim_ie->bitmap_ctrl & 0x01);
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index 7703a0933a14..7218fe70f3bc 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -2708,6 +2708,7 @@ static void wil_wiphy_init(struct wiphy *wiphy)
wiphy->n_cipher_suites = ARRAY_SIZE(wil_cipher_suites);
wiphy->mgmt_stypes = wil_mgmt_stypes;
wiphy->features |= NL80211_FEATURE_SK_TX_STATUS;
+ wiphy->bss_param_support = WIPHY_BSS_PARAM_AP_ISOLATE;
wiphy->n_vendor_commands = ARRAY_SIZE(wil_nl80211_vendor_commands);
wiphy->vendor_commands = wil_nl80211_vendor_commands;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
index 8ab7d1e34a6e..6a3f187320fc 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
@@ -997,9 +997,9 @@ static const struct sdio_device_id brcmf_sdmmc_ids[] = {
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4356, WCC),
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4359, WCC),
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43751, WCC),
+ BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43752, WCC),
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_CYPRESS_4373, CYW),
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_CYPRESS_43012, CYW),
- BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_CYPRESS_43752, CYW),
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_CYPRESS_89359, CYW),
CYW_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_CYPRESS_43439, CYW),
{ /* end: all zeroes */ }
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index 8af402555b5e..8afaffe31031 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -5958,6 +5958,26 @@ static int brcmf_cfg80211_del_pmk(struct wiphy *wiphy, struct net_device *dev,
return brcmf_set_pmk(ifp, NULL, 0);
}
+static int brcmf_cfg80211_change_bss(struct wiphy *wiphy, struct net_device *dev,
+ struct bss_parameters *params)
+{
+ struct brcmf_if *ifp = netdev_priv(dev);
+ int ret = 0;
+
+ /* In AP mode, the "ap_isolate" value represents
+ * 0 = allow low-level bridging of frames between associated stations
+ * 1 = restrict low-level bridging of frames to isolate associated stations
+ * -1 = do not change existing setting
+ */
+ if (params->ap_isolate >= 0) {
+ ret = brcmf_fil_iovar_int_set(ifp, "ap_isolate", params->ap_isolate);
+ if (ret < 0)
+ brcmf_err("ap_isolate iovar failed: ret=%d\n", ret);
+ }
+
+ return ret;
+}
+
static struct cfg80211_ops brcmf_cfg80211_ops = {
.add_virtual_intf = brcmf_cfg80211_add_iface,
.del_virtual_intf = brcmf_cfg80211_del_iface,
@@ -6005,6 +6025,7 @@ static struct cfg80211_ops brcmf_cfg80211_ops = {
.update_connect_params = brcmf_cfg80211_update_conn_params,
.set_pmk = brcmf_cfg80211_set_pmk,
.del_pmk = brcmf_cfg80211_del_pmk,
+ .change_bss = brcmf_cfg80211_change_bss,
};
struct cfg80211_ops *brcmf_cfg80211_get_ops(struct brcmf_mp_device *settings)
@@ -7659,6 +7680,8 @@ static int brcmf_setup_wiphy(struct wiphy *wiphy, struct brcmf_if *ifp)
BIT(NL80211_BSS_SELECT_ATTR_BAND_PREF) |
BIT(NL80211_BSS_SELECT_ATTR_RSSI_ADJUST);
+ wiphy->bss_param_support = WIPHY_BSS_PARAM_AP_ISOLATE;
+
wiphy->flags |= WIPHY_FLAG_NETNS_OK |
WIPHY_FLAG_PS_ON_BY_DEFAULT |
WIPHY_FLAG_HAVE_AP_SME |
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
index 9074ab49e806..4239f2b21e54 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
@@ -738,8 +738,8 @@ static u32 brcmf_chip_tcm_rambase(struct brcmf_chip_priv *ci)
case BRCM_CC_4364_CHIP_ID:
case CY_CC_4373_CHIP_ID:
return 0x160000;
- case CY_CC_43752_CHIP_ID:
case BRCM_CC_43751_CHIP_ID:
+ case BRCM_CC_43752_CHIP_ID:
case BRCM_CC_4377_CHIP_ID:
return 0x170000;
case BRCM_CC_4378_CHIP_ID:
@@ -1452,7 +1452,7 @@ bool brcmf_chip_sr_capable(struct brcmf_chip *pub)
return (reg & CC_SR_CTL0_ENABLE_MASK) != 0;
case BRCM_CC_4359_CHIP_ID:
case BRCM_CC_43751_CHIP_ID:
- case CY_CC_43752_CHIP_ID:
+ case BRCM_CC_43752_CHIP_ID:
case CY_CC_43012_CHIP_ID:
addr = CORE_CC_REG(pmu->base, retention_ctl);
reg = chip->ops->read32(chip->ctx, addr);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
index 83f8ed7d00f9..ef79924fd8f4 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
@@ -554,12 +554,16 @@ static int brcmf_fw_request_nvram_done(const struct firmware *fw, void *ctx)
data = (u8 *)fw->data;
data_len = fw->size;
} else {
- if ((data = bcm47xx_nvram_get_contents(&data_len)))
+ data = bcm47xx_nvram_get_contents(&data_len);
+ if (data) {
free_bcm47xx_nvram = true;
- else if ((data = brcmf_fw_nvram_from_efi(&data_len)))
- kfree_nvram = true;
- else if (!(cur->flags & BRCMF_FW_REQF_OPTIONAL))
- goto fail;
+ } else {
+ data = brcmf_fw_nvram_from_efi(&data_len);
+ if (data)
+ kfree_nvram = true;
+ else if (!(cur->flags & BRCMF_FW_REQF_OPTIONAL))
+ goto fail;
+ }
}
if (data)
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
index 8a0bad5119a0..8cf9d7e7c3f7 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
@@ -655,10 +655,10 @@ static const struct brcmf_firmware_mapping brcmf_sdio_fwnames[] = {
BRCMF_FW_ENTRY(BRCM_CC_4356_CHIP_ID, 0xFFFFFFFF, 4356),
BRCMF_FW_ENTRY(BRCM_CC_4359_CHIP_ID, 0xFFFFFFFF, 4359),
BRCMF_FW_ENTRY(BRCM_CC_43751_CHIP_ID, 0xFFFFFFFF, 43752),
+ BRCMF_FW_ENTRY(BRCM_CC_43752_CHIP_ID, 0xFFFFFFFF, 43752),
BRCMF_FW_ENTRY(CY_CC_4373_CHIP_ID, 0xFFFFFFFF, 4373),
BRCMF_FW_ENTRY(CY_CC_43012_CHIP_ID, 0xFFFFFFFF, 43012),
BRCMF_FW_ENTRY(CY_CC_43439_CHIP_ID, 0xFFFFFFFF, 43439),
- BRCMF_FW_ENTRY(CY_CC_43752_CHIP_ID, 0xFFFFFFFF, 43752)
};
#define TXCTL_CREDITS 2
@@ -3426,8 +3426,8 @@ err:
static bool brcmf_sdio_aos_no_decode(struct brcmf_sdio *bus)
{
if (bus->ci->chip == BRCM_CC_43751_CHIP_ID ||
- bus->ci->chip == CY_CC_43012_CHIP_ID ||
- bus->ci->chip == CY_CC_43752_CHIP_ID)
+ bus->ci->chip == BRCM_CC_43752_CHIP_ID ||
+ bus->ci->chip == CY_CC_43012_CHIP_ID)
return true;
else
return false;
@@ -4278,8 +4278,8 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err,
switch (sdiod->func1->device) {
case SDIO_DEVICE_ID_BROADCOM_43751:
+ case SDIO_DEVICE_ID_BROADCOM_43752:
case SDIO_DEVICE_ID_BROADCOM_CYPRESS_4373:
- case SDIO_DEVICE_ID_BROADCOM_CYPRESS_43752:
brcmf_dbg(INFO, "set F2 watermark to 0x%x*4 bytes\n",
CY_4373_F2_WATERMARK);
brcmf_sdiod_writeb(sdiod, SBSDIO_WATERMARK,
diff --git a/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h b/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h
index b39c5c1ee18b..df3b67ba4db2 100644
--- a/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h
+++ b/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h
@@ -60,7 +60,6 @@
#define CY_CC_4373_CHIP_ID 0x4373
#define CY_CC_43012_CHIP_ID 43012
#define CY_CC_43439_CHIP_ID 43439
-#define CY_CC_43752_CHIP_ID 43752
/* USB Device IDs */
#define BRCM_USB_43143_DEVICE_ID 0xbd1e
diff --git a/drivers/net/wireless/intel/iwlegacy/iwl-spectrum.h b/drivers/net/wireless/intel/iwlegacy/iwl-spectrum.h
index 1e8ab704dbfb..f00eb878b94b 100644
--- a/drivers/net/wireless/intel/iwlegacy/iwl-spectrum.h
+++ b/drivers/net/wireless/intel/iwlegacy/iwl-spectrum.h
@@ -50,28 +50,4 @@ struct ieee80211_measurement_params {
__le16 duration;
} __packed;
-struct ieee80211_info_element {
- u8 id;
- u8 len;
- u8 data[];
-} __packed;
-
-struct ieee80211_measurement_request {
- struct ieee80211_info_element ie;
- u8 token;
- u8 mode;
- u8 type;
- struct ieee80211_measurement_params params[];
-} __packed;
-
-struct ieee80211_measurement_report {
- struct ieee80211_info_element ie;
- u8 token;
- u8 mode;
- u8 type;
- union {
- struct ieee80211_basic_report basic[0];
- } u;
-} __packed;
-
#endif
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/bz.c b/drivers/net/wireless/intel/iwlwifi/cfg/bz.c
index 9f543946b285..3e6206e739f6 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/bz.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/bz.c
@@ -9,11 +9,11 @@
#include "iwl-prph.h"
#include "fw/api/txq.h"
-/* Highest firmware API version supported */
-#define IWL_BZ_UCODE_API_MAX 102
+/* Highest firmware core release supported */
+#define IWL_BZ_UCODE_CORE_MAX 99
/* Lowest firmware API version supported */
-#define IWL_BZ_UCODE_API_MIN 98
+#define IWL_BZ_UCODE_API_MIN 100
/* Memory offsets and lengths */
#define IWL_BZ_SMEM_OFFSET 0x400000
@@ -75,7 +75,7 @@ static const struct iwl_family_base_params iwl_bz_base = {
},
},
.features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM,
- .ucode_api_max = IWL_BZ_UCODE_API_MAX,
+ .ucode_api_max = ENCODE_CORE_AS_API(IWL_BZ_UCODE_CORE_MAX),
.ucode_api_min = IWL_BZ_UCODE_API_MIN,
};
@@ -101,8 +101,8 @@ const struct iwl_mac_cfg iwl_gl_mac_cfg = {
.low_latency_xtal = true,
};
-IWL_FW_AND_PNVM(IWL_BZ_A_FM_B_FW_PRE, IWL_BZ_UCODE_API_MAX);
-IWL_FW_AND_PNVM(IWL_BZ_A_FM_C_FW_PRE, IWL_BZ_UCODE_API_MAX);
-IWL_FW_AND_PNVM(IWL_BZ_A_FM4_B_FW_PRE, IWL_BZ_UCODE_API_MAX);
-IWL_FW_AND_PNVM(IWL_GL_B_FM_B_FW_PRE, IWL_BZ_UCODE_API_MAX);
-IWL_FW_AND_PNVM(IWL_GL_C_FM_C_FW_PRE, IWL_BZ_UCODE_API_MAX);
+IWL_CORE_FW(IWL_BZ_A_FM_B_FW_PRE, IWL_BZ_UCODE_CORE_MAX);
+IWL_CORE_FW(IWL_BZ_A_FM_C_FW_PRE, IWL_BZ_UCODE_CORE_MAX);
+IWL_CORE_FW(IWL_BZ_A_FM4_B_FW_PRE, IWL_BZ_UCODE_CORE_MAX);
+IWL_CORE_FW(IWL_GL_B_FM_B_FW_PRE, IWL_BZ_UCODE_CORE_MAX);
+IWL_CORE_FW(IWL_GL_C_FM_C_FW_PRE, IWL_BZ_UCODE_CORE_MAX);
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/dr.c b/drivers/net/wireless/intel/iwlwifi/cfg/dr.c
index 807f4e29d55a..e53a785686c8 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/dr.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/dr.c
@@ -8,11 +8,11 @@
#include "iwl-prph.h"
#include "fw/api/txq.h"
-/* Highest firmware API version supported */
-#define IWL_DR_UCODE_API_MAX 102
+/* Highest firmware core release supported */
+#define IWL_DR_UCODE_CORE_MAX 99
/* Lowest firmware API version supported */
-#define IWL_DR_UCODE_API_MIN 98
+#define IWL_DR_UCODE_API_MIN 100
/* Memory offsets and lengths */
#define IWL_DR_SMEM_OFFSET 0x400000
@@ -20,9 +20,6 @@
#define IWL_DR_A_PE_A_FW_PRE "iwlwifi-dr-a0-pe-a0"
-#define IWL_DR_A_PE_A_FW_MODULE_FIRMWARE(api) \
- IWL_DR_A_PE_A_FW_PRE "-" __stringify(api) ".ucode"
-
static const struct iwl_family_base_params iwl_dr_base = {
.num_of_queues = 512,
.max_tfd_queue_size = 65536,
@@ -73,7 +70,7 @@ static const struct iwl_family_base_params iwl_dr_base = {
},
},
.features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM,
- .ucode_api_max = IWL_DR_UCODE_API_MAX,
+ .ucode_api_max = ENCODE_CORE_AS_API(IWL_DR_UCODE_CORE_MAX),
.ucode_api_min = IWL_DR_UCODE_API_MIN,
};
@@ -89,5 +86,5 @@ const struct iwl_mac_cfg iwl_dr_mac_cfg = {
.ltr_delay = IWL_CFG_TRANS_LTR_DELAY_2500US,
};
-MODULE_FIRMWARE(IWL_DR_A_PE_A_FW_MODULE_FIRMWARE(IWL_DR_UCODE_API_MAX));
+IWL_CORE_FW(IWL_DR_A_PE_A_FW_PRE, IWL_DR_UCODE_CORE_MAX);
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/rf-gf.c b/drivers/net/wireless/intel/iwlwifi/cfg/rf-gf.c
index 7ff5170faaa9..c16cda087a7c 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/rf-gf.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/rf-gf.c
@@ -9,7 +9,7 @@
#define IWL_GF_UCODE_API_MAX 100
/* Lowest firmware API version supported */
-#define IWL_GF_UCODE_API_MIN 98
+#define IWL_GF_UCODE_API_MIN 100
#define IWL_SO_A_GF_A_FW_PRE "iwlwifi-so-a0-gf-a0"
#define IWL_TY_A_GF_A_FW_PRE "iwlwifi-ty-a0-gf-a0"
@@ -23,6 +23,18 @@
#define IWL_SC_A_GF_A_FW_PRE "iwlwifi-sc-a0-gf-a0"
#define IWL_SC_A_GF4_A_FW_PRE "iwlwifi-sc-a0-gf4-a0"
+#define IWL_BZ_A_GF_A_MODULE_FIRMWARE(api) \
+ IWL_BZ_A_GF_A_FW_PRE "-" __stringify(api) ".ucode"
+
+#define IWL_BZ_A_GF4_A_MODULE_FIRMWARE(api) \
+ IWL_BZ_A_GF4_A_FW_PRE "-" __stringify(api) ".ucode"
+
+#define IWL_SC_A_GF_A_MODULE_FIRMWARE(api) \
+ IWL_SC_A_GF_A_FW_PRE "-" __stringify(api) ".ucode"
+
+#define IWL_SC_A_GF4_A_MODULE_FIRMWARE(api) \
+ IWL_SC_A_GF4_A_FW_PRE "-" __stringify(api) ".ucode"
+
/* NVM versions */
#define IWL_GF_NVM_VERSION 0x0a1d
@@ -67,7 +79,7 @@ IWL_FW_AND_PNVM(IWL_MA_A_GF_A_FW_PRE, IWL_GF_UCODE_API_MAX);
IWL_FW_AND_PNVM(IWL_MA_B_GF_A_FW_PRE, IWL_GF_UCODE_API_MAX);
IWL_FW_AND_PNVM(IWL_MA_A_GF4_A_FW_PRE, IWL_GF_UCODE_API_MAX);
IWL_FW_AND_PNVM(IWL_MA_B_GF4_A_FW_PRE, IWL_GF_UCODE_API_MAX);
-IWL_FW_AND_PNVM(IWL_BZ_A_GF_A_FW_PRE, IWL_GF_UCODE_API_MAX);
-IWL_FW_AND_PNVM(IWL_BZ_A_GF4_A_FW_PRE, IWL_GF_UCODE_API_MAX);
-IWL_FW_AND_PNVM(IWL_SC_A_GF_A_FW_PRE, IWL_GF_UCODE_API_MAX);
-IWL_FW_AND_PNVM(IWL_SC_A_GF4_A_FW_PRE, IWL_GF_UCODE_API_MAX);
+MODULE_FIRMWARE(IWL_BZ_A_GF_A_MODULE_FIRMWARE(IWL_GF_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_BZ_A_GF4_A_MODULE_FIRMWARE(IWL_GF_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_SC_A_GF_A_MODULE_FIRMWARE(IWL_GF_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_SC_A_GF4_A_MODULE_FIRMWARE(IWL_GF_UCODE_API_MAX));
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/rf-hr.c b/drivers/net/wireless/intel/iwlwifi/cfg/rf-hr.c
index 9f408d276ce9..6cf187d92dbf 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/rf-hr.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/rf-hr.c
@@ -9,7 +9,7 @@
#define IWL_HR_UCODE_API_MAX 100
/* Lowest firmware API version supported */
-#define IWL_HR_UCODE_API_MIN 98
+#define IWL_HR_UCODE_API_MIN 100
#define IWL_QU_B_HR_B_FW_PRE "iwlwifi-Qu-b0-hr-b0"
#define IWL_QU_C_HR_B_FW_PRE "iwlwifi-Qu-c0-hr-b0"
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/sc.c b/drivers/net/wireless/intel/iwlwifi/cfg/sc.c
index 6d4a3bce49b9..e9449b59114a 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/sc.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/sc.c
@@ -9,11 +9,11 @@
#include "iwl-prph.h"
#include "fw/api/txq.h"
-/* Highest firmware API version supported */
-#define IWL_SC_UCODE_API_MAX 102
+/* Highest firmware core release supported */
+#define IWL_SC_UCODE_CORE_MAX 99
/* Lowest firmware API version supported */
-#define IWL_SC_UCODE_API_MIN 98
+#define IWL_SC_UCODE_API_MIN 100
/* NVM versions */
#define IWL_SC_NVM_VERSION 0x0a1d
@@ -78,7 +78,7 @@ static const struct iwl_family_base_params iwl_sc_base = {
},
},
.features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM,
- .ucode_api_max = IWL_SC_UCODE_API_MAX,
+ .ucode_api_max = ENCODE_CORE_AS_API(IWL_SC_UCODE_CORE_MAX),
.ucode_api_min = IWL_SC_UCODE_API_MIN,
};
@@ -94,8 +94,8 @@ const struct iwl_mac_cfg iwl_sc_mac_cfg = {
.ltr_delay = IWL_CFG_TRANS_LTR_DELAY_2500US,
};
-IWL_FW_AND_PNVM(IWL_SC_A_FM_B_FW_PRE, IWL_SC_UCODE_API_MAX);
-IWL_FW_AND_PNVM(IWL_SC_A_FM_C_FW_PRE, IWL_SC_UCODE_API_MAX);
-IWL_FW_AND_PNVM(IWL_SC_A_WH_A_FW_PRE, IWL_SC_UCODE_API_MAX);
-IWL_FW_AND_PNVM(IWL_SC2_A_FM_C_FW_PRE, IWL_SC_UCODE_API_MAX);
-IWL_FW_AND_PNVM(IWL_SC2_A_WH_A_FW_PRE, IWL_SC_UCODE_API_MAX);
+IWL_CORE_FW(IWL_SC_A_FM_B_FW_PRE, IWL_SC_UCODE_CORE_MAX);
+IWL_CORE_FW(IWL_SC_A_FM_C_FW_PRE, IWL_SC_UCODE_CORE_MAX);
+IWL_CORE_FW(IWL_SC_A_WH_A_FW_PRE, IWL_SC_UCODE_CORE_MAX);
+IWL_CORE_FW(IWL_SC2_A_FM_C_FW_PRE, IWL_SC_UCODE_CORE_MAX);
+IWL_CORE_FW(IWL_SC2_A_WH_A_FW_PRE, IWL_SC_UCODE_CORE_MAX);
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/eeprom.c b/drivers/net/wireless/intel/iwlwifi/dvm/eeprom.c
index 9f8cdb027839..d337ab543eb0 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/eeprom.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/eeprom.c
@@ -766,7 +766,7 @@ static int iwl_init_otp_access(struct iwl_trans *trans)
{
int ret;
- ret = iwl_finish_nic_init(trans);
+ ret = iwl_trans_activate_nic(trans);
if (ret)
return ret;
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
index 0771a46bd552..a0a26ef482a5 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
@@ -378,7 +378,7 @@ static int iwlagn_mac_suspend(struct ieee80211_hw *hw,
iwl_write32(priv->trans, CSR_UCODE_DRV_GP1_SET,
CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE);
- iwl_trans_d3_suspend(priv->trans, false, true);
+ iwl_trans_d3_suspend(priv->trans, true);
goto out;
@@ -422,7 +422,6 @@ static int iwlagn_mac_resume(struct ieee80211_hw *hw)
struct ieee80211_vif *vif;
u32 base;
int ret;
- enum iwl_d3_status d3_status;
struct error_table_start {
/* cf. struct iwl_error_event_table */
u32 valid;
@@ -451,15 +450,10 @@ static int iwlagn_mac_resume(struct ieee80211_hw *hw)
/* we'll clear ctx->vif during iwlagn_prepare_restart() */
vif = ctx->vif;
- ret = iwl_trans_d3_resume(priv->trans, &d3_status, false, true);
+ ret = iwl_trans_d3_resume(priv->trans, true);
if (ret)
goto out_unlock;
- if (d3_status != IWL_D3_STATUS_ALIVE) {
- IWL_INFO(priv, "Device was reset during suspend\n");
- goto out_unlock;
- }
-
/* uCode is no longer operating by itself */
iwl_write32(priv->trans, CSR_UCODE_DRV_GP1_CLR,
CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE);
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/power.c b/drivers/net/wireless/intel/iwlwifi/dvm/power.c
index 6b42d6e5f30f..e7dbba7134f7 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/power.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/power.c
@@ -368,7 +368,7 @@ int iwl_power_update_mode(struct iwl_priv *priv, bool force)
/* initialize to default */
void iwl_power_initialize(struct iwl_priv *priv)
{
- priv->power_data.bus_pm = priv->trans->pm_support;
+ priv->power_data.bus_pm = iwl_trans_is_pm_supported(priv->trans);
priv->power_data.debug_sleep_level_override = -1;
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
index 7ec22738b5d6..52edc19d8cdd 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
@@ -9,9 +9,9 @@
#include "acpi.h"
#include "fw/runtime.h"
-const guid_t iwl_guid = GUID_INIT(0xF21202BF, 0x8F78, 0x4DC6,
- 0xA5, 0xB3, 0x1F, 0x73,
- 0x8E, 0x28, 0x5A, 0xDE);
+static const guid_t iwl_guid = GUID_INIT(0xF21202BF, 0x8F78, 0x4DC6,
+ 0xA5, 0xB3, 0x1F, 0x73,
+ 0x8E, 0x28, 0x5A, 0xDE);
static const size_t acpi_dsm_size[DSM_FUNC_NUM_FUNCS] = {
[DSM_FUNC_QUERY] = sizeof(u32),
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
index 68d8fb5f6357..20bc6671f4eb 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
@@ -140,8 +140,6 @@ struct iwl_dsm_internal_product_reset_cmd {
struct iwl_fw_runtime;
-extern const guid_t iwl_guid;
-
union acpi_object *iwl_acpi_get_dsm_object(struct device *dev, int rev,
int func, union acpi_object *args,
const guid_t *guid);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h b/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h
index 53445087e9cb..d3bed0216df4 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h
@@ -367,6 +367,7 @@ enum iwl_wowlan_flags {
ENABLE_NBNS_FILTERING = BIT(2),
ENABLE_DHCP_FILTERING = BIT(3),
ENABLE_STORE_BEACON = BIT(4),
+ HAS_BEACON_PROTECTION = BIT(5),
};
/**
@@ -631,10 +632,65 @@ struct iwl_wowlan_gtk_status_v3 {
struct iwl_wowlan_all_rsc_tsc_v5 sc;
} __packed; /* WOWLAN_GTK_MATERIAL_VER_3 */
+/**
+ * enum iwl_wowlan_key_status - Status of security keys in WoWLAN notifications
+ * @IWL_WOWLAN_NOTIF_NO_KEY: No key is present; this entry should be ignored.
+ * @IWL_WOWLAN_STATUS_OLD_KEY: old key exists; no rekey occurred, and only
+ * metadata is available.
+ * @IWL_WOWLAN_STATUS_NEW_KEY: A new key was created after a rekey; new key
+ * material is available.
+ */
+enum iwl_wowlan_key_status {
+ IWL_WOWLAN_NOTIF_NO_KEY = 0,
+ IWL_WOWLAN_STATUS_OLD_KEY = 1,
+ IWL_WOWLAN_STATUS_NEW_KEY = 2
+};
+
+/**
+ * struct iwl_wowlan_gtk_status - GTK status
+ * @key: GTK material
+ * @key_len: GTK length, if set to 0, the key is not available
+ * @key_flags: information about the key:
+ * bits[0:1]: key index assigned by the AP
+ * bits[2:6]: GTK index of the key in the internal DB
+ * bit[7]: Set iff this is the currently used GTK
+ * @key_status: key status, see &enum iwl_wowlan_key_status
+ * @reserved: padding
+ * @tkip_mic_key: TKIP RX MIC key
+ * @sc: RSC/TSC counters
+ */
+struct iwl_wowlan_gtk_status {
+ u8 key[WOWLAN_KEY_MAX_SIZE];
+ u8 key_len;
+ u8 key_flags;
+ u8 key_status;
+ u8 reserved;
+ u8 tkip_mic_key[IWL_MIC_KEY_SIZE];
+ struct iwl_wowlan_all_rsc_tsc_v5 sc;
+} __packed; /* WOWLAN_GTK_MATERIAL_VER_4 */
+
#define IWL_WOWLAN_GTK_IDX_MASK (BIT(0) | BIT(1))
#define IWL_WOWLAN_IGTK_BIGTK_IDX_MASK (BIT(0))
/**
+ * struct iwl_wowlan_igtk_status_v1 - IGTK status
+ * @key: IGTK material
+ * @ipn: the IGTK packet number (replay counter)
+ * @key_len: IGTK length, if set to 0, the key is not available
+ * @key_flags: information about the key:
+ * bits[0]: key index assigned by the AP (0: index 4, 1: index 5)
+ * (0: index 6, 1: index 7 with bigtk)
+ * bits[1:5]: IGTK index of the key in the internal DB
+ * bit[6]: Set iff this is the currently used IGTK
+ */
+struct iwl_wowlan_igtk_status_v1 {
+ u8 key[WOWLAN_KEY_MAX_SIZE];
+ u8 ipn[6];
+ u8 key_len;
+ u8 key_flags;
+} __packed; /* WOWLAN_IGTK_MATERIAL_VER_1 */
+
+/**
* struct iwl_wowlan_igtk_status - IGTK status
* @key: IGTK material
* @ipn: the IGTK packet number (replay counter)
@@ -644,13 +700,17 @@ struct iwl_wowlan_gtk_status_v3 {
* (0: index 6, 1: index 7 with bigtk)
* bits[1:5]: IGTK index of the key in the internal DB
* bit[6]: Set iff this is the currently used IGTK
+ * @key_status: key status, see &enum iwl_wowlan_key_status
+ * @reserved: padding
*/
struct iwl_wowlan_igtk_status {
u8 key[WOWLAN_KEY_MAX_SIZE];
u8 ipn[6];
u8 key_len;
u8 key_flags;
-} __packed; /* WOWLAN_IGTK_MATERIAL_VER_1 */
+ u8 key_status;
+ u8 reserved[3];
+} __packed; /* WOWLAN_IGTK_MATERIAL_VER_2 */
/**
* struct iwl_wowlan_status_v6 - WoWLAN status
@@ -700,7 +760,7 @@ struct iwl_wowlan_status_v6 {
*/
struct iwl_wowlan_status_v7 {
struct iwl_wowlan_gtk_status_v2 gtk[WOWLAN_GTK_KEYS_NUM];
- struct iwl_wowlan_igtk_status igtk[WOWLAN_IGTK_KEYS_NUM];
+ struct iwl_wowlan_igtk_status_v1 igtk[WOWLAN_IGTK_KEYS_NUM];
__le64 replay_ctr;
__le16 pattern_number;
__le16 non_qos_seq_ctr;
@@ -735,7 +795,7 @@ struct iwl_wowlan_status_v7 {
*/
struct iwl_wowlan_info_notif_v1 {
struct iwl_wowlan_gtk_status_v3 gtk[WOWLAN_GTK_KEYS_NUM];
- struct iwl_wowlan_igtk_status igtk[WOWLAN_IGTK_KEYS_NUM];
+ struct iwl_wowlan_igtk_status_v1 igtk[WOWLAN_IGTK_KEYS_NUM];
__le64 replay_ctr;
__le16 pattern_number;
__le16 reserved1;
@@ -817,8 +877,8 @@ struct iwl_wowlan_mlo_gtk {
*/
struct iwl_wowlan_info_notif_v3 {
struct iwl_wowlan_gtk_status_v3 gtk[WOWLAN_GTK_KEYS_NUM];
- struct iwl_wowlan_igtk_status igtk[WOWLAN_IGTK_KEYS_NUM];
- struct iwl_wowlan_igtk_status bigtk[WOWLAN_BIGTK_KEYS_NUM];
+ struct iwl_wowlan_igtk_status_v1 igtk[WOWLAN_IGTK_KEYS_NUM];
+ struct iwl_wowlan_igtk_status_v1 bigtk[WOWLAN_BIGTK_KEYS_NUM];
__le64 replay_ctr;
__le16 pattern_number;
__le16 reserved1;
@@ -833,6 +893,45 @@ struct iwl_wowlan_info_notif_v3 {
} __packed; /* WOWLAN_INFO_NTFY_API_S_VER_3 */
/**
+ * struct iwl_wowlan_info_notif_v5 - WoWLAN information notification
+ * @gtk: GTK data
+ * @igtk: IGTK data
+ * @bigtk: BIGTK data
+ * @replay_ctr: GTK rekey replay counter
+ * @pattern_number: number of the matched patterns
+ * @qos_seq_ctr: QoS sequence counters to use next
+ * @wakeup_reasons: wakeup reasons, see &enum iwl_wowlan_wakeup_reason
+ * @num_of_gtk_rekeys: number of GTK rekeys
+ * @transmitted_ndps: number of transmitted neighbor discovery packets
+ * @received_beacons: number of received beacons
+ * @tid_tear_down: bit mask of tids whose BA sessions were closed
+ * in suspend state
+ * @station_id: station id
+ * @num_mlo_link_keys: number of &struct iwl_wowlan_mlo_gtk structs
+ * following this notif
+ * @tid_offloaded_tx: tid used by the firmware to transmit data packets
+ * while in wowlan
+ * @mlo_gtks: array of GTKs of size num_mlo_link_keys
+ */
+struct iwl_wowlan_info_notif_v5 {
+ struct iwl_wowlan_gtk_status_v3 gtk[WOWLAN_GTK_KEYS_NUM];
+ struct iwl_wowlan_igtk_status_v1 igtk[WOWLAN_IGTK_KEYS_NUM];
+ struct iwl_wowlan_igtk_status_v1 bigtk[WOWLAN_BIGTK_KEYS_NUM];
+ __le64 replay_ctr;
+ __le16 pattern_number;
+ __le16 qos_seq_ctr;
+ __le32 wakeup_reasons;
+ __le32 num_of_gtk_rekeys;
+ __le32 transmitted_ndps;
+ __le32 received_beacons;
+ u8 tid_tear_down;
+ u8 station_id;
+ u8 num_mlo_link_keys;
+ u8 tid_offloaded_tx;
+ struct iwl_wowlan_mlo_gtk mlo_gtks[];
+} __packed; /* WOWLAN_INFO_NTFY_API_S_VER_5 */
+
+/**
* struct iwl_wowlan_info_notif - WoWLAN information notification
* @gtk: GTK data
* @igtk: IGTK data
@@ -854,7 +953,7 @@ struct iwl_wowlan_info_notif_v3 {
* @mlo_gtks: array of GTKs of size num_mlo_link_keys
*/
struct iwl_wowlan_info_notif {
- struct iwl_wowlan_gtk_status_v3 gtk[WOWLAN_GTK_KEYS_NUM];
+ struct iwl_wowlan_gtk_status gtk[WOWLAN_GTK_KEYS_NUM];
struct iwl_wowlan_igtk_status igtk[WOWLAN_IGTK_KEYS_NUM];
struct iwl_wowlan_igtk_status bigtk[WOWLAN_BIGTK_KEYS_NUM];
__le64 replay_ctr;
@@ -869,7 +968,7 @@ struct iwl_wowlan_info_notif {
u8 num_mlo_link_keys;
u8 tid_offloaded_tx;
struct iwl_wowlan_mlo_gtk mlo_gtks[];
-} __packed; /* WOWLAN_INFO_NTFY_API_S_VER_5 */
+} __packed; /* WOWLAN_INFO_NTFY_API_S_VER_6 */
/**
* struct iwl_wowlan_wake_pkt_notif - WoWLAN wake packet notification
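[Editor's note] The new WOWLAN_GTK_MATERIAL_VER_4 layout above documents key_flags as a small bitfield. As a quick illustration of that layout only (mask names other than IWL_WOWLAN_GTK_IDX_MASK are placeholders, not driver symbols), the fields can be pulled apart like this:

#include <stdbool.h>
#include <stdint.h>

#define IWL_WOWLAN_GTK_IDX_MASK		0x03	/* bits[0:1]: key index assigned by the AP */
#define GTK_INTERNAL_IDX_MASK		0x7c	/* bits[2:6]: index in the internal DB (placeholder name) */
#define GTK_CURRENTLY_USED_FLAG		0x80	/* bit[7]: set iff this is the GTK in use (placeholder name) */

static inline uint8_t gtk_ap_key_idx(uint8_t key_flags)
{
	return key_flags & IWL_WOWLAN_GTK_IDX_MASK;
}

static inline uint8_t gtk_internal_idx(uint8_t key_flags)
{
	return (key_flags & GTK_INTERNAL_IDX_MASK) >> 2;
}

static inline bool gtk_is_current(uint8_t key_flags)
{
	return key_flags & GTK_CURRENTLY_USED_FLAG;
}

Per the kernel-doc above, an entry is only meaningful when key_len is non-zero and key_status is not IWL_WOWLAN_NOTIF_NO_KEY.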
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h
index b9f559dac39f..f76cea6e9ec8 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h
@@ -420,6 +420,8 @@ struct iwl_mac_config_cmd {
* eht_support set to true. No longer used since _VER_3 of this command.
* @LINK_CONTEXT_MODIFY_BANDWIDTH: Covers iwl_link_ctx_cfg_cmd::modify_bandwidth.
* Request RX OMI to the AP to modify bandwidth of this link.
+ * @LINK_CONTEXT_MODIFY_UHR_PARAMS: covers iwl_link_ctx_cfg_cmd::npca_params and
+ * iwl_link_ctx_cfg_cmd::prio_edca_params. Since _VER_7.
* @LINK_CONTEXT_MODIFY_ALL: set all above flags
*/
enum iwl_link_ctx_modify_flags {
@@ -432,6 +434,7 @@ enum iwl_link_ctx_modify_flags {
LINK_CONTEXT_MODIFY_BSS_COLOR_DISABLE = BIT(6),
LINK_CONTEXT_MODIFY_EHT_PARAMS = BIT(7),
LINK_CONTEXT_MODIFY_BANDWIDTH = BIT(8),
+ LINK_CONTEXT_MODIFY_UHR_PARAMS = BIT(9),
LINK_CONTEXT_MODIFY_ALL = 0xff,
}; /* LINK_CONTEXT_MODIFY_MASK_E_VER_1 */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/offload.h b/drivers/net/wireless/intel/iwlwifi/fw/api/offload.h
index 2a1c2b0f19e4..bb801650a565 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/offload.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/offload.h
@@ -20,7 +20,7 @@ enum iwl_prot_offload_subcmd_ids {
/**
* @WOWLAN_INFO_NOTIFICATION: Notification in
* &struct iwl_wowlan_info_notif_v1, iwl_wowlan_info_notif_v3,
- * or &struct iwl_wowlan_info_notif
+ * &struct iwl_wowlan_info_notif_v5 or &struct iwl_wowlan_info_notif
*/
WOWLAN_INFO_NOTIFICATION = 0xFD,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h
index 786b3bf4b448..5eb8d10678fd 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h
@@ -571,17 +571,16 @@ enum iwl_ppag_flags {
/**
* union iwl_ppag_table_cmd - union for all versions of PPAG command
* @v1: command version 1 structure.
- * @v2: command version from 2 to 6 are same structure as v2.
- * but has a different format of the flags bitmap
- * @v3: command version 7 structure.
+ * @v5: command version 5 structure.
+ * @v7: command version 7 structure.
* @v1.flags: values from &enum iwl_ppag_flags
* @v1.gain: table of antenna gain values per chain and sub-band
* @v1.reserved: reserved
- * @v2.flags: values from &enum iwl_ppag_flags
- * @v2.gain: table of antenna gain values per chain and sub-band
- * @v3.ppag_config_info: see @struct bios_value_u32
- * @v3.gain: table of antenna gain values per chain and sub-band
- * @v3.reserved: reserved
+ * @v5.flags: values from &enum iwl_ppag_flags
+ * @v5.gain: table of antenna gain values per chain and sub-band
+ * @v7.ppag_config_info: see @struct bios_value_u32
+ * @v7.gain: table of antenna gain values per chain and sub-band
+ * @v7.reserved: reserved
*/
union iwl_ppag_table_cmd {
struct {
@@ -593,30 +592,19 @@ union iwl_ppag_table_cmd {
__le32 flags;
s8 gain[IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS_V2];
s8 reserved[2];
- } __packed v2; /* PER_PLAT_ANTENNA_GAIN_CMD_API_S_VER_2, VER3, VER4,
- * VER5, VER6
- */
+ } __packed v5; /* PER_PLAT_ANTENNA_GAIN_CMD_API_S_VER_5 */
struct {
struct bios_value_u32 ppag_config_info;
s8 gain[IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS_V2];
s8 reserved[2];
- } __packed v3; /* PER_PLAT_ANTENNA_GAIN_CMD_API_S_VER_7 */
+ } __packed v7; /* PER_PLAT_ANTENNA_GAIN_CMD_API_S_VER_7 */
} __packed;
-#define IWL_PPAG_CMD_V4_MASK (IWL_PPAG_ETSI_MASK | IWL_PPAG_CHINA_MASK)
-#define IWL_PPAG_CMD_V5_MASK (IWL_PPAG_CMD_V4_MASK | \
+#define IWL_PPAG_CMD_V1_MASK (IWL_PPAG_ETSI_MASK | IWL_PPAG_CHINA_MASK)
+#define IWL_PPAG_CMD_V5_MASK (IWL_PPAG_CMD_V1_MASK | \
IWL_PPAG_ETSI_LPI_UHB_MASK | \
IWL_PPAG_USA_LPI_UHB_MASK)
-#define IWL_PPAG_CMD_V6_MASK (IWL_PPAG_CMD_V5_MASK | \
- IWL_PPAG_ETSI_VLP_UHB_MASK | \
- IWL_PPAG_ETSI_SP_UHB_MASK | \
- IWL_PPAG_USA_VLP_UHB_MASK | \
- IWL_PPAG_USA_SP_UHB_MASK | \
- IWL_PPAG_CANADA_LPI_UHB_MASK | \
- IWL_PPAG_CANADA_VLP_UHB_MASK | \
- IWL_PPAG_CANADA_SP_UHB_MASK)
-
#define MCC_TO_SAR_OFFSET_TABLE_ROW_SIZE 26
#define MCC_TO_SAR_OFFSET_TABLE_COL_SIZE 13
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
index 3222cbcbe1ab..9c464e7aba10 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
@@ -24,6 +24,8 @@
* for BPSK (MCS 0) with 2 spatial
* streams
* @IWL_TLC_MNG_CFG_FLAGS_EHT_EXTRA_LTF_MSK: enable support for EHT extra LTF
+ * @IWL_TLC_MNG_CFG_FLAGS_UHR_ELR_1_5_MBPS_MSK: support ELR 1.5 Mbps
+ * @IWL_TLC_MNG_CFG_FLAGS_UHR_ELR_3_MBPS_MSK: support ELR 3 Mbps
*/
enum iwl_tlc_mng_cfg_flags {
IWL_TLC_MNG_CFG_FLAGS_STBC_MSK = BIT(0),
@@ -32,6 +34,8 @@ enum iwl_tlc_mng_cfg_flags {
IWL_TLC_MNG_CFG_FLAGS_HE_DCM_NSS_1_MSK = BIT(3),
IWL_TLC_MNG_CFG_FLAGS_HE_DCM_NSS_2_MSK = BIT(4),
IWL_TLC_MNG_CFG_FLAGS_EHT_EXTRA_LTF_MSK = BIT(6),
+ IWL_TLC_MNG_CFG_FLAGS_UHR_ELR_1_5_MBPS_MSK = BIT(7),
+ IWL_TLC_MNG_CFG_FLAGS_UHR_ELR_3_MBPS_MSK = BIT(8),
};
/**
@@ -201,6 +205,37 @@ struct iwl_tlc_config_cmd_v4 {
} __packed; /* TLC_MNG_CONFIG_CMD_API_S_VER_4 */
/**
+ * struct iwl_tlc_config_cmd - TLC configuration
+ * @sta_id: station id
+ * @reserved1: reserved
+ * @max_ch_width: max supported channel width from &enum iwl_tlc_mng_cfg_cw
+ * @mode: &enum iwl_tlc_mng_cfg_mode
+ * @chains: bitmask of &enum iwl_tlc_mng_cfg_chains
+ * @sgi_ch_width_supp: bitmap of SGI support per channel width
+ * use BIT(&enum iwl_tlc_mng_cfg_cw)
+ * @flags: bitmask of &enum iwl_tlc_mng_cfg_flags
+ * @non_ht_rates: bitmap of supported legacy rates
+ * @ht_rates: bitmap of &enum iwl_tlc_mng_ht_rates, per <nss, channel-width>
+ * pair (0 - 80mhz width and below, 1 - 160mhz, 2 - 320mhz).
+ * @max_mpdu_len: max MPDU length, in bytes
+ * @max_tx_op: max TXOP in uSecs for all AC (BK, BE, VO, VI),
+ * set zero for no limit.
+ */
+struct iwl_tlc_config_cmd {
+ u8 sta_id;
+ u8 reserved1[3];
+ u8 max_ch_width;
+ u8 mode;
+ u8 chains;
+ u8 sgi_ch_width_supp;
+ __le16 flags;
+ __le16 non_ht_rates;
+ __le32 ht_rates[IWL_TLC_NSS_MAX][IWL_TLC_MCS_PER_BW_NUM_V4];
+ __le16 max_mpdu_len;
+ __le16 max_tx_op;
+} __packed; /* TLC_MNG_CONFIG_CMD_API_S_VER_5 */
+
+/**
* enum iwl_tlc_update_flags - updated fields
* @IWL_TLC_NOTIF_FLAG_RATE: last initial rate update
* @IWL_TLC_NOTIF_FLAG_AMSDU: umsdu parameters update
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
index 2879be4b8fcb..2ce55859641c 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
@@ -830,7 +830,7 @@ iwl_fw_error_dump_file(struct iwl_fw_runtime *fwrt,
}
/* reading RXF/TXF sizes */
- if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status)) {
+ if (iwl_trans_is_fw_error(fwrt->trans)) {
fifo_len = iwl_fw_rxf_len(fwrt, mem_cfg);
fifo_len += iwl_fw_txf_len(fwrt, mem_cfg);
@@ -2393,7 +2393,7 @@ static u32 iwl_dump_ini_info(struct iwl_fw_runtime *fwrt,
struct iwl_fw_ini_dump_cfg_name *cfg_name;
u32 size = sizeof(*tlv) + sizeof(*dump);
u32 num_of_cfg_names = 0;
- u32 hw_type, is_cdb, is_jacket;
+ u32 hw_type, is_cdb;
list_for_each_entry(node, &fwrt->trans->dbg.debug_info_tlv_list, list) {
size += sizeof(*cfg_name);
@@ -2426,11 +2426,7 @@ static u32 iwl_dump_ini_info(struct iwl_fw_runtime *fwrt,
hw_type = CSR_HW_REV_TYPE(fwrt->trans->info.hw_rev);
is_cdb = CSR_HW_RFID_IS_CDB(fwrt->trans->info.hw_rf_id);
- is_jacket = !!(iwl_read_umac_prph(fwrt->trans, WFPM_OTP_CFG1_ADDR) &
- WFPM_OTP_CFG1_IS_JACKET_BIT);
-
- /* Use bits 12 and 13 to indicate jacket/CDB, respectively */
- hw_type |= (is_jacket | (is_cdb << 1)) << IWL_JACKET_CDB_SHIFT;
+ hw_type |= IWL_CDB_MASK(is_cdb);
dump->hw_type = cpu_to_le32(hw_type);
@@ -2478,36 +2474,6 @@ static u32 iwl_dump_ini_info(struct iwl_fw_runtime *fwrt,
return entry->size;
}
-static u32 iwl_dump_ini_file_name_info(struct iwl_fw_runtime *fwrt,
- struct list_head *list)
-{
- struct iwl_fw_ini_dump_entry *entry;
- struct iwl_dump_file_name_info *tlv;
- u32 len = strnlen(fwrt->trans->dbg.dump_file_name_ext,
- IWL_FW_INI_MAX_NAME);
-
- if (!fwrt->trans->dbg.dump_file_name_ext_valid)
- return 0;
-
- entry = vzalloc(sizeof(*entry) + sizeof(*tlv) + len);
- if (!entry)
- return 0;
-
- entry->size = sizeof(*tlv) + len;
-
- tlv = (void *)entry->data;
- tlv->type = cpu_to_le32(IWL_INI_DUMP_NAME_TYPE);
- tlv->len = cpu_to_le32(len);
- memcpy(tlv->data, fwrt->trans->dbg.dump_file_name_ext, len);
-
- /* add the dump file name extension tlv to the list */
- list_add_tail(&entry->list, list);
-
- fwrt->trans->dbg.dump_file_name_ext_valid = false;
-
- return entry->size;
-}
-
static const struct iwl_dump_ini_mem_ops iwl_dump_ini_region_ops[] = {
[IWL_FW_INI_REGION_INVALID] = {},
[IWL_FW_INI_REGION_INTERNAL_BUFFER] = {
@@ -2764,7 +2730,6 @@ static u32 iwl_dump_ini_trigger(struct iwl_fw_runtime *fwrt,
&iwl_dump_ini_region_ops[IWL_FW_INI_REGION_DRAM_IMR]);
if (size) {
- size += iwl_dump_ini_file_name_info(fwrt, list);
size += iwl_dump_ini_info(fwrt, trigger, list);
}
@@ -3151,7 +3116,7 @@ static void iwl_send_dbg_dump_complete_cmd(struct iwl_fw_runtime *fwrt,
.len[0] = sizeof(hcmd_data),
};
- if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status))
+ if (iwl_trans_is_fw_error(fwrt->trans))
return;
if (fw_has_capa(&fwrt->fw->ucode_capa,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dump.c b/drivers/net/wireless/intel/iwlwifi/fw/dump.c
index f633124979ab..ddd714cff2f4 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dump.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dump.c
@@ -14,13 +14,6 @@
#include "iwl-csr.h"
#include "pnvm.h"
-#define FW_ASSERT_LMAC_FATAL 0x70
-#define FW_ASSERT_LMAC2_FATAL 0x72
-#define FW_ASSERT_UMAC_FATAL 0x71
-#define UMAC_RT_NMI_LMAC2_FATAL 0x72
-#define RT_NMI_INTERRUPT_OTHER_LMAC_FATAL 0x73
-#define FW_ASSERT_NMI_UNKNOWN 0x84
-
/*
* Note: This structure is read from the device with IO accesses,
* and the reading already does the endian conversion. As it is
@@ -103,17 +96,6 @@ struct iwl_umac_error_event_table {
#define ERROR_START_OFFSET (1 * sizeof(u32))
#define ERROR_ELEM_SIZE (7 * sizeof(u32))
-static bool iwl_fwrt_if_errorid_other_cpu(u32 err_id)
-{
- err_id &= 0xFF;
-
- if ((err_id >= FW_ASSERT_LMAC_FATAL &&
- err_id <= RT_NMI_INTERRUPT_OTHER_LMAC_FATAL) ||
- err_id == FW_ASSERT_NMI_UNKNOWN)
- return true;
- return false;
-}
-
static void iwl_fwrt_dump_umac_error_log(struct iwl_fw_runtime *fwrt)
{
struct iwl_trans *trans = fwrt->trans;
@@ -131,13 +113,6 @@ static void iwl_fwrt_dump_umac_error_log(struct iwl_fw_runtime *fwrt)
if (table.valid)
fwrt->dump.umac_err_id = table.error_id;
- if (!iwl_fwrt_if_errorid_other_cpu(fwrt->dump.umac_err_id) &&
- !fwrt->trans->dbg.dump_file_name_ext_valid) {
- fwrt->trans->dbg.dump_file_name_ext_valid = true;
- snprintf(fwrt->trans->dbg.dump_file_name_ext, IWL_FW_INI_MAX_NAME,
- "0x%x", fwrt->dump.umac_err_id);
- }
-
if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
IWL_ERR(trans, "Start IWL Error Log Dump:\n");
IWL_ERR(trans, "Transport status: 0x%08lX, valid: %d\n",
@@ -203,7 +178,7 @@ static void iwl_fwrt_dump_lmac_error_log(struct iwl_fw_runtime *fwrt, u8 lmac_nu
if (err)
return;
- err = iwl_finish_nic_init(trans);
+ err = iwl_trans_activate_nic(trans);
if (err)
return;
}
@@ -213,13 +188,6 @@ static void iwl_fwrt_dump_lmac_error_log(struct iwl_fw_runtime *fwrt, u8 lmac_nu
if (table.valid)
fwrt->dump.lmac_err_id[lmac_num] = table.error_id;
- if (!iwl_fwrt_if_errorid_other_cpu(fwrt->dump.lmac_err_id[lmac_num]) &&
- !fwrt->trans->dbg.dump_file_name_ext_valid) {
- fwrt->trans->dbg.dump_file_name_ext_valid = true;
- snprintf(fwrt->trans->dbg.dump_file_name_ext, IWL_FW_INI_MAX_NAME,
- "0x%x", fwrt->dump.lmac_err_id[lmac_num]);
- }
-
if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
IWL_ERR(trans, "Start IWL Error Log Dump:\n");
IWL_ERR(trans, "Transport status: 0x%08lX, valid: %d\n",
@@ -305,16 +273,6 @@ static void iwl_fwrt_dump_tcm_error_log(struct iwl_fw_runtime *fwrt, int idx)
iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));
- if (table.valid)
- fwrt->dump.tcm_err_id[idx] = table.error_id;
-
- if (!iwl_fwrt_if_errorid_other_cpu(fwrt->dump.tcm_err_id[idx]) &&
- !fwrt->trans->dbg.dump_file_name_ext_valid) {
- fwrt->trans->dbg.dump_file_name_ext_valid = true;
- snprintf(fwrt->trans->dbg.dump_file_name_ext, IWL_FW_INI_MAX_NAME,
- "0x%x", fwrt->dump.tcm_err_id[idx]);
- }
-
IWL_ERR(fwrt, "TCM%d status:\n", idx + 1);
IWL_ERR(fwrt, "0x%08X | error ID\n", table.error_id);
IWL_ERR(fwrt, "0x%08X | tcm branchlink2\n", table.blink2);
@@ -378,16 +336,6 @@ static void iwl_fwrt_dump_rcm_error_log(struct iwl_fw_runtime *fwrt, int idx)
iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));
- if (table.valid)
- fwrt->dump.rcm_err_id[idx] = table.error_id;
-
- if (!iwl_fwrt_if_errorid_other_cpu(fwrt->dump.rcm_err_id[idx]) &&
- !fwrt->trans->dbg.dump_file_name_ext_valid) {
- fwrt->trans->dbg.dump_file_name_ext_valid = true;
- snprintf(fwrt->trans->dbg.dump_file_name_ext, IWL_FW_INI_MAX_NAME,
- "0x%x", fwrt->dump.rcm_err_id[idx]);
- }
-
IWL_ERR(fwrt, "RCM%d status:\n", idx + 1);
IWL_ERR(fwrt, "0x%08X | error ID\n", table.error_id);
IWL_ERR(fwrt, "0x%08X | rcm branchlink2\n", table.blink2);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
index cf41021d59ad..c2a73cc85eff 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
@@ -372,7 +372,8 @@ struct iwl_fw_ini_dump_cfg_name {
u8 cfg_name[IWL_FW_INI_MAX_CFG_NAME];
} __packed;
-#define IWL_JACKET_CDB_SHIFT 12
+#define IWL_CDB_MASK(val) val << 13
+
/* struct iwl_fw_ini_dump_info - ini dump information
* @version: dump version
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c b/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c
index 4d91ae065c8d..f297e82d63d2 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c
@@ -237,11 +237,12 @@ static int iwl_pnvm_parse(struct iwl_trans *trans, const u8 *data,
return -ENOENT;
}
-static int iwl_pnvm_get_from_fs(struct iwl_trans *trans, u8 **data, size_t *len)
+static u8 *iwl_pnvm_get_from_fs(struct iwl_trans *trans, size_t *len)
{
const struct firmware *pnvm;
char pnvm_name[MAX_PNVM_NAME];
size_t new_len;
+ u8 *data;
int ret;
iwl_pnvm_get_fs_name(trans, pnvm_name, sizeof(pnvm_name));
@@ -250,29 +251,73 @@ static int iwl_pnvm_get_from_fs(struct iwl_trans *trans, u8 **data, size_t *len)
if (ret) {
IWL_DEBUG_FW(trans, "PNVM file %s not found %d\n",
pnvm_name, ret);
- return ret;
+ return NULL;
}
new_len = pnvm->size;
- *data = kvmemdup(pnvm->data, pnvm->size, GFP_KERNEL);
+ data = kvmemdup(pnvm->data, pnvm->size, GFP_KERNEL);
release_firmware(pnvm);
- if (!*data)
- return -ENOMEM;
+ if (!data)
+ return NULL;
*len = new_len;
- return 0;
+ return data;
+}
+
+/**
+ * enum iwl_pnvm_source - different PNVM possible sources
+ *
+ * @IWL_PNVM_SOURCE_NONE: No PNVM.
+ * @IWL_PNVM_SOURCE_BIOS: PNVM should be read from BIOS.
+ * @IWL_PNVM_SOURCE_EXTERNAL: read .pnvm external file
+ * @IWL_PNVM_SOURCE_EMBEDDED: PNVM is embedded in the .ucode file.
+ */
+enum iwl_pnvm_source {
+ IWL_PNVM_SOURCE_NONE,
+ IWL_PNVM_SOURCE_BIOS,
+ IWL_PNVM_SOURCE_EXTERNAL,
+ IWL_PNVM_SOURCE_EMBEDDED
+};
+
+static enum iwl_pnvm_source iwl_select_pnvm_source(struct iwl_trans *trans,
+ bool intel_sku)
+{
+
+ /* Get PNVM from BIOS for non-Intel SKU */
+ if (!intel_sku)
+ return IWL_PNVM_SOURCE_BIOS;
+
+ /* Before those devices, PNVM didn't exist at all */
+ if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
+ return IWL_PNVM_SOURCE_NONE;
+
+ /* After those devices, we moved to embedded PNVM */
+ if (trans->mac_cfg->device_family > IWL_DEVICE_FAMILY_AX210)
+ return IWL_PNVM_SOURCE_EMBEDDED;
+
+ /* For IWL_DEVICE_FAMILY_AX210, depends on the CRF */
+ if (CSR_HW_RFID_TYPE(trans->info.hw_rf_id) == IWL_CFG_RF_TYPE_GF)
+ return IWL_PNVM_SOURCE_EXTERNAL;
+
+ return IWL_PNVM_SOURCE_NONE;
}
static const u8 *iwl_get_pnvm_image(struct iwl_trans *trans_p, size_t *len,
__le32 sku_id[3], const struct iwl_fw *fw)
{
struct pnvm_sku_package *package;
+ enum iwl_pnvm_source pnvm_src =
+ iwl_select_pnvm_source(trans_p, sku_id[2] == 0);
u8 *image = NULL;
- /* Get PNVM from BIOS for non-Intel SKU */
- if (sku_id[2]) {
+ IWL_DEBUG_FW(trans_p, "PNVM source %d\n", pnvm_src);
+
+ if (pnvm_src == IWL_PNVM_SOURCE_NONE)
+ return NULL;
+
+ if (pnvm_src == IWL_PNVM_SOURCE_BIOS) {
package = iwl_uefi_get_pnvm(trans_p, len);
if (!IS_ERR_OR_NULL(package)) {
if (*len >= sizeof(*package)) {
@@ -289,18 +334,26 @@ static const u8 *iwl_get_pnvm_image(struct iwl_trans *trans_p, size_t *len,
if (image)
return image;
}
+
+ /* PNVM doesn't exist in BIOS. Find the fallback source */
+ pnvm_src = iwl_select_pnvm_source(trans_p, true);
+ IWL_DEBUG_FW(trans_p, "PNVM in BIOS doesn't exist, try %d\n",
+ pnvm_src);
}
- if (fw->pnvm_data) {
- *len = fw->pnvm_size;
+ if (pnvm_src == IWL_PNVM_SOURCE_EXTERNAL) {
+ image = iwl_pnvm_get_from_fs(trans_p, len);
+ if (image)
+ return image;
+ }
+ if (pnvm_src == IWL_PNVM_SOURCE_EMBEDDED && fw->pnvm_data) {
+ *len = fw->pnvm_size;
return fw->pnvm_data;
}
- /* If it's not available, or for Intel SKU, try from the filesystem */
- if (iwl_pnvm_get_from_fs(trans_p, &image, len))
- return NULL;
- return image;
+ IWL_ERR(trans_p, "Couldn't get PNVM from required source: %d\n", pnvm_src);
+ return NULL;
}
static void
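[Editor's note] The selection in iwl_select_pnvm_source() plus the BIOS fallback in iwl_get_pnvm_image() boil down to the ordering below. This is a condensed standalone sketch with plain parameters instead of the driver types, not driver code:

#include <stdbool.h>

enum pnvm_source { SRC_NONE, SRC_BIOS, SRC_EXTERNAL, SRC_EMBEDDED };

/* family_cmp: <0 pre-AX210, 0 AX210, >0 post-AX210; rf_is_gf: the CRF is a GF */
static enum pnvm_source select_source(bool intel_sku, int family_cmp, bool rf_is_gf)
{
	if (!intel_sku)
		return SRC_BIOS;			/* non-Intel SKU: read PNVM from BIOS */
	if (family_cmp < 0)
		return SRC_NONE;			/* pre-AX210: PNVM does not exist */
	if (family_cmp > 0)
		return SRC_EMBEDDED;			/* post-AX210: PNVM embedded in the .ucode */
	return rf_is_gf ? SRC_EXTERNAL : SRC_NONE;	/* AX210: depends on the CRF */
}

static enum pnvm_source effective_source(bool intel_sku, int family_cmp,
					 bool rf_is_gf, bool bios_has_pnvm)
{
	enum pnvm_source src = select_source(intel_sku, family_cmp, rf_is_gf);

	/* BIOS selected but holds no PNVM: retry as if this were an Intel SKU */
	if (src == SRC_BIOS && !bios_has_pnvm)
		src = select_source(true, family_cmp, rf_is_gf);
	return src;
}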
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c
index 3d6d1a85bb51..e1f28b053253 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c
@@ -59,11 +59,16 @@ static const struct dmi_system_id dmi_ppag_approved_list[] = {
DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
},
},
- { .ident = "ASUS",
+ { .ident = "ASUSTEK",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
},
},
+ { .ident = "ASUS",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUS"),
+ },
+ },
{ .ident = "GOOGLE-HP",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Google"),
@@ -141,11 +146,16 @@ static const struct dmi_system_id dmi_tas_approved_list[] = {
DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
},
},
- { .ident = "ASUS",
+ { .ident = "ASUSTEK",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
},
},
+ { .ident = "ASUS",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUS"),
+ },
+ },
{ .ident = "GOOGLE-HP",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Google"),
@@ -305,6 +315,7 @@ static bool iwl_ppag_value_valid(struct iwl_fw_runtime *fwrt, int chain,
return true;
}
+/* Utility function for iwlmvm and iwlxvt */
int iwl_fill_ppag_table(struct iwl_fw_runtime *fwrt,
union iwl_ppag_table_cmd *cmd, int *cmd_size)
{
@@ -344,18 +355,18 @@ int iwl_fill_ppag_table(struct iwl_fw_runtime *fwrt,
num_sub_bands = IWL_NUM_SUB_BANDS_V1;
gain = cmd->v1.gain[0];
*cmd_size = sizeof(cmd->v1);
- cmd->v1.flags = cpu_to_le32(fwrt->ppag_flags);
+ cmd->v1.flags = cpu_to_le32(fwrt->ppag_flags & IWL_PPAG_CMD_V1_MASK);
if (fwrt->ppag_bios_rev >= 1) {
/* in this case FW supports revision 0 */
IWL_DEBUG_RADIO(fwrt,
"PPAG table rev is %d, send truncated table\n",
fwrt->ppag_bios_rev);
}
- } else if (cmd_ver >= 2 && cmd_ver <= 6) {
+ } else if (cmd_ver == 5) {
num_sub_bands = IWL_NUM_SUB_BANDS_V2;
- gain = cmd->v2.gain[0];
- *cmd_size = sizeof(cmd->v2);
- cmd->v2.flags = cpu_to_le32(fwrt->ppag_flags);
+ gain = cmd->v5.gain[0];
+ *cmd_size = sizeof(cmd->v5);
+ cmd->v5.flags = cpu_to_le32(fwrt->ppag_flags & IWL_PPAG_CMD_V5_MASK);
if (fwrt->ppag_bios_rev == 0) {
/* in this case FW supports revisions 1,2 or 3 */
IWL_DEBUG_RADIO(fwrt,
@@ -363,11 +374,11 @@ int iwl_fill_ppag_table(struct iwl_fw_runtime *fwrt,
}
} else if (cmd_ver == 7) {
num_sub_bands = IWL_NUM_SUB_BANDS_V2;
- gain = cmd->v3.gain[0];
- *cmd_size = sizeof(cmd->v3);
- cmd->v3.ppag_config_info.table_source = fwrt->ppag_bios_source;
- cmd->v3.ppag_config_info.table_revision = fwrt->ppag_bios_rev;
- cmd->v3.ppag_config_info.value = cpu_to_le32(fwrt->ppag_flags);
+ gain = cmd->v7.gain[0];
+ *cmd_size = sizeof(cmd->v7);
+ cmd->v7.ppag_config_info.table_source = fwrt->ppag_bios_source;
+ cmd->v7.ppag_config_info.table_revision = fwrt->ppag_bios_rev;
+ cmd->v7.ppag_config_info.value = cpu_to_le32(fwrt->ppag_flags);
} else {
IWL_DEBUG_RADIO(fwrt, "Unsupported PPAG command version\n");
return -EINVAL;
@@ -378,30 +389,22 @@ int iwl_fill_ppag_table(struct iwl_fw_runtime *fwrt,
"PPAG MODE bits were read from bios: %d\n",
fwrt->ppag_flags);
- if (cmd_ver == 6)
- cmd->v1.flags &= cpu_to_le32(IWL_PPAG_CMD_V6_MASK);
- else if (cmd_ver == 5)
- cmd->v1.flags &= cpu_to_le32(IWL_PPAG_CMD_V5_MASK);
- else if (cmd_ver < 5)
- cmd->v1.flags &= cpu_to_le32(IWL_PPAG_CMD_V4_MASK);
-
- if ((cmd_ver == 1 &&
- !fw_has_capa(&fwrt->fw->ucode_capa,
- IWL_UCODE_TLV_CAPA_PPAG_CHINA_BIOS_SUPPORT)) ||
- (cmd_ver == 2 && fwrt->ppag_bios_rev >= 2)) {
+ if (cmd_ver == 1 &&
+ !fw_has_capa(&fwrt->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_PPAG_CHINA_BIOS_SUPPORT)) {
cmd->v1.flags &= cpu_to_le32(IWL_PPAG_ETSI_MASK);
IWL_DEBUG_RADIO(fwrt, "masking ppag China bit\n");
} else {
IWL_DEBUG_RADIO(fwrt, "isn't masking ppag China bit\n");
}
- /* The 'flags' field is the same in v1 and v2 so we can just
+ /* The 'flags' field is the same in v1 and v5 so we can just
* use v1 to access it.
*/
IWL_DEBUG_RADIO(fwrt,
"PPAG MODE bits going to be sent: %d\n",
(cmd_ver < 7) ? le32_to_cpu(cmd->v1.flags) :
- le32_to_cpu(cmd->v3.ppag_config_info.value));
+ le32_to_cpu(cmd->v7.ppag_config_info.value));
for (i = 0; i < IWL_NUM_CHAIN_LIMITS; i++) {
for (j = 0; j < num_sub_bands; j++) {
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h
index a07c512b6ed4..735482e7adf5 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h
@@ -12,7 +12,6 @@
#include "fw/api/phy.h"
#include "fw/api/config.h"
#include "fw/api/nvm-reg.h"
-#include "fw/img.h"
#include "iwl-trans.h"
#define BIOS_SAR_MAX_PROFILE_NUM 4
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
index bd3bc2846cfa..806f9bcdf4f5 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
@@ -150,8 +150,6 @@ struct iwl_fw_runtime {
unsigned long non_collect_ts_start[IWL_FW_INI_TIME_POINT_NUM];
u32 *d3_debug_data;
u32 lmac_err_id[MAX_NUM_LMAC];
- u32 tcm_err_id[MAX_NUM_TCM];
- u32 rcm_err_id[MAX_NUM_RCM];
u32 umac_err_id;
struct iwl_txf_iter_data txf_iter_data;
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/uefi.c b/drivers/net/wireless/intel/iwlwifi/fw/uefi.c
index 99a17b9323e9..4ae4d215e633 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/uefi.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/uefi.c
@@ -727,6 +727,8 @@ int iwl_uefi_get_dsm(struct iwl_fw_runtime *fwrt, enum iwl_dsm_funcs func,
struct uefi_cnv_var_general_cfg *data;
int ret = -EINVAL;
+ BUILD_BUG_ON(ARRAY_SIZE(data->functions) < DSM_FUNC_NUM_FUNCS);
+
/* Not supported function index */
if (func >= DSM_FUNC_NUM_FUNCS || func == 5)
return -EOPNOTSUPP;
@@ -742,11 +744,6 @@ int iwl_uefi_get_dsm(struct iwl_fw_runtime *fwrt, enum iwl_dsm_funcs func,
goto out;
}
- if (ARRAY_SIZE(data->functions) != UEFI_MAX_DSM_FUNCS) {
- IWL_DEBUG_RADIO(fwrt, "Invalid size of DSM functions array\n");
- goto out;
- }
-
if (!(data->functions[DSM_FUNC_QUERY] & BIT(func))) {
IWL_DEBUG_RADIO(fwrt, "DSM func %d not in 0x%x\n",
func, data->functions[DSM_FUNC_QUERY]);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
index 30e5f5a5cd89..a607e7ab914b 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
@@ -11,6 +11,7 @@
#include <linux/netdevice.h>
#include <linux/ieee80211.h>
#include <linux/nl80211.h>
+#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include "iwl-csr.h"
#include "iwl-drv.h"
@@ -107,6 +108,9 @@ enum iwl_nvm_type {
MODULE_FIRMWARE(pfx "-" __stringify(api) ".ucode"); \
MODULE_FIRMWARE(pfx ".pnvm")
+#define IWL_CORE_FW(pfx, core) \
+ MODULE_FIRMWARE(pfx "-c" __stringify(core) ".ucode")
+
static inline u8 num_of_ant(u8 mask)
{
return !!((mask) & ANT_A) +
@@ -192,8 +196,8 @@ struct iwl_family_base_params {
u8 max_ll_items;
u8 led_compensation;
- u8 ucode_api_max;
- u8 ucode_api_min;
+ u16 ucode_api_max;
+ u16 ucode_api_min;
u32 mac_addr_from_csr:10;
u8 nvm_hw_section_num;
netdev_features_t features;
@@ -211,6 +215,34 @@ struct iwl_family_base_params {
};
/*
+ * FW is released as "core N release", and we used to have a
+ * gap of 3 between the API version and core number. Now the
+ * reported API version will be 1000 + core and we encode it
+ * in the filename as "c<core>".
+ */
+#define API_IS_CORE_START 1000
+#define API_TO_CORE_OFFS 3
+#define ENCODE_CORE_AS_API(core) (API_IS_CORE_START + (core))
+
+static inline bool iwl_api_is_core_number(int api)
+{
+ return api >= API_IS_CORE_START;
+}
+
+static inline int iwl_api_to_core(int api)
+{
+ if (iwl_api_is_core_number(api))
+ return api - API_IS_CORE_START;
+
+ return api - API_TO_CORE_OFFS;
+}
+
+#define FW_API_FMT "%s%d"
+#define FW_API_ARG(n) \
+ iwl_api_is_core_number(n) ? "c" : "", \
+ iwl_api_is_core_number(n) ? (n) - API_IS_CORE_START : (n)
+
+/*
* @stbc: support Tx STBC and 1*SS Rx STBC
* @ldpc: support Tx/Rx with LDPC
* @use_rts_for_aggregation: use rts/cts protection for HT traffic
@@ -422,8 +454,8 @@ struct iwl_rf_cfg {
u8 valid_tx_ant;
u8 valid_rx_ant;
u8 non_shared_ant;
- u8 ucode_api_max;
- u8 ucode_api_min;
+ u16 ucode_api_max;
+ u16 ucode_api_min;
u16 num_rbds;
};
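[Editor's note] Since the core-number scheme above touches several files, here is a standalone sketch of the round trip. The macro bodies mirror the hunk; the main() harness and the example firmware prefix are purely illustrative:

#include <stdio.h>

#define API_IS_CORE_START	1000
#define API_TO_CORE_OFFS	3
#define ENCODE_CORE_AS_API(core)	(API_IS_CORE_START + (core))

static int api_is_core_number(int api)
{
	return api >= API_IS_CORE_START;
}

static int api_to_core(int api)
{
	/* core-numbered: strip the 1000 offset; legacy API: subtract the old gap of 3 */
	return api_is_core_number(api) ? api - API_IS_CORE_START
				       : api - API_TO_CORE_OFFS;
}

#define FW_API_FMT "%s%d"
#define FW_API_ARG(n) \
	(api_is_core_number(n) ? "c" : ""), \
	(api_is_core_number(n) ? (n) - API_IS_CORE_START : (n))

int main(void)
{
	int legacy = 102;			/* release still using API numbering */
	int core99 = ENCODE_CORE_AS_API(99);	/* == 1099 */

	/* both map to firmware core 99 */
	printf("core(%d) = %d\n", legacy, api_to_core(legacy));
	printf("core(%d) = %d\n", core99, api_to_core(core99));

	/* filename suffixes: "-102.ucode" vs "-c99.ucode" */
	printf("iwlwifi-sc-a0-fm-b0-" FW_API_FMT ".ucode\n", FW_API_ARG(legacy));
	printf("iwlwifi-sc-a0-fm-b0-" FW_API_FMT ".ucode\n", FW_API_ARG(core99));
	return 0;
}

Both 102 (old API numbering) and ENCODE_CORE_AS_API(99) == 1099 resolve to core 99; the filename suffix comes out as "-102.ucode" in the first case and "-c99.ucode" in the second.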
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
index 28aad975434b..607fcea6f4ef 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
@@ -337,10 +337,18 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first)
return -EINVAL;
}
+ if (CSR_HW_RFID_TYPE(drv->trans->info.hw_rf_id) == IWL_CFG_RF_TYPE_WH &&
+ CSR_HW_RFID_STEP(drv->trans->info.hw_rf_id) == SILICON_A_STEP) {
+ IWL_ERR(drv, "WH A step is not supported\n");
+ return -EINVAL;
+ }
+
fw_name_pre = iwl_drv_get_fwname_pre(drv->trans, _fw_name_pre);
if (first)
drv->fw_index = ucode_api_max;
+ else if (drv->fw_index == ENCODE_CORE_AS_API(99))
+ drv->fw_index = 101; /* last API-scheme number below core 99 */
else
drv->fw_index--;
@@ -348,13 +356,15 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first)
IWL_ERR(drv, "no suitable firmware found!\n");
if (ucode_api_min == ucode_api_max) {
- IWL_ERR(drv, "%s-%d is required\n", fw_name_pre,
- ucode_api_max);
+ IWL_ERR(drv, "%s-" FW_API_FMT " is required\n",
+ fw_name_pre, FW_API_ARG(ucode_api_max));
} else {
- IWL_ERR(drv, "minimum version required: %s-%d\n",
- fw_name_pre, ucode_api_min);
- IWL_ERR(drv, "maximum version supported: %s-%d\n",
- fw_name_pre, ucode_api_max);
+ IWL_ERR(drv,
+ "minimum version required: %s-" FW_API_FMT "\n",
+ fw_name_pre, FW_API_ARG(ucode_api_min));
+ IWL_ERR(drv,
+ "maximum version supported: %s-" FW_API_FMT "\n",
+ fw_name_pre, FW_API_ARG(ucode_api_max));
}
IWL_ERR(drv,
@@ -362,8 +372,9 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first)
return -ENOENT;
}
- snprintf(drv->firmware_name, sizeof(drv->firmware_name), "%s-%d.ucode",
- fw_name_pre, drv->fw_index);
+ snprintf(drv->firmware_name, sizeof(drv->firmware_name),
+ "%s-" FW_API_FMT ".ucode",
+ fw_name_pre, FW_API_ARG(drv->fw_index));
IWL_DEBUG_FW_INFO(drv, "attempting to load firmware '%s'\n",
drv->firmware_name);
@@ -1588,6 +1599,7 @@ static void _iwl_op_mode_stop(struct iwl_drv *drv)
*/
static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
{
+ unsigned int min_core, max_core, loaded_core;
struct iwl_drv *drv = context;
struct iwl_fw *fw = &drv->fw;
const struct iwl_ucode_header *ucode;
@@ -1650,11 +1662,24 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
* firmware filename ... but we don't check for that and only rely
* on the API version read from firmware header from here on forward
*/
- if (api_ver < api_min || api_ver > api_max) {
+
+ /*
+ * if -cN.ucode file was loaded, core version == file version,
+ * otherwise core version == file version (API version) - 3
+ */
+ if (iwl_api_is_core_number(drv->fw_index))
+ loaded_core = api_ver;
+ else
+ loaded_core = api_ver - API_TO_CORE_OFFS;
+
+ min_core = iwl_api_to_core(api_min);
+ max_core = iwl_api_to_core(api_max);
+
+ if (loaded_core < min_core || loaded_core > max_core) {
IWL_ERR(drv,
"Driver unable to support your firmware API. "
- "Driver supports v%u, firmware is v%u.\n",
- api_max, api_ver);
+ "Driver supports FW core %u..%u, firmware is %u.\n",
+ min_core, max_core, loaded_core);
goto try_again;
}
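[Editor's note] The fw_index handling above jumps from the core-numbered name straight back into the old API numbering. A small illustrative loop (not driver code) makes the resulting request order explicit for a family with CORE_MAX 99 and API_MIN 100:

#include <stdio.h>

#define API_IS_CORE_START	1000
#define ENCODE_CORE_AS_API(core)	(API_IS_CORE_START + (core))

int main(void)
{
	int api_max = ENCODE_CORE_AS_API(99);	/* ucode_api_max of the BZ/SC/DR families above */
	int api_min = 100;			/* their ucode_api_min */
	int idx = api_max;

	while (idx >= api_min) {
		if (idx >= API_IS_CORE_START)
			printf("request iwlwifi-<pre>-c%d.ucode\n", idx - API_IS_CORE_START);
		else
			printf("request iwlwifi-<pre>-%d.ucode\n", idx);

		if (idx == ENCODE_CORE_AS_API(99))
			idx = 101;	/* last API-scheme number below core 99 */
		else
			idx--;
	}
	return 0;
}

The requests go out as -c99.ucode, then -101.ucode, then -100.ucode before the driver gives up.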
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-io.c b/drivers/net/wireless/intel/iwlwifi/iwl-io.c
index 5e483a55a4ba..b1944584c693 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-io.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-io.c
@@ -3,7 +3,6 @@
* Copyright (C) 2003-2014, 2018-2022, 2024-2025 Intel Corporation
* Copyright (C) 2015-2016 Intel Deutschland GmbH
*/
-#include <linux/delay.h>
#include <linux/device.h>
#include <linux/export.h>
@@ -13,6 +12,7 @@
#include "iwl-debug.h"
#include "iwl-prph.h"
#include "iwl-fh.h"
+#include "pcie/gen1_2/internal.h"
void iwl_write8(struct iwl_trans *trans, u32 ofs, u8 val)
{
@@ -160,7 +160,7 @@ int iwl_poll_prph_bit(struct iwl_trans *trans, u32 addr,
do {
if ((iwl_read_prph(trans, addr) & mask) == (bits & mask))
- return t;
+ return 0;
udelay(IWL_POLL_INTERVAL);
t += IWL_POLL_INTERVAL;
} while (t < timeout);
@@ -396,96 +396,11 @@ int iwl_dump_fh(struct iwl_trans *trans, char **buf)
return 0;
}
-#define IWL_HOST_MON_BLOCK_PEMON 0x00
-#define IWL_HOST_MON_BLOCK_HIPM 0x22
-
-#define IWL_HOST_MON_BLOCK_PEMON_VEC0 0x00
-#define IWL_HOST_MON_BLOCK_PEMON_VEC1 0x01
-#define IWL_HOST_MON_BLOCK_PEMON_WFPM 0x06
-
-static void iwl_dump_host_monitor_block(struct iwl_trans *trans,
- u32 block, u32 vec, u32 iter)
-{
- int i;
-
- IWL_ERR(trans, "Host monitor block 0x%x vector 0x%x\n", block, vec);
- iwl_write32(trans, CSR_MONITOR_CFG_REG, (block << 8) | vec);
- for (i = 0; i < iter; i++)
- IWL_ERR(trans, " value [iter %d]: 0x%08x\n",
- i, iwl_read32(trans, CSR_MONITOR_STATUS_REG));
-}
-
-static void iwl_dump_host_monitor(struct iwl_trans *trans)
-{
- switch (trans->mac_cfg->device_family) {
- case IWL_DEVICE_FAMILY_22000:
- case IWL_DEVICE_FAMILY_AX210:
- IWL_ERR(trans, "CSR_RESET = 0x%x\n",
- iwl_read32(trans, CSR_RESET));
- iwl_dump_host_monitor_block(trans, IWL_HOST_MON_BLOCK_PEMON,
- IWL_HOST_MON_BLOCK_PEMON_VEC0, 15);
- iwl_dump_host_monitor_block(trans, IWL_HOST_MON_BLOCK_PEMON,
- IWL_HOST_MON_BLOCK_PEMON_VEC1, 15);
- iwl_dump_host_monitor_block(trans, IWL_HOST_MON_BLOCK_PEMON,
- IWL_HOST_MON_BLOCK_PEMON_WFPM, 15);
- iwl_dump_host_monitor_block(trans, IWL_HOST_MON_BLOCK_HIPM,
- IWL_HOST_MON_BLOCK_PEMON_VEC0, 1);
- break;
- default:
- /* not supported yet */
- return;
- }
-}
-
-int iwl_finish_nic_init(struct iwl_trans *trans)
+int iwl_trans_activate_nic(struct iwl_trans *trans)
{
- const struct iwl_mac_cfg *mac_cfg = trans->mac_cfg;
- u32 poll_ready;
- int err;
-
- if (mac_cfg->bisr_workaround) {
- /* ensure the TOP FSM isn't still in previous reset */
- mdelay(2);
- }
-
- /*
- * Set "initialization complete" bit to move adapter from
- * D0U* --> D0A* (powered-up active) state.
- */
- if (mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
- iwl_set_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ |
- CSR_GP_CNTRL_REG_FLAG_MAC_INIT);
- poll_ready = CSR_GP_CNTRL_REG_FLAG_MAC_STATUS;
- } else {
- iwl_set_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
- poll_ready = CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY;
- }
-
- if (mac_cfg->device_family == IWL_DEVICE_FAMILY_8000)
- udelay(2);
-
- /*
- * Wait for clock stabilization; once stabilized, access to
- * device-internal resources is supported, e.g. iwl_write_prph()
- * and accesses to uCode SRAM.
- */
- err = iwl_poll_bits(trans, CSR_GP_CNTRL, poll_ready, 25000);
- if (err < 0) {
- IWL_DEBUG_INFO(trans, "Failed to wake NIC\n");
-
- iwl_dump_host_monitor(trans);
- }
-
- if (mac_cfg->bisr_workaround) {
- /* ensure BISR shift has finished */
- udelay(200);
- }
-
- return err < 0 ? err : 0;
+ return iwl_pcie_gen1_2_activate_nic(trans);
}
-IWL_EXPORT_SYMBOL(iwl_finish_nic_init);
+IWL_EXPORT_SYMBOL(iwl_trans_activate_nic);
void iwl_trans_sync_nmi_with_addr(struct iwl_trans *trans, u32 inta_addr,
u32 sw_err_bit)
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-io.h b/drivers/net/wireless/intel/iwlwifi/iwl-io.h
index 731cda1a4e66..5bcec239ffc4 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-io.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-io.h
@@ -57,7 +57,7 @@ void iwl_set_bits_mask_prph(struct iwl_trans *trans, u32 ofs,
void iwl_clear_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask);
void iwl_force_nmi(struct iwl_trans *trans);
-int iwl_finish_nic_init(struct iwl_trans *trans);
+int iwl_trans_activate_nic(struct iwl_trans *trans);
/* Error handling */
int iwl_dump_fh(struct iwl_trans *trans, char **buf);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
index a67b9572aac3..23465e4c4b39 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
@@ -140,50 +140,6 @@ static struct ieee80211_rate iwl_cfg80211_rates[] = {
#define N_RATES_52 (N_RATES_24 - RATES_52_OFFS)
/**
- * enum iwl_nvm_channel_flags - channel flags in NVM
- * @NVM_CHANNEL_VALID: channel is usable for this SKU/geo
- * @NVM_CHANNEL_IBSS: usable as an IBSS channel and deprecated
- * when %IWL_NVM_SBANDS_FLAGS_LAR enabled.
- * @NVM_CHANNEL_ALLOW_20MHZ_ACTIVITY: active scanning allowed and
- * AP allowed only in 20 MHz. Valid only
- * when %IWL_NVM_SBANDS_FLAGS_LAR enabled.
- * @NVM_CHANNEL_ACTIVE: active scanning allowed and allows IBSS
- * when %IWL_NVM_SBANDS_FLAGS_LAR enabled.
- * @NVM_CHANNEL_RADAR: radar detection required
- * @NVM_CHANNEL_INDOOR_ONLY: only indoor use is allowed
- * @NVM_CHANNEL_GO_CONCURRENT: GO operation is allowed when connected to BSS
- * on same channel on 2.4 or same UNII band on 5.2
- * @NVM_CHANNEL_UNIFORM: uniform spreading required
- * @NVM_CHANNEL_20MHZ: 20 MHz channel okay
- * @NVM_CHANNEL_40MHZ: 40 MHz channel okay
- * @NVM_CHANNEL_80MHZ: 80 MHz channel okay
- * @NVM_CHANNEL_160MHZ: 160 MHz channel okay
- * @NVM_CHANNEL_DC_HIGH: DC HIGH required/allowed (?)
- * @NVM_CHANNEL_VLP: client support connection to UHB VLP AP
- * @NVM_CHANNEL_AFC: client support connection to UHB AFC AP
- * @NVM_CHANNEL_VLP_AP_NOT_ALLOWED: UHB VLP AP not allowed,
- * Valid only when %NVM_CHANNEL_VLP is enabled.
- */
-enum iwl_nvm_channel_flags {
- NVM_CHANNEL_VALID = BIT(0),
- NVM_CHANNEL_IBSS = BIT(1),
- NVM_CHANNEL_ALLOW_20MHZ_ACTIVITY = BIT(2),
- NVM_CHANNEL_ACTIVE = BIT(3),
- NVM_CHANNEL_RADAR = BIT(4),
- NVM_CHANNEL_INDOOR_ONLY = BIT(5),
- NVM_CHANNEL_GO_CONCURRENT = BIT(6),
- NVM_CHANNEL_UNIFORM = BIT(7),
- NVM_CHANNEL_20MHZ = BIT(8),
- NVM_CHANNEL_40MHZ = BIT(9),
- NVM_CHANNEL_80MHZ = BIT(10),
- NVM_CHANNEL_160MHZ = BIT(11),
- NVM_CHANNEL_DC_HIGH = BIT(12),
- NVM_CHANNEL_VLP = BIT(13),
- NVM_CHANNEL_AFC = BIT(14),
- NVM_CHANNEL_VLP_AP_NOT_ALLOWED = BIT(15),
-};
-
-/**
* enum iwl_reg_capa_flags_v1 - global flags applied for the whole regulatory
* domain.
* @REG_CAPA_V1_BF_CCD_LOW_BAND: Beam-forming or Cyclic Delay Diversity in the
@@ -282,30 +238,6 @@ enum iwl_reg_capa_flags_v4 {
*/
#define REG_CAPA_V4_RESP_VER 8
-/**
- * struct iwl_reg_capa - struct for global regulatory capabilities, Used for
- * handling the different APIs of reg_capa_flags.
- *
- * @allow_40mhz: 11n channel with a width of 40Mhz is allowed
- * for this regulatory domain.
- * @allow_80mhz: 11ac channel with a width of 80Mhz is allowed
- * for this regulatory domain (valid only in 5 and 6 Ghz).
- * @allow_160mhz: 11ac channel with a width of 160Mhz is allowed
- * for this regulatory domain (valid only in 5 and 6 Ghz).
- * @allow_320mhz: 11be channel with a width of 320Mhz is allowed
- * for this regulatory domain (valid only in 6 Ghz).
- * @disable_11ax: 11ax is forbidden for this regulatory domain.
- * @disable_11be: 11be is forbidden for this regulatory domain.
- */
-struct iwl_reg_capa {
- bool allow_40mhz;
- bool allow_80mhz;
- bool allow_160mhz;
- bool allow_320mhz;
- bool disable_11ax;
- bool disable_11be;
-};
-
static inline void iwl_nvm_print_channel_flags(struct device *dev, u32 level,
int chan, u32 flags)
{
@@ -1042,10 +974,6 @@ iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans,
iftype_data->he_cap.he_cap_elem.phy_cap_info[2] |=
IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO;
- if (fw_has_capa(&fw->ucode_capa, IWL_UCODE_TLV_CAPA_BROADCAST_TWT))
- iftype_data->he_cap.he_cap_elem.mac_cap_info[2] |=
- IEEE80211_HE_MAC_CAP2_BCAST_TWT;
-
if (trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_22000 &&
!is_ap) {
iftype_data->vendor_elems.data = iwl_vendor_caps;
@@ -1600,9 +1528,10 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_rf_cfg *cfg,
}
IWL_EXPORT_SYMBOL(iwl_parse_nvm_data);
-static u32 iwl_nvm_get_regdom_bw_flags(const u16 *nvm_chan,
- int ch_idx, u16 nvm_flags,
- struct iwl_reg_capa reg_capa)
+VISIBLE_IF_IWLWIFI_KUNIT
+u32 iwl_nvm_get_regdom_bw_flags(const u16 *nvm_chan,
+ int ch_idx, u16 nvm_flags,
+ struct iwl_reg_capa reg_capa)
{
u32 flags = NL80211_RRF_NO_HT40;
@@ -1692,6 +1621,7 @@ static u32 iwl_nvm_get_regdom_bw_flags(const u16 *nvm_chan,
return flags;
}
+EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_nvm_get_regdom_bw_flags);
static struct iwl_reg_capa iwl_get_reg_capa(u32 flags, u8 resp_ver)
{
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h
index 9ce9fa4e78fd..cbc92abf9f87 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h
@@ -21,6 +21,80 @@ enum iwl_nvm_sbands_flags {
IWL_NVM_SBANDS_FLAGS_NO_WIDE_IN_5GHZ = BIT(1),
};
+/**
+ * struct iwl_reg_capa - struct for global regulatory capabilities, used for
+ * handling the different APIs of reg_capa_flags.
+ *
+ * @allow_40mhz: 11n channel with a width of 40 MHz is allowed
+ * for this regulatory domain.
+ * @allow_80mhz: 11ac channel with a width of 80 MHz is allowed
+ * for this regulatory domain (valid only in 5 and 6 GHz).
+ * @allow_160mhz: 11ac channel with a width of 160 MHz is allowed
+ * for this regulatory domain (valid only in 5 and 6 GHz).
+ * @allow_320mhz: 11be channel with a width of 320 MHz is allowed
+ * for this regulatory domain (valid only in 6 GHz).
+ * @disable_11ax: 11ax is forbidden for this regulatory domain.
+ * @disable_11be: 11be is forbidden for this regulatory domain.
+ */
+struct iwl_reg_capa {
+ bool allow_40mhz;
+ bool allow_80mhz;
+ bool allow_160mhz;
+ bool allow_320mhz;
+ bool disable_11ax;
+ bool disable_11be;
+};
+
+/**
+ * enum iwl_nvm_channel_flags - channel flags in NVM
+ * @NVM_CHANNEL_VALID: channel is usable for this SKU/geo
+ * @NVM_CHANNEL_IBSS: usable as an IBSS channel and deprecated
+ * when %IWL_NVM_SBANDS_FLAGS_LAR enabled.
+ * @NVM_CHANNEL_ALLOW_20MHZ_ACTIVITY: active scanning allowed and
+ * AP allowed only in 20 MHz. Valid only
+ * when %IWL_NVM_SBANDS_FLAGS_LAR enabled.
+ * @NVM_CHANNEL_ACTIVE: active scanning allowed and allows IBSS
+ * when %IWL_NVM_SBANDS_FLAGS_LAR enabled.
+ * @NVM_CHANNEL_RADAR: radar detection required
+ * @NVM_CHANNEL_INDOOR_ONLY: only indoor use is allowed
+ * @NVM_CHANNEL_GO_CONCURRENT: GO operation is allowed when connected to BSS
+ * on same channel on 2.4 or same UNII band on 5.2
+ * @NVM_CHANNEL_UNIFORM: uniform spreading required
+ * @NVM_CHANNEL_20MHZ: 20 MHz channel okay
+ * @NVM_CHANNEL_40MHZ: 40 MHz channel okay
+ * @NVM_CHANNEL_80MHZ: 80 MHz channel okay
+ * @NVM_CHANNEL_160MHZ: 160 MHz channel okay
+ * @NVM_CHANNEL_DC_HIGH: DC HIGH required/allowed (?)
+ * @NVM_CHANNEL_VLP: client support connection to UHB VLP AP
+ * @NVM_CHANNEL_AFC: client support connection to UHB AFC AP
+ * @NVM_CHANNEL_VLP_AP_NOT_ALLOWED: UHB VLP AP not allowed.
+ * Valid only when %NVM_CHANNEL_VLP is enabled.
+ */
+enum iwl_nvm_channel_flags {
+ NVM_CHANNEL_VALID = BIT(0),
+ NVM_CHANNEL_IBSS = BIT(1),
+ NVM_CHANNEL_ALLOW_20MHZ_ACTIVITY = BIT(2),
+ NVM_CHANNEL_ACTIVE = BIT(3),
+ NVM_CHANNEL_RADAR = BIT(4),
+ NVM_CHANNEL_INDOOR_ONLY = BIT(5),
+ NVM_CHANNEL_GO_CONCURRENT = BIT(6),
+ NVM_CHANNEL_UNIFORM = BIT(7),
+ NVM_CHANNEL_20MHZ = BIT(8),
+ NVM_CHANNEL_40MHZ = BIT(9),
+ NVM_CHANNEL_80MHZ = BIT(10),
+ NVM_CHANNEL_160MHZ = BIT(11),
+ NVM_CHANNEL_DC_HIGH = BIT(12),
+ NVM_CHANNEL_VLP = BIT(13),
+ NVM_CHANNEL_AFC = BIT(14),
+ NVM_CHANNEL_VLP_AP_NOT_ALLOWED = BIT(15),
+};
+
+#if IS_ENABLED(CONFIG_IWLWIFI_KUNIT_TESTS)
+u32 iwl_nvm_get_regdom_bw_flags(const u16 *nvm_chan,
+ int ch_idx, u16 nvm_flags,
+ struct iwl_reg_capa reg_capa);
+#endif
+
/*
* iwl_parse_nvm_data - parse NVM data and return values
*
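The conditional prototype and EXPORT_SYMBOL_IF_IWLWIFI_KUNIT() above make iwl_nvm_get_regdom_bw_flags() callable from KUnit tests when CONFIG_IWLWIFI_KUNIT_TESTS is enabled. A minimal sketch of such a test; the channel table, index and expected flag below are illustrative assumptions, not taken from this patch:

#include <kunit/test.h>
/* driver headers (iwl-nvm-parse.h etc.) assumed to be included */

static void regdom_bw_flags_sketch(struct kunit *test)
{
	static const u16 chan[] = { 36 };	/* hypothetical one-entry channel table */
	struct iwl_reg_capa capa = { .allow_40mhz = true, .allow_80mhz = true };
	u32 flags = iwl_nvm_get_regdom_bw_flags(chan, 0,
						NVM_CHANNEL_VALID |
						NVM_CHANNEL_40MHZ |
						NVM_CHANNEL_80MHZ,
						capa);

	/* illustrative expectation: 80 MHz is not forbidden when reg_capa allows it */
	KUNIT_EXPECT_FALSE(test, flags & NL80211_RRF_NO_80MHZ);
}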
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
index 3694b41d6621..5232f66c2d52 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
@@ -268,9 +268,7 @@ static void iwl_trans_restart_wk(struct work_struct *wk)
struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
struct device *dev,
- const struct iwl_mac_cfg *mac_cfg,
- unsigned int txcmd_size,
- unsigned int txcmd_align)
+ const struct iwl_mac_cfg *mac_cfg)
{
struct iwl_trans *trans;
#ifdef CONFIG_LOCKDEP
@@ -292,22 +290,12 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
INIT_DELAYED_WORK(&trans->restart.wk, iwl_trans_restart_wk);
- snprintf(trans->dev_cmd_pool_name, sizeof(trans->dev_cmd_pool_name),
- "iwl_cmd_pool:%s", dev_name(trans->dev));
- trans->dev_cmd_pool =
- kmem_cache_create(trans->dev_cmd_pool_name,
- txcmd_size, txcmd_align,
- SLAB_HWCACHE_ALIGN, NULL);
- if (!trans->dev_cmd_pool)
- return NULL;
-
return trans;
}
void iwl_trans_free(struct iwl_trans *trans)
{
cancel_delayed_work_sync(&trans->restart.wk);
- kmem_cache_destroy(trans->dev_cmd_pool);
}
int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
@@ -318,9 +306,6 @@ int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
test_bit(STATUS_RFKILL_OPMODE, &trans->status)))
return -ERFKILL;
- if (unlikely(test_bit(STATUS_SUSPENDED, &trans->status)))
- return -EHOSTDOWN;
-
if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status)))
return -EIO;
@@ -348,6 +333,19 @@ int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
}
IWL_EXPORT_SYMBOL(iwl_trans_send_cmd);
+struct iwl_device_tx_cmd *iwl_trans_alloc_tx_cmd(struct iwl_trans *trans)
+{
+ return iwl_pcie_gen1_2_alloc_tx_cmd(trans);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_alloc_tx_cmd);
+
+void iwl_trans_free_tx_cmd(struct iwl_trans *trans,
+ struct iwl_device_tx_cmd *dev_cmd)
+{
+ iwl_pcie_gen1_2_free_tx_cmd(trans, dev_cmd);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_free_tx_cmd);
+
/* Comparator for struct iwl_hcmd_names.
* Used in the binary search over a list of host commands.
*
@@ -399,7 +397,7 @@ void iwl_trans_op_mode_enter(struct iwl_trans *trans,
WARN_ON_ONCE(!trans->conf.rx_mpdu_cmd);
- iwl_trans_pcie_op_mode_enter(trans);
+ iwl_pcie_gen1_2_op_mode_enter(trans);
}
IWL_EXPORT_SYMBOL(iwl_trans_op_mode_enter);
@@ -408,8 +406,6 @@ int iwl_trans_start_hw(struct iwl_trans *trans)
might_sleep();
clear_bit(STATUS_TRANS_RESET_IN_PROGRESS, &trans->status);
- /* opmode may not resume if it detects errors */
- clear_bit(STATUS_SUSPENDED, &trans->status);
return iwl_trans_pcie_start_hw(trans);
}
@@ -507,33 +503,19 @@ iwl_trans_dump_data(struct iwl_trans *trans, u32 dump_mask,
sanitize_ops, sanitize_ctx);
}
-int iwl_trans_d3_suspend(struct iwl_trans *trans, bool test, bool reset)
+int iwl_trans_d3_suspend(struct iwl_trans *trans, bool reset)
{
- int err;
-
might_sleep();
- err = iwl_trans_pcie_d3_suspend(trans, test, reset);
-
- if (!err)
- set_bit(STATUS_SUSPENDED, &trans->status);
-
- return err;
+ return iwl_trans_pcie_d3_suspend(trans, reset);
}
IWL_EXPORT_SYMBOL(iwl_trans_d3_suspend);
-int iwl_trans_d3_resume(struct iwl_trans *trans, enum iwl_d3_status *status,
- bool test, bool reset)
+int iwl_trans_d3_resume(struct iwl_trans *trans, bool reset)
{
- int err;
-
might_sleep();
- err = iwl_trans_pcie_d3_resume(trans, status, test, reset);
-
- clear_bit(STATUS_SUSPENDED, &trans->status);
-
- return err;
+ return iwl_trans_pcie_d3_resume(trans, reset);
}
IWL_EXPORT_SYMBOL(iwl_trans_d3_resume);
@@ -825,3 +807,18 @@ void iwl_trans_set_reduce_power(struct iwl_trans *trans,
{
iwl_trans_pcie_ctx_info_v2_set_reduce_power(trans, capa);
}
+
+bool iwl_trans_is_pm_supported(struct iwl_trans *trans)
+{
+ if (WARN_ON(trans->mac_cfg->gen2))
+ return false;
+
+ return iwl_pcie_gen1_is_pm_supported(trans);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_is_pm_supported);
+
+bool iwl_trans_is_ltr_enabled(struct iwl_trans *trans)
+{
+ return iwl_pcie_gen1_2_is_ltr_enabled(trans);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_is_ltr_enabled);
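With the dev_cmd kmem_cache gone from the generic transport, op-mode code keeps the same alloc/TX/free pairing but goes through the exported wrappers above, which forward to the PCIe backend. A rough caller-side sketch; the function name, error codes and error-path convention are illustrative:

static int tx_one_frame_sketch(struct iwl_trans *trans, struct sk_buff *skb,
			       int queue)
{
	struct iwl_device_tx_cmd *dev_cmd = iwl_trans_alloc_tx_cmd(trans);

	if (!dev_cmd)
		return -ENOMEM;

	/* ... build the device TX command for skb here ... */

	if (iwl_trans_tx(trans, skb, dev_cmd, queue)) {
		/* assumed caller-side cleanup when the TX path rejects the frame */
		iwl_trans_free_tx_cmd(trans, dev_cmd);
		return -EIO;
	}

	return 0;
}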
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
index d0e658801c2e..a0cc5d7745e8 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
@@ -275,16 +275,6 @@ static inline void iwl_free_rxb(struct iwl_rx_cmd_buffer *r)
#define IWL_9000_MAX_RX_HW_QUEUES 1
/**
- * enum iwl_d3_status - WoWLAN image/device status
- * @IWL_D3_STATUS_ALIVE: firmware is still running after resume
- * @IWL_D3_STATUS_RESET: device was reset while suspended
- */
-enum iwl_d3_status {
- IWL_D3_STATUS_ALIVE,
- IWL_D3_STATUS_RESET,
-};
-
-/**
* enum iwl_trans_status: transport status flags
* @STATUS_SYNC_HCMD_ACTIVE: a SYNC command is being processed
* @STATUS_DEVICE_ENABLED: APM is enabled
@@ -294,16 +284,12 @@ enum iwl_d3_status {
* @STATUS_RFKILL_OPMODE: RF-kill state reported to opmode
* @STATUS_FW_ERROR: the fw is in error state
* @STATUS_TRANS_DEAD: trans is dead - avoid any read/write operation
- * @STATUS_SUPPRESS_CMD_ERROR_ONCE: suppress "FW error in SYNC CMD" once,
- * e.g. for testing
* @STATUS_IN_SW_RESET: device is undergoing reset, cleared by opmode
* via iwl_trans_finish_sw_reset()
* @STATUS_RESET_PENDING: reset worker was scheduled, but didn't dump
* the firmware state yet
* @STATUS_TRANS_RESET_IN_PROGRESS: reset is still in progress, don't
* attempt another reset yet
- * @STATUS_SUSPENDED: device is suspended, don't send commands that
- * aren't marked accordingly
*/
enum iwl_trans_status {
STATUS_SYNC_HCMD_ACTIVE,
@@ -314,11 +300,9 @@ enum iwl_trans_status {
STATUS_RFKILL_OPMODE,
STATUS_FW_ERROR,
STATUS_TRANS_DEAD,
- STATUS_SUPPRESS_CMD_ERROR_ONCE,
STATUS_IN_SW_RESET,
STATUS_RESET_PENDING,
STATUS_TRANS_RESET_IN_PROGRESS,
- STATUS_SUSPENDED,
};
static inline int
@@ -658,8 +642,6 @@ struct iwl_pc_data {
* @restart_required: indicates debug restart is required
* @last_tp_resetfw: last handling of reset during debug timepoint
* @imr_data: IMR debug data allocation
- * @dump_file_name_ext: dump file name extension
- * @dump_file_name_ext_valid: dump file name extension if valid or not
* @num_pc: number of program counter for cpu
* @pc_data: details of the program counter
* @yoyo_bin_loaded: tells if a yoyo debug file has been loaded
@@ -698,8 +680,6 @@ struct iwl_trans_debug {
bool restart_required;
u32 last_tp_resetfw;
struct iwl_imr_data imr_data;
- u8 dump_file_name_ext[IWL_FW_INI_MAX_NAME];
- bool dump_file_name_ext_valid;
u32 num_pc;
struct iwl_pc_data *pc_data;
bool yoyo_bin_loaded;
@@ -830,7 +810,6 @@ struct iwl_txq {
* @hw_rf_id: the device RF ID
* @hw_cnv_id: the device CNV ID
* @hw_crf_id: the device CRF ID
- * @hw_wfpm_id: the device wfpm ID
* @hw_id: the ID of the device / sub-device
* Bits 0:15 represent the sub-device ID
* Bits 16:31 represent the device ID.
@@ -846,7 +825,6 @@ struct iwl_trans_info {
u32 hw_rf_id;
u32 hw_crf_id;
u32 hw_cnv_id;
- u32 hw_wfpm_id;
u32 hw_id;
u8 pcie_link_speed;
u8 num_rxqs;
@@ -866,14 +844,11 @@ struct iwl_trans_info {
* @dev: pointer to struct device * that represents the device
* @info: device information for use by other layers
* @pnvm_loaded: indicates PNVM was loaded
- * @pm_support: set to true in start_hw if link pm is supported
- * @ltr_enabled: set to true if the LTR is enabled
+ * @suppress_cmd_error_once: suppress "FW error in SYNC CMD" once,
+ * e.g. for testing
* @fail_to_parse_pnvm_image: set to true if pnvm parsing failed
* @reduce_power_loaded: indicates reduced power section was loaded
* @failed_to_load_reduce_power_image: set to true if pnvm loading failed
- * @dev_cmd_pool: pool for Tx cmd allocation - for internal use only.
- * The user should use iwl_trans_{alloc,free}_tx_cmd.
- * @dev_cmd_pool_name: name for the TX command allocation pool
* @dbgfs_dir: iwlwifi debugfs base dir for this device
* @sync_cmd_lockdep_map: lockdep map for checking sync commands
* @dbg: additional debug data, see &struct iwl_trans_debug
@@ -905,18 +880,13 @@ struct iwl_trans {
const struct iwl_trans_info info;
bool reduced_cap_sku;
bool step_urm;
+ bool suppress_cmd_error_once;
- bool pm_support;
- bool ltr_enabled;
u8 pnvm_loaded:1;
u8 fail_to_parse_pnvm_image:1;
u8 reduce_power_loaded:1;
u8 failed_to_load_reduce_power_image:1;
- /* The following fields are internal only */
- struct kmem_cache *dev_cmd_pool;
- char dev_cmd_pool_name[50];
-
struct dentry *dbgfs_dir;
#ifdef CONFIG_LOCKDEP
@@ -956,29 +926,21 @@ int iwl_trans_start_fw(struct iwl_trans *trans, const struct iwl_fw *fw,
void iwl_trans_stop_device(struct iwl_trans *trans);
-int iwl_trans_d3_suspend(struct iwl_trans *trans, bool test, bool reset);
+int iwl_trans_d3_suspend(struct iwl_trans *trans, bool reset);
-int iwl_trans_d3_resume(struct iwl_trans *trans, enum iwl_d3_status *status,
- bool test, bool reset);
+int iwl_trans_d3_resume(struct iwl_trans *trans, bool reset);
struct iwl_trans_dump_data *
iwl_trans_dump_data(struct iwl_trans *trans, u32 dump_mask,
const struct iwl_dump_sanitize_ops *sanitize_ops,
void *sanitize_ctx);
-static inline struct iwl_device_tx_cmd *
-iwl_trans_alloc_tx_cmd(struct iwl_trans *trans)
-{
- return kmem_cache_zalloc(trans->dev_cmd_pool, GFP_ATOMIC);
-}
+struct iwl_device_tx_cmd *iwl_trans_alloc_tx_cmd(struct iwl_trans *trans);
int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
-static inline void iwl_trans_free_tx_cmd(struct iwl_trans *trans,
- struct iwl_device_tx_cmd *dev_cmd)
-{
- kmem_cache_free(trans->dev_cmd_pool, dev_cmd);
-}
+void iwl_trans_free_tx_cmd(struct iwl_trans *trans,
+ struct iwl_device_tx_cmd *dev_cmd);
int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_device_tx_cmd *dev_cmd, int queue);
@@ -1205,9 +1167,7 @@ static inline void iwl_trans_finish_sw_reset(struct iwl_trans *trans)
*****************************************************/
struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
struct device *dev,
- const struct iwl_mac_cfg *mac_cfg,
- unsigned int txcmd_size,
- unsigned int txcmd_align);
+ const struct iwl_mac_cfg *mac_cfg);
void iwl_trans_free(struct iwl_trans *trans);
static inline bool iwl_trans_is_hw_error_value(u32 val)
@@ -1230,11 +1190,6 @@ static inline u16 iwl_trans_get_num_rbds(struct iwl_trans *trans)
return result;
}
-static inline void iwl_trans_suppress_cmd_error_once(struct iwl_trans *trans)
-{
- set_bit(STATUS_SUPPRESS_CMD_ERROR_ONCE, &trans->status);
-}
-
static inline bool iwl_trans_device_enabled(struct iwl_trans *trans)
{
return test_bit(STATUS_DEVICE_ENABLED, &trans->status);
@@ -1245,6 +1200,20 @@ static inline bool iwl_trans_is_dead(struct iwl_trans *trans)
return test_bit(STATUS_TRANS_DEAD, &trans->status);
}
+static inline bool iwl_trans_is_fw_error(struct iwl_trans *trans)
+{
+ return test_bit(STATUS_FW_ERROR, &trans->status);
+}
+
+/*
+ * This function notifies the transport layer of a firmware error; the recovery
+ * will be handled by the op mode.
+ */
+static inline void iwl_trans_notify_fw_error(struct iwl_trans *trans)
+{
+ trans->state = IWL_TRANS_NO_FW;
+ set_bit(STATUS_FW_ERROR, &trans->status);
+}
/*****************************************************
* PCIe handling
*****************************************************/
@@ -1289,4 +1258,8 @@ static inline u16 iwl_trans_get_device_id(struct iwl_trans *trans)
return u32_get_bits(trans->info.hw_id, GENMASK(31, 16));
}
+bool iwl_trans_is_pm_supported(struct iwl_trans *trans);
+
+bool iwl_trans_is_ltr_enabled(struct iwl_trans *trans);
+
#endif /* __iwl_trans_h__ */
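The pm_support and ltr_enabled fields leave struct iwl_trans in favour of the iwl_trans_is_pm_supported()/iwl_trans_is_ltr_enabled() accessors declared above. A hedged sketch of a caller after the conversion; the function and messages are invented for illustration, and the PM query is only meaningful for pre-gen2 devices, matching the WARN in the implementation:

static void report_power_caps_sketch(struct iwl_trans *trans)
{
	/* previously this read trans->pm_support / trans->ltr_enabled directly */
	if (iwl_trans_is_pm_supported(trans))
		pr_info("link power management supported\n");

	if (iwl_trans_is_ltr_enabled(trans))
		pr_info("LTR enabled\n");
}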
diff --git a/drivers/net/wireless/intel/iwlwifi/mei/sap.h b/drivers/net/wireless/intel/iwlwifi/mei/sap.h
index ba1f75f739c2..f985ab90d41c 100644
--- a/drivers/net/wireless/intel/iwlwifi/mei/sap.h
+++ b/drivers/net/wireless/intel/iwlwifi/mei/sap.h
@@ -300,13 +300,11 @@ enum iwl_sap_msg {
* @type: See &enum iwl_sap_msg.
* @len: The length of the message (header not included).
* @seq_num: For debug.
- * @payload: The payload of the message.
*/
struct iwl_sap_hdr {
__le16 type;
__le16 len;
__le32 seq_num;
- u8 payload[];
};
/**
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/d3.c b/drivers/net/wireless/intel/iwlwifi/mld/d3.c
index ed0a0f76f1c5..1d4282a21f09 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/d3.c
@@ -11,6 +11,7 @@
#include "mcc.h"
#include "sta.h"
#include "mlo.h"
+#include "key.h"
#include "fw/api/d3.h"
#include "fw/api/offload.h"
@@ -40,8 +41,6 @@ enum iwl_mld_d3_notif {
struct iwl_mld_resume_key_iter_data {
struct iwl_mld *mld;
struct iwl_mld_wowlan_status *wowlan_status;
- u32 num_keys, gtk_cipher, igtk_cipher, bigtk_cipher;
- bool unhandled_cipher;
};
struct iwl_mld_suspend_key_iter_data {
@@ -71,6 +70,12 @@ struct iwl_mld_mcast_key_data {
};
+struct iwl_mld_wowlan_mlo_key {
+ u8 key[WOWLAN_KEY_MAX_SIZE];
+ u8 idx, type, link_id;
+ u8 pn[6];
+};
+
/**
* struct iwl_mld_wowlan_status - contains wowlan status data from
* all wowlan notifications
@@ -89,6 +94,8 @@ struct iwl_mld_mcast_key_data {
* @bigtk: data of the last two used gtk's by the FW upon resume
* @ptk: last seq numbers per tid passed by the FW,
* holds both in tkip and aes formats
+ * @num_mlo_keys: number of &struct iwl_mld_wowlan_mlo_key structs
+ * @mlo_keys: array of MLO keys
*/
struct iwl_mld_wowlan_status {
u32 wakeup_reasons;
@@ -108,6 +115,9 @@ struct iwl_mld_wowlan_status {
struct ieee80211_key_seq tkip_seq[IWL_MAX_TID_COUNT];
} ptk;
+
+ int num_mlo_keys;
+ struct iwl_mld_wowlan_mlo_key mlo_keys[WOWLAN_MAX_MLO_KEYS];
};
#define NETDETECT_QUERY_BUF_LEN \
@@ -271,7 +281,7 @@ iwl_mld_convert_gtk_resume_seq(struct iwl_mld_mcast_key_data *gtk_data,
static void
iwl_mld_convert_gtk_resume_data(struct iwl_mld *mld,
struct iwl_mld_wowlan_status *wowlan_status,
- const struct iwl_wowlan_gtk_status_v3 *gtk_data,
+ const struct iwl_wowlan_gtk_status *gtk_data,
const struct iwl_wowlan_all_rsc_tsc_v5 *sc)
{
int status_idx = 0;
@@ -283,8 +293,9 @@ iwl_mld_convert_gtk_resume_data(struct iwl_mld *mld,
for (int notif_idx = 0; notif_idx < ARRAY_SIZE(wowlan_status->gtk);
notif_idx++) {
int rsc_idx;
+ u8 key_status = gtk_data[notif_idx].key_status;
- if (!(gtk_data[notif_idx].key_len))
+ if (!key_status)
continue;
wowlan_status->gtk[status_idx].len =
@@ -294,10 +305,6 @@ iwl_mld_convert_gtk_resume_data(struct iwl_mld *mld,
wowlan_status->gtk[status_idx].id =
wowlan_status->gtk[status_idx].flags &
IWL_WOWLAN_GTK_IDX_MASK;
- memcpy(wowlan_status->gtk[status_idx].key,
- gtk_data[notif_idx].key,
- sizeof(gtk_data[notif_idx].key));
-
/* The rsc for both gtk keys are stored in gtk[0]->sc->mcast_rsc
* The gtk ids can be any two numbers between 0 and 3,
* the id_map maps between the key id and the index in sc->mcast
@@ -307,13 +314,27 @@ iwl_mld_convert_gtk_resume_data(struct iwl_mld *mld,
iwl_mld_convert_gtk_resume_seq(&wowlan_status->gtk[status_idx],
sc, rsc_idx);
- /* if it's as long as the TKIP encryption key, copy MIC key */
- if (wowlan_status->gtk[status_idx].len ==
- NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY)
- memcpy(wowlan_status->gtk[status_idx].key +
- NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY,
- gtk_data[notif_idx].tkip_mic_key,
- sizeof(gtk_data[notif_idx].tkip_mic_key));
+ if (key_status == IWL_WOWLAN_STATUS_NEW_KEY) {
+ memcpy(wowlan_status->gtk[status_idx].key,
+ gtk_data[notif_idx].key,
+ sizeof(gtk_data[notif_idx].key));
+
+ /* if it's as long as the TKIP encryption key,
+ * copy MIC key
+ */
+ if (wowlan_status->gtk[status_idx].len ==
+ NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY)
+ memcpy(wowlan_status->gtk[status_idx].key +
+ NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY,
+ gtk_data[notif_idx].tkip_mic_key,
+ sizeof(gtk_data[notif_idx].tkip_mic_key));
+ } else {
+ /* If the key status is WOWLAN_STATUS_OLD_KEY, it
+ * indicates that no key material is present. Set the
+ * key length to 0 as an indication.
+ */
+ wowlan_status->gtk[status_idx].len = 0;
+ }
status_idx++;
}
}
@@ -360,11 +381,11 @@ static void
iwl_mld_convert_igtk_resume_data(struct iwl_mld_wowlan_status *wowlan_status,
const struct iwl_wowlan_igtk_status *igtk)
{
- BUILD_BUG_ON(sizeof(wowlan_status->igtk.key) < sizeof(igtk->key));
-
- if (!igtk->key_len)
+ if (!igtk->key_status)
return;
+ BUILD_BUG_ON(sizeof(wowlan_status->igtk.key) < sizeof(igtk->key));
+
wowlan_status->igtk.len = igtk->key_len;
wowlan_status->igtk.flags = igtk->key_flags;
wowlan_status->igtk.id =
@@ -372,7 +393,15 @@ iwl_mld_convert_igtk_resume_data(struct iwl_mld_wowlan_status *wowlan_status,
IWL_WOWLAN_IGTK_BIGTK_IDX_MASK) +
WOWLAN_IGTK_MIN_INDEX;
- memcpy(wowlan_status->igtk.key, igtk->key, sizeof(igtk->key));
+ if (igtk->key_status == IWL_WOWLAN_STATUS_NEW_KEY)
+ memcpy(wowlan_status->igtk.key, igtk->key, sizeof(igtk->key));
+ else
+ /* If the key status is WOWLAN_STATUS_OLD_KEY, it indicates
+ * that no key material is present. Set the key length to 0
+ * as an indication.
+ */
+ wowlan_status->igtk.len = 0;
+
iwl_mld_convert_mcast_ipn(&wowlan_status->igtk, igtk);
}
@@ -386,7 +415,7 @@ iwl_mld_convert_bigtk_resume_data(struct iwl_mld_wowlan_status *wowlan_status,
for (int notif_idx = 0; notif_idx < WOWLAN_BIGTK_KEYS_NUM;
notif_idx++) {
- if (!bigtk[notif_idx].key_len)
+ if (!bigtk[notif_idx].key_status)
continue;
wowlan_status->bigtk[status_idx].len = bigtk[notif_idx].key_len;
@@ -399,32 +428,218 @@ iwl_mld_convert_bigtk_resume_data(struct iwl_mld_wowlan_status *wowlan_status,
BUILD_BUG_ON(sizeof(wowlan_status->bigtk[status_idx].key) <
sizeof(bigtk[notif_idx].key));
- memcpy(wowlan_status->bigtk[status_idx].key,
- bigtk[notif_idx].key, sizeof(bigtk[notif_idx].key));
+ if (bigtk[notif_idx].key_status == IWL_WOWLAN_STATUS_NEW_KEY)
+ memcpy(wowlan_status->bigtk[status_idx].key,
+ bigtk[notif_idx].key,
+ sizeof(bigtk[notif_idx].key));
+ else
+ /* If the key status is WOWLAN_STATUS_OLD_KEY, it
+ * indicates that no key material is present. Set the
+ * key length to 0 as an indication.
+ */
+ wowlan_status->bigtk[status_idx].len = 0;
+
iwl_mld_convert_mcast_ipn(&wowlan_status->bigtk[status_idx],
&bigtk[notif_idx]);
status_idx++;
}
}
+static void
+iwl_mld_convert_mlo_keys(struct iwl_mld *mld,
+ const struct iwl_wowlan_info_notif *notif,
+ struct iwl_mld_wowlan_status *wowlan_status)
+{
+ if (!notif->num_mlo_link_keys)
+ return;
+
+ wowlan_status->num_mlo_keys = notif->num_mlo_link_keys;
+
+ if (IWL_FW_CHECK(mld, wowlan_status->num_mlo_keys > WOWLAN_MAX_MLO_KEYS,
+ "Too many MLO keys: %d, max %d\n",
+ wowlan_status->num_mlo_keys, WOWLAN_MAX_MLO_KEYS))
+ wowlan_status->num_mlo_keys = WOWLAN_MAX_MLO_KEYS;
+
+ for (int i = 0; i < wowlan_status->num_mlo_keys; i++) {
+ const struct iwl_wowlan_mlo_gtk *fw_mlo_key = &notif->mlo_gtks[i];
+ struct iwl_mld_wowlan_mlo_key *driver_mlo_key =
+ &wowlan_status->mlo_keys[i];
+ u16 flags = le16_to_cpu(fw_mlo_key->flags);
+
+ driver_mlo_key->link_id =
+ u16_get_bits(flags, WOWLAN_MLO_GTK_FLAG_LINK_ID_MSK);
+ driver_mlo_key->type =
+ u16_get_bits(flags, WOWLAN_MLO_GTK_FLAG_KEY_TYPE_MSK);
+ driver_mlo_key->idx =
+ u16_get_bits(flags, WOWLAN_MLO_GTK_FLAG_KEY_ID_MSK);
+
+ BUILD_BUG_ON(sizeof(driver_mlo_key->key) != sizeof(fw_mlo_key->key));
+ BUILD_BUG_ON(sizeof(driver_mlo_key->pn) != sizeof(fw_mlo_key->pn));
+
+ memcpy(driver_mlo_key->key, fw_mlo_key->key, sizeof(fw_mlo_key->key));
+ memcpy(driver_mlo_key->pn, fw_mlo_key->pn, sizeof(fw_mlo_key->pn));
+ }
+}
+
+static void
+iwl_mld_convert_wowlan_notif_v5(const struct iwl_wowlan_info_notif_v5 *notif_v5,
+ struct iwl_wowlan_info_notif *notif)
+{
+ /* Convert GTK from v3 to the new format */
+ BUILD_BUG_ON(ARRAY_SIZE(notif->gtk) != ARRAY_SIZE(notif_v5->gtk));
+
+ for (int i = 0; i < ARRAY_SIZE(notif_v5->gtk); i++) {
+ const struct iwl_wowlan_gtk_status_v3 *gtk_v3 = &notif_v5->gtk[i];
+ struct iwl_wowlan_gtk_status *gtk = &notif->gtk[i];
+
+ /* Copy key material and metadata */
+ BUILD_BUG_ON(sizeof(gtk->key) != sizeof(gtk_v3->key));
+ BUILD_BUG_ON(sizeof(gtk->tkip_mic_key) != sizeof(gtk_v3->tkip_mic_key));
+
+ memcpy(gtk->key, gtk_v3->key, sizeof(gtk_v3->key));
+
+ gtk->key_len = gtk_v3->key_len;
+ gtk->key_flags = gtk_v3->key_flags;
+
+ memcpy(gtk->tkip_mic_key, gtk_v3->tkip_mic_key,
+ sizeof(gtk_v3->tkip_mic_key));
+ gtk->sc = gtk_v3->sc;
+
+ /* Set key_status based on whether key material is present.
+ * In v5, a key is either invalid (should be skipped) or has
+ * both metadata and the key itself.
+ */
+ if (gtk_v3->key_len)
+ gtk->key_status = IWL_WOWLAN_STATUS_NEW_KEY;
+ }
+
+ /* Convert IGTK from v1 to the new format; only one IGTK is passed by the FW */
+ BUILD_BUG_ON(offsetof(struct iwl_wowlan_igtk_status, key_status) !=
+ sizeof(struct iwl_wowlan_igtk_status_v1));
+
+ memcpy(&notif->igtk[0], &notif_v5->igtk[0],
+ offsetof(struct iwl_wowlan_igtk_status, key_status));
+
+ /* Set key_status based on whether key material is present.
+ * In v5, a key is either invalid (should be skipped) or has
+ * both metadata and the key itself.
+ */
+ if (notif_v5->igtk[0].key_len)
+ notif->igtk[0].key_status = IWL_WOWLAN_STATUS_NEW_KEY;
+
+ /* Convert BIGTK from v1 to the new format */
+ BUILD_BUG_ON(ARRAY_SIZE(notif->bigtk) != ARRAY_SIZE(notif_v5->bigtk));
+
+ for (int i = 0; i < ARRAY_SIZE(notif_v5->bigtk); i++) {
+ /* Copy everything until key_status */
+ memcpy(&notif->bigtk[i], &notif_v5->bigtk[i],
+ offsetof(struct iwl_wowlan_igtk_status, key_status));
+
+ /* Set key_status based on whether key material is present.
+ * In v5, a key is either invalid (should be skipped) or has
+ * both metadata and the key itself.
+ */
+ if (notif_v5->bigtk[i].key_len)
+ notif->bigtk[i].key_status = IWL_WOWLAN_STATUS_NEW_KEY;
+ }
+
+ notif->replay_ctr = notif_v5->replay_ctr;
+ notif->pattern_number = notif_v5->pattern_number;
+ notif->qos_seq_ctr = notif_v5->qos_seq_ctr;
+ notif->wakeup_reasons = notif_v5->wakeup_reasons;
+ notif->num_of_gtk_rekeys = notif_v5->num_of_gtk_rekeys;
+ notif->transmitted_ndps = notif_v5->transmitted_ndps;
+ notif->received_beacons = notif_v5->received_beacons;
+ notif->tid_tear_down = notif_v5->tid_tear_down;
+ notif->station_id = notif_v5->station_id;
+ notif->num_mlo_link_keys = notif_v5->num_mlo_link_keys;
+ notif->tid_offloaded_tx = notif_v5->tid_offloaded_tx;
+
+ /* Copy MLO GTK keys */
+ if (notif_v5->num_mlo_link_keys) {
+ memcpy(notif->mlo_gtks, notif_v5->mlo_gtks,
+ notif_v5->num_mlo_link_keys * sizeof(struct iwl_wowlan_mlo_gtk));
+ }
+}
+
+static bool iwl_mld_validate_wowlan_notif_size(struct iwl_mld *mld,
+ u32 len,
+ u32 expected_len,
+ u8 num_mlo_keys,
+ int version)
+{
+ u32 len_with_mlo_keys;
+
+ if (IWL_FW_CHECK(mld, len < expected_len,
+ "Invalid wowlan_info_notif v%d (expected=%u got=%u)\n",
+ version, expected_len, len))
+ return false;
+
+ len_with_mlo_keys = expected_len +
+ (num_mlo_keys * sizeof(struct iwl_wowlan_mlo_gtk));
+
+ if (IWL_FW_CHECK(mld, len < len_with_mlo_keys,
+ "Invalid wowlan_info_notif v%d with MLO keys (expected=%u got=%u)\n",
+ version, len_with_mlo_keys, len))
+ return false;
+
+ return true;
+}
+
static bool
iwl_mld_handle_wowlan_info_notif(struct iwl_mld *mld,
struct iwl_mld_wowlan_status *wowlan_status,
struct iwl_rx_packet *pkt)
{
- const struct iwl_wowlan_info_notif *notif = (void *)pkt->data;
- u32 expected_len, len = iwl_rx_packet_payload_len(pkt);
-
- expected_len = sizeof(*notif);
+ const struct iwl_wowlan_info_notif *notif;
+ struct iwl_wowlan_info_notif *converted_notif __free(kfree) = NULL;
+ u32 len = iwl_rx_packet_payload_len(pkt);
+ int wowlan_info_ver = iwl_fw_lookup_notif_ver(mld->fw,
+ PROT_OFFLOAD_GROUP,
+ WOWLAN_INFO_NOTIFICATION,
+ IWL_FW_CMD_VER_UNKNOWN);
+
+ if (wowlan_info_ver == 5) {
+ /* v5 format - validate before conversion */
+ const struct iwl_wowlan_info_notif_v5 *notif_v5 = (void *)pkt->data;
+
+ if (!iwl_mld_validate_wowlan_notif_size(mld, len,
+ sizeof(*notif_v5),
+ notif_v5->num_mlo_link_keys,
+ 5))
+ return true;
+
+ converted_notif = kzalloc(struct_size(converted_notif,
+ mlo_gtks,
+ notif_v5->num_mlo_link_keys),
+ GFP_ATOMIC);
+ if (!converted_notif) {
+ IWL_ERR(mld,
+ "Failed to allocate memory for converted wowlan_info_notif\n");
+ return true;
+ }
- if (IWL_FW_CHECK(mld, len < expected_len,
- "Invalid wowlan_info_notif (expected=%ud got=%ud)\n",
- expected_len, len))
+ iwl_mld_convert_wowlan_notif_v5(notif_v5,
+ converted_notif);
+ notif = converted_notif;
+ } else if (wowlan_info_ver == 6) {
+ notif = (void *)pkt->data;
+ if (!iwl_mld_validate_wowlan_notif_size(mld, len,
+ sizeof(*notif),
+ notif->num_mlo_link_keys,
+ 6))
+ return true;
+ } else {
+ /* smaller versions are not supported */
+ IWL_WARN(mld,
+ "Unsupported wowlan_info_notif version %d\n",
+ wowlan_info_ver);
return true;
+ }
if (IWL_FW_CHECK(mld, notif->tid_offloaded_tx != IWL_WOWLAN_OFFLOAD_TID,
"Invalid tid_offloaded_tx %d\n",
- wowlan_status->tid_offloaded_tx))
+ notif->tid_offloaded_tx))
return true;
iwl_mld_convert_gtk_resume_data(mld, wowlan_status, notif->gtk,
@@ -442,8 +657,10 @@ iwl_mld_handle_wowlan_info_notif(struct iwl_mld *mld,
wowlan_status->num_of_gtk_rekeys =
le32_to_cpu(notif->num_of_gtk_rekeys);
wowlan_status->wakeup_reasons = le32_to_cpu(notif->wakeup_reasons);
+
+ iwl_mld_convert_mlo_keys(mld, notif, wowlan_status);
+
return false;
- /* TODO: mlo_links (task=MLO)*/
}
static bool
@@ -619,8 +836,8 @@ iwl_mld_set_key_rx_seq_tids(struct ieee80211_key_conf *key,
}
static void
-iwl_mld_set_key_rx_seq(struct ieee80211_key_conf *key,
- struct iwl_mld_mcast_key_data *key_data)
+iwl_mld_update_mcast_rx_seq(struct ieee80211_key_conf *key,
+ struct iwl_mld_mcast_key_data *key_data)
{
switch (key->cipher) {
case WLAN_CIPHER_SUITE_CCMP:
@@ -687,132 +904,53 @@ iwl_mld_resume_keys_iter(struct ieee80211_hw *hw,
struct iwl_mld_wowlan_status *wowlan_status = data->wowlan_status;
u8 status_idx;
- /* TODO: check key link id (task=MLO) */
- if (data->unhandled_cipher)
- return;
-
- switch (key->cipher) {
- case WLAN_CIPHER_SUITE_WEP40:
- case WLAN_CIPHER_SUITE_WEP104:
- /* ignore WEP completely, nothing to do */
- return;
- case WLAN_CIPHER_SUITE_CCMP:
- case WLAN_CIPHER_SUITE_GCMP:
- case WLAN_CIPHER_SUITE_GCMP_256:
- case WLAN_CIPHER_SUITE_TKIP:
+ if (key->keyidx >= 0 && key->keyidx <= 3) {
+ /* PTK */
if (sta) {
iwl_mld_update_ptk_rx_seq(data->mld, wowlan_status,
sta, key,
key->cipher ==
WLAN_CIPHER_SUITE_TKIP);
- return;
+ /* GTK */
+ } else {
+ status_idx = key->keyidx == wowlan_status->gtk[1].id;
+ iwl_mld_update_mcast_rx_seq(key,
+ &wowlan_status->gtk[status_idx]);
}
+ }
- if (WARN_ON(data->gtk_cipher &&
- data->gtk_cipher != key->cipher))
- return;
+ /* IGTK */
+ if (key->keyidx == 4 || key->keyidx == 5) {
+ if (key->keyidx == wowlan_status->igtk.id)
+ iwl_mld_update_mcast_rx_seq(key, &wowlan_status->igtk);
+ }
- data->gtk_cipher = key->cipher;
- status_idx = key->keyidx == wowlan_status->gtk[1].id;
- iwl_mld_set_key_rx_seq(key, &wowlan_status->gtk[status_idx]);
- break;
- case WLAN_CIPHER_SUITE_BIP_GMAC_128:
- case WLAN_CIPHER_SUITE_BIP_GMAC_256:
- case WLAN_CIPHER_SUITE_BIP_CMAC_256:
- case WLAN_CIPHER_SUITE_AES_CMAC:
- if (key->keyidx == 4 || key->keyidx == 5) {
- if (WARN_ON(data->igtk_cipher &&
- data->igtk_cipher != key->cipher))
- return;
-
- data->igtk_cipher = key->cipher;
- if (key->keyidx == wowlan_status->igtk.id)
- iwl_mld_set_key_rx_seq(key, &wowlan_status->igtk);
- }
- if (key->keyidx == 6 || key->keyidx == 7) {
- if (WARN_ON(data->bigtk_cipher &&
- data->bigtk_cipher != key->cipher))
- return;
-
- data->bigtk_cipher = key->cipher;
- status_idx = key->keyidx == wowlan_status->bigtk[1].id;
- iwl_mld_set_key_rx_seq(key, &wowlan_status->bigtk[status_idx]);
- }
- break;
- default:
- data->unhandled_cipher = true;
- return;
+ /* BIGTK */
+ if (key->keyidx == 6 || key->keyidx == 7) {
+ status_idx = key->keyidx == wowlan_status->bigtk[1].id;
+ iwl_mld_update_mcast_rx_seq(key,
+ &wowlan_status->bigtk[status_idx]);
}
- data->num_keys++;
}
static void
iwl_mld_add_mcast_rekey(struct ieee80211_vif *vif,
struct iwl_mld *mld,
struct iwl_mld_mcast_key_data *key_data,
- struct ieee80211_bss_conf *link_conf,
- u32 cipher)
+ struct ieee80211_bss_conf *link_conf)
{
struct ieee80211_key_conf *key_config;
- struct {
- struct ieee80211_key_conf conf;
- u8 key[WOWLAN_KEY_MAX_SIZE];
- } conf = {
- .conf.cipher = cipher,
- .conf.keyidx = key_data->id,
- };
int link_id = vif->active_links ? __ffs(vif->active_links) : -1;
- u8 key[WOWLAN_KEY_MAX_SIZE];
-
- BUILD_BUG_ON(WLAN_KEY_LEN_CCMP != WLAN_KEY_LEN_GCMP);
- BUILD_BUG_ON(sizeof(conf.key) < WLAN_KEY_LEN_CCMP);
- BUILD_BUG_ON(sizeof(conf.key) < WLAN_KEY_LEN_GCMP_256);
- BUILD_BUG_ON(sizeof(conf.key) < WLAN_KEY_LEN_TKIP);
- BUILD_BUG_ON(sizeof(conf.key) < WLAN_KEY_LEN_BIP_GMAC_128);
- BUILD_BUG_ON(sizeof(conf.key) < WLAN_KEY_LEN_BIP_GMAC_256);
- BUILD_BUG_ON(sizeof(conf.key) < WLAN_KEY_LEN_AES_CMAC);
- BUILD_BUG_ON(sizeof(conf.key) < sizeof(key_data->key));
if (!key_data->len)
return;
- switch (cipher) {
- case WLAN_CIPHER_SUITE_CCMP:
- case WLAN_CIPHER_SUITE_GCMP:
- conf.conf.keylen = WLAN_KEY_LEN_CCMP;
- break;
- case WLAN_CIPHER_SUITE_GCMP_256:
- conf.conf.keylen = WLAN_KEY_LEN_GCMP_256;
- break;
- case WLAN_CIPHER_SUITE_TKIP:
- conf.conf.keylen = WLAN_KEY_LEN_TKIP;
- break;
- case WLAN_CIPHER_SUITE_BIP_GMAC_128:
- conf.conf.keylen = WLAN_KEY_LEN_BIP_GMAC_128;
- break;
- case WLAN_CIPHER_SUITE_BIP_GMAC_256:
- conf.conf.keylen = WLAN_KEY_LEN_BIP_GMAC_256;
- break;
- case WLAN_CIPHER_SUITE_AES_CMAC:
- conf.conf.keylen = WLAN_KEY_LEN_AES_CMAC;
- break;
- case WLAN_CIPHER_SUITE_BIP_CMAC_256:
- conf.conf.keylen = WLAN_KEY_LEN_BIP_CMAC_256;
- break;
- default:
- WARN_ON(1);
- }
-
- memcpy(conf.conf.key, key_data->key, conf.conf.keylen);
-
- memcpy(key, key_data->key, sizeof(key_data->key));
-
- key_config = ieee80211_gtk_rekey_add(vif, key_data->id, key,
- sizeof(key), link_id);
+ key_config = ieee80211_gtk_rekey_add(vif, key_data->id, key_data->key,
+ sizeof(key_data->key), link_id);
if (IS_ERR(key_config))
return;
- iwl_mld_set_key_rx_seq(key_config, key_data);
+ iwl_mld_update_mcast_rx_seq(key_config, key_data);
/* The FW holds only one igtk so we keep track of the valid one */
if (key_config->keyidx == 4 || key_config->keyidx == 5) {
@@ -831,37 +969,78 @@ iwl_mld_add_mcast_rekey(struct ieee80211_vif *vif,
}
/* Also keep track of the new BIGTK */
- if ((key_config->keyidx == 6 || key_config->keyidx == 7) &&
- vif->type == NL80211_IFTYPE_STATION) {
- struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
-
- rcu_assign_pointer(mld_vif->bigtks[key_config->keyidx - 6], key_config);
- }
+ if (key_config->keyidx == 6 || key_config->keyidx == 7)
+ iwl_mld_track_bigtk(mld, vif, key_config, true);
}
static void
-iwl_mld_add_all_rekeys(struct ieee80211_vif *vif,
+iwl_mld_add_all_rekeys(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
struct iwl_mld_wowlan_status *wowlan_status,
- struct iwl_mld_resume_key_iter_data *key_iter_data,
struct ieee80211_bss_conf *link_conf)
{
int i;
for (i = 0; i < ARRAY_SIZE(wowlan_status->gtk); i++)
- iwl_mld_add_mcast_rekey(vif, key_iter_data->mld,
- &wowlan_status->gtk[i],
- link_conf,
- key_iter_data->gtk_cipher);
+ iwl_mld_add_mcast_rekey(vif, mld, &wowlan_status->gtk[i],
+ link_conf);
- iwl_mld_add_mcast_rekey(vif, key_iter_data->mld,
- &wowlan_status->igtk,
- link_conf, key_iter_data->igtk_cipher);
+ iwl_mld_add_mcast_rekey(vif, mld, &wowlan_status->igtk, link_conf);
for (i = 0; i < ARRAY_SIZE(wowlan_status->bigtk); i++)
- iwl_mld_add_mcast_rekey(vif, key_iter_data->mld,
- &wowlan_status->bigtk[i],
- link_conf,
- key_iter_data->bigtk_cipher);
+ iwl_mld_add_mcast_rekey(vif, mld, &wowlan_status->bigtk[i],
+ link_conf);
+}
+
+static void iwl_mld_mlo_rekey(struct iwl_mld *mld,
+ struct iwl_mld_wowlan_status *wowlan_status,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mld_old_mlo_keys *old_keys __free(kfree) = NULL;
+
+ IWL_DEBUG_WOWLAN(mld, "Num of MLO Keys: %d\n", wowlan_status->num_mlo_keys);
+
+ if (!wowlan_status->num_mlo_keys)
+ return;
+
+ for (int i = 0; i < wowlan_status->num_mlo_keys; i++) {
+ struct iwl_mld_wowlan_mlo_key *mlo_key = &wowlan_status->mlo_keys[i];
+ struct ieee80211_key_conf *key;
+ struct ieee80211_key_seq seq;
+ u8 link_id = mlo_key->link_id;
+
+ if (IWL_FW_CHECK(mld, mlo_key->link_id >= IEEE80211_MLD_MAX_NUM_LINKS ||
+ mlo_key->idx >= 8 ||
+ mlo_key->type >= WOWLAN_MLO_GTK_KEY_NUM_TYPES,
+ "Invalid MLO key link_id %d, idx %d, type %d\n",
+ mlo_key->link_id, mlo_key->idx, mlo_key->type))
+ continue;
+
+ if (!(vif->valid_links & BIT(link_id)) ||
+ (vif->active_links & BIT(link_id)))
+ continue;
+
+ IWL_DEBUG_WOWLAN(mld, "Add MLO key id %d, link id %d\n",
+ mlo_key->idx, link_id);
+
+ key = ieee80211_gtk_rekey_add(vif, mlo_key->idx, mlo_key->key,
+ sizeof(mlo_key->key), link_id);
+
+ if (IS_ERR(key))
+ continue;
+
+ /*
+ * mac80211 expects the PN in big-endian.
+ * Also note that seq is a union of all cipher types
+ * (ccmp, gcmp, cmac, gmac), and they all have the same
+ * pn field (of length 6) so just copy it to ccmp.pn.
+ */
+ for (int j = 5; j >= 0; j--)
+ seq.ccmp.pn[5 - j] = mlo_key->pn[j];
+
+ /* group keys are non-QoS and use TID 0 */
+ ieee80211_set_key_rx_seq(key, 0, &seq);
+ }
}
static bool
@@ -884,23 +1063,19 @@ iwl_mld_update_sec_keys(struct iwl_mld *mld,
ieee80211_iter_keys(mld->hw, vif, iwl_mld_resume_keys_iter,
&key_iter_data);
- if (key_iter_data.unhandled_cipher)
- return false;
-
- IWL_DEBUG_WOWLAN(mld,
- "Number of installed keys: %d, Number of rekeys: %d\n",
- key_iter_data.num_keys,
+ IWL_DEBUG_WOWLAN(mld, "Number of rekeys: %d\n",
wowlan_status->num_of_gtk_rekeys);
- if (!key_iter_data.num_keys || !wowlan_status->num_of_gtk_rekeys)
+ if (!wowlan_status->num_of_gtk_rekeys)
return true;
- iwl_mld_add_all_rekeys(vif, wowlan_status, &key_iter_data,
+ iwl_mld_add_all_rekeys(mld, vif, wowlan_status,
link_conf);
+ iwl_mld_mlo_rekey(mld, wowlan_status, vif);
+
ieee80211_gtk_rekey_notify(vif, link_conf->bssid,
(void *)&replay_ctr, GFP_KERNEL);
- /* TODO: MLO rekey (task=MLO) */
return true;
}
@@ -1179,7 +1354,6 @@ static int iwl_mld_wait_d3_notif(struct iwl_mld *mld,
WIDE_ID(PROT_OFFLOAD_GROUP, D3_END_NOTIFICATION)
};
struct iwl_notification_wait wait_d3_notif;
- enum iwl_d3_status d3_status;
int ret;
if (with_wowlan)
@@ -1195,14 +1369,10 @@ static int iwl_mld_wait_d3_notif(struct iwl_mld *mld,
iwl_mld_handle_d3_notif,
resume_data);
- ret = iwl_trans_d3_resume(mld->trans, &d3_status, false, false);
- if (ret || d3_status != IWL_D3_STATUS_ALIVE) {
- if (d3_status != IWL_D3_STATUS_ALIVE) {
- IWL_INFO(mld, "Device was reset during suspend\n");
- ret = -ENOENT;
- } else {
- IWL_ERR(mld, "Transport resume failed\n");
- }
+ ret = iwl_trans_d3_resume(mld->trans, false);
+ if (ret) {
+ /* Avoid sending commands if the FW is dead */
+ iwl_trans_notify_fw_error(mld->trans);
iwl_remove_notification(&mld->notif_wait, &wait_d3_notif);
return ret;
}
@@ -1236,16 +1406,11 @@ int iwl_mld_no_wowlan_suspend(struct iwl_mld *mld)
iwl_mld_low_latency_stop(mld);
- /* This will happen if iwl_mld_supsend failed with FW error */
- if (mld->trans->state == IWL_TRANS_NO_FW &&
- test_bit(STATUS_FW_ERROR, &mld->trans->status))
- return -ENODEV;
-
ret = iwl_mld_update_device_power(mld, true);
if (ret) {
IWL_ERR(mld,
"d3 suspend: couldn't send power_device %d\n", ret);
- goto out;
+ return ret;
}
ret = iwl_mld_send_cmd_pdu(mld, D3_CONFIG_CMD,
@@ -1253,24 +1418,20 @@ int iwl_mld_no_wowlan_suspend(struct iwl_mld *mld)
if (ret) {
IWL_ERR(mld,
"d3 suspend: couldn't send D3_CONFIG_CMD %d\n", ret);
- goto out;
+ return ret;
}
- ret = iwl_trans_d3_suspend(mld->trans, false, false);
+ ret = iwl_trans_d3_suspend(mld->trans, false);
if (ret) {
IWL_ERR(mld, "d3 suspend: trans_d3_suspend failed %d\n", ret);
+ /* We are going to stop the FW. Avoid sending commands in that flow */
+ iwl_trans_notify_fw_error(mld->trans);
} else {
/* Async notification might send hcmds, which is not allowed in suspend */
iwl_mld_cancel_async_notifications(mld);
mld->fw_status.in_d3 = true;
}
- out:
- if (ret) {
- mld->trans->state = IWL_TRANS_NO_FW;
- set_bit(STATUS_FW_ERROR, &mld->trans->status);
- }
-
return ret;
}
@@ -1290,15 +1451,12 @@ int iwl_mld_no_wowlan_resume(struct iwl_mld *mld)
iwl_fw_dbg_read_d3_debug_data(&mld->fwrt);
ret = iwl_mld_wait_d3_notif(mld, &resume_data, false);
+ if (ret)
+ return ret;
if (!ret && (resume_data.d3_end_flags & IWL_D0I3_RESET_REQUIRE))
return -ENODEV;
- if (ret) {
- mld->trans->state = IWL_TRANS_NO_FW;
- set_bit(STATUS_FW_ERROR, &mld->trans->status);
- return ret;
- }
iwl_mld_low_latency_restart(mld);
return iwl_mld_update_device_power(mld, false);
@@ -1530,7 +1688,8 @@ static void
iwl_mld_set_wowlan_config_cmd(struct iwl_mld *mld,
struct cfg80211_wowlan *wowlan,
struct iwl_wowlan_config_cmd *wowlan_config_cmd,
- struct ieee80211_sta *ap_sta)
+ struct ieee80211_sta *ap_sta,
+ struct ieee80211_bss_conf *link)
{
wowlan_config_cmd->is_11n_connection =
ap_sta->deflink.ht_cap.ht_supported;
@@ -1540,6 +1699,9 @@ iwl_mld_set_wowlan_config_cmd(struct iwl_mld *mld,
if (ap_sta->mfp)
wowlan_config_cmd->flags |= IS_11W_ASSOC;
+ if (iwl_mld_beacon_protection_enabled(mld, link))
+ wowlan_config_cmd->flags |= HAS_BEACON_PROTECTION;
+
if (wowlan->disconnect)
wowlan_config_cmd->wakeup_filter |=
cpu_to_le32(IWL_WOWLAN_WAKEUP_BEACON_MISS |
@@ -1737,7 +1899,7 @@ iwl_mld_wowlan_config(struct iwl_mld *mld, struct ieee80211_vif *bss_vif,
return ret;
iwl_mld_set_wowlan_config_cmd(mld, wowlan,
- &wowlan_config_cmd, ap_sta);
+ &wowlan_config_cmd, ap_sta, link_conf);
ret = iwl_mld_send_cmd_pdu(mld, WOWLAN_CONFIGURATION,
&wowlan_config_cmd);
if (ret)
@@ -1807,7 +1969,6 @@ int iwl_mld_wowlan_resume(struct iwl_mld *mld)
};
int link_id;
int ret;
- bool fw_err = false;
lockdep_assert_wiphy(mld->wiphy);
@@ -1850,7 +2011,6 @@ int iwl_mld_wowlan_resume(struct iwl_mld *mld)
ret = iwl_mld_wait_d3_notif(mld, &resume_data, true);
if (ret) {
IWL_ERR(mld, "Couldn't get the d3 notifs %d\n", ret);
- fw_err = true;
goto err;
}
@@ -1887,11 +2047,6 @@ int iwl_mld_wowlan_resume(struct iwl_mld *mld)
goto out;
err:
- if (fw_err) {
- mld->trans->state = IWL_TRANS_NO_FW;
- set_bit(STATUS_FW_ERROR, &mld->trans->status);
- }
-
mld->fw_status.in_hw_restart = true;
ret = 1;
out:
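The wowlan_info_notif v5-to-v6 conversion above leans on the scoped-cleanup annotation from linux/cleanup.h: converted_notif is declared __free(kfree), so every early return frees it automatically. A self-contained sketch of the same pattern with illustrative names:

#include <linux/cleanup.h>
#include <linux/slab.h>
#include <linux/string.h>

static int parse_blob_sketch(const void *src, size_t len)
{
	/* freed automatically when the variable goes out of scope */
	u8 *copy __free(kfree) = kmemdup(src, len, GFP_KERNEL);

	if (!copy)
		return -ENOMEM;

	if (copy[0] != 0x42)	/* illustrative validity check */
		return -EINVAL;	/* no explicit kfree() on this path */

	return 0;
}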
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mld/debugfs.c
index cc052b0aa53f..b9c9cd3f44e4 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/debugfs.c
@@ -86,7 +86,7 @@ static ssize_t iwl_dbgfs_fw_restart_write(struct iwl_mld *mld, char *buf,
if (count == 6 && !strcmp(buf, "nolog\n")) {
mld->fw_status.do_not_dump_once = true;
- iwl_trans_suppress_cmd_error_once(mld->trans);
+ mld->trans->suppress_cmd_error_once = true;
}
/* take the return value to make compiler happy - it will
@@ -1001,8 +1001,12 @@ void iwl_mld_add_link_debugfs(struct ieee80211_hw *hw,
* If not, this is a per-link dir of a MLO vif, add in it the iwlmld
* dir.
*/
- if (!mld_link_dir)
+ if (!mld_link_dir) {
mld_link_dir = debugfs_create_dir("iwlmld", dir);
+ } else {
+ /* Release the reference from debugfs_lookup */
+ dput(mld_link_dir);
+ }
}
static ssize_t _iwl_dbgfs_fixed_rate_write(struct iwl_mld *mld, char *buf,
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/iface.c b/drivers/net/wireless/intel/iwlwifi/mld/iface.c
index 38993d65c052..ed379825a923 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/iface.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/iface.c
@@ -115,20 +115,12 @@ static bool iwl_mld_is_nic_ack_enabled(struct iwl_mld *mld,
static void iwl_mld_set_he_support(struct iwl_mld *mld,
struct ieee80211_vif *vif,
- struct iwl_mac_config_cmd *cmd,
- int cmd_ver)
+ struct iwl_mac_config_cmd *cmd)
{
- if (vif->type == NL80211_IFTYPE_AP) {
- if (cmd_ver == 2)
- cmd->wifi_gen_v2.he_ap_support = cpu_to_le16(1);
- else
- cmd->wifi_gen.he_ap_support = 1;
- } else {
- if (cmd_ver == 2)
- cmd->wifi_gen_v2.he_support = cpu_to_le16(1);
- else
- cmd->wifi_gen.he_support = 1;
- }
+ if (vif->type == NL80211_IFTYPE_AP)
+ cmd->wifi_gen.he_ap_support = 1;
+ else
+ cmd->wifi_gen.he_support = 1;
}
/* fill the common part for all interface types */
@@ -140,9 +132,6 @@ static void iwl_mld_mac_cmd_fill_common(struct iwl_mld *mld,
struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
struct ieee80211_bss_conf *link_conf;
unsigned int link_id;
- int cmd_ver = iwl_fw_lookup_cmd_ver(mld->fw,
- WIDE_ID(MAC_CONF_GROUP,
- MAC_CONFIG_CMD), 0);
lockdep_assert_wiphy(mld->wiphy);
@@ -169,11 +158,8 @@ static void iwl_mld_mac_cmd_fill_common(struct iwl_mld *mld,
* and enable both when we have MLO.
*/
if (ieee80211_vif_is_mld(vif)) {
- iwl_mld_set_he_support(mld, vif, cmd, cmd_ver);
- if (cmd_ver == 2)
- cmd->wifi_gen_v2.eht_support = cpu_to_le32(1);
- else
- cmd->wifi_gen.eht_support = 1;
+ iwl_mld_set_he_support(mld, vif, cmd);
+ cmd->wifi_gen.eht_support = 1;
return;
}
@@ -181,7 +167,7 @@ static void iwl_mld_mac_cmd_fill_common(struct iwl_mld *mld,
if (!link_conf->he_support)
continue;
- iwl_mld_set_he_support(mld, vif, cmd, cmd_ver);
+ iwl_mld_set_he_support(mld, vif, cmd);
/* EHT, if supported, was already set above */
break;
@@ -451,24 +437,21 @@ int iwl_mld_add_vif(struct iwl_mld *mld, struct ieee80211_vif *vif)
return ret;
}
-int iwl_mld_rm_vif(struct iwl_mld *mld, struct ieee80211_vif *vif)
+void iwl_mld_rm_vif(struct iwl_mld *mld, struct ieee80211_vif *vif)
{
struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
- int ret;
lockdep_assert_wiphy(mld->wiphy);
- ret = iwl_mld_mac_fw_action(mld, vif, FW_CTXT_ACTION_REMOVE);
+ iwl_mld_mac_fw_action(mld, vif, FW_CTXT_ACTION_REMOVE);
if (WARN_ON(mld_vif->fw_id >= ARRAY_SIZE(mld->fw_id_to_vif)))
- return -EINVAL;
+ return;
RCU_INIT_POINTER(mld->fw_id_to_vif[mld_vif->fw_id], NULL);
iwl_mld_cancel_notifications_of_object(mld, IWL_MLD_OBJECT_TYPE_VIF,
mld_vif->fw_id);
-
- return ret;
}
void iwl_mld_set_vif_associated(struct iwl_mld *mld,
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/iface.h b/drivers/net/wireless/intel/iwlwifi/mld/iface.h
index 05dcb63701b1..a3573d20f214 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/iface.h
+++ b/drivers/net/wireless/intel/iwlwifi/mld/iface.h
@@ -125,8 +125,6 @@ struct iwl_mld_emlsr {
* @ap_sta: pointer to AP sta, for easier access to it.
* Relevant only for STA vifs.
* @authorized: indicates the AP station was set to authorized
- * @bigtks: BIGTKs of the AP, for beacon protection.
- * Only valid for STA. (FIXME: needs to be per link)
* @num_associated_stas: number of associated STAs. Relevant only for AP mode.
* @ap_ibss_active: whether the AP/IBSS was started
* @cca_40mhz_workaround: When we are connected in 2.4 GHz and 40 MHz, and the
@@ -158,7 +156,6 @@ struct iwl_mld_vif {
struct iwl_mld_session_protect session_protect;
struct ieee80211_sta *ap_sta;
bool authorized;
- struct ieee80211_key_conf __rcu *bigtks[2];
u8 num_associated_stas;
bool ap_ibss_active;
enum iwl_mld_cca_40mhz_wa_status cca_40mhz_workaround;
@@ -227,7 +224,7 @@ void iwl_mld_cleanup_vif(void *data, u8 *mac, struct ieee80211_vif *vif);
int iwl_mld_mac_fw_action(struct iwl_mld *mld, struct ieee80211_vif *vif,
u32 action);
int iwl_mld_add_vif(struct iwl_mld *mld, struct ieee80211_vif *vif);
-int iwl_mld_rm_vif(struct iwl_mld *mld, struct ieee80211_vif *vif);
+void iwl_mld_rm_vif(struct iwl_mld *mld, struct ieee80211_vif *vif);
void iwl_mld_set_vif_associated(struct iwl_mld *mld,
struct ieee80211_vif *vif);
u8 iwl_mld_get_fw_bss_vifs_ids(struct iwl_mld *mld);
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/key.c b/drivers/net/wireless/intel/iwlwifi/mld/key.c
index 13462a5ad79a..04192c5f07ff 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/key.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/key.c
@@ -368,3 +368,41 @@ int iwl_mld_update_sta_keys(struct iwl_mld *mld,
&data);
return data.err;
}
+
+void iwl_mld_track_bigtk(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct ieee80211_key_conf *key, bool add)
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ struct iwl_mld_link *link;
+
+ if (vif->type != NL80211_IFTYPE_STATION)
+ return;
+
+ if (WARN_ON(key->keyidx < 6 || key->keyidx > 7))
+ return;
+
+ if (WARN_ON(key->link_id < 0))
+ return;
+
+ link = iwl_mld_link_dereference_check(mld_vif, key->link_id);
+ if (WARN_ON(!link))
+ return;
+
+ if (add)
+ rcu_assign_pointer(link->bigtks[key->keyidx - 6], key);
+ else
+ RCU_INIT_POINTER(link->bigtks[key->keyidx - 6], NULL);
+}
+
+bool iwl_mld_beacon_protection_enabled(struct iwl_mld *mld,
+ struct ieee80211_bss_conf *link)
+{
+ struct iwl_mld_link *mld_link = iwl_mld_link_from_mac80211(link);
+
+ if (WARN_ON(!mld_link))
+ return false;
+
+ return rcu_access_pointer(mld_link->bigtks[0]) ||
+ rcu_access_pointer(mld_link->bigtks[1]);
+}
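iwl_mld_track_bigtk() publishes the key pointer with rcu_assign_pointer() and clears it with RCU_INIT_POINTER(), so iwl_mld_beacon_protection_enabled() can do a lockless existence check via rcu_access_pointer(). A generic sketch of that publish/check pattern, with made-up names:

#include <linux/rcupdate.h>

struct slot_holder {
	void __rcu *slot;
};

static void publish_slot(struct slot_holder *h, void *obj, bool add)
{
	if (add)
		rcu_assign_pointer(h->slot, obj);	/* pairs with readers' rcu_dereference() */
	else
		RCU_INIT_POINTER(h->slot, NULL);
}

static bool slot_present(struct slot_holder *h)
{
	/* rcu_access_pointer(): existence test only, no dereference needed */
	return rcu_access_pointer(h->slot);
}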
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/key.h b/drivers/net/wireless/intel/iwlwifi/mld/key.h
index a68ea48913be..5a9efdaa3b03 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/key.h
+++ b/drivers/net/wireless/intel/iwlwifi/mld/key.h
@@ -36,4 +36,11 @@ iwl_mld_cleanup_keys_iter(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
key->hw_key_idx = STA_KEY_IDX_INVALID;
}
+void iwl_mld_track_bigtk(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct ieee80211_key_conf *key, bool add);
+
+bool iwl_mld_beacon_protection_enabled(struct iwl_mld *mld,
+ struct ieee80211_bss_conf *link);
+
#endif /* __iwl_mld_key_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/link.c b/drivers/net/wireless/intel/iwlwifi/mld/link.c
index 782fc41aa1c3..738f80fe0c50 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/link.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/link.c
@@ -532,7 +532,8 @@ void iwl_mld_handle_missed_beacon_notif(struct iwl_mld *mld,
le32_to_cpu(notif->consec_missed_beacons_other_link);
struct ieee80211_bss_conf *link_conf =
iwl_mld_fw_id_to_link_conf(mld, fw_link_id);
- u32 bss_param_ch_cnt_link_id;
+ struct ieee80211_bss_conf *other_link;
+ u32 bss_param_ch_cnt_link_id, other_link_fw_id;
struct ieee80211_vif *vif;
u8 link_id;
@@ -550,11 +551,6 @@ void iwl_mld_handle_missed_beacon_notif(struct iwl_mld *mld,
if (WARN_ON(!vif))
return;
- mld->trans->dbg.dump_file_name_ext_valid = true;
- snprintf(mld->trans->dbg.dump_file_name_ext, IWL_FW_INI_MAX_NAME,
- "LinkId_%d_MacType_%d", fw_link_id,
- iwl_mld_mac80211_iftype_to_fw(vif));
-
iwl_dbg_tlv_time_point(&mld->fwrt,
IWL_FW_INI_TIME_POINT_MISSED_BEACONS, &tp_data);
@@ -572,8 +568,11 @@ void iwl_mld_handle_missed_beacon_notif(struct iwl_mld *mld,
if (missed_bcon_since_rx > IWL_MLD_MISSED_BEACONS_THRESHOLD) {
ieee80211_cqm_beacon_loss_notify(vif, GFP_ATOMIC);
- /* try to switch links, no-op if we don't have MLO */
- iwl_mld_int_mlo_scan(mld, vif);
+ /* Not in EMLSR and we can't hear the link.
+ * Try to switch to a better link. EMLSR case is handled below.
+ */
+ if (!iwl_mld_emlsr_active(vif))
+ iwl_mld_int_mlo_scan(mld, vif);
}
/* no more logic if we're not in EMLSR */
@@ -584,6 +583,17 @@ void iwl_mld_handle_missed_beacon_notif(struct iwl_mld *mld,
if (le32_to_cpu(notif->other_link_id) == FW_CTXT_ID_INVALID)
return;
+ other_link_fw_id = le32_to_cpu(notif->other_link_id);
+ other_link = iwl_mld_fw_id_to_link_conf(mld, other_link_fw_id);
+
+ if (IWL_FW_CHECK(mld, !other_link, "link doesn't exist for: %d\n",
+ other_link_fw_id))
+ return;
+
+ IWL_DEBUG_EHT(mld,
+ "missed bcn on the other link (link_id=%u): %u\n",
+ other_link->link_id, scnd_lnk_bcn_lost);
+
/* Exit EMLSR if we lost more than
* IWL_MLD_MISSED_BEACONS_EXIT_ESR_THRESH beacons on boths links
* OR more than IWL_MLD_BCN_LOSS_EXIT_ESR_THRESH on current link.
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/link.h b/drivers/net/wireless/intel/iwlwifi/mld/link.h
index cad2c9426349..9e4da8e4de93 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/link.h
+++ b/drivers/net/wireless/intel/iwlwifi/mld/link.h
@@ -36,6 +36,7 @@ struct iwl_probe_resp_data {
* @he_ru_2mhz_block: 26-tone RU OFDMA transmissions should be blocked.
* @igtk: fw can only have one IGTK at a time, whereas mac80211 can have two.
* This tracks the one IGTK that currently exists in FW.
+ * @bigtks: BIGTKs of the AP. Only valid for STA mode.
* @bcast_sta: station used for broadcast packets. Used in AP, GO and IBSS.
* @mcast_sta: station used for multicast packets. Used in AP, GO and IBSS.
* @mon_sta: station used for TX injection in monitor interface.
@@ -59,6 +60,7 @@ struct iwl_mld_link {
struct ieee80211_chanctx_conf __rcu *chan_ctx;
bool he_ru_2mhz_block;
struct ieee80211_key_conf *igtk;
+ struct ieee80211_key_conf __rcu *bigtks[2];
);
/* And here fields that survive a fw restart */
struct iwl_mld_int_sta bcast_sta;
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mld/mac80211.c
index b0bd01914a91..5725104a53bf 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/mac80211.c
@@ -626,7 +626,7 @@ int iwl_mld_mac80211_add_interface(struct ieee80211_hw *hw,
IEEE80211_VIF_SUPPORTS_CQM_RSSI;
}
- if (vif->p2p || iwl_fw_lookup_cmd_ver(mld->fw, PHY_CONTEXT_CMD, 0) < 5)
+ if (vif->p2p)
vif->driver_flags |= IEEE80211_VIF_IGNORE_OFDMA_WIDER_BW;
/*
@@ -1966,13 +1966,8 @@ iwl_mld_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
iwl_fw_runtime_suspend(&mld->fwrt);
ret = iwl_mld_wowlan_suspend(mld, wowlan);
- if (ret) {
- if (ret < 0) {
- mld->trans->state = IWL_TRANS_NO_FW;
- set_bit(STATUS_FW_ERROR, &mld->trans->status);
- }
+ if (ret)
return 1;
- }
if (iwl_mld_no_wowlan_suspend(mld))
return 1;
@@ -2065,9 +2060,8 @@ static int iwl_mld_set_key_add(struct iwl_mld *mld,
return -EOPNOTSUPP;
}
- if (vif->type == NL80211_IFTYPE_STATION &&
- (keyidx == 6 || keyidx == 7))
- rcu_assign_pointer(mld_vif->bigtks[keyidx - 6], key);
+ if (keyidx == 6 || keyidx == 7)
+ iwl_mld_track_bigtk(mld, vif, key, true);
/* After exiting from RFKILL, hostapd configures GTK/ITGK before the
* AP is started, but those keys can't be sent to the FW before the
@@ -2116,9 +2110,8 @@ static void iwl_mld_set_key_remove(struct iwl_mld *mld,
sta ? iwl_mld_sta_from_mac80211(sta) : NULL;
int keyidx = key->keyidx;
- if (vif->type == NL80211_IFTYPE_STATION &&
- (keyidx == 6 || keyidx == 7))
- RCU_INIT_POINTER(mld_vif->bigtks[keyidx - 6], NULL);
+ if (keyidx == 6 || keyidx == 7)
+ iwl_mld_track_bigtk(mld, vif, key, false);
if (mld_sta && key->flags & IEEE80211_KEY_FLAG_PAIRWISE &&
(key->cipher == WLAN_CIPHER_SUITE_CCMP ||
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/mld.c b/drivers/net/wireless/intel/iwlwifi/mld/mld.c
index 7b46ccc306ab..a6962256bdd1 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/mld.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/mld.c
@@ -147,6 +147,7 @@ iwl_mld_construct_fw_runtime(struct iwl_mld *mld, struct iwl_trans *trans,
*/
static const struct iwl_hcmd_names iwl_mld_legacy_names[] = {
HCMD_NAME(UCODE_ALIVE_NTFY),
+ HCMD_NAME(REPLY_ERROR),
HCMD_NAME(INIT_COMPLETE_NOTIF),
HCMD_NAME(PHY_CONTEXT_CMD),
HCMD_NAME(SCAN_CFG_CMD),
@@ -158,12 +159,14 @@ static const struct iwl_hcmd_names iwl_mld_legacy_names[] = {
HCMD_NAME(LEDS_CMD),
HCMD_NAME(WNM_80211V_TIMING_MEASUREMENT_NOTIFICATION),
HCMD_NAME(WNM_80211V_TIMING_MEASUREMENT_CONFIRM_NOTIFICATION),
+ HCMD_NAME(PHY_CONFIGURATION_CMD),
HCMD_NAME(SCAN_OFFLOAD_UPDATE_PROFILES_CMD),
HCMD_NAME(POWER_TABLE_CMD),
HCMD_NAME(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION),
HCMD_NAME(BEACON_NOTIFICATION),
HCMD_NAME(BEACON_TEMPLATE_CMD),
HCMD_NAME(TX_ANT_CONFIGURATION_CMD),
+ HCMD_NAME(BT_CONFIG),
HCMD_NAME(REDUCE_TX_POWER_CMD),
HCMD_NAME(MISSED_BEACONS_NOTIFICATION),
HCMD_NAME(MAC_PM_POWER_TABLE),
@@ -251,6 +254,7 @@ static const struct iwl_hcmd_names iwl_mld_data_path_names[] = {
HCMD_NAME(TLC_MNG_CONFIG_CMD),
HCMD_NAME(RX_BAID_ALLOCATION_CONFIG_CMD),
HCMD_NAME(SCD_QUEUE_CONFIG_CMD),
+ HCMD_NAME(SEC_KEY_CMD),
HCMD_NAME(ESR_MODE_NOTIF),
HCMD_NAME(MONITOR_NOTIF),
HCMD_NAME(TLC_MNG_UPDATE_NOTIF),
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/mlo.c b/drivers/net/wireless/intel/iwlwifi/mld/mlo.c
index e57f5388fe77..241a6271d13d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/mlo.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/mlo.c
@@ -357,38 +357,26 @@ iwl_mld_vif_iter_emlsr_mode_notif(void *data, u8 *mac,
struct ieee80211_vif *vif)
{
const struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
- enum iwl_mvm_fw_esr_recommendation action;
- const struct iwl_esr_mode_notif *notif = NULL;
-
- if (iwl_fw_lookup_notif_ver(mld_vif->mld->fw, DATA_PATH_GROUP,
- ESR_MODE_NOTIF, 0) > 1) {
- notif = (void *)data;
- action = le32_to_cpu(notif->action);
- } else {
- const struct iwl_esr_mode_notif_v1 *notif_v1 = (void *)data;
-
- action = le32_to_cpu(notif_v1->action);
- }
+ const struct iwl_esr_mode_notif *notif = (void *)data;
+ enum iwl_mvm_fw_esr_recommendation action = le32_to_cpu(notif->action);
if (!iwl_mld_vif_has_emlsr_cap(vif))
return;
switch (action) {
case ESR_RECOMMEND_LEAVE:
- if (notif)
- IWL_DEBUG_INFO(mld_vif->mld,
- "FW recommend leave reason = 0x%x\n",
- le32_to_cpu(notif->leave_reason_mask));
+ IWL_DEBUG_INFO(mld_vif->mld,
+ "FW recommend leave reason = 0x%x\n",
+ le32_to_cpu(notif->leave_reason_mask));
iwl_mld_exit_emlsr(mld_vif->mld, vif,
IWL_MLD_EMLSR_EXIT_FW_REQUEST,
iwl_mld_get_primary_link(vif));
break;
case ESR_FORCE_LEAVE:
- if (notif)
- IWL_DEBUG_INFO(mld_vif->mld,
- "FW force leave reason = 0x%x\n",
- le32_to_cpu(notif->leave_reason_mask));
+ IWL_DEBUG_INFO(mld_vif->mld,
+ "FW force leave reason = 0x%x\n",
+ le32_to_cpu(notif->leave_reason_mask));
fallthrough;
case ESR_RECOMMEND_ENTER:
default:
@@ -735,12 +723,6 @@ iwl_mld_set_link_sel_data(struct iwl_mld *mld,
u16 max_grade = 0;
unsigned long link_id;
- /*
- * TODO: don't select links that weren't discovered in the last scan
- * This requires mac80211 (or cfg80211) changes to forward/track when
- * a BSS was last updated. cfg80211 already tracks this information but
- * it is not exposed within the kernel.
- */
for_each_set_bit(link_id, &usable_links, IEEE80211_MLD_MAX_NUM_LINKS) {
struct ieee80211_bss_conf *link_conf =
link_conf_dereference_protected(vif, link_id);
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/notif.c b/drivers/net/wireless/intel/iwlwifi/mld/notif.c
index f17aeca4fae6..884973d0b344 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/notif.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/notif.c
@@ -333,7 +333,6 @@ CMD_VERSIONS(bt_coex_notif,
CMD_VERSIONS(beacon_notification,
CMD_VER_ENTRY(6, iwl_extended_beacon_notif))
CMD_VERSIONS(emlsr_mode_notif,
- CMD_VER_ENTRY(1, iwl_esr_mode_notif_v1)
CMD_VER_ENTRY(2, iwl_esr_mode_notif))
CMD_VERSIONS(emlsr_trans_fail_notif,
CMD_VER_ENTRY(1, iwl_esr_trans_fail_notif))
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/regulatory.c b/drivers/net/wireless/intel/iwlwifi/mld/regulatory.c
index 75d2f5cb23a7..40571125b3ab 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/regulatory.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/regulatory.c
@@ -163,18 +163,32 @@ int iwl_mld_init_sgom(struct iwl_mld *mld)
static int iwl_mld_ppag_send_cmd(struct iwl_mld *mld)
{
- union iwl_ppag_table_cmd cmd = {};
- int ret, len;
+ struct iwl_fw_runtime *fwrt = &mld->fwrt;
+ union iwl_ppag_table_cmd cmd = {
+ .v7.ppag_config_info.table_source = fwrt->ppag_bios_source,
+ .v7.ppag_config_info.table_revision = fwrt->ppag_bios_rev,
+ .v7.ppag_config_info.value = cpu_to_le32(fwrt->ppag_flags),
+ };
+ int ret;
- ret = iwl_fill_ppag_table(&mld->fwrt, &cmd, &len);
- /* Not supporting PPAG table is a valid scenario */
- if (ret < 0)
- return 0;
+ IWL_DEBUG_RADIO(fwrt,
+ "PPAG MODE bits going to be sent: %d\n",
+ fwrt->ppag_flags);
+
+ for (int chain = 0; chain < IWL_NUM_CHAIN_LIMITS; chain++) {
+ for (int subband = 0; subband < IWL_NUM_SUB_BANDS_V2; subband++) {
+ cmd.v7.gain[chain][subband] =
+ fwrt->ppag_chains[chain].subbands[subband];
+ IWL_DEBUG_RADIO(fwrt,
+ "PPAG table: chain[%d] band[%d]: gain = %d\n",
+ chain, subband, cmd.v7.gain[chain][subband]);
+ }
+ }
IWL_DEBUG_RADIO(mld, "Sending PER_PLATFORM_ANT_GAIN_CMD\n");
ret = iwl_mld_send_cmd_pdu(mld, WIDE_ID(PHY_OPS_GROUP,
PER_PLATFORM_ANT_GAIN_CMD),
- &cmd, len);
+ &cmd, sizeof(cmd.v7));
if (ret < 0)
IWL_ERR(mld, "failed to send PER_PLATFORM_ANT_GAIN_CMD (%d)\n",
ret);
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/roc.c b/drivers/net/wireless/intel/iwlwifi/mld/roc.c
index e85f45bce79a..4136c98030d0 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/roc.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/roc.c
@@ -82,9 +82,6 @@ int iwl_mld_start_roc(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct iwl_roc_req cmd = {
.action = cpu_to_le32(FW_CTXT_ACTION_ADD),
};
- u8 ver = iwl_fw_lookup_cmd_ver(mld->fw,
- WIDE_ID(MAC_CONF_GROUP, ROC_CMD), 0);
- u16 cmd_len = ver < 6 ? sizeof(struct iwl_roc_req_v5) : sizeof(cmd);
enum iwl_roc_activity activity;
int ret = 0;
@@ -140,7 +137,7 @@ int iwl_mld_start_roc(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
memcpy(cmd.node_addr, vif->addr, ETH_ALEN);
ret = iwl_mld_send_cmd_pdu(mld, WIDE_ID(MAC_CONF_GROUP, ROC_CMD),
- &cmd, cmd_len);
+ &cmd);
if (ret) {
IWL_ERR(mld, "Couldn't send the ROC_CMD\n");
return ret;
@@ -190,9 +187,6 @@ int iwl_mld_cancel_roc(struct ieee80211_hw *hw,
struct iwl_roc_req cmd = {
.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE),
};
- u8 ver = iwl_fw_lookup_cmd_ver(mld->fw,
- WIDE_ID(MAC_CONF_GROUP, ROC_CMD), 0);
- u16 cmd_len = ver < 6 ? sizeof(struct iwl_roc_req_v5) : sizeof(cmd);
int ret;
lockdep_assert_wiphy(mld->wiphy);
@@ -208,7 +202,7 @@ int iwl_mld_cancel_roc(struct ieee80211_hw *hw,
cmd.activity = cpu_to_le32(mld_vif->roc_activity);
ret = iwl_mld_send_cmd_pdu(mld, WIDE_ID(MAC_CONF_GROUP, ROC_CMD),
- &cmd, cmd_len);
+ &cmd);
if (ret)
IWL_ERR(mld, "Couldn't send the command to cancel the ROC\n");
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/rx.c b/drivers/net/wireless/intel/iwlwifi/mld/rx.c
index b6dedd1ecd4d..20d866dd92c2 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/rx.c
@@ -1611,20 +1611,21 @@ iwl_mld_rx_with_sta(struct iwl_mld *mld, struct ieee80211_hdr *hdr,
return sta;
}
-#define KEY_IDX_LEN 2
-
static int iwl_mld_rx_mgmt_prot(struct ieee80211_sta *sta,
struct ieee80211_hdr *hdr,
struct ieee80211_rx_status *rx_status,
u32 mpdu_status,
u32 mpdu_len)
{
+ struct iwl_mld_link *link;
struct wireless_dev *wdev;
struct iwl_mld_sta *mld_sta;
struct iwl_mld_vif *mld_vif;
u8 keyidx;
struct ieee80211_key_conf *key;
const u8 *frame = (void *)hdr;
+ const u8 *mmie;
+ u8 link_id;
if ((mpdu_status & IWL_RX_MPDU_STATUS_SEC_MASK) ==
IWL_RX_MPDU_STATUS_SEC_NONE)
@@ -1657,21 +1658,30 @@ static int iwl_mld_rx_mgmt_prot(struct ieee80211_sta *sta,
return 0;
}
+ link_id = rx_status->link_valid ? rx_status->link_id : 0;
+ link = rcu_dereference(mld_vif->link[link_id]);
+ if (WARN_ON_ONCE(!link))
+ return -1;
+
/* both keys will have the same cipher and MIC length, use
* whichever one is available
*/
- key = rcu_dereference(mld_vif->bigtks[0]);
+ key = rcu_dereference(link->bigtks[0]);
if (!key) {
- key = rcu_dereference(mld_vif->bigtks[1]);
+ key = rcu_dereference(link->bigtks[1]);
if (!key)
goto report;
}
- if (mpdu_len < key->icv_len + IEEE80211_GMAC_PN_LEN + KEY_IDX_LEN)
+ /* get the real key ID */
+ if (mpdu_len < key->icv_len)
goto report;
- /* get the real key ID */
- keyidx = frame[mpdu_len - key->icv_len - IEEE80211_GMAC_PN_LEN - KEY_IDX_LEN];
+ mmie = frame + (mpdu_len - key->icv_len);
+
+ /* key_id sits at the same offset in ieee80211_mmie and ieee80211_mmie_16 */
+ keyidx = le16_to_cpu(((const struct ieee80211_mmie *) mmie)->key_id);
+
/* and if that's the other key, look it up */
if (keyidx != key->keyidx) {
/* shouldn't happen since firmware checked, but be safe
@@ -1680,7 +1690,7 @@ static int iwl_mld_rx_mgmt_prot(struct ieee80211_sta *sta,
if (keyidx != 6 && keyidx != 7)
return -1;
- key = rcu_dereference(mld_vif->bigtks[keyidx - 6]);
+ key = rcu_dereference(link->bigtks[keyidx - 6]);
if (!key)
goto report;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/scan.c b/drivers/net/wireless/intel/iwlwifi/mld/scan.c
index 62f97a18a16c..fd1022ddc912 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/scan.c
@@ -504,9 +504,7 @@ iwl_mld_scan_get_cmd_gen_flags2(struct iwl_mld *mld,
*/
if (scan_status == IWL_MLD_SCAN_REGULAR &&
ieee80211_vif_type_p2p(vif) == NL80211_IFTYPE_AP &&
- gen_flags & IWL_UMAC_SCAN_GEN_FLAGS_V2_FORCE_PASSIVE &&
- iwl_fw_lookup_notif_ver(mld->fw, SCAN_GROUP,
- CHANNEL_SURVEY_NOTIF, 0) >= 1)
+ gen_flags & IWL_UMAC_SCAN_GEN_FLAGS_V2_FORCE_PASSIVE)
flags |= IWL_UMAC_SCAN_GEN_FLAGS2_COLLECT_CHANNEL_STATS;
return flags;
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/sta.c b/drivers/net/wireless/intel/iwlwifi/mld/sta.c
index 8fb51209b4a6..5cdbfa29a202 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/sta.c
@@ -401,11 +401,9 @@ static u32 iwl_mld_get_htc_flags(struct ieee80211_link_sta *link_sta)
static int iwl_mld_send_sta_cmd(struct iwl_mld *mld,
const struct iwl_sta_cfg_cmd *cmd)
{
- u32 cmd_id = WIDE_ID(MAC_CONF_GROUP, STA_CONFIG_CMD);
- int cmd_len = iwl_fw_lookup_cmd_ver(mld->fw, cmd_id, 0) > 1 ?
- sizeof(*cmd) :
- sizeof(struct iwl_sta_cfg_cmd_v1);
- int ret = iwl_mld_send_cmd_pdu(mld, cmd_id, cmd, cmd_len);
+ int ret = iwl_mld_send_cmd_pdu(mld,
+ WIDE_ID(MAC_CONF_GROUP, STA_CONFIG_CMD),
+ cmd);
if (ret)
IWL_ERR(mld, "STA_CONFIG_CMD send failed, ret=0x%x\n", ret);
return ret;
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/stats.c b/drivers/net/wireless/intel/iwlwifi/mld/stats.c
index cbc64db5eab6..7b8709716324 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/stats.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/stats.c
@@ -379,11 +379,14 @@ static void iwl_mld_update_link_sig(struct ieee80211_vif *vif, int sig,
/* TODO: task=statistics handle CQM notifications */
- if (sig < IWL_MLD_LOW_RSSI_MLO_SCAN_THRESH)
- iwl_mld_int_mlo_scan(mld, vif);
-
- if (!iwl_mld_emlsr_active(vif))
+ if (!iwl_mld_emlsr_active(vif)) {
+ /* Not in EMLSR: if the signal is bad, trigger an MLO scan
+ * to possibly switch links. The EMLSR case is handled below.
+ */
+ if (sig < IWL_MLD_LOW_RSSI_MLO_SCAN_THRESH)
+ iwl_mld_int_mlo_scan(mld, vif);
return;
+ }
/* We are in EMLSR, check if we need to exit */
exit_emlsr_thresh =
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/tlc.c b/drivers/net/wireless/intel/iwlwifi/mld/tlc.c
index a9ca92c0455e..0e172281b0c8 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/tlc.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/tlc.c
@@ -157,9 +157,9 @@ iwl_mld_get_highest_fw_mcs(const struct ieee80211_sta_vht_cap *vht_cap,
static void
iwl_mld_fill_vht_rates(const struct ieee80211_link_sta *link_sta,
const struct ieee80211_sta_vht_cap *vht_cap,
- struct iwl_tlc_config_cmd_v4 *cmd)
+ struct iwl_tlc_config_cmd *cmd)
{
- u16 supp;
+ u32 supp;
int i, highest_mcs;
u8 max_nss = link_sta->rx_nss;
struct ieee80211_vht_cap ieee_vht_cap = {
@@ -182,7 +182,7 @@ iwl_mld_fill_vht_rates(const struct ieee80211_link_sta *link_sta,
if (link_sta->bandwidth == IEEE80211_STA_RX_BW_20)
supp &= ~BIT(IWL_TLC_MNG_HT_RATE_MCS9);
- cmd->ht_rates[i][IWL_TLC_MCS_PER_BW_80] = cpu_to_le16(supp);
+ cmd->ht_rates[i][IWL_TLC_MCS_PER_BW_80] = cpu_to_le32(supp);
/* Check if VHT extended NSS indicates that the bandwidth/NSS
* configuration is supported - only for MCS 0 since we already
* decoded the MCS bits anyway ourselves.
@@ -196,7 +196,7 @@ iwl_mld_fill_vht_rates(const struct ieee80211_link_sta *link_sta,
}
}
-static u16 iwl_mld_he_mac80211_mcs_to_fw_mcs(u16 mcs)
+static u32 iwl_mld_he_mac80211_mcs_to_fw_mcs(u16 mcs)
{
switch (mcs) {
case IEEE80211_HE_MCS_SUPPORT_0_7:
@@ -216,7 +216,7 @@ static u16 iwl_mld_he_mac80211_mcs_to_fw_mcs(u16 mcs)
static void
iwl_mld_fill_he_rates(const struct ieee80211_link_sta *link_sta,
const struct ieee80211_sta_he_cap *own_he_cap,
- struct iwl_tlc_config_cmd_v4 *cmd)
+ struct iwl_tlc_config_cmd *cmd)
{
const struct ieee80211_sta_he_cap *he_cap = &link_sta->he_cap;
u16 mcs_160 = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_160);
@@ -245,7 +245,7 @@ iwl_mld_fill_he_rates(const struct ieee80211_link_sta *link_sta,
if (_mcs_80 > _tx_mcs_80)
_mcs_80 = _tx_mcs_80;
cmd->ht_rates[i][IWL_TLC_MCS_PER_BW_80] =
- cpu_to_le16(iwl_mld_he_mac80211_mcs_to_fw_mcs(_mcs_80));
+ cpu_to_le32(iwl_mld_he_mac80211_mcs_to_fw_mcs(_mcs_80));
/* If one side doesn't support - mark both as not supporting */
if (_mcs_160 == IEEE80211_HE_MCS_NOT_SUPPORTED ||
@@ -256,19 +256,19 @@ iwl_mld_fill_he_rates(const struct ieee80211_link_sta *link_sta,
if (_mcs_160 > _tx_mcs_160)
_mcs_160 = _tx_mcs_160;
cmd->ht_rates[i][IWL_TLC_MCS_PER_BW_160] =
- cpu_to_le16(iwl_mld_he_mac80211_mcs_to_fw_mcs(_mcs_160));
+ cpu_to_le32(iwl_mld_he_mac80211_mcs_to_fw_mcs(_mcs_160));
}
}
-static void iwl_mld_set_eht_mcs(__le16 ht_rates[][3],
+static void iwl_mld_set_eht_mcs(__le32 ht_rates[][3],
enum IWL_TLC_MCS_PER_BW bw,
- u8 max_nss, u16 mcs_msk)
+ u8 max_nss, u32 mcs_msk)
{
if (max_nss >= 2)
- ht_rates[IWL_TLC_NSS_2][bw] |= cpu_to_le16(mcs_msk);
+ ht_rates[IWL_TLC_NSS_2][bw] |= cpu_to_le32(mcs_msk);
if (max_nss >= 1)
- ht_rates[IWL_TLC_NSS_1][bw] |= cpu_to_le16(mcs_msk);
+ ht_rates[IWL_TLC_NSS_1][bw] |= cpu_to_le32(mcs_msk);
}
static const
@@ -307,7 +307,7 @@ iwl_mld_fill_eht_rates(struct ieee80211_vif *vif,
const struct ieee80211_link_sta *link_sta,
const struct ieee80211_sta_he_cap *own_he_cap,
const struct ieee80211_sta_eht_cap *own_eht_cap,
- struct iwl_tlc_config_cmd_v4 *cmd)
+ struct iwl_tlc_config_cmd *cmd)
{
/* peer RX mcs capa */
const struct ieee80211_eht_mcs_nss_supp *eht_rx_mcs =
@@ -405,7 +405,7 @@ iwl_mld_fill_supp_rates(struct iwl_mld *mld, struct ieee80211_vif *vif,
struct ieee80211_supported_band *sband,
const struct ieee80211_sta_he_cap *own_he_cap,
const struct ieee80211_sta_eht_cap *own_eht_cap,
- struct iwl_tlc_config_cmd_v4 *cmd)
+ struct iwl_tlc_config_cmd *cmd)
{
int i;
u16 non_ht_rates = 0;
@@ -435,7 +435,7 @@ iwl_mld_fill_supp_rates(struct iwl_mld *mld, struct ieee80211_vif *vif,
} else if (ht_cap->ht_supported) {
cmd->mode = IWL_TLC_MNG_MODE_HT;
cmd->ht_rates[IWL_TLC_NSS_1][IWL_TLC_MCS_PER_BW_80] =
- cpu_to_le16(ht_cap->mcs.rx_mask[0]);
+ cpu_to_le32(ht_cap->mcs.rx_mask[0]);
/* the station support only a single receive chain */
if (link_sta->smps_mode == IEEE80211_SMPS_STATIC)
@@ -443,10 +443,30 @@ iwl_mld_fill_supp_rates(struct iwl_mld *mld, struct ieee80211_vif *vif,
0;
else
cmd->ht_rates[IWL_TLC_NSS_2][IWL_TLC_MCS_PER_BW_80] =
- cpu_to_le16(ht_cap->mcs.rx_mask[1]);
+ cpu_to_le32(ht_cap->mcs.rx_mask[1]);
}
}
+static void iwl_mld_convert_tlc_cmd_to_v4(struct iwl_tlc_config_cmd *cmd,
+ struct iwl_tlc_config_cmd_v4 *cmd_v4)
+{
+ /* Copy everything until ht_rates */
+ memcpy(cmd_v4, cmd, offsetof(struct iwl_tlc_config_cmd, ht_rates));
+
+ /* Convert ht_rates from __le32 to __le16 */
+ BUILD_BUG_ON(ARRAY_SIZE(cmd_v4->ht_rates) != ARRAY_SIZE(cmd->ht_rates));
+ BUILD_BUG_ON(ARRAY_SIZE(cmd_v4->ht_rates[0]) != ARRAY_SIZE(cmd->ht_rates[0]));
+
+ for (int nss = 0; nss < ARRAY_SIZE(cmd->ht_rates); nss++)
+ for (int bw = 0; bw < ARRAY_SIZE(cmd->ht_rates[nss]); bw++)
+ cmd_v4->ht_rates[nss][bw] =
+ cpu_to_le16(le32_to_cpu(cmd->ht_rates[nss][bw]));
+
+ /* Copy the rest */
+ cmd_v4->max_mpdu_len = cmd->max_mpdu_len;
+ cmd_v4->max_tx_op = cmd->max_tx_op;
+}
+
static void iwl_mld_send_tlc_cmd(struct iwl_mld *mld,
struct ieee80211_vif *vif,
struct ieee80211_link_sta *link_sta,
@@ -458,7 +478,7 @@ static void iwl_mld_send_tlc_cmd(struct iwl_mld *mld,
ieee80211_get_he_iftype_cap_vif(sband, vif);
const struct ieee80211_sta_eht_cap *own_eht_cap =
ieee80211_get_eht_iftype_cap_vif(sband, vif);
- struct iwl_tlc_config_cmd_v4 cmd = {
+ struct iwl_tlc_config_cmd cmd = {
/* For AP mode, use 20 MHz until the STA is authorized */
.max_ch_width = mld_sta->sta_state > IEEE80211_STA_ASSOC ?
iwl_mld_fw_bw_from_sta_bw(link_sta) :
@@ -470,6 +490,11 @@ static void iwl_mld_send_tlc_cmd(struct iwl_mld *mld,
.max_mpdu_len = cpu_to_le16(link_sta->agg.max_amsdu_len),
};
int fw_sta_id = iwl_mld_fw_sta_id_from_link_sta(mld, link_sta);
+ u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, TLC_MNG_CONFIG_CMD);
+ u8 cmd_ver = iwl_fw_lookup_cmd_ver(mld->fw, cmd_id, 0);
+ struct iwl_tlc_config_cmd_v4 cmd_v4;
+ void *cmd_ptr;
+ u8 cmd_size;
int ret;
if (fw_sta_id < 0)
@@ -481,14 +506,26 @@ static void iwl_mld_send_tlc_cmd(struct iwl_mld *mld,
own_he_cap, own_eht_cap,
&cmd);
+ if (cmd_ver == 5) {
+ cmd_ptr = &cmd;
+ cmd_size = sizeof(cmd);
+ } else if (cmd_ver == 4) {
+ iwl_mld_convert_tlc_cmd_to_v4(&cmd, &cmd_v4);
+ cmd_ptr = &cmd_v4;
+ cmd_size = sizeof(cmd_v4);
+ } else {
+ IWL_ERR(mld, "Unsupported TLC config cmd version %d\n",
+ cmd_ver);
+ return;
+ }
+
IWL_DEBUG_RATE(mld,
"TLC CONFIG CMD, sta_id=%d, max_ch_width=%d, mode=%d\n",
cmd.sta_id, cmd.max_ch_width, cmd.mode);
/* Send async since this can be called within a RCU-read section */
- ret = iwl_mld_send_cmd_with_flags_pdu(mld, WIDE_ID(DATA_PATH_GROUP,
- TLC_MNG_CONFIG_CMD),
- CMD_ASYNC, &cmd);
+ ret = iwl_mld_send_cmd_with_flags_pdu(mld, cmd_id, CMD_ASYNC, cmd_ptr,
+ cmd_size);
if (ret)
IWL_ERR(mld, "Failed to send TLC cmd (%d)\n", ret);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
index 13cdc077d8d3..ca63f780140f 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
@@ -253,93 +253,6 @@ static void iwl_mvm_bt_coex_tcm_based_ci(struct iwl_mvm *mvm,
swap(data->primary, data->secondary);
}
-/*
- * This function receives the LB link id and checks if eSR should be
- * enabled or disabled (due to BT coex)
- */
-bool
-iwl_mvm_bt_coex_calculate_esr_mode(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- s32 link_rssi,
- bool primary)
-{
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- bool have_wifi_loss_rate =
- iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP,
- BT_PROFILE_NOTIFICATION, 0) > 4 ||
- iwl_fw_lookup_notif_ver(mvm->fw, BT_COEX_GROUP,
- PROFILE_NOTIF, 0) >= 1;
- u8 wifi_loss_mid_high_rssi;
- u8 wifi_loss_low_rssi;
- u8 wifi_loss_rate;
-
- if (iwl_fw_lookup_notif_ver(mvm->fw, BT_COEX_GROUP,
- PROFILE_NOTIF, 0) >= 1) {
- /* For now, we consider 2.4 GHz band / ANT_A only */
- wifi_loss_mid_high_rssi =
- mvm->last_bt_wifi_loss.wifi_loss_mid_high_rssi[PHY_BAND_24][0];
- wifi_loss_low_rssi =
- mvm->last_bt_wifi_loss.wifi_loss_low_rssi[PHY_BAND_24][0];
- } else {
- wifi_loss_mid_high_rssi = mvm->last_bt_notif.wifi_loss_mid_high_rssi;
- wifi_loss_low_rssi = mvm->last_bt_notif.wifi_loss_low_rssi;
- }
-
- if (wifi_loss_low_rssi == BT_OFF)
- return true;
-
- if (primary)
- return false;
-
- /* The feature is not supported */
- if (!have_wifi_loss_rate)
- return true;
-
-
- /*
- * In case we don't know the RSSI - take the lower wifi loss,
- * so we will more likely enter eSR, and if RSSI is low -
- * we will get an update on this and exit eSR.
- */
- if (!link_rssi)
- wifi_loss_rate = wifi_loss_mid_high_rssi;
-
- else if (mvmvif->esr_active)
- /* RSSI needs to get really low to disable eSR... */
- wifi_loss_rate =
- link_rssi <= -IWL_MVM_BT_COEX_DISABLE_ESR_THRESH ?
- wifi_loss_low_rssi :
- wifi_loss_mid_high_rssi;
- else
- /* ...And really high before we enable it back */
- wifi_loss_rate =
- link_rssi <= -IWL_MVM_BT_COEX_ENABLE_ESR_THRESH ?
- wifi_loss_low_rssi :
- wifi_loss_mid_high_rssi;
-
- return wifi_loss_rate <= IWL_MVM_BT_COEX_WIFI_LOSS_THRESH;
-}
-
-void iwl_mvm_bt_coex_update_link_esr(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- int link_id)
-{
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_mvm_vif_link_info *link = mvmvif->link[link_id];
-
- if (!ieee80211_vif_is_mld(vif) ||
- !iwl_mvm_vif_from_mac80211(vif)->authorized ||
- WARN_ON(!link))
- return;
-
- if (!iwl_mvm_bt_coex_calculate_esr_mode(mvm, vif,
- (s8)link->beacon_stats.avg_signal,
- link_id == iwl_mvm_get_primary_link(vif)))
- /* In case we decided to exit eSR - stay with the primary */
- iwl_mvm_exit_esr(mvm, vif, IWL_MVM_ESR_EXIT_COEX,
- iwl_mvm_get_primary_link(vif));
-}
-
static void iwl_mvm_bt_notif_per_link(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct iwl_bt_iterator_data *data,
@@ -385,8 +298,6 @@ static void iwl_mvm_bt_notif_per_link(struct iwl_mvm *mvm,
return;
}
- iwl_mvm_bt_coex_update_link_esr(mvm, vif, link_id);
-
if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_COEX_SCHEMA_2))
min_ag_for_static_smps = BT_VERY_HIGH_TRAFFIC;
else
@@ -525,32 +436,6 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
iwl_mvm_bt_notif_per_link(mvm, vif, data, link_id);
}
-/* must be called under rcu_read_lock */
-static void iwl_mvm_bt_coex_notif_iterator(void *_data, u8 *mac,
- struct ieee80211_vif *vif)
-{
- struct iwl_mvm *mvm = _data;
- struct ieee80211_bss_conf *link_conf;
- unsigned int link_id;
-
- lockdep_assert_held(&mvm->mutex);
-
- if (vif->type != NL80211_IFTYPE_STATION)
- return;
-
- for_each_vif_active_link(vif, link_conf, link_id) {
- struct ieee80211_chanctx_conf *chanctx_conf =
- rcu_dereference_check(link_conf->chanctx_conf,
- lockdep_is_held(&mvm->mutex));
-
- if ((!chanctx_conf ||
- chanctx_conf->def.chan->band != NL80211_BAND_2GHZ))
- continue;
-
- iwl_mvm_bt_coex_update_link_esr(mvm, vif, link_id);
- }
-}
-
static void iwl_mvm_bt_coex_notif_handle(struct iwl_mvm *mvm)
{
struct iwl_bt_iterator_data data = {
@@ -654,22 +539,6 @@ void iwl_mvm_rx_bt_coex_old_notif(struct iwl_mvm *mvm,
iwl_mvm_bt_coex_notif_handle(mvm);
}
-void iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb)
-{
- const struct iwl_rx_packet *pkt = rxb_addr(rxb);
- const struct iwl_bt_coex_profile_notif *notif = (const void *)pkt->data;
-
- lockdep_assert_held(&mvm->mutex);
-
- mvm->last_bt_wifi_loss = *notif;
-
- ieee80211_iterate_active_interfaces(mvm->hw,
- IEEE80211_IFACE_ITER_NORMAL,
- iwl_mvm_bt_coex_notif_iterator,
- mvm);
-}
-
void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
enum ieee80211_rssi_event_data rssi_event)
{
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
index 776600ddaea6..17f94663c941 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
- * Copyright (C) 2013-2014, 2018-2024 Intel Corporation
+ * Copyright (C) 2013-2014, 2018-2025 Intel Corporation
* Copyright (C) 2015 Intel Deutschland GmbH
*/
#ifndef __MVM_CONSTANTS_H
@@ -11,15 +11,7 @@
#include "fw-api.h"
#define IWL_MVM_UAPSD_NOAGG_BSSIDS_NUM 20
-#define IWL_MVM_BT_COEX_DISABLE_ESR_THRESH 69
-#define IWL_MVM_BT_COEX_ENABLE_ESR_THRESH 63
-#define IWL_MVM_BT_COEX_WIFI_LOSS_THRESH 0
#define IWL_MVM_TRIGGER_LINK_SEL_TIME_SEC 30
-#define IWL_MVM_TPT_COUNT_WINDOW_SEC 5
-#define IWL_MVM_BCN_LOSS_EXIT_ESR_THRESH_2_LINKS 5
-#define IWL_MVM_BCN_LOSS_EXIT_ESR_THRESH 15
-#define IWL_MVM_BCN_LOSS_EXIT_ESR_THRESH_BSS_PARAM_CHANGED 11
-#define IWL_MVM_LOW_RSSI_MLO_SCAN_THRESH -72
#define IWL_MVM_DEFAULT_PS_TX_DATA_TIMEOUT (100 * USEC_PER_MSEC)
#define IWL_MVM_DEFAULT_PS_RX_DATA_TIMEOUT (100 * USEC_PER_MSEC)
@@ -129,14 +121,4 @@
#define IWL_MVM_MIN_BEACON_INTERVAL_TU 16
#define IWL_MVM_AUTO_EML_ENABLE true
-#define IWL_MVM_HIGH_RSSI_THRESH_20MHZ -67
-#define IWL_MVM_LOW_RSSI_THRESH_20MHZ -71
-#define IWL_MVM_HIGH_RSSI_THRESH_40MHZ -64
-#define IWL_MVM_LOW_RSSI_THRESH_40MHZ -67
-#define IWL_MVM_HIGH_RSSI_THRESH_80MHZ -61
-#define IWL_MVM_LOW_RSSI_THRESH_80MHZ -74
-#define IWL_MVM_HIGH_RSSI_THRESH_160MHZ -58
-#define IWL_MVM_LOW_RSSI_THRESH_160MHZ -61
-
-#define IWL_MVM_ENTER_ESR_TPT_THRESH 400
#endif /* __MVM_CONSTANTS_H */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index 029c846a236f..07f1a84c274e 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -928,6 +928,10 @@ iwl_mvm_get_wowlan_config(struct iwl_mvm *mvm,
if (ap_sta->mfp)
wowlan_config_cmd->flags |= IS_11W_ASSOC;
+ if (rcu_access_pointer(mvmvif->bcn_prot.keys[0]) ||
+ rcu_access_pointer(mvmvif->bcn_prot.keys[1]))
+ wowlan_config_cmd->flags |= HAS_BEACON_PROTECTION;
+
if (iwl_fw_lookup_cmd_ver(mvm->fw, WOWLAN_CONFIGURATION, 0) < 6) {
/* Query the last used seqno and set it */
int ret = iwl_mvm_get_last_nonqos_seq(mvm, vif);
@@ -1238,15 +1242,14 @@ static void iwl_mvm_free_nd(struct iwl_mvm *mvm)
}
static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
- struct cfg80211_wowlan *wowlan,
- bool test)
+ struct cfg80211_wowlan *wowlan)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
struct ieee80211_vif *vif = NULL;
struct iwl_mvm_vif *mvmvif = NULL;
struct ieee80211_sta *ap_sta = NULL;
struct iwl_mvm_vif_link_info *mvm_link;
- struct iwl_d3_manager_config d3_cfg_cmd_data = {
+ struct iwl_d3_manager_config d3_cfg_cmd = {
/*
* Program the minimum sleep time to 10 seconds, as many
* platforms have issues processing a wakeup signal while
@@ -1254,23 +1257,14 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
*/
.min_sleep_time = cpu_to_le32(10 * 1000 * 1000),
};
- struct iwl_host_cmd d3_cfg_cmd = {
- .id = D3_CONFIG_CMD,
- .flags = CMD_WANT_SKB,
- .data[0] = &d3_cfg_cmd_data,
- .len[0] = sizeof(d3_cfg_cmd_data),
- };
int ret;
int len __maybe_unused;
bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
if (!wowlan) {
- /*
- * mac80211 shouldn't get here, but for D3 test
- * it doesn't warrant a warning
- */
- WARN_ON(!test);
+ /* mac80211 shouldn't get here */
+ WARN_ON(1);
return -EINVAL;
}
@@ -1278,10 +1272,6 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
if (IS_ERR_OR_NULL(vif))
return 1;
- ret = iwl_mvm_block_esr_sync(mvm, vif, IWL_MVM_ESR_BLOCKED_WOWLAN);
- if (ret)
- return ret;
-
mutex_lock(&mvm->mutex);
set_bit(IWL_MVM_STATUS_IN_D3, &mvm->status);
@@ -1351,7 +1341,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
#ifdef CONFIG_IWLWIFI_DEBUGFS
if (mvm->d3_wake_sysassert)
- d3_cfg_cmd_data.wakeup_flags |=
+ d3_cfg_cmd.wakeup_flags |=
cpu_to_le32(IWL_WAKEUP_D3_CONFIG_FW_ERROR);
#endif
@@ -1364,21 +1354,14 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
iwl_fw_dbg_stop_restart_recording(&mvm->fwrt, NULL, true);
/* must be last -- this switches firmware state */
- ret = iwl_mvm_send_cmd(mvm, &d3_cfg_cmd);
+ ret = iwl_mvm_send_cmd_pdu(mvm, D3_CONFIG_CMD, 0, sizeof(d3_cfg_cmd),
+ &d3_cfg_cmd);
if (ret)
goto out;
-#ifdef CONFIG_IWLWIFI_DEBUGFS
- len = iwl_rx_packet_payload_len(d3_cfg_cmd.resp_pkt);
- if (len >= sizeof(u32)) {
- mvm->d3_test_pme_ptr =
- le32_to_cpup((__le32 *)d3_cfg_cmd.resp_pkt->data);
- }
-#endif
- iwl_free_resp(&d3_cfg_cmd);
clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
- ret = iwl_trans_d3_suspend(mvm->trans, test, !unified_image);
+ ret = iwl_trans_d3_suspend(mvm->trans, !unified_image);
out:
if (ret < 0) {
iwl_mvm_free_nd(mvm);
@@ -1401,7 +1384,7 @@ int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
iwl_fw_runtime_suspend(&mvm->fwrt);
mutex_unlock(&mvm->mutex);
- return __iwl_mvm_suspend(hw, wowlan, false);
+ return __iwl_mvm_suspend(hw, wowlan);
}
struct iwl_multicast_key_data {
@@ -1794,63 +1777,8 @@ static void iwl_mvm_set_key_rx_seq(struct ieee80211_key_conf *key,
struct iwl_mvm_d3_gtk_iter_data {
struct iwl_mvm *mvm;
struct iwl_wowlan_status_data *status;
- u32 gtk_cipher, igtk_cipher, bigtk_cipher;
- bool unhandled_cipher, igtk_support, bigtk_support;
- int num_keys;
};
-static void iwl_mvm_d3_find_last_keys(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta,
- struct ieee80211_key_conf *key,
- void *_data)
-{
- struct iwl_mvm_d3_gtk_iter_data *data = _data;
- int link_id = vif->active_links ? __ffs(vif->active_links) : -1;
-
- if (link_id >= 0 && key->link_id >= 0 && link_id != key->link_id)
- return;
-
- if (data->unhandled_cipher)
- return;
-
- switch (key->cipher) {
- case WLAN_CIPHER_SUITE_WEP40:
- case WLAN_CIPHER_SUITE_WEP104:
- /* ignore WEP completely, nothing to do */
- return;
- case WLAN_CIPHER_SUITE_CCMP:
- case WLAN_CIPHER_SUITE_GCMP:
- case WLAN_CIPHER_SUITE_GCMP_256:
- case WLAN_CIPHER_SUITE_TKIP:
- /* we support these */
- data->gtk_cipher = key->cipher;
- break;
- case WLAN_CIPHER_SUITE_BIP_GMAC_128:
- case WLAN_CIPHER_SUITE_BIP_GMAC_256:
- case WLAN_CIPHER_SUITE_BIP_CMAC_256:
- case WLAN_CIPHER_SUITE_AES_CMAC:
- /* we support these */
- if (data->igtk_support &&
- (key->keyidx == 4 || key->keyidx == 5)) {
- data->igtk_cipher = key->cipher;
- } else if (data->bigtk_support &&
- (key->keyidx == 6 || key->keyidx == 7)) {
- data->bigtk_cipher = key->cipher;
- } else {
- data->unhandled_cipher = true;
- return;
- }
- break;
- default:
- /* everything else - disconnect from AP */
- data->unhandled_cipher = true;
- return;
- }
-
- data->num_keys++;
-}
-
static void
iwl_mvm_d3_set_igtk_bigtk_ipn(const struct iwl_multicast_key_data *key,
struct ieee80211_key_seq *seq, u32 cipher)
@@ -1896,9 +1824,6 @@ static void iwl_mvm_d3_update_keys(struct ieee80211_hw *hw,
if (link_id >= 0 && key->link_id >= 0 && link_id != key->link_id)
return;
- if (data->unhandled_cipher)
- return;
-
switch (key->cipher) {
case WLAN_CIPHER_SUITE_WEP40:
case WLAN_CIPHER_SUITE_WEP104:
@@ -1947,52 +1872,24 @@ static void iwl_mvm_d3_update_keys(struct ieee80211_hw *hw,
static bool iwl_mvm_gtk_rekey(struct iwl_wowlan_status_data *status,
struct ieee80211_vif *vif,
- struct iwl_mvm *mvm, u32 gtk_cipher)
+ struct iwl_mvm *mvm)
{
int i, j;
struct ieee80211_key_conf *key;
- DEFINE_RAW_FLEX(struct ieee80211_key_conf, conf, key,
- WOWLAN_KEY_MAX_SIZE);
int link_id = vif->active_links ? __ffs(vif->active_links) : -1;
- u8 key_data[WOWLAN_KEY_MAX_SIZE];
-
- conf->cipher = gtk_cipher;
-
- BUILD_BUG_ON(WLAN_KEY_LEN_CCMP != WLAN_KEY_LEN_GCMP);
- BUILD_BUG_ON(WOWLAN_KEY_MAX_SIZE < WLAN_KEY_LEN_CCMP);
- BUILD_BUG_ON(WOWLAN_KEY_MAX_SIZE < WLAN_KEY_LEN_GCMP_256);
- BUILD_BUG_ON(WOWLAN_KEY_MAX_SIZE < WLAN_KEY_LEN_TKIP);
- BUILD_BUG_ON(WOWLAN_KEY_MAX_SIZE < sizeof(status->gtk[0].key));
-
- switch (gtk_cipher) {
- case WLAN_CIPHER_SUITE_CCMP:
- case WLAN_CIPHER_SUITE_GCMP:
- conf->keylen = WLAN_KEY_LEN_CCMP;
- break;
- case WLAN_CIPHER_SUITE_GCMP_256:
- conf->keylen = WLAN_KEY_LEN_GCMP_256;
- break;
- case WLAN_CIPHER_SUITE_TKIP:
- conf->keylen = WLAN_KEY_LEN_TKIP;
- break;
- default:
- WARN_ON(1);
- }
for (i = 0; i < ARRAY_SIZE(status->gtk); i++) {
if (!status->gtk[i].len)
continue;
- conf->keyidx = status->gtk[i].id;
IWL_DEBUG_WOWLAN(mvm,
- "Received from FW GTK cipher %d, key index %d\n",
- conf->cipher, conf->keyidx);
- memcpy(conf->key, status->gtk[i].key,
- sizeof(status->gtk[i].key));
- memcpy(key_data, status->gtk[i].key, sizeof(status->gtk[i].key));
-
- key = ieee80211_gtk_rekey_add(vif, status->gtk[i].id, key_data,
- sizeof(key_data), link_id);
+ "Received from FW GTK: key index %d\n",
+ status->gtk[i].id);
+
+ key = ieee80211_gtk_rekey_add(vif, status->gtk[i].id,
+ status->gtk[i].key,
+ sizeof(status->gtk[i].key),
+ link_id);
if (IS_ERR(key)) {
/* FW may send also the old keys */
if (PTR_ERR(key) == -EALREADY)
@@ -2015,53 +1912,26 @@ static bool iwl_mvm_gtk_rekey(struct iwl_wowlan_status_data *status,
static bool
iwl_mvm_d3_igtk_bigtk_rekey_add(struct iwl_wowlan_status_data *status,
- struct ieee80211_vif *vif, u32 cipher,
+ struct ieee80211_vif *vif,
struct iwl_multicast_key_data *key_data)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- DEFINE_RAW_FLEX(struct ieee80211_key_conf, conf, key,
- WOWLAN_KEY_MAX_SIZE);
struct ieee80211_key_conf *key_config;
struct ieee80211_key_seq seq;
int link_id = vif->active_links ? __ffs(vif->active_links) : -1;
- u8 key[WOWLAN_KEY_MAX_SIZE];
s8 keyidx = key_data->id;
- conf->cipher = cipher;
- conf->keyidx = keyidx;
-
if (!key_data->len)
return true;
- iwl_mvm_d3_set_igtk_bigtk_ipn(key_data, &seq, conf->cipher);
-
- switch (cipher) {
- case WLAN_CIPHER_SUITE_BIP_GMAC_128:
- conf->keylen = WLAN_KEY_LEN_BIP_GMAC_128;
- break;
- case WLAN_CIPHER_SUITE_BIP_GMAC_256:
- conf->keylen = WLAN_KEY_LEN_BIP_GMAC_256;
- break;
- case WLAN_CIPHER_SUITE_AES_CMAC:
- conf->keylen = WLAN_KEY_LEN_AES_CMAC;
- break;
- case WLAN_CIPHER_SUITE_BIP_CMAC_256:
- conf->keylen = WLAN_KEY_LEN_BIP_CMAC_256;
- break;
- default:
- WARN_ON(1);
- }
- BUILD_BUG_ON(WOWLAN_KEY_MAX_SIZE < sizeof(key_data->key));
- memcpy(conf->key, key_data->key, conf->keylen);
-
- memcpy(key, key_data->key, sizeof(key_data->key));
-
- key_config = ieee80211_gtk_rekey_add(vif, keyidx, key, sizeof(key),
- link_id);
+ key_config = ieee80211_gtk_rekey_add(vif, keyidx, key_data->key,
+ sizeof(key_data->key), link_id);
if (IS_ERR(key_config)) {
/* FW may send also the old keys */
return PTR_ERR(key_config) == -EALREADY;
}
+
+ iwl_mvm_d3_set_igtk_bigtk_ipn(key_data, &seq, key_config->cipher);
ieee80211_set_key_rx_seq(key_config, 0, &seq);
if (keyidx == 4 || keyidx == 5) {
@@ -2115,27 +1985,6 @@ static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm,
if (!status || !vif->bss_conf.bssid)
return false;
-
- if (iwl_mvm_lookup_wowlan_status_ver(mvm) > 6 ||
- iwl_fw_lookup_notif_ver(mvm->fw, PROT_OFFLOAD_GROUP,
- WOWLAN_INFO_NOTIFICATION,
- 0))
- gtkdata.igtk_support = true;
-
- if (iwl_fw_lookup_notif_ver(mvm->fw, PROT_OFFLOAD_GROUP,
- WOWLAN_INFO_NOTIFICATION,
- 0) >= 3)
- gtkdata.bigtk_support = true;
-
- /* find last GTK that we used initially, if any */
- ieee80211_iter_keys(mvm->hw, vif,
- iwl_mvm_d3_find_last_keys, &gtkdata);
- /* not trying to keep connections with MFP/unhandled ciphers */
- if (gtkdata.unhandled_cipher)
- return false;
- if (!gtkdata.num_keys)
- goto out;
-
/*
* invalidate all other GTKs that might still exist and update
* the one that we used
@@ -2149,17 +1998,15 @@ static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm,
IWL_DEBUG_WOWLAN(mvm, "num of GTK rekeying %d\n",
status->num_of_gtk_rekeys);
- if (!iwl_mvm_gtk_rekey(status, vif, mvm, gtkdata.gtk_cipher))
+ if (!iwl_mvm_gtk_rekey(status, vif, mvm))
return false;
if (!iwl_mvm_d3_igtk_bigtk_rekey_add(status, vif,
- gtkdata.igtk_cipher,
&status->igtk))
return false;
for (i = 0; i < ARRAY_SIZE(status->bigtk); i++) {
if (!iwl_mvm_d3_igtk_bigtk_rekey_add(status, vif,
- gtkdata.bigtk_cipher,
&status->bigtk[i]))
return false;
}
@@ -2168,7 +2015,6 @@ static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm,
(void *)&replay_ctr, GFP_KERNEL);
}
-out:
if (iwl_fw_lookup_notif_ver(mvm->fw, LONG_GROUP,
WOWLAN_GET_STATUSES,
IWL_FW_CMD_VER_UNKNOWN) < 10) {
@@ -2236,7 +2082,7 @@ static void iwl_mvm_convert_gtk_v3(struct iwl_wowlan_status_data *status,
}
static void iwl_mvm_convert_igtk(struct iwl_wowlan_status_data *status,
- struct iwl_wowlan_igtk_status *data)
+ struct iwl_wowlan_igtk_status_v1 *data)
{
int i;
@@ -2260,7 +2106,7 @@ static void iwl_mvm_convert_igtk(struct iwl_wowlan_status_data *status,
}
static void iwl_mvm_convert_bigtk(struct iwl_wowlan_status_data *status,
- const struct iwl_wowlan_igtk_status *data)
+ const struct iwl_wowlan_igtk_status_v1 *data)
{
int data_idx, status_idx = 0;
@@ -2291,7 +2137,7 @@ static void iwl_mvm_convert_bigtk(struct iwl_wowlan_status_data *status,
}
static void iwl_mvm_parse_wowlan_info_notif(struct iwl_mvm *mvm,
- struct iwl_wowlan_info_notif *data,
+ struct iwl_wowlan_info_notif_v5 *data,
struct iwl_wowlan_status_data *status,
u32 len)
{
@@ -2727,7 +2573,6 @@ enum iwl_d3_notif {
/* manage d3 resume data */
struct iwl_d3_data {
struct iwl_wowlan_status_data *status;
- bool test;
u32 d3_end_flags;
u32 notif_expected; /* bitmap - see &enum iwl_d3_notif */
u32 notif_received; /* bitmap - see &enum iwl_d3_notif */
@@ -2919,18 +2764,11 @@ iwl_mvm_choose_query_wakeup_reasons(struct iwl_mvm *mvm,
if (mvm->net_detect) {
iwl_mvm_query_netdetect_reasons(mvm, vif, d3_data);
- } else {
- bool keep = iwl_mvm_query_wakeup_reasons(mvm, vif,
- d3_data->status);
-
-#ifdef CONFIG_IWLWIFI_DEBUGFS
- if (keep)
- mvm->keep_vif = vif;
-#endif
-
- return keep;
+ return false;
}
- return false;
+
+ return iwl_mvm_query_wakeup_reasons(mvm, vif,
+ d3_data->status);
}
#define IWL_WOWLAN_WAKEUP_REASON_HAS_WAKEUP_PKT (IWL_WOWLAN_WAKEUP_BY_MAGIC_PACKET | \
@@ -3069,7 +2907,7 @@ static bool iwl_mvm_wait_d3_notif(struct iwl_notif_wait_data *notif_wait,
iwl_mvm_parse_wowlan_info_notif_v3(mvm, notif,
d3_data->status, len);
} else if (wowlan_info_ver == 5) {
- struct iwl_wowlan_info_notif *notif =
+ struct iwl_wowlan_info_notif_v5 *notif =
(void *)pkt->data;
iwl_mvm_parse_wowlan_info_notif(mvm, notif,
@@ -3143,10 +2981,9 @@ static bool iwl_mvm_wait_d3_notif(struct iwl_notif_wait_data *notif_wait,
return d3_data->notif_received == d3_data->notif_expected;
}
-static int iwl_mvm_resume_firmware(struct iwl_mvm *mvm, bool test)
+static int iwl_mvm_resume_firmware(struct iwl_mvm *mvm)
{
int ret;
- enum iwl_d3_status d3_status;
struct iwl_host_cmd cmd = {
.id = D0I3_END_CMD,
.flags = CMD_WANT_SKB,
@@ -3154,15 +2991,10 @@ static int iwl_mvm_resume_firmware(struct iwl_mvm *mvm, bool test)
bool reset = fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
- ret = iwl_trans_d3_resume(mvm->trans, &d3_status, test, !reset);
+ ret = iwl_trans_d3_resume(mvm->trans, !reset);
if (ret)
return ret;
- if (d3_status != IWL_D3_STATUS_ALIVE) {
- IWL_INFO(mvm, "Device was reset during suspend\n");
- return -ENOENT;
- }
-
/*
* We should trigger resume flow using command only for 22000 family
* AX210 and above don't need the command since they have
@@ -3207,7 +3039,7 @@ static int iwl_mvm_d3_notif_wait(struct iwl_mvm *mvm,
ARRAY_SIZE(d3_resume_notif),
iwl_mvm_wait_d3_notif, d3_data);
- ret = iwl_mvm_resume_firmware(mvm, d3_data->test);
+ ret = iwl_mvm_resume_firmware(mvm);
if (ret) {
iwl_remove_notification(&mvm->notif_wait, &wait_d3_notif);
return ret;
@@ -3227,13 +3059,12 @@ static inline bool iwl_mvm_d3_resume_notif_based(struct iwl_mvm *mvm)
D3_END_NOTIFICATION, 0);
}
-static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
+static int __iwl_mvm_resume(struct iwl_mvm *mvm)
{
struct ieee80211_vif *vif = NULL;
int ret = 1;
struct iwl_mvm_nd_results results = {};
struct iwl_d3_data d3_data = {
- .test = test,
.notif_expected =
IWL_D3_NOTIF_WOWLAN_INFO |
IWL_D3_NOTIF_D3_END_NOTIF,
@@ -3271,7 +3102,7 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
rt_status = iwl_mvm_check_rt_status(mvm, vif);
if (rt_status != FW_ALIVE) {
- set_bit(STATUS_FW_ERROR, &mvm->trans->status);
+ iwl_trans_notify_fw_error(mvm->trans);
if (rt_status == FW_ERROR) {
IWL_ERR(mvm, "FW Error occurred during suspend. Restarting.\n");
iwl_mvm_dump_nic_error_log(mvm);
@@ -3298,13 +3129,11 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
if (ret)
goto err;
} else {
- ret = iwl_mvm_resume_firmware(mvm, test);
+ ret = iwl_mvm_resume_firmware(mvm);
if (ret < 0)
goto err;
}
- iwl_mvm_unblock_esr(mvm, vif, IWL_MVM_ESR_BLOCKED_WOWLAN);
-
/* when reset is required we can't send these following commands */
if (d3_data.d3_end_flags & IWL_D0I3_RESET_REQUIRE)
goto query_wakeup_reasons;
@@ -3346,7 +3175,7 @@ out:
kfree(d3_data.status);
iwl_mvm_free_nd(mvm);
- if (!d3_data.test && !mvm->net_detect)
+ if (!mvm->net_detect)
ieee80211_iterate_active_interfaces_mtx(mvm->hw,
IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_d3_disconnect_iter,
@@ -3385,7 +3214,7 @@ int iwl_mvm_resume(struct ieee80211_hw *hw)
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
int ret;
- ret = __iwl_mvm_resume(mvm, false);
+ ret = __iwl_mvm_resume(mvm);
iwl_mvm_resume_tcm(mvm);
@@ -3420,7 +3249,7 @@ void iwl_mvm_fast_suspend(struct iwl_mvm *mvm)
IWL_ERR(mvm,
"fast suspend: couldn't send D3_CONFIG_CMD %d\n", ret);
- ret = iwl_trans_d3_suspend(mvm->trans, false, false);
+ ret = iwl_trans_d3_suspend(mvm->trans, false);
if (ret)
IWL_ERR(mvm, "fast suspend: trans_d3_suspend failed %d\n", ret);
}
@@ -3443,7 +3272,7 @@ int iwl_mvm_fast_resume(struct iwl_mvm *mvm)
rt_status = iwl_mvm_check_rt_status(mvm, NULL);
if (rt_status != FW_ALIVE) {
- set_bit(STATUS_FW_ERROR, &mvm->trans->status);
+ iwl_trans_notify_fw_error(mvm->trans);
if (rt_status == FW_ERROR) {
IWL_ERR(mvm,
"iwl_mvm_check_rt_status failed, device is gone during suspend\n");
@@ -3455,7 +3284,6 @@ int iwl_mvm_fast_resume(struct iwl_mvm *mvm)
&iwl_dump_desc_assert,
false, 0);
}
- mvm->trans->state = IWL_TRANS_NO_FW;
ret = -ENODEV;
goto out;
@@ -3473,125 +3301,3 @@ out:
return ret;
}
-
-#ifdef CONFIG_IWLWIFI_DEBUGFS
-static int iwl_mvm_d3_test_open(struct inode *inode, struct file *file)
-{
- struct iwl_mvm *mvm = inode->i_private;
- int err;
-
- if (mvm->d3_test_active)
- return -EBUSY;
-
- file->private_data = inode->i_private;
-
- iwl_mvm_pause_tcm(mvm, true);
-
- iwl_fw_runtime_suspend(&mvm->fwrt);
-
- /* start pseudo D3 */
- rtnl_lock();
- wiphy_lock(mvm->hw->wiphy);
- err = __iwl_mvm_suspend(mvm->hw, mvm->hw->wiphy->wowlan_config, true);
- wiphy_unlock(mvm->hw->wiphy);
- rtnl_unlock();
- if (err > 0)
- err = -EINVAL;
- if (err)
- return err;
-
- mvm->d3_test_active = true;
- mvm->keep_vif = NULL;
- return 0;
-}
-
-static ssize_t iwl_mvm_d3_test_read(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_mvm *mvm = file->private_data;
- unsigned long end = jiffies + 60 * HZ;
- u32 pme_asserted;
-
- while (true) {
- /* read pme_ptr if available */
- if (mvm->d3_test_pme_ptr) {
- pme_asserted = iwl_trans_read_mem32(mvm->trans,
- mvm->d3_test_pme_ptr);
- if (pme_asserted)
- break;
- }
-
- if (msleep_interruptible(100))
- break;
-
- if (time_is_before_jiffies(end)) {
- IWL_ERR(mvm,
- "ending pseudo-D3 with timeout after ~60 seconds\n");
- return -ETIMEDOUT;
- }
- }
-
- return 0;
-}
-
-static void iwl_mvm_d3_test_disconn_work_iter(void *_data, u8 *mac,
- struct ieee80211_vif *vif)
-{
- /* skip the one we keep connection on */
- if (_data == vif)
- return;
-
- if (vif->type == NL80211_IFTYPE_STATION)
- ieee80211_connection_loss(vif);
-}
-
-static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file)
-{
- struct iwl_mvm *mvm = inode->i_private;
- bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
-
- mvm->d3_test_active = false;
-
- iwl_fw_dbg_read_d3_debug_data(&mvm->fwrt);
-
- rtnl_lock();
- wiphy_lock(mvm->hw->wiphy);
- __iwl_mvm_resume(mvm, true);
- wiphy_unlock(mvm->hw->wiphy);
- rtnl_unlock();
-
- iwl_mvm_resume_tcm(mvm);
-
- iwl_fw_runtime_resume(&mvm->fwrt);
-
- iwl_abort_notification_waits(&mvm->notif_wait);
- if (!unified_image) {
- int remaining_time = 10;
-
- ieee80211_restart_hw(mvm->hw);
-
- /* wait for restart and disconnect all interfaces */
- while (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
- remaining_time > 0) {
- remaining_time--;
- msleep(1000);
- }
-
- if (remaining_time == 0)
- IWL_ERR(mvm, "Timed out waiting for HW restart!\n");
- }
-
- ieee80211_iterate_active_interfaces_atomic(
- mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
- iwl_mvm_d3_test_disconn_work_iter, mvm->keep_vif);
-
- return 0;
-}
-
-const struct file_operations iwl_dbgfs_d3_test_ops = {
- .open = iwl_mvm_d3_test_open,
- .read = iwl_mvm_d3_test_read,
- .release = iwl_mvm_d3_test_release,
-};
-#endif
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
index fbe4e4a50852..a56c352a459a 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
@@ -762,96 +762,6 @@ static ssize_t iwl_dbgfs_max_tx_op_read(struct file *file,
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
-static ssize_t iwl_dbgfs_int_mlo_scan_write(struct ieee80211_vif *vif,
- char *buf, size_t count,
- loff_t *ppos)
-{
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_mvm *mvm = mvmvif->mvm;
- u32 action;
- int ret;
-
- if (!vif->cfg.assoc || !ieee80211_vif_is_mld(vif))
- return -EINVAL;
-
- if (kstrtou32(buf, 0, &action))
- return -EINVAL;
-
- mutex_lock(&mvm->mutex);
-
- if (!action) {
- ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_INT_MLO, false);
- } else if (action == 1) {
- ret = iwl_mvm_int_mlo_scan(mvm, vif);
- } else {
- ret = -EINVAL;
- }
-
- mutex_unlock(&mvm->mutex);
-
- return ret ?: count;
-}
-
-static ssize_t iwl_dbgfs_esr_disable_reason_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct ieee80211_vif *vif = file->private_data;
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_mvm *mvm = mvmvif->mvm;
- unsigned long esr_mask;
- char *buf;
- int bufsz, pos, i;
- ssize_t rv;
-
- mutex_lock(&mvm->mutex);
- esr_mask = mvmvif->esr_disable_reason;
- mutex_unlock(&mvm->mutex);
-
- bufsz = hweight32(esr_mask) * 32 + 40;
- buf = kmalloc(bufsz, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- pos = scnprintf(buf, bufsz, "EMLSR state: '0x%lx'\nreasons:\n",
- esr_mask);
- for_each_set_bit(i, &esr_mask, BITS_PER_LONG)
- pos += scnprintf(buf + pos, bufsz - pos, " - %s\n",
- iwl_get_esr_state_string(BIT(i)));
-
- rv = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- kfree(buf);
- return rv;
-}
-
-static ssize_t iwl_dbgfs_esr_disable_reason_write(struct ieee80211_vif *vif,
- char *buf, size_t count,
- loff_t *ppos)
-{
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_mvm *mvm = mvmvif->mvm;
- u32 reason;
- u8 block;
- int ret;
-
- ret = sscanf(buf, "%u %hhu", &reason, &block);
- if (ret < 0)
- return ret;
-
- if (hweight16(reason) != 1 || !(reason & IWL_MVM_BLOCK_ESR_REASONS))
- return -EINVAL;
-
- mutex_lock(&mvm->mutex);
- if (block)
- iwl_mvm_block_esr(mvm, vif, reason,
- iwl_mvm_get_primary_link(vif));
- else
- iwl_mvm_unblock_esr(mvm, vif, reason);
- mutex_unlock(&mvm->mutex);
-
- return count;
-}
-
#define MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \
_MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct ieee80211_vif)
#define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \
@@ -884,8 +794,6 @@ MVM_DEBUGFS_READ_WRITE_FILE_OPS(rx_phyinfo, 10);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(quota_min, 32);
MVM_DEBUGFS_READ_FILE_OPS(os_device_timediff);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(max_tx_op, 10);
-MVM_DEBUGFS_WRITE_FILE_OPS(int_mlo_scan, 32);
-MVM_DEBUGFS_READ_WRITE_FILE_OPS(esr_disable_reason, 32);
void iwl_mvm_vif_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
@@ -916,8 +824,6 @@ void iwl_mvm_vif_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
MVM_DEBUGFS_ADD_FILE_VIF(max_tx_op, mvmvif->dbgfs_dir, 0600);
debugfs_create_bool("ftm_unprotected", 0200, mvmvif->dbgfs_dir,
&mvmvif->ftm_unprotected);
- MVM_DEBUGFS_ADD_FILE_VIF(int_mlo_scan, mvmvif->dbgfs_dir, 0200);
- MVM_DEBUGFS_ADD_FILE_VIF(esr_disable_reason, mvmvif->dbgfs_dir, 0600);
if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p &&
mvmvif == mvm->bf_allowed_vif)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
index f0e184c8a81a..683c0ba5fb39 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
@@ -1134,7 +1134,7 @@ static ssize_t iwl_dbgfs_fw_restart_write(struct iwl_mvm *mvm, char *buf,
if (count == 6 && !strcmp(buf, "nolog\n")) {
set_bit(IWL_MVM_STATUS_SUPPRESS_ERROR_LOG_ONCE, &mvm->status);
- iwl_trans_suppress_cmd_error_once(mvm->trans);
+ mvm->trans->suppress_cmd_error_once = true;
}
/* take the return value to make compiler happy - it will fail anyway */
@@ -2159,7 +2159,6 @@ void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm)
MVM_DEBUGFS_ADD_FILE(uapsd_noagg_bssids, mvm->debugfs_dir, S_IRUSR);
#ifdef CONFIG_PM_SLEEP
- MVM_DEBUGFS_ADD_FILE(d3_test, mvm->debugfs_dir, 0400);
debugfs_create_bool("d3_wake_sysassert", 0600, mvm->debugfs_dir,
&mvm->d3_wake_sysassert);
debugfs_create_u32("last_netdetect_scans", 0400, mvm->debugfs_dir,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index d931c6eaf12f..865f973f677d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -837,7 +837,7 @@ static int iwl_mvm_config_ltr(struct iwl_mvm *mvm)
.flags = cpu_to_le32(LTR_CFG_FLAG_FEATURE_ENABLE),
};
- if (!mvm->trans->ltr_enabled)
+ if (!iwl_trans_is_ltr_enabled(mvm->trans))
return 0;
return iwl_mvm_send_cmd_pdu(mvm, LTR_CONFIG, 0,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/link.c b/drivers/net/wireless/intel/iwlwifi/mvm/link.c
index 2269acc55c0e..738facceb240 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/link.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/link.c
@@ -5,50 +5,6 @@
#include "mvm.h"
#include "time-event.h"
-#define HANDLE_ESR_REASONS(HOW) \
- HOW(BLOCKED_PREVENTION) \
- HOW(BLOCKED_WOWLAN) \
- HOW(BLOCKED_TPT) \
- HOW(BLOCKED_FW) \
- HOW(BLOCKED_NON_BSS) \
- HOW(BLOCKED_ROC) \
- HOW(BLOCKED_TMP_NON_BSS) \
- HOW(EXIT_MISSED_BEACON) \
- HOW(EXIT_LOW_RSSI) \
- HOW(EXIT_COEX) \
- HOW(EXIT_BANDWIDTH) \
- HOW(EXIT_CSA) \
- HOW(EXIT_LINK_USAGE)
-
-static const char *const iwl_mvm_esr_states_names[] = {
-#define NAME_ENTRY(x) [ilog2(IWL_MVM_ESR_##x)] = #x,
- HANDLE_ESR_REASONS(NAME_ENTRY)
-};
-
-const char *iwl_get_esr_state_string(enum iwl_mvm_esr_state state)
-{
- int offs = ilog2(state);
-
- if (offs >= ARRAY_SIZE(iwl_mvm_esr_states_names) ||
- !iwl_mvm_esr_states_names[offs])
- return "UNKNOWN";
-
- return iwl_mvm_esr_states_names[offs];
-}
-
-static void iwl_mvm_print_esr_state(struct iwl_mvm *mvm, u32 mask)
-{
-#define NAME_FMT(x) "%s"
-#define NAME_PR(x) (mask & IWL_MVM_ESR_##x) ? "[" #x "]" : "",
- IWL_DEBUG_INFO(mvm,
- "EMLSR state = " HANDLE_ESR_REASONS(NAME_FMT)
- " (0x%x)\n",
- HANDLE_ESR_REASONS(NAME_PR)
- mask);
-#undef NAME_FMT
-#undef NAME_PR
-}
-
static int iwl_mvm_link_cmd_send(struct iwl_mvm *mvm,
struct iwl_link_config_cmd *cmd,
enum iwl_ctxt_action action)
@@ -114,65 +70,6 @@ int iwl_mvm_add_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
return iwl_mvm_link_cmd_send(mvm, &cmd, FW_CTXT_ACTION_ADD);
}
-struct iwl_mvm_esr_iter_data {
- struct ieee80211_vif *vif;
- unsigned int link_id;
- bool lift_block;
-};
-
-static void iwl_mvm_esr_vif_iterator(void *_data, u8 *mac,
- struct ieee80211_vif *vif)
-{
- struct iwl_mvm_esr_iter_data *data = _data;
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- int link_id;
-
- if (ieee80211_vif_type_p2p(vif) == NL80211_IFTYPE_STATION)
- return;
-
- for_each_mvm_vif_valid_link(mvmvif, link_id) {
- struct iwl_mvm_vif_link_info *link_info =
- mvmvif->link[link_id];
- if (vif == data->vif && link_id == data->link_id)
- continue;
- if (link_info->active)
- data->lift_block = false;
- }
-}
-
-int iwl_mvm_esr_non_bss_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- unsigned int link_id, bool active)
-{
- /* An active link of a non-station vif blocks EMLSR. Upon activation
- * block EMLSR on the bss vif. Upon deactivation, check if this link
- * was the last non-station link active, and if so unblock the bss vif
- */
- struct ieee80211_vif *bss_vif = iwl_mvm_get_bss_vif(mvm);
- struct iwl_mvm_esr_iter_data data = {
- .vif = vif,
- .link_id = link_id,
- .lift_block = true,
- };
-
- if (IS_ERR_OR_NULL(bss_vif))
- return 0;
-
- if (active)
- return iwl_mvm_block_esr_sync(mvm, bss_vif,
- IWL_MVM_ESR_BLOCKED_NON_BSS);
-
- ieee80211_iterate_active_interfaces(mvm->hw,
- IEEE80211_IFACE_ITER_NORMAL,
- iwl_mvm_esr_vif_iterator, &data);
- if (data.lift_block) {
- mutex_lock(&mvm->mutex);
- iwl_mvm_unblock_esr(mvm, bss_vif, IWL_MVM_ESR_BLOCKED_NON_BSS);
- mutex_unlock(&mvm->mutex);
- }
-
- return 0;
-}
-
int iwl_mvm_link_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct ieee80211_bss_conf *link_conf,
u32 changes, bool active)
@@ -388,452 +285,6 @@ int iwl_mvm_disable_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
return ret;
}
-struct iwl_mvm_rssi_to_grade {
- s8 rssi[2];
- u16 grade;
-};
-
-#define RSSI_TO_GRADE_LINE(_lb, _hb_uhb, _grade) \
- { \
- .rssi = {_lb, _hb_uhb}, \
- .grade = _grade \
- }
-
-/*
- * This array must be sorted by increasing RSSI for proper functionality.
- * The grades are actually estimated throughput, represented as fixed-point
- * with a scale factor of 1/10.
- */
-static const struct iwl_mvm_rssi_to_grade rssi_to_grade_map[] = {
- RSSI_TO_GRADE_LINE(-85, -89, 177),
- RSSI_TO_GRADE_LINE(-83, -86, 344),
- RSSI_TO_GRADE_LINE(-82, -85, 516),
- RSSI_TO_GRADE_LINE(-80, -83, 688),
- RSSI_TO_GRADE_LINE(-77, -79, 1032),
- RSSI_TO_GRADE_LINE(-73, -76, 1376),
- RSSI_TO_GRADE_LINE(-70, -74, 1548),
- RSSI_TO_GRADE_LINE(-69, -72, 1750),
- RSSI_TO_GRADE_LINE(-65, -68, 2064),
- RSSI_TO_GRADE_LINE(-61, -66, 2294),
- RSSI_TO_GRADE_LINE(-58, -61, 2580),
- RSSI_TO_GRADE_LINE(-55, -58, 2868),
- RSSI_TO_GRADE_LINE(-46, -55, 3098),
- RSSI_TO_GRADE_LINE(-43, -54, 3442)
-};
-
-#define MAX_GRADE (rssi_to_grade_map[ARRAY_SIZE(rssi_to_grade_map) - 1].grade)
-
-#define DEFAULT_CHAN_LOAD_LB 30
-#define DEFAULT_CHAN_LOAD_HB 15
-#define DEFAULT_CHAN_LOAD_UHB 0
-
-/* Factors calculation is done with fixed-point with a scaling factor of 1/256 */
-#define SCALE_FACTOR 256
-
-/* Convert a percentage from [0,100] to [0,255] */
-#define NORMALIZE_PERCENT_TO_255(percentage) ((percentage) * SCALE_FACTOR / 100)
-
-static unsigned int
-iwl_mvm_get_puncturing_factor(const struct ieee80211_bss_conf *link_conf)
-{
- enum nl80211_chan_width chan_width =
- link_conf->chanreq.oper.width;
- int mhz = nl80211_chan_width_to_mhz(chan_width);
- unsigned int n_subchannels, n_punctured, puncturing_penalty;
-
- if (WARN_ONCE(mhz < 20 || mhz > 320,
- "Invalid channel width : (%d)\n", mhz))
- return SCALE_FACTOR;
-
- /* No puncturing, no penalty */
- if (mhz < 80)
- return SCALE_FACTOR;
-
- /* total number of subchannels */
- n_subchannels = mhz / 20;
- /* how many of these are punctured */
- n_punctured = hweight16(link_conf->chanreq.oper.punctured);
-
- puncturing_penalty = n_punctured * SCALE_FACTOR / n_subchannels;
- return SCALE_FACTOR - puncturing_penalty;
-}
-
-static unsigned int
-iwl_mvm_get_chan_load(struct ieee80211_bss_conf *link_conf)
-{
- struct ieee80211_vif *vif = link_conf->vif;
- struct iwl_mvm_vif_link_info *mvm_link =
- iwl_mvm_vif_from_mac80211(link_conf->vif)->link[link_conf->link_id];
- const struct element *bss_load_elem;
- const struct ieee80211_bss_load_elem *bss_load;
- enum nl80211_band band = link_conf->chanreq.oper.chan->band;
- const struct cfg80211_bss_ies *ies;
- unsigned int chan_load;
- u32 chan_load_by_us;
-
- rcu_read_lock();
- if (ieee80211_vif_link_active(vif, link_conf->link_id))
- ies = rcu_dereference(link_conf->bss->beacon_ies);
- else
- ies = rcu_dereference(link_conf->bss->ies);
-
- if (ies)
- bss_load_elem = cfg80211_find_elem(WLAN_EID_QBSS_LOAD,
- ies->data, ies->len);
- else
- bss_load_elem = NULL;
-
- /* If there isn't BSS Load element, take the defaults */
- if (!bss_load_elem ||
- bss_load_elem->datalen != sizeof(*bss_load)) {
- rcu_read_unlock();
- switch (band) {
- case NL80211_BAND_2GHZ:
- chan_load = DEFAULT_CHAN_LOAD_LB;
- break;
- case NL80211_BAND_5GHZ:
- chan_load = DEFAULT_CHAN_LOAD_HB;
- break;
- case NL80211_BAND_6GHZ:
- chan_load = DEFAULT_CHAN_LOAD_UHB;
- break;
- default:
- chan_load = 0;
- break;
- }
- /* The defaults are given in percentage */
- return NORMALIZE_PERCENT_TO_255(chan_load);
- }
-
- bss_load = (const void *)bss_load_elem->data;
- /* Channel util is in range 0-255 */
- chan_load = bss_load->channel_util;
- rcu_read_unlock();
-
- if (!mvm_link || !mvm_link->active)
- return chan_load;
-
- if (WARN_ONCE(!mvm_link->phy_ctxt,
- "Active link (%u) without phy ctxt assigned!\n",
- link_conf->link_id))
- return chan_load;
-
- /* channel load by us is given in percentage */
- chan_load_by_us =
- NORMALIZE_PERCENT_TO_255(mvm_link->phy_ctxt->channel_load_by_us);
-
- /* Use only values that firmware sends that can possibly be valid */
- if (chan_load_by_us <= chan_load)
- chan_load -= chan_load_by_us;
-
- return chan_load;
-}
-
-static unsigned int
-iwl_mvm_get_chan_load_factor(struct ieee80211_bss_conf *link_conf)
-{
- return SCALE_FACTOR - iwl_mvm_get_chan_load(link_conf);
-}
-
-/* This function calculates the grade of a link. Returns 0 in error case */
-VISIBLE_IF_IWLWIFI_KUNIT
-unsigned int iwl_mvm_get_link_grade(struct ieee80211_bss_conf *link_conf)
-{
- enum nl80211_band band;
- int i, rssi_idx;
- s32 link_rssi;
- unsigned int grade = MAX_GRADE;
-
- if (WARN_ON_ONCE(!link_conf))
- return 0;
-
- band = link_conf->chanreq.oper.chan->band;
- if (WARN_ONCE(band != NL80211_BAND_2GHZ &&
- band != NL80211_BAND_5GHZ &&
- band != NL80211_BAND_6GHZ,
- "Invalid band (%u)\n", band))
- return 0;
-
- link_rssi = MBM_TO_DBM(link_conf->bss->signal);
- /*
- * For 6 GHz the RSSI of the beacons is lower than
- * the RSSI of the data.
- */
- if (band == NL80211_BAND_6GHZ)
- link_rssi += 4;
-
- rssi_idx = band == NL80211_BAND_2GHZ ? 0 : 1;
-
- /* No valid RSSI - take the lowest grade */
- if (!link_rssi)
- link_rssi = rssi_to_grade_map[0].rssi[rssi_idx];
-
- /* Get grade based on RSSI */
- for (i = 0; i < ARRAY_SIZE(rssi_to_grade_map); i++) {
- const struct iwl_mvm_rssi_to_grade *line =
- &rssi_to_grade_map[i];
-
- if (link_rssi > line->rssi[rssi_idx])
- continue;
- grade = line->grade;
- break;
- }
-
- /* apply the channel load and puncturing factors */
- grade = grade * iwl_mvm_get_chan_load_factor(link_conf) / SCALE_FACTOR;
- grade = grade * iwl_mvm_get_puncturing_factor(link_conf) / SCALE_FACTOR;
- return grade;
-}
-EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mvm_get_link_grade);
-
-static
-u8 iwl_mvm_set_link_selection_data(struct ieee80211_vif *vif,
- struct iwl_mvm_link_sel_data *data,
- unsigned long usable_links,
- u8 *best_link_idx)
-{
- u8 n_data = 0;
- u16 max_grade = 0;
- unsigned long link_id;
-
- /* TODO: don't select links that weren't discovered in the last scan */
- for_each_set_bit(link_id, &usable_links, IEEE80211_MLD_MAX_NUM_LINKS) {
- struct ieee80211_bss_conf *link_conf =
- link_conf_dereference_protected(vif, link_id);
-
- if (WARN_ON_ONCE(!link_conf))
- continue;
-
- data[n_data].link_id = link_id;
- data[n_data].chandef = &link_conf->chanreq.oper;
- data[n_data].signal = link_conf->bss->signal / 100;
- data[n_data].grade = iwl_mvm_get_link_grade(link_conf);
-
- if (data[n_data].grade > max_grade) {
- max_grade = data[n_data].grade;
- *best_link_idx = n_data;
- }
- n_data++;
- }
-
- return n_data;
-}
-
-struct iwl_mvm_bw_to_rssi_threshs {
- s8 low;
- s8 high;
-};
-
-#define BW_TO_RSSI_THRESHOLDS(_bw) \
- [IWL_PHY_CHANNEL_MODE ## _bw] = { \
- .low = IWL_MVM_LOW_RSSI_THRESH_##_bw##MHZ, \
- .high = IWL_MVM_HIGH_RSSI_THRESH_##_bw##MHZ \
- }
-
-s8 iwl_mvm_get_esr_rssi_thresh(struct iwl_mvm *mvm,
- const struct cfg80211_chan_def *chandef,
- bool low)
-{
- const struct iwl_mvm_bw_to_rssi_threshs bw_to_rssi_threshs_map[] = {
- BW_TO_RSSI_THRESHOLDS(20),
- BW_TO_RSSI_THRESHOLDS(40),
- BW_TO_RSSI_THRESHOLDS(80),
- BW_TO_RSSI_THRESHOLDS(160)
- /* 320 MHz has the same thresholds as 20 MHz */
- };
- const struct iwl_mvm_bw_to_rssi_threshs *threshs;
- u8 chan_width = iwl_mvm_get_channel_width(chandef);
-
- if (WARN_ON(chandef->chan->band != NL80211_BAND_2GHZ &&
- chandef->chan->band != NL80211_BAND_5GHZ &&
- chandef->chan->band != NL80211_BAND_6GHZ))
- return S8_MAX;
-
- /* 6 GHz will always use 20 MHz thresholds, regardless of the BW */
- if (chan_width == IWL_PHY_CHANNEL_MODE320)
- chan_width = IWL_PHY_CHANNEL_MODE20;
-
- threshs = &bw_to_rssi_threshs_map[chan_width];
-
- return low ? threshs->low : threshs->high;
-}
-
-static u32
-iwl_mvm_esr_disallowed_with_link(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- const struct iwl_mvm_link_sel_data *link,
- bool primary)
-{
- struct wiphy *wiphy = mvm->hw->wiphy;
- struct ieee80211_bss_conf *conf;
- enum iwl_mvm_esr_state ret = 0;
- s8 thresh;
-
- conf = wiphy_dereference(wiphy, vif->link_conf[link->link_id]);
- if (WARN_ON_ONCE(!conf))
- return false;
-
- /* BT Coex affects eSR mode only if one of the links is on LB */
- if (link->chandef->chan->band == NL80211_BAND_2GHZ &&
- (!iwl_mvm_bt_coex_calculate_esr_mode(mvm, vif, link->signal,
- primary)))
- ret |= IWL_MVM_ESR_EXIT_COEX;
-
- thresh = iwl_mvm_get_esr_rssi_thresh(mvm, link->chandef,
- false);
-
- if (link->signal < thresh)
- ret |= IWL_MVM_ESR_EXIT_LOW_RSSI;
-
- if (conf->csa_active)
- ret |= IWL_MVM_ESR_EXIT_CSA;
-
- if (ret) {
- IWL_DEBUG_INFO(mvm,
- "Link %d is not allowed for esr\n",
- link->link_id);
- iwl_mvm_print_esr_state(mvm, ret);
- }
- return ret;
-}
-
-VISIBLE_IF_IWLWIFI_KUNIT
-bool iwl_mvm_mld_valid_link_pair(struct ieee80211_vif *vif,
- const struct iwl_mvm_link_sel_data *a,
- const struct iwl_mvm_link_sel_data *b)
-{
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_mvm *mvm = mvmvif->mvm;
- enum iwl_mvm_esr_state ret = 0;
-
- /* Per-link considerations */
- if (iwl_mvm_esr_disallowed_with_link(mvm, vif, a, true) ||
- iwl_mvm_esr_disallowed_with_link(mvm, vif, b, false))
- return false;
-
- if (a->chandef->chan->band == b->chandef->chan->band ||
- a->chandef->width != b->chandef->width)
- ret |= IWL_MVM_ESR_EXIT_BANDWIDTH;
-
- if (ret) {
- IWL_DEBUG_INFO(mvm,
- "Links %d and %d are not a valid pair for EMLSR\n",
- a->link_id, b->link_id);
- iwl_mvm_print_esr_state(mvm, ret);
- return false;
- }
-
- return true;
-
-}
-EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mvm_mld_valid_link_pair);
-
-/*
- * Returns the combined eSR grade of two given links.
- * Returns 0 if eSR is not allowed with these 2 links.
- */
-static
-unsigned int iwl_mvm_get_esr_grade(struct ieee80211_vif *vif,
- const struct iwl_mvm_link_sel_data *a,
- const struct iwl_mvm_link_sel_data *b,
- u8 *primary_id)
-{
- struct ieee80211_bss_conf *primary_conf;
- struct wiphy *wiphy = ieee80211_vif_to_wdev(vif)->wiphy;
- unsigned int primary_load;
-
- lockdep_assert_wiphy(wiphy);
-
- /* a is always primary, b is always secondary */
- if (b->grade > a->grade)
- swap(a, b);
-
- *primary_id = a->link_id;
-
- if (!iwl_mvm_mld_valid_link_pair(vif, a, b))
- return 0;
-
- primary_conf = wiphy_dereference(wiphy, vif->link_conf[*primary_id]);
-
- if (WARN_ON_ONCE(!primary_conf))
- return 0;
-
- primary_load = iwl_mvm_get_chan_load(primary_conf);
-
- return a->grade +
- ((b->grade * primary_load) / SCALE_FACTOR);
-}
-
-void iwl_mvm_select_links(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
-{
- struct iwl_mvm_link_sel_data data[IEEE80211_MLD_MAX_NUM_LINKS];
- struct iwl_mvm_link_sel_data *best_link;
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- u32 max_active_links = iwl_mvm_max_active_links(mvm, vif);
- u16 usable_links = ieee80211_vif_usable_links(vif);
- u8 best, primary_link, best_in_pair, n_data;
- u16 max_esr_grade = 0, new_active_links;
-
- lockdep_assert_wiphy(mvm->hw->wiphy);
-
- if (!mvmvif->authorized || !ieee80211_vif_is_mld(vif))
- return;
-
- if (!IWL_MVM_AUTO_EML_ENABLE)
- return;
-
- /* The logic below is a simple version that doesn't handle more than 2
- * links
- */
- WARN_ON_ONCE(max_active_links > 2);
-
- n_data = iwl_mvm_set_link_selection_data(vif, data, usable_links,
- &best);
-
- if (WARN(!n_data, "Couldn't find a valid grade for any link!\n"))
- return;
-
- best_link = &data[best];
- primary_link = best_link->link_id;
- new_active_links = BIT(best_link->link_id);
-
- /* eSR is not supported/blocked, or only one usable link */
- if (max_active_links == 1 || !iwl_mvm_vif_has_esr_cap(mvm, vif) ||
- mvmvif->esr_disable_reason || n_data == 1)
- goto set_active;
-
- for (u8 a = 0; a < n_data; a++)
- for (u8 b = a + 1; b < n_data; b++) {
- u16 esr_grade = iwl_mvm_get_esr_grade(vif, &data[a],
- &data[b],
- &best_in_pair);
-
- if (esr_grade <= max_esr_grade)
- continue;
-
- max_esr_grade = esr_grade;
- primary_link = best_in_pair;
- new_active_links = BIT(data[a].link_id) |
- BIT(data[b].link_id);
- }
-
- /* No valid pair was found, go with the best link */
- if (hweight16(new_active_links) <= 1)
- goto set_active;
-
- /* For equal grade - prefer EMLSR */
- if (best_link->grade > max_esr_grade) {
- primary_link = best_link->link_id;
- new_active_links = BIT(best_link->link_id);
- }
-set_active:
- IWL_DEBUG_INFO(mvm, "Link selection result: 0x%x. Primary = %d\n",
- new_active_links, primary_link);
- ieee80211_set_active_links_async(vif, new_active_links);
- mvmvif->link_selection_res = new_active_links;
- mvmvif->link_selection_primary = primary_link;
-}
-
u8 iwl_mvm_get_primary_link(struct ieee80211_vif *vif)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
@@ -856,266 +307,6 @@ u8 iwl_mvm_get_primary_link(struct ieee80211_vif *vif)
return __ffs(vif->active_links);
}
-/*
- * For non-MLO/single link, this will return the deflink/single active link,
- * respectively
- */
-u8 iwl_mvm_get_other_link(struct ieee80211_vif *vif, u8 link_id)
-{
- switch (hweight16(vif->active_links)) {
- case 0:
- return 0;
- default:
- WARN_ON(1);
- fallthrough;
- case 1:
- return __ffs(vif->active_links);
- case 2:
- return __ffs(vif->active_links & ~BIT(link_id));
- }
-}
-
-/* Reasons that can cause esr prevention */
-#define IWL_MVM_ESR_PREVENT_REASONS IWL_MVM_ESR_EXIT_MISSED_BEACON
-#define IWL_MVM_PREVENT_ESR_TIMEOUT (HZ * 400)
-#define IWL_MVM_ESR_PREVENT_SHORT (HZ * 300)
-#define IWL_MVM_ESR_PREVENT_LONG (HZ * 600)
-
-static bool iwl_mvm_check_esr_prevention(struct iwl_mvm *mvm,
- struct iwl_mvm_vif *mvmvif,
- enum iwl_mvm_esr_state reason)
-{
- bool timeout_expired = time_after(jiffies,
- mvmvif->last_esr_exit.ts +
- IWL_MVM_PREVENT_ESR_TIMEOUT);
- unsigned long delay;
-
- lockdep_assert_held(&mvm->mutex);
-
- /* Only handle reasons that can cause prevention */
- if (!(reason & IWL_MVM_ESR_PREVENT_REASONS))
- return false;
-
- /*
- * Reset the counter if more than 400 seconds have passed between one
- * exit and the other, or if we exited due to a different reason.
- * Will also reset the counter after the long prevention is done.
- */
- if (timeout_expired || mvmvif->last_esr_exit.reason != reason) {
- mvmvif->exit_same_reason_count = 1;
- return false;
- }
-
- mvmvif->exit_same_reason_count++;
- if (WARN_ON(mvmvif->exit_same_reason_count < 2 ||
- mvmvif->exit_same_reason_count > 3))
- return false;
-
- mvmvif->esr_disable_reason |= IWL_MVM_ESR_BLOCKED_PREVENTION;
-
- /*
- * For the second exit, use a short prevention, and for the third one,
- * use a long prevention.
- */
- delay = mvmvif->exit_same_reason_count == 2 ?
- IWL_MVM_ESR_PREVENT_SHORT :
- IWL_MVM_ESR_PREVENT_LONG;
-
- IWL_DEBUG_INFO(mvm,
- "Preventing EMLSR for %ld seconds due to %u exits with the reason = %s (0x%x)\n",
- delay / HZ, mvmvif->exit_same_reason_count,
- iwl_get_esr_state_string(reason), reason);
-
- wiphy_delayed_work_queue(mvm->hw->wiphy,
- &mvmvif->prevent_esr_done_wk, delay);
- return true;
-}
-
-#define IWL_MVM_TRIGGER_LINK_SEL_TIME (IWL_MVM_TRIGGER_LINK_SEL_TIME_SEC * HZ)
-
-/* API to exit eSR mode */
-void iwl_mvm_exit_esr(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- enum iwl_mvm_esr_state reason,
- u8 link_to_keep)
-{
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- u16 new_active_links;
- bool prevented;
-
- lockdep_assert_held(&mvm->mutex);
-
- if (!IWL_MVM_AUTO_EML_ENABLE)
- return;
-
- /* Nothing to do */
- if (!mvmvif->esr_active)
- return;
-
- if (WARN_ON(!ieee80211_vif_is_mld(vif) || !mvmvif->authorized))
- return;
-
- if (WARN_ON(!(vif->active_links & BIT(link_to_keep))))
- link_to_keep = __ffs(vif->active_links);
-
- new_active_links = BIT(link_to_keep);
- IWL_DEBUG_INFO(mvm,
- "Exiting EMLSR. reason = %s (0x%x). Current active links=0x%x, new active links = 0x%x\n",
- iwl_get_esr_state_string(reason), reason,
- vif->active_links, new_active_links);
-
- ieee80211_set_active_links_async(vif, new_active_links);
-
- /* Prevent EMLSR if needed */
- prevented = iwl_mvm_check_esr_prevention(mvm, mvmvif, reason);
-
- /* Remember why and when we exited EMLSR */
- mvmvif->last_esr_exit.ts = jiffies;
- mvmvif->last_esr_exit.reason = reason;
-
- /*
- * If EMLSR is prevented now - don't try to get back to EMLSR.
- * If we exited due to a blocking event, we will try to get back to
- * EMLSR when the corresponding unblocking event will happen.
- */
- if (prevented || reason & IWL_MVM_BLOCK_ESR_REASONS)
- return;
-
- /* If EMLSR is not blocked - try enabling it again in 30 seconds */
- wiphy_delayed_work_queue(mvm->hw->wiphy,
- &mvmvif->mlo_int_scan_wk,
- round_jiffies_relative(IWL_MVM_TRIGGER_LINK_SEL_TIME));
-}
-
-void iwl_mvm_block_esr(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- enum iwl_mvm_esr_state reason,
- u8 link_to_keep)
-{
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
-
- lockdep_assert_held(&mvm->mutex);
-
- if (!IWL_MVM_AUTO_EML_ENABLE)
- return;
-
- /* This should be called only with disable reasons */
- if (WARN_ON(!(reason & IWL_MVM_BLOCK_ESR_REASONS)))
- return;
-
- if (mvmvif->esr_disable_reason & reason)
- return;
-
- IWL_DEBUG_INFO(mvm,
- "Blocking EMLSR mode. reason = %s (0x%x)\n",
- iwl_get_esr_state_string(reason), reason);
-
- mvmvif->esr_disable_reason |= reason;
-
- iwl_mvm_print_esr_state(mvm, mvmvif->esr_disable_reason);
-
- iwl_mvm_exit_esr(mvm, vif, reason, link_to_keep);
-}
-
-int iwl_mvm_block_esr_sync(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- enum iwl_mvm_esr_state reason)
-{
- int primary_link = iwl_mvm_get_primary_link(vif);
- int ret;
-
- if (!IWL_MVM_AUTO_EML_ENABLE || !ieee80211_vif_is_mld(vif))
- return 0;
-
- /* This should be called only with blocking reasons */
- if (WARN_ON(!(reason & IWL_MVM_BLOCK_ESR_REASONS)))
- return 0;
-
- /* leave ESR immediately, not only async with iwl_mvm_block_esr() */
- ret = ieee80211_set_active_links(vif, BIT(primary_link));
- if (ret)
- return ret;
-
- mutex_lock(&mvm->mutex);
- /* only additionally block for consistency and to avoid concurrency */
- iwl_mvm_block_esr(mvm, vif, reason, primary_link);
- mutex_unlock(&mvm->mutex);
-
- return 0;
-}
-
-static void iwl_mvm_esr_unblocked(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif)
-{
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- bool need_new_sel = time_after(jiffies, mvmvif->last_esr_exit.ts +
- IWL_MVM_TRIGGER_LINK_SEL_TIME);
-
- lockdep_assert_held(&mvm->mutex);
-
- if (!ieee80211_vif_is_mld(vif) || !mvmvif->authorized ||
- mvmvif->esr_active)
- return;
-
- IWL_DEBUG_INFO(mvm, "EMLSR is unblocked\n");
-
- /* If we exited due to an EXIT reason, and the exit was in less than
- * 30 seconds, then a MLO scan was scheduled already.
- */
- if (!need_new_sel &&
- !(mvmvif->last_esr_exit.reason & IWL_MVM_BLOCK_ESR_REASONS)) {
- IWL_DEBUG_INFO(mvm, "Wait for MLO scan\n");
- return;
- }
-
- /*
- * If EMLSR was blocked for more than 30 seconds, or the last link
- * selection decided to not enter EMLSR, trigger a new scan.
- */
- if (need_new_sel || hweight16(mvmvif->link_selection_res) < 2) {
- IWL_DEBUG_INFO(mvm, "Trigger MLO scan\n");
- wiphy_delayed_work_queue(mvm->hw->wiphy,
- &mvmvif->mlo_int_scan_wk, 0);
- /*
- * If EMLSR was blocked for less than 30 seconds, and the last link
- * selection decided to use EMLSR, activate EMLSR using the previous
- * link selection result.
- */
- } else {
- IWL_DEBUG_INFO(mvm,
- "Use the latest link selection result: 0x%x\n",
- mvmvif->link_selection_res);
- ieee80211_set_active_links_async(vif,
- mvmvif->link_selection_res);
- }
-}
-
-void iwl_mvm_unblock_esr(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- enum iwl_mvm_esr_state reason)
-{
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
-
- lockdep_assert_held(&mvm->mutex);
-
- if (!IWL_MVM_AUTO_EML_ENABLE)
- return;
-
- /* This should be called only with disable reasons */
- if (WARN_ON(!(reason & IWL_MVM_BLOCK_ESR_REASONS)))
- return;
-
- /* No Change */
- if (!(mvmvif->esr_disable_reason & reason))
- return;
-
- mvmvif->esr_disable_reason &= ~reason;
-
- IWL_DEBUG_INFO(mvm,
- "Unblocking EMLSR mode. reason = %s (0x%x)\n",
- iwl_get_esr_state_string(reason), reason);
- iwl_mvm_print_esr_state(mvm, mvmvif->esr_disable_reason);
-
- if (!mvmvif->esr_disable_reason)
- iwl_mvm_esr_unblocked(mvm, vif);
-}
-
void iwl_mvm_init_link(struct iwl_mvm_vif_link_info *link)
{
link->bcast_sta.sta_id = IWL_INVALID_STA;
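
For reference, the removed iwl_mvm_get_esr_grade() above weights the secondary link's grade by the primary link's channel load before adding it to the primary grade, so a busier primary channel makes the second link worth more. A minimal standalone sketch of that arithmetic (the helper name and the explicit scale parameter, standing in for SCALE_FACTOR, are hypothetical):

/* Hypothetical sketch of combining two per-link grades for an EMLSR pair. */
static unsigned int esr_pair_grade(unsigned int primary_grade,
				   unsigned int secondary_grade,
				   unsigned int primary_load,
				   unsigned int scale)
{
	/* the busier the primary channel, the more the secondary link adds */
	return primary_grade + secondary_grade * primary_load / scale;
}
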
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
index 8805ab344895..9c9e0e1c6e1d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
@@ -1586,13 +1586,11 @@ iwl_mvm_handle_missed_beacons_notif(struct iwl_mvm *mvm,
u32 id = le32_to_cpu(mb->link_id);
union iwl_dbg_tlv_tp_data tp_data = { .fw_pkt = pkt };
u32 mac_type;
- int link_id;
u8 notif_ver = iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP,
MISSED_BEACONS_NOTIFICATION,
0);
u8 new_notif_ver = iwl_fw_lookup_notif_ver(mvm->fw, MAC_CONF_GROUP,
MISSED_BEACONS_NOTIF, 0);
- struct ieee80211_bss_conf *bss_conf;
/* If the firmware uses the new notification (from MAC_CONF_GROUP),
* refer to that notification's version.
@@ -1617,16 +1615,10 @@ iwl_mvm_handle_missed_beacons_notif(struct iwl_mvm *mvm,
if (!vif)
return;
- bss_conf = &vif->bss_conf;
- link_id = bss_conf->link_id;
mac_type = iwl_mvm_get_mac_type(vif);
IWL_DEBUG_INFO(mvm, "missed beacon mac_type=%u,\n", mac_type);
- mvm->trans->dbg.dump_file_name_ext_valid = true;
- snprintf(mvm->trans->dbg.dump_file_name_ext, IWL_FW_INI_MAX_NAME,
- "MacId_%d_MacType_%d", id, mac_type);
-
rx_missed_bcon = le32_to_cpu(mb->consec_missed_beacons);
rx_missed_bcon_since_rx =
le32_to_cpu(mb->consec_missed_beacons_since_last_rx);
@@ -1644,41 +1636,11 @@ iwl_mvm_handle_missed_beacons_notif(struct iwl_mvm *mvm,
"missed_beacons:%d, missed_beacons_since_rx:%d\n",
rx_missed_bcon, rx_missed_bcon_since_rx);
}
- } else if (link_id >= 0 && hweight16(vif->active_links) > 1) {
- u32 bss_param_ch_cnt_link_id =
- bss_conf->bss_param_ch_cnt_link_id;
- u32 scnd_lnk_bcn_lost = 0;
-
- if (notif_ver >= 5 &&
- !IWL_FW_CHECK(mvm,
- le32_to_cpu(mb->other_link_id) == IWL_MVM_FW_LINK_ID_INVALID,
- "No data for other link id but we are in EMLSR active_links: 0x%x\n",
- vif->active_links))
- scnd_lnk_bcn_lost =
- le32_to_cpu(mb->consec_missed_beacons_other_link);
-
- /* Exit EMLSR if we lost more than
- * IWL_MVM_MISSED_BEACONS_EXIT_ESR_THRESH beacons on both links
- * OR more than IWL_MVM_BCN_LOSS_EXIT_ESR_THRESH on any link.
- * OR more than IWL_MVM_BCN_LOSS_EXIT_ESR_THRESH_BSS_PARAM_CHANGED
- * and the link's bss_param_ch_count has changed.
- */
- if ((rx_missed_bcon >= IWL_MVM_BCN_LOSS_EXIT_ESR_THRESH_2_LINKS &&
- scnd_lnk_bcn_lost >= IWL_MVM_BCN_LOSS_EXIT_ESR_THRESH_2_LINKS) ||
- rx_missed_bcon >= IWL_MVM_BCN_LOSS_EXIT_ESR_THRESH ||
- (bss_param_ch_cnt_link_id != link_id &&
- rx_missed_bcon >= IWL_MVM_BCN_LOSS_EXIT_ESR_THRESH_BSS_PARAM_CHANGED))
- iwl_mvm_exit_esr(mvm, vif,
- IWL_MVM_ESR_EXIT_MISSED_BEACON,
- iwl_mvm_get_primary_link(vif));
} else if (rx_missed_bcon_since_rx > IWL_MVM_MISSED_BEACONS_THRESHOLD) {
if (!iwl_mvm_has_new_tx_api(mvm))
ieee80211_beacon_loss(vif);
else
ieee80211_cqm_beacon_loss_notify(vif, GFP_ATOMIC);
-
- /* try to switch links, no-op if we don't have MLO */
- iwl_mvm_int_mlo_scan(mvm, vif);
}
iwl_dbg_tlv_time_point(&mvm->fwrt,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 4ad3d32683d8..44029ceb8f77 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -1425,12 +1425,6 @@ void iwl_mvm_mac_stop(struct ieee80211_hw *hw, bool suspend)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- /* Stop internal MLO scan, if running */
- mutex_lock(&mvm->mutex);
- iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_INT_MLO, false);
- mutex_unlock(&mvm->mutex);
-
- wiphy_work_cancel(mvm->hw->wiphy, &mvm->trig_link_selection_wk);
wiphy_work_flush(mvm->hw->wiphy, &mvm->async_handlers_wiphy_wk);
flush_work(&mvm->async_handlers_wk);
flush_work(&mvm->add_stream_wk);
@@ -1715,57 +1709,6 @@ static int iwl_mvm_alloc_bcast_mcast_sta(struct iwl_mvm *mvm,
IWL_STA_MULTICAST);
}
-static void iwl_mvm_prevent_esr_done_wk(struct wiphy *wiphy,
- struct wiphy_work *wk)
-{
- struct iwl_mvm_vif *mvmvif =
- container_of(wk, struct iwl_mvm_vif, prevent_esr_done_wk.work);
- struct iwl_mvm *mvm = mvmvif->mvm;
- struct ieee80211_vif *vif =
- container_of((void *)mvmvif, struct ieee80211_vif, drv_priv);
-
- guard(mvm)(mvm);
- iwl_mvm_unblock_esr(mvm, vif, IWL_MVM_ESR_BLOCKED_PREVENTION);
-}
-
-static void iwl_mvm_mlo_int_scan_wk(struct wiphy *wiphy, struct wiphy_work *wk)
-{
- struct iwl_mvm_vif *mvmvif = container_of(wk, struct iwl_mvm_vif,
- mlo_int_scan_wk.work);
- struct ieee80211_vif *vif =
- container_of((void *)mvmvif, struct ieee80211_vif, drv_priv);
-
- guard(mvm)(mvmvif->mvm);
- iwl_mvm_int_mlo_scan(mvmvif->mvm, vif);
-}
-
-static void iwl_mvm_unblock_esr_tpt(struct wiphy *wiphy, struct wiphy_work *wk)
-{
- struct iwl_mvm_vif *mvmvif =
- container_of(wk, struct iwl_mvm_vif, unblock_esr_tpt_wk);
- struct iwl_mvm *mvm = mvmvif->mvm;
- struct ieee80211_vif *vif =
- container_of((void *)mvmvif, struct ieee80211_vif, drv_priv);
-
- guard(mvm)(mvm);
- iwl_mvm_unblock_esr(mvm, vif, IWL_MVM_ESR_BLOCKED_TPT);
-}
-
-static void iwl_mvm_unblock_esr_tmp_non_bss(struct wiphy *wiphy,
- struct wiphy_work *wk)
-{
- struct iwl_mvm_vif *mvmvif =
- container_of(wk, struct iwl_mvm_vif,
- unblock_esr_tmp_non_bss_wk.work);
- struct iwl_mvm *mvm = mvmvif->mvm;
- struct ieee80211_vif *vif =
- container_of((void *)mvmvif, struct ieee80211_vif, drv_priv);
-
- mutex_lock(&mvm->mutex);
- iwl_mvm_unblock_esr(mvm, vif, IWL_MVM_ESR_BLOCKED_TMP_NON_BSS);
- mutex_unlock(&mvm->mutex);
-}
-
void iwl_mvm_mac_init_mvmvif(struct iwl_mvm *mvm, struct iwl_mvm_vif *mvmvif)
{
lockdep_assert_held(&mvm->mutex);
@@ -1777,18 +1720,6 @@ void iwl_mvm_mac_init_mvmvif(struct iwl_mvm *mvm, struct iwl_mvm_vif *mvmvif)
INIT_DELAYED_WORK(&mvmvif->csa_work,
iwl_mvm_channel_switch_disconnect_wk);
-
- wiphy_delayed_work_init(&mvmvif->prevent_esr_done_wk,
- iwl_mvm_prevent_esr_done_wk);
-
- wiphy_delayed_work_init(&mvmvif->mlo_int_scan_wk,
- iwl_mvm_mlo_int_scan_wk);
-
- wiphy_work_init(&mvmvif->unblock_esr_tpt_wk,
- iwl_mvm_unblock_esr_tpt);
-
- wiphy_delayed_work_init(&mvmvif->unblock_esr_tmp_non_bss_wk,
- iwl_mvm_unblock_esr_tmp_non_bss);
}
static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
@@ -1926,16 +1857,6 @@ void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm,
flush_work(&mvm->roc_done_wk);
}
- wiphy_delayed_work_cancel(mvm->hw->wiphy,
- &mvmvif->prevent_esr_done_wk);
-
- wiphy_delayed_work_cancel(mvm->hw->wiphy,
- &mvmvif->mlo_int_scan_wk);
-
- wiphy_work_cancel(mvm->hw->wiphy, &mvmvif->unblock_esr_tpt_wk);
- wiphy_delayed_work_cancel(mvm->hw->wiphy,
- &mvmvif->unblock_esr_tmp_non_bss_wk);
-
cancel_delayed_work_sync(&mvmvif->csa_work);
}
@@ -4008,21 +3929,6 @@ iwl_mvm_sta_state_assoc_to_authorized(struct iwl_mvm *mvm,
callbacks->mac_ctxt_changed(mvm, vif, false);
iwl_mvm_mei_host_associated(mvm, vif, mvm_sta);
-
- memset(&mvmvif->last_esr_exit, 0,
- sizeof(mvmvif->last_esr_exit));
-
- iwl_mvm_block_esr(mvm, vif, IWL_MVM_ESR_BLOCKED_TPT, 0);
-
- /* Block until FW notif will arrive */
- iwl_mvm_block_esr(mvm, vif, IWL_MVM_ESR_BLOCKED_FW, 0);
-
- /* when client is authorized (AP station marked as such),
- * try to enable the best link(s).
- */
- if (vif->type == NL80211_IFTYPE_STATION &&
- !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
- iwl_mvm_select_links(mvm, vif);
}
mvm_sta->authorized = true;
@@ -4070,16 +3976,6 @@ iwl_mvm_sta_state_authorized_to_assoc(struct iwl_mvm *mvm,
/* disable beacon filtering */
iwl_mvm_disable_beacon_filter(mvm, vif);
-
- wiphy_delayed_work_cancel(mvm->hw->wiphy,
- &mvmvif->prevent_esr_done_wk);
-
- wiphy_delayed_work_cancel(mvm->hw->wiphy,
- &mvmvif->mlo_int_scan_wk);
-
- wiphy_work_cancel(mvm->hw->wiphy, &mvmvif->unblock_esr_tpt_wk);
- wiphy_delayed_work_cancel(mvm->hw->wiphy,
- &mvmvif->unblock_esr_tmp_non_bss_wk);
}
return 0;
@@ -4920,7 +4816,6 @@ int iwl_mvm_roc_common(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
const struct iwl_mvm_roc_ops *ops)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- struct ieee80211_vif *bss_vif = iwl_mvm_get_bss_vif(mvm);
u32 lmac_id;
int ret;
@@ -4933,13 +4828,6 @@ int iwl_mvm_roc_common(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
*/
flush_work(&mvm->roc_done_wk);
- if (!IS_ERR_OR_NULL(bss_vif)) {
- ret = iwl_mvm_block_esr_sync(mvm, bss_vif,
- IWL_MVM_ESR_BLOCKED_ROC);
- if (ret)
- return ret;
- }
-
guard(mvm)(mvm);
switch (vif->type) {
@@ -5604,9 +5492,9 @@ static void iwl_mvm_csa_block_txqs(void *data, struct ieee80211_sta *sta)
}
#define IWL_MAX_CSA_BLOCK_TX 1500
-int iwl_mvm_pre_channel_switch(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- struct ieee80211_channel_switch *chsw)
+static int iwl_mvm_pre_channel_switch(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_channel_switch *chsw)
{
struct ieee80211_vif *csa_vif;
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
@@ -5724,9 +5612,9 @@ int iwl_mvm_pre_channel_switch(struct iwl_mvm *mvm,
return ret;
}
-static int iwl_mvm_mac_pre_channel_switch(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_channel_switch *chsw)
+int iwl_mvm_mac_pre_channel_switch(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_channel_switch *chsw)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
index bf24f8cb673e..b1dca76b7141 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
@@ -340,20 +340,6 @@ static int iwl_mvm_mld_assign_vif_chanctx(struct ieee80211_hw *hw,
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- /* update EMLSR mode */
- if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_STATION) {
- int ret;
-
- ret = iwl_mvm_esr_non_bss_link(mvm, vif, link_conf->link_id,
- true);
- /*
- * Don't activate this link if failed to exit EMLSR in
- * the BSS interface
- */
- if (ret)
- return ret;
- }
-
guard(mvm)(mvm);
return __iwl_mvm_mld_assign_vif_chanctx(mvm, vif, link_conf, ctx, false);
}
@@ -472,10 +458,6 @@ static void iwl_mvm_mld_unassign_vif_chanctx(struct ieee80211_hw *hw,
iwl_mvm_add_link(mvm, vif, link_conf);
}
mutex_unlock(&mvm->mutex);
-
- /* update EMLSR mode */
- if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_STATION)
- iwl_mvm_esr_non_bss_link(mvm, vif, link_conf->link_id, false);
}
static void
@@ -684,25 +666,6 @@ static int iwl_mvm_mld_mac_sta_state(struct ieee80211_hw *hw,
&callbacks);
}
-static bool iwl_mvm_esr_bw_criteria(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- struct ieee80211_bss_conf *link_conf)
-{
- struct ieee80211_bss_conf *other_link;
- int link_id;
-
- /* Exit EMLSR if links don't have equal bandwidths */
- for_each_vif_active_link(vif, other_link, link_id) {
- if (link_id == link_conf->link_id)
- continue;
- if (link_conf->chanreq.oper.width ==
- other_link->chanreq.oper.width)
- return true;
- }
-
- return false;
-}
-
static void
iwl_mvm_mld_link_info_changed_station(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
@@ -737,14 +700,6 @@ iwl_mvm_mld_link_info_changed_station(struct iwl_mvm *mvm,
link_changes |= LINK_CONTEXT_MODIFY_HE_PARAMS;
}
- if ((changes & BSS_CHANGED_BANDWIDTH) &&
- ieee80211_vif_link_active(vif, link_conf->link_id) &&
- mvmvif->esr_active &&
- !iwl_mvm_esr_bw_criteria(mvm, vif, link_conf))
- iwl_mvm_exit_esr(mvm, vif,
- IWL_MVM_ESR_EXIT_BANDWIDTH,
- iwl_mvm_get_primary_link(vif));
-
/* if associated, maybe puncturing changed - we'll check later */
if (vif->cfg.assoc)
link_changes |= LINK_CONTEXT_MODIFY_EHT_PARAMS;
@@ -879,11 +834,6 @@ static void iwl_mvm_mld_vif_cfg_changed_station(struct iwl_mvm *mvm,
if (ret)
IWL_ERR(mvm, "failed to update power mode\n");
}
-
- if (changes & (BSS_CHANGED_MLD_VALID_LINKS | BSS_CHANGED_MLD_TTLM) &&
- ieee80211_vif_is_mld(vif) && mvmvif->authorized)
- wiphy_delayed_work_queue(mvm->hw->wiphy,
- &mvmvif->mlo_int_scan_wk, 0);
}
static void
@@ -1239,91 +1189,6 @@ iwl_mvm_mld_can_neg_ttlm(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
return NEG_TTLM_RES_ACCEPT;
}
-static int
-iwl_mvm_mld_mac_pre_channel_switch(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_channel_switch *chsw)
-{
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- int ret;
-
- mutex_lock(&mvm->mutex);
- if (mvmvif->esr_active) {
- u8 primary = iwl_mvm_get_primary_link(vif);
- int selected;
-
- /* prefer primary unless quiet CSA on it */
- if (chsw->link_id == primary && chsw->block_tx)
- selected = iwl_mvm_get_other_link(vif, primary);
- else
- selected = primary;
-
- /*
- * remember to tell the firmware that this link can't tx
- * Note that this logic seems to be unrelated to esr, but it
- * really is needed only when esr is active. When we have a
- * single link, the firmware will handle all this on its own.
- * In multi-link scenarios, we can learn about the CSA from
- * another link and this logic is too complex for the firmware
- * to track.
- * Since we want to de-activate the link that got a CSA, we
- * need to tell the firmware not to send any frame on that link
- * as the firmware may not be aware that link is under a CSA
- * with mode=1 (no Tx allowed).
- */
- if (chsw->block_tx && mvmvif->link[chsw->link_id])
- mvmvif->link[chsw->link_id]->csa_block_tx = true;
-
- iwl_mvm_exit_esr(mvm, vif, IWL_MVM_ESR_EXIT_CSA, selected);
- mutex_unlock(&mvm->mutex);
-
- /*
- * If we've not kept the link active that's doing the CSA
- * then we don't need to do anything else, just return.
- */
- if (selected != chsw->link_id)
- return 0;
-
- mutex_lock(&mvm->mutex);
- }
-
- ret = iwl_mvm_pre_channel_switch(mvm, vif, chsw);
- mutex_unlock(&mvm->mutex);
-
- return ret;
-}
-
-#define IWL_MVM_MLD_UNBLOCK_ESR_NON_BSS_TIMEOUT (5 * HZ)
-
-static void iwl_mvm_mld_prep_add_interface(struct ieee80211_hw *hw,
- enum nl80211_iftype type)
-{
- struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- struct ieee80211_vif *bss_vif = iwl_mvm_get_bss_vif(mvm);
- struct iwl_mvm_vif *mvmvif;
- int ret;
-
- IWL_DEBUG_MAC80211(mvm, "prep_add_interface: type=%u\n",
- type);
-
- if (IS_ERR_OR_NULL(bss_vif) ||
- !(type == NL80211_IFTYPE_AP ||
- type == NL80211_IFTYPE_P2P_GO ||
- type == NL80211_IFTYPE_P2P_CLIENT))
- return;
-
- mvmvif = iwl_mvm_vif_from_mac80211(bss_vif);
- ret = iwl_mvm_block_esr_sync(mvm, bss_vif,
- IWL_MVM_ESR_BLOCKED_TMP_NON_BSS);
- if (ret)
- return;
-
- wiphy_delayed_work_queue(mvmvif->mvm->hw->wiphy,
- &mvmvif->unblock_esr_tmp_non_bss_wk,
- IWL_MVM_MLD_UNBLOCK_ESR_NON_BSS_TIMEOUT);
-}
-
const struct ieee80211_ops iwl_mvm_mld_hw_ops = {
.tx = iwl_mvm_mac_tx,
.wake_tx_queue = iwl_mvm_mac_wake_tx_queue,
@@ -1377,7 +1242,7 @@ const struct ieee80211_ops iwl_mvm_mld_hw_ops = {
.tx_last_beacon = iwl_mvm_tx_last_beacon,
.channel_switch = iwl_mvm_channel_switch,
- .pre_channel_switch = iwl_mvm_mld_mac_pre_channel_switch,
+ .pre_channel_switch = iwl_mvm_mac_pre_channel_switch,
.post_channel_switch = iwl_mvm_post_channel_switch,
.abort_channel_switch = iwl_mvm_abort_channel_switch,
.channel_switch_rx_beacon = iwl_mvm_channel_switch_rx_beacon,
@@ -1418,5 +1283,4 @@ const struct ieee80211_ops iwl_mvm_mld_hw_ops = {
.change_sta_links = iwl_mvm_mld_change_sta_links,
.can_activate_links = iwl_mvm_mld_can_activate_links,
.can_neg_ttlm = iwl_mvm_mld_can_neg_ttlm,
- .prep_add_interface = iwl_mvm_mld_prep_add_interface,
};
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c
index e1010521c3ea..d9a2801636cf 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c
@@ -852,8 +852,6 @@ int iwl_mvm_mld_rm_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
iwl_mvm_mld_free_sta_link(mvm, mvm_sta, mvm_link_sta, link_id);
}
- kfree(mvm_sta->mpdu_counters);
- mvm_sta->mpdu_counters = NULL;
return ret;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index fdaeefa305e1..b515028adc8f 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -348,68 +348,6 @@ struct iwl_mvm_vif_link_info {
};
/**
- * enum iwl_mvm_esr_state - defines reasons for which the EMLSR is exited or
- * blocked.
- * The low 16 bits are used for blocking reasons, and the 16 higher bits
- * are used for exit reasons.
- * For the blocking reasons - use iwl_mvm_(un)block_esr(), and for the exit
- * reasons - use iwl_mvm_exit_esr().
- *
- * Note: new reasons shall be added to HANDLE_ESR_REASONS as well (for logs)
- *
- * @IWL_MVM_ESR_BLOCKED_PREVENTION: Prevent EMLSR to avoid entering and exiting
- * in a loop.
- * @IWL_MVM_ESR_BLOCKED_WOWLAN: WOWLAN is preventing the enablement of EMLSR
- * @IWL_MVM_ESR_BLOCKED_TPT: block EMLSR when there is not enough traffic
- * @IWL_MVM_ESR_BLOCKED_FW: FW didn't recommend EMLSR, or forced an exit from EMLSR
- * @IWL_MVM_ESR_BLOCKED_NON_BSS: An active non-BSS interface's link is
- * preventing EMLSR
- * @IWL_MVM_ESR_BLOCKED_ROC: remain-on-channel is preventing EMLSR
- * @IWL_MVM_ESR_BLOCKED_TMP_NON_BSS: An expected active non-BSS interface's link
- * is preventing EMLSR. This is a temporary blocking that is set when there
- * is an indication that a non-BSS interface is to be added.
- * @IWL_MVM_ESR_EXIT_MISSED_BEACON: exited EMLSR due to missed beacons
- * @IWL_MVM_ESR_EXIT_LOW_RSSI: link is deactivated/not allowed for EMLSR
- * due to low RSSI.
- * @IWL_MVM_ESR_EXIT_COEX: link is deactivated/not allowed for EMLSR
- * due to BT Coex.
- * @IWL_MVM_ESR_EXIT_BANDWIDTH: Bandwidths of primary and secondary links
- * preventing the enablement of EMLSR
- * @IWL_MVM_ESR_EXIT_CSA: CSA happened, so exit EMLSR
- * @IWL_MVM_ESR_EXIT_LINK_USAGE: Exit EMLSR due to low tpt on secondary link
- */
-enum iwl_mvm_esr_state {
- IWL_MVM_ESR_BLOCKED_PREVENTION = 0x1,
- IWL_MVM_ESR_BLOCKED_WOWLAN = 0x2,
- IWL_MVM_ESR_BLOCKED_TPT = 0x4,
- IWL_MVM_ESR_BLOCKED_FW = 0x8,
- IWL_MVM_ESR_BLOCKED_NON_BSS = 0x10,
- IWL_MVM_ESR_BLOCKED_ROC = 0x20,
- IWL_MVM_ESR_BLOCKED_TMP_NON_BSS = 0x40,
- IWL_MVM_ESR_EXIT_MISSED_BEACON = 0x10000,
- IWL_MVM_ESR_EXIT_LOW_RSSI = 0x20000,
- IWL_MVM_ESR_EXIT_COEX = 0x40000,
- IWL_MVM_ESR_EXIT_BANDWIDTH = 0x80000,
- IWL_MVM_ESR_EXIT_CSA = 0x100000,
- IWL_MVM_ESR_EXIT_LINK_USAGE = 0x200000,
-};
-
-#define IWL_MVM_BLOCK_ESR_REASONS 0xffff
-
-const char *iwl_get_esr_state_string(enum iwl_mvm_esr_state state);
-
-/**
- * struct iwl_mvm_esr_exit - details of the last exit from EMLSR mode.
- * @reason: The reason for the last exit from EMLSR.
- * &iwl_mvm_prevent_esr_reasons. Will be 0 before exiting EMLSR.
- * @ts: the time stamp of the last time we exited EMLSR.
- */
-struct iwl_mvm_esr_exit {
- unsigned long ts;
- enum iwl_mvm_esr_state reason;
-};
-
-/**
* struct iwl_mvm_vif - data per Virtual Interface, it is a MAC context
* @mvm: pointer back to the mvm struct
* @id: between 0 and 3
@@ -443,7 +381,6 @@ struct iwl_mvm_esr_exit {
* @deflink: default link data for use in non-MLO
* @link: link data for each link in MLO
* @esr_active: indicates eSR mode is active
- * @esr_disable_reason: a bitmap of &enum iwl_mvm_esr_state
* @pm_enabled: indicates powersave is enabled
* @link_selection_res: bitmap of active links as it was decided in the last
* link selection. Valid only for a MLO vif after assoc. 0 if there wasn't
@@ -451,15 +388,6 @@ struct iwl_mvm_esr_exit {
* @link_selection_primary: primary link selected by link selection
* @primary_link: primary link in eSR. Valid only for an associated MLD vif,
* and in eSR mode. Valid only for a STA.
- * @last_esr_exit: Details of the last exit from EMLSR.
- * @exit_same_reason_count: The number of times we exited due to the specified
- * @last_esr_exit::reason, only counting exits due to
- * &IWL_MVM_ESR_PREVENT_REASONS.
- * @prevent_esr_done_wk: work that should be done when esr prevention ends.
- * @mlo_int_scan_wk: work for the internal MLO scan.
- * @unblock_esr_tpt_wk: work for unblocking EMLSR when tpt is high enough.
- * @unblock_esr_tmp_non_bss_wk: work for removing the
- * IWL_MVM_ESR_BLOCKED_TMP_NON_BSS blocking for EMLSR.
* @roc_activity: currently running ROC activity for this vif (or
* ROC_NUM_ACTIVITIES if no activity is running).
* @session_prot_connection_loss: the connection was lost due to session
@@ -515,7 +443,6 @@ struct iwl_mvm_vif {
u8 authorized:1;
bool ps_disabled;
- u32 esr_disable_reason;
u32 ap_beacon_time;
bool bf_enabled;
bool ba_enabled;
@@ -591,12 +518,6 @@ struct iwl_mvm_vif {
u16 link_selection_res;
u8 link_selection_primary;
u8 primary_link;
- struct iwl_mvm_esr_exit last_esr_exit;
- u8 exit_same_reason_count;
- struct wiphy_delayed_work prevent_esr_done_wk;
- struct wiphy_delayed_work mlo_int_scan_wk;
- struct wiphy_work unblock_esr_tpt_wk;
- struct wiphy_delayed_work unblock_esr_tmp_non_bss_wk;
struct iwl_mvm_vif_link_info deflink;
struct iwl_mvm_vif_link_info *link[IEEE80211_MLD_MAX_NUM_LINKS];
@@ -622,7 +543,6 @@ enum iwl_scan_status {
IWL_MVM_SCAN_REGULAR = BIT(0),
IWL_MVM_SCAN_SCHED = BIT(1),
IWL_MVM_SCAN_NETDETECT = BIT(2),
- IWL_MVM_SCAN_INT_MLO = BIT(3),
IWL_MVM_SCAN_STOPPING_REGULAR = BIT(8),
IWL_MVM_SCAN_STOPPING_SCHED = BIT(9),
@@ -635,8 +555,6 @@ enum iwl_scan_status {
IWL_MVM_SCAN_STOPPING_SCHED,
IWL_MVM_SCAN_NETDETECT_MASK = IWL_MVM_SCAN_NETDETECT |
IWL_MVM_SCAN_STOPPING_NETDETECT,
- IWL_MVM_SCAN_INT_MLO_MASK = IWL_MVM_SCAN_INT_MLO |
- IWL_MVM_SCAN_STOPPING_INT_MLO,
IWL_MVM_SCAN_STOPPING_MASK = 0xff << IWL_MVM_SCAN_STOPPING_SHIFT,
IWL_MVM_SCAN_MASK = 0xff,
@@ -1017,8 +935,6 @@ struct iwl_mvm {
/* For async rx handlers that require the wiphy lock */
struct wiphy_work async_handlers_wiphy_wk;
- struct wiphy_work trig_link_selection_wk;
-
struct work_struct roc_done_wk;
unsigned long init_status;
@@ -1203,20 +1119,13 @@ struct iwl_mvm {
u8 offload_tid;
#ifdef CONFIG_IWLWIFI_DEBUGFS
bool d3_wake_sysassert;
- bool d3_test_active;
- u32 d3_test_pme_ptr;
- struct ieee80211_vif *keep_vif;
u32 last_netdetect_scans; /* no. of scans in the last net-detect wake */
#endif
#endif
wait_queue_head_t rx_sync_waitq;
- /* BT-Coex - only one of those will be used */
- union {
- struct iwl_bt_coex_prof_old_notif last_bt_notif;
- struct iwl_bt_coex_profile_notif last_bt_wifi_loss;
- };
+ struct iwl_bt_coex_prof_old_notif last_bt_notif;
struct iwl_bt_coex_ci_cmd last_bt_ci_cmd;
u8 bt_tx_prio;
@@ -2099,9 +2008,7 @@ int iwl_mvm_remove_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
int iwl_mvm_disable_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct ieee80211_bss_conf *link_conf);
-void iwl_mvm_select_links(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
u8 iwl_mvm_get_primary_link(struct ieee80211_vif *vif);
-u8 iwl_mvm_get_other_link(struct ieee80211_vif *vif, u8 link_id);
struct iwl_mvm_link_sel_data {
u8 link_id;
@@ -2111,13 +2018,6 @@ struct iwl_mvm_link_sel_data {
};
#if IS_ENABLED(CONFIG_IWLWIFI_KUNIT_TESTS)
-unsigned int iwl_mvm_get_link_grade(struct ieee80211_bss_conf *link_conf);
-bool iwl_mvm_mld_valid_link_pair(struct ieee80211_vif *vif,
- const struct iwl_mvm_link_sel_data *a,
- const struct iwl_mvm_link_sel_data *b);
-
-s8 iwl_mvm_average_dbm_values(const struct iwl_umac_scan_channel_survey_notif *notif);
-
extern const struct iwl_hcmd_arr iwl_mvm_groups[];
extern const unsigned int iwl_mvm_groups_size;
#endif
@@ -2201,7 +2101,6 @@ int iwl_mvm_scan_stop(struct iwl_mvm *mvm, int type, bool notify);
int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm);
void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm);
void iwl_mvm_scan_timeout_wk(struct work_struct *work);
-int iwl_mvm_int_mlo_scan(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
void iwl_mvm_rx_channel_survey_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
@@ -2327,8 +2226,6 @@ int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
int iwl_mvm_send_bt_init_conf(struct iwl_mvm *mvm);
void iwl_mvm_rx_bt_coex_old_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
-void iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
enum ieee80211_rssi_event_data);
void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm);
@@ -2929,9 +2826,10 @@ void iwl_mvm_change_chanctx(struct ieee80211_hw *hw,
int iwl_mvm_tx_last_beacon(struct ieee80211_hw *hw);
void iwl_mvm_channel_switch(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_channel_switch *chsw);
-int iwl_mvm_pre_channel_switch(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- struct ieee80211_channel_switch *chsw);
+int iwl_mvm_mac_pre_channel_switch(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_channel_switch *chsw);
+
void iwl_mvm_abort_channel_switch(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *link_conf);
@@ -2988,30 +2886,6 @@ int iwl_mvm_roc_add_cmd(struct iwl_mvm *mvm,
/* EMLSR */
bool iwl_mvm_vif_has_esr_cap(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
-void iwl_mvm_block_esr(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- enum iwl_mvm_esr_state reason,
- u8 link_to_keep);
-int iwl_mvm_block_esr_sync(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- enum iwl_mvm_esr_state reason);
-void iwl_mvm_unblock_esr(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- enum iwl_mvm_esr_state reason);
-void iwl_mvm_exit_esr(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- enum iwl_mvm_esr_state reason,
- u8 link_to_keep);
-s8 iwl_mvm_get_esr_rssi_thresh(struct iwl_mvm *mvm,
- const struct cfg80211_chan_def *chandef,
- bool low);
-void iwl_mvm_bt_coex_update_link_esr(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- int link_id);
-bool
-iwl_mvm_bt_coex_calculate_esr_mode(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- s32 link_rssi,
- bool primary);
-int iwl_mvm_esr_non_bss_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- unsigned int link_id, bool active);
-
void
iwl_mvm_send_ap_tx_power_constraint_cmd(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
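
The kerneldoc removed above splits the EMLSR state word into blocking reasons (low 16 bits, masked by IWL_MVM_BLOCK_ESR_REASONS) and exit reasons (high 16 bits). A minimal sketch of testing that split with standalone types (helper name is hypothetical):

#include <stdbool.h>
#include <stdint.h>

#define BLOCK_REASONS_MASK 0xffffu	/* low 16 bits: blocking reasons */

static bool reason_is_blocking(uint32_t reason)
{
	/* exit reasons occupy the high 16 bits and never match this mask */
	return (reason & BLOCK_REASONS_MASK) != 0;
}
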
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index c7f08cde1f72..5ebd046371f5 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -143,24 +143,6 @@ static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode)
~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
}
-static void iwl_mvm_rx_esr_mode_notif(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_esr_mode_notif *notif = (void *)pkt->data;
- struct ieee80211_vif *vif = iwl_mvm_get_bss_vif(mvm);
-
- /* FW recommendation is only for entering EMLSR */
- if (IS_ERR_OR_NULL(vif) || iwl_mvm_vif_from_mac80211(vif)->esr_active)
- return;
-
- if (le32_to_cpu(notif->action) == ESR_RECOMMEND_ENTER)
- iwl_mvm_unblock_esr(mvm, vif, IWL_MVM_ESR_BLOCKED_FW);
- else
- iwl_mvm_block_esr(mvm, vif, IWL_MVM_ESR_BLOCKED_FW,
- iwl_mvm_get_primary_link(vif));
-}
-
static void iwl_mvm_rx_monitor_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb)
{
@@ -345,9 +327,6 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
RX_HANDLER(BT_PROFILE_NOTIFICATION, iwl_mvm_rx_bt_coex_old_notif,
RX_HANDLER_ASYNC_LOCKED_WIPHY,
struct iwl_bt_coex_prof_old_notif),
- RX_HANDLER_GRP(BT_COEX_GROUP, PROFILE_NOTIF, iwl_mvm_rx_bt_coex_notif,
- RX_HANDLER_ASYNC_LOCKED_WIPHY,
- struct iwl_bt_coex_profile_notif),
RX_HANDLER_NO_SIZE(BEACON_NOTIFICATION, iwl_mvm_rx_beacon_notif,
RX_HANDLER_ASYNC_LOCKED),
RX_HANDLER_NO_SIZE(STATISTICS_NOTIFICATION, iwl_mvm_rx_statistics,
@@ -457,11 +436,6 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
RX_HANDLER_ASYNC_UNLOCKED,
struct iwl_channel_switch_error_notif),
- RX_HANDLER_GRP(DATA_PATH_GROUP, ESR_MODE_NOTIF,
- iwl_mvm_rx_esr_mode_notif,
- RX_HANDLER_ASYNC_LOCKED_WIPHY,
- struct iwl_esr_mode_notif),
-
RX_HANDLER_GRP(DATA_PATH_GROUP, MONITOR_NOTIF,
iwl_mvm_rx_monitor_notif, RX_HANDLER_ASYNC_LOCKED,
struct iwl_datapath_monitor_notif),
@@ -661,7 +635,6 @@ static const struct iwl_hcmd_names iwl_mvm_data_path_names[] = {
HCMD_NAME(CHEST_COLLECTOR_FILTER_CONFIG_CMD),
HCMD_NAME(SCD_QUEUE_CONFIG_CMD),
HCMD_NAME(SEC_KEY_CMD),
- HCMD_NAME(ESR_MODE_NOTIF),
HCMD_NAME(MONITOR_NOTIF),
HCMD_NAME(THERMAL_DUAL_CHAIN_REQUEST),
HCMD_NAME(BEACON_FILTER_IN_NOTIF),
@@ -1220,29 +1193,6 @@ static const struct iwl_mei_ops mei_ops = {
.nic_stolen = iwl_mvm_mei_nic_stolen,
};
-static void iwl_mvm_find_link_selection_vif(void *_data, u8 *mac,
- struct ieee80211_vif *vif)
-{
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
-
- if (ieee80211_vif_is_mld(vif) && mvmvif->authorized)
- iwl_mvm_select_links(mvmvif->mvm, vif);
-}
-
-static void iwl_mvm_trig_link_selection(struct wiphy *wiphy,
- struct wiphy_work *wk)
-{
- struct iwl_mvm *mvm =
- container_of(wk, struct iwl_mvm, trig_link_selection_wk);
-
- mutex_lock(&mvm->mutex);
- ieee80211_iterate_active_interfaces(mvm->hw,
- IEEE80211_IFACE_ITER_NORMAL,
- iwl_mvm_find_link_selection_vif,
- NULL);
- mutex_unlock(&mvm->mutex);
-}
-
static struct iwl_op_mode *
iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_rf_cfg *cfg,
const struct iwl_fw *fw, struct dentry *dbgfs_dir)
@@ -1411,9 +1361,6 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_rf_cfg *cfg,
wiphy_work_init(&mvm->async_handlers_wiphy_wk,
iwl_mvm_async_handlers_wiphy_wk);
- wiphy_work_init(&mvm->trig_link_selection_wk,
- iwl_mvm_trig_link_selection);
-
init_waitqueue_head(&mvm->rx_sync_waitq);
mvm->queue_sync_state = 0;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
index 8fae0d41b119..8c1bb3a7ffca 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
@@ -563,7 +563,6 @@ static void iwl_mvm_update_link_sig(struct ieee80211_vif *vif, int sig,
int thold = bss_conf->cqm_rssi_thold;
int hyst = bss_conf->cqm_rssi_hyst;
int last_event;
- s8 exit_esr_thresh;
if (sig == 0) {
IWL_DEBUG_RX(mvm, "RSSI is 0 - skip signal based decision\n");
@@ -619,27 +618,6 @@ static void iwl_mvm_update_link_sig(struct ieee80211_vif *vif, int sig,
sig,
GFP_KERNEL);
}
-
- /* ESR recalculation */
- if (!vif->cfg.assoc || !ieee80211_vif_is_mld(vif))
- return;
-
- /* We're not in EMLSR and our signal is bad, try to switch link maybe */
- if (sig < IWL_MVM_LOW_RSSI_MLO_SCAN_THRESH && !mvmvif->esr_active) {
- iwl_mvm_int_mlo_scan(mvm, vif);
- return;
- }
-
- /* We are in EMLSR, check if we need to exit */
- exit_esr_thresh =
- iwl_mvm_get_esr_rssi_thresh(mvm,
- &bss_conf->chanreq.oper,
- true);
-
- if (sig < exit_esr_thresh)
- iwl_mvm_exit_esr(mvm, vif, IWL_MVM_ESR_EXIT_LOW_RSSI,
- iwl_mvm_get_other_link(vif,
- bss_conf->link_id));
}
static void iwl_mvm_stat_iterator(void *_data, u8 *mac,
@@ -914,10 +892,6 @@ iwl_mvm_stat_iterator_all_links(struct iwl_mvm *mvm,
link_info->beacon_stats.avg_signal =
-le32_to_cpu(link_stats->beacon_average_energy);
- if (link_info->phy_ctxt &&
- link_info->phy_ctxt->channel->band == NL80211_BAND_2GHZ)
- iwl_mvm_bt_coex_update_link_esr(mvm, vif, link_id);
-
/* make sure that beacon statistics don't go backwards with TCM
* request to clear statistics
*/
@@ -956,111 +930,6 @@ iwl_mvm_stat_iterator_all_links(struct iwl_mvm *mvm,
}
}
-#define SEC_LINK_MIN_PERC 10
-#define SEC_LINK_MIN_TX 3000
-#define SEC_LINK_MIN_RX 400
-
-/* Accept a ~20% short window to avoid issues due to jitter */
-#define IWL_MVM_TPT_MIN_COUNT_WINDOW (IWL_MVM_TPT_COUNT_WINDOW_SEC * HZ * 4 / 5)
-
-static void iwl_mvm_update_esr_mode_tpt(struct iwl_mvm *mvm)
-{
- struct ieee80211_vif *bss_vif = iwl_mvm_get_bss_vif(mvm);
- struct iwl_mvm_vif *mvmvif;
- struct iwl_mvm_sta *mvmsta;
- unsigned long total_tx = 0, total_rx = 0;
- unsigned long sec_link_tx = 0, sec_link_rx = 0;
- u8 sec_link_tx_perc, sec_link_rx_perc;
- u8 sec_link;
- bool skip = false;
-
- lockdep_assert_held(&mvm->mutex);
-
- if (IS_ERR_OR_NULL(bss_vif))
- return;
-
- mvmvif = iwl_mvm_vif_from_mac80211(bss_vif);
-
- if (!mvmvif->esr_active || !mvmvif->ap_sta)
- return;
-
- mvmsta = iwl_mvm_sta_from_mac80211(mvmvif->ap_sta);
- /* We only count for the AP sta in a MLO connection */
- if (!mvmsta->mpdu_counters)
- return;
-
- /* Get the FW ID of the secondary link */
- sec_link = iwl_mvm_get_other_link(bss_vif,
- iwl_mvm_get_primary_link(bss_vif));
- if (WARN_ON(!mvmvif->link[sec_link]))
- return;
- sec_link = mvmvif->link[sec_link]->fw_link_id;
-
- /* Sum up RX and TX MPDUs from the different queues/links */
- for (int q = 0; q < mvm->trans->info.num_rxqs; q++) {
- spin_lock_bh(&mvmsta->mpdu_counters[q].lock);
-
- /* The link IDs that don't exist will contain 0 */
- for (int link = 0; link < IWL_FW_MAX_LINK_ID; link++) {
- total_tx += mvmsta->mpdu_counters[q].per_link[link].tx;
- total_rx += mvmsta->mpdu_counters[q].per_link[link].rx;
- }
-
- sec_link_tx += mvmsta->mpdu_counters[q].per_link[sec_link].tx;
- sec_link_rx += mvmsta->mpdu_counters[q].per_link[sec_link].rx;
-
- /*
- * In EMLSR we have statistics every 5 seconds, so we can reset
- * the counters upon every statistics notification.
- * The FW sends the notification regularly, but it will be
- * misaligned at the start. Skipping the measurement if it is
- * short will synchronize us.
- */
- if (jiffies - mvmsta->mpdu_counters[q].window_start <
- IWL_MVM_TPT_MIN_COUNT_WINDOW)
- skip = true;
- mvmsta->mpdu_counters[q].window_start = jiffies;
- memset(mvmsta->mpdu_counters[q].per_link, 0,
- sizeof(mvmsta->mpdu_counters[q].per_link));
-
- spin_unlock_bh(&mvmsta->mpdu_counters[q].lock);
- }
-
- if (skip) {
- IWL_DEBUG_INFO(mvm, "MPDU statistics window was short\n");
- return;
- }
-
- IWL_DEBUG_INFO(mvm, "total Tx MPDUs: %ld. total Rx MPDUs: %ld\n",
- total_tx, total_rx);
-
- /* If we don't have enough MPDUs - exit EMLSR */
- if (total_tx < IWL_MVM_ENTER_ESR_TPT_THRESH &&
- total_rx < IWL_MVM_ENTER_ESR_TPT_THRESH) {
- iwl_mvm_block_esr(mvm, bss_vif, IWL_MVM_ESR_BLOCKED_TPT,
- iwl_mvm_get_primary_link(bss_vif));
- return;
- }
-
- IWL_DEBUG_INFO(mvm, "Secondary Link %d: Tx MPDUs: %ld. Rx MPDUs: %ld\n",
- sec_link, sec_link_tx, sec_link_rx);
-
- /* Calculate the percentage of the secondary link TX/RX */
- sec_link_tx_perc = total_tx ? sec_link_tx * 100 / total_tx : 0;
- sec_link_rx_perc = total_rx ? sec_link_rx * 100 / total_rx : 0;
-
- /*
- * The TX/RX percentage is checked only if it exceeds the required
- * minimum. In addition, RX is checked only if the TX check failed.
- */
- if ((total_tx > SEC_LINK_MIN_TX &&
- sec_link_tx_perc < SEC_LINK_MIN_PERC) ||
- (total_rx > SEC_LINK_MIN_RX &&
- sec_link_rx_perc < SEC_LINK_MIN_PERC))
- iwl_mvm_exit_esr(mvm, bss_vif, IWL_MVM_ESR_EXIT_LINK_USAGE,
- iwl_mvm_get_primary_link(bss_vif));
-}
-
void iwl_mvm_handle_rx_system_oper_stats(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb)
{
@@ -1088,8 +957,6 @@ void iwl_mvm_handle_rx_system_oper_stats(struct iwl_mvm *mvm,
ieee80211_iterate_stations_atomic(mvm->hw, iwl_mvm_stats_energy_iter,
average_energy);
iwl_mvm_handle_per_phy_stats(mvm, stats->per_phy);
-
- iwl_mvm_update_esr_mode_tpt(mvm);
}
void iwl_mvm_handle_rx_system_oper_part1_stats(struct iwl_mvm *mvm,
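
The throughput check removed above exits EMLSR when the secondary link carries too small a share of traffic, and the percentage test only applies once the totals pass the SEC_LINK_MIN_* floors. A standalone sketch of that decision, with the thresholds copied from the removed defines (helper name is hypothetical):

#include <stdbool.h>

static bool sec_link_underused(unsigned long total_tx, unsigned long total_rx,
			       unsigned long sec_tx, unsigned long sec_rx)
{
	unsigned long tx_perc = total_tx ? sec_tx * 100 / total_tx : 0;
	unsigned long rx_perc = total_rx ? sec_rx * 100 / total_rx : 0;

	/* thresholds mirror the removed defines: 10%, 3000 TX, 400 RX MPDUs */
	return (total_tx > 3000 && tx_perc < 10) ||
	       (total_rx > 400 && rx_perc < 10);
}
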
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index 62e76a79f621..d35c63a673b6 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -332,6 +332,7 @@ static int iwl_mvm_rx_mgmt_prot(struct ieee80211_sta *sta,
struct ieee80211_key_conf *key;
u32 len = le16_to_cpu(desc->mpdu_len);
const u8 *frame = (void *)hdr;
+ const u8 *mmie;
if ((status & IWL_RX_MPDU_STATUS_SEC_MASK) == IWL_RX_MPDU_STATUS_SEC_NONE)
return 0;
@@ -375,11 +376,15 @@ static int iwl_mvm_rx_mgmt_prot(struct ieee80211_sta *sta,
goto report;
}
- if (len < key->icv_len + IEEE80211_GMAC_PN_LEN + 2)
+ if (len < key->icv_len)
goto report;
/* get the real key ID */
- keyid = frame[len - key->icv_len - IEEE80211_GMAC_PN_LEN - 2];
+ mmie = frame + (len - key->icv_len);
+
+ /* the position of the key_id in ieee80211_mmie_16 is the same */
+ keyid = le16_to_cpu(((const struct ieee80211_mmie *) mmie)->key_id);
+
/* and if that's the other key, look it up */
if (keyid != key->keyidx) {
/*
@@ -2099,7 +2104,6 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
struct ieee80211_sta *sta = NULL;
struct sk_buff *skb;
u8 crypt_len = 0;
- u8 sta_id = le32_get_bits(desc->status, IWL_RX_MPDU_STATUS_STA_ID);
size_t desc_size;
struct iwl_mvm_rx_phy_data phy_data = {};
u32 format;
@@ -2246,6 +2250,9 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
rcu_read_lock();
if (desc->status & cpu_to_le32(IWL_RX_MPDU_STATUS_SRC_STA_FOUND)) {
+ u8 sta_id = le32_get_bits(desc->status,
+ IWL_RX_MPDU_STATUS_STA_ID);
+
if (!WARN_ON_ONCE(sta_id >= mvm->fw->ucode_capa.num_stations)) {
struct ieee80211_link_sta *link_sta;
@@ -2373,16 +2380,6 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
iwl_mvm_agg_rx_received(mvm, reorder_data, baid);
}
-
- if (ieee80211_is_data(hdr->frame_control)) {
- u8 sub_frame_idx = desc->amsdu_info &
- IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK;
-
- /* 0 means not an A-MSDU, and 1 means a new A-MSDU */
- if (!sub_frame_idx || sub_frame_idx == 1)
- iwl_mvm_count_mpdu(mvmsta, sta_id, 1, false,
- queue);
- }
}
/* management stuff on default queue */
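
The added lookup in iwl_mvm_rx_mgmt_prot() reads the management-protection key ID from the MMIE at the tail of the frame instead of assuming a fixed offset; as the added comment notes, key_id sits at the same position in ieee80211_mmie and ieee80211_mmie_16. A minimal sketch of that tail read with standalone types (helper name is hypothetical):

#include <stddef.h>
#include <stdint.h>

/* MMIE layout: element id (1), length (1), key_id (2, LE), IPN (6), MIC */
static uint16_t mmie_key_id(const uint8_t *frame, size_t frame_len,
			    size_t mmie_len)
{
	const uint8_t *mmie = frame + frame_len - mmie_len;

	/* key_id starts two bytes into the element, little endian */
	return (uint16_t)(mmie[2] | (mmie[3] << 8));
}
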
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
index 9ce1ce0dab34..b588f1dcf20d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
@@ -1392,8 +1392,6 @@ static u32 iwl_mvm_scan_umac_ooc_priority(int type)
{
if (type == IWL_MVM_SCAN_REGULAR)
return IWL_SCAN_PRIORITY_EXT_6;
- if (type == IWL_MVM_SCAN_INT_MLO)
- return IWL_SCAN_PRIORITY_EXT_4;
return IWL_SCAN_PRIORITY_EXT_2;
}
@@ -3220,7 +3218,6 @@ void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
struct iwl_umac_scan_complete *notif = (void *)pkt->data;
u32 uid = __le32_to_cpu(notif->uid);
bool aborted = (notif->status == IWL_SCAN_OFFLOAD_ABORTED);
- bool select_links = false;
mvm->mei_scan_filter.is_mei_limited_scan = false;
@@ -3267,13 +3264,6 @@ void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
} else if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_SCHED) {
ieee80211_sched_scan_stopped(mvm->hw);
mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
- } else if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_INT_MLO) {
- IWL_DEBUG_SCAN(mvm, "Internal MLO scan completed\n");
- /*
- * Other scan types won't necessarily scan for the MLD links channels.
- * Therefore, only select links after successful internal scan.
- */
- select_links = notif->status == IWL_SCAN_OFFLOAD_COMPLETED;
}
mvm->scan_status &= ~mvm->scan_uid_status[uid];
@@ -3286,9 +3276,6 @@ void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
mvm->last_ebs_successful = false;
mvm->scan_uid_status[uid] = 0;
-
- if (select_links)
- wiphy_work_queue(mvm->hw->wiphy, &mvm->trig_link_selection_wk);
}
void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm,
@@ -3483,11 +3470,6 @@ void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm)
mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
mvm->scan_uid_status[uid] = 0;
}
- uid = iwl_mvm_scan_uid_by_status(mvm, IWL_MVM_SCAN_INT_MLO);
- if (uid >= 0) {
- IWL_DEBUG_SCAN(mvm, "Internal MLO scan aborted\n");
- mvm->scan_uid_status[uid] = 0;
- }
uid = iwl_mvm_scan_uid_by_status(mvm,
IWL_MVM_SCAN_STOPPING_REGULAR);
@@ -3583,89 +3565,6 @@ out:
return ret;
}
-static int iwl_mvm_int_mlo_scan_start(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- struct ieee80211_channel **channels,
- size_t n_channels)
-{
- struct cfg80211_scan_request *req = NULL;
- struct ieee80211_scan_ies ies = {};
- size_t size, i;
- int ret;
-
- lockdep_assert_held(&mvm->mutex);
-
- IWL_DEBUG_SCAN(mvm, "Starting Internal MLO scan: n_channels=%zu\n",
- n_channels);
-
- if (!vif->cfg.assoc || !ieee80211_vif_is_mld(vif) ||
- hweight16(vif->valid_links) == 1)
- return -EINVAL;
-
- size = struct_size(req, channels, n_channels);
- req = kzalloc(size, GFP_KERNEL);
- if (!req)
- return -ENOMEM;
-
- /* set the requested channels */
- for (i = 0; i < n_channels; i++)
- req->channels[i] = channels[i];
-
- req->n_channels = n_channels;
-
- /* set the rates */
- for (i = 0; i < NUM_NL80211_BANDS; i++)
- if (mvm->hw->wiphy->bands[i])
- req->rates[i] =
- (1 << mvm->hw->wiphy->bands[i]->n_bitrates) - 1;
-
- req->wdev = ieee80211_vif_to_wdev(vif);
- req->wiphy = mvm->hw->wiphy;
- req->scan_start = jiffies;
- req->tsf_report_link_id = -1;
-
- ret = _iwl_mvm_single_scan_start(mvm, vif, req, &ies,
- IWL_MVM_SCAN_INT_MLO);
- kfree(req);
-
- IWL_DEBUG_SCAN(mvm, "Internal MLO scan: ret=%d\n", ret);
- return ret;
-}
-
-int iwl_mvm_int_mlo_scan(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
-{
- struct ieee80211_channel *channels[IEEE80211_MLD_MAX_NUM_LINKS];
- unsigned long usable_links = ieee80211_vif_usable_links(vif);
- size_t n_channels = 0;
- u8 link_id;
-
- lockdep_assert_held(&mvm->mutex);
-
- if (mvm->scan_status & IWL_MVM_SCAN_INT_MLO) {
- IWL_DEBUG_SCAN(mvm, "Internal MLO scan is already running\n");
- return -EBUSY;
- }
-
- rcu_read_lock();
-
- for_each_set_bit(link_id, &usable_links, IEEE80211_MLD_MAX_NUM_LINKS) {
- struct ieee80211_bss_conf *link_conf =
- rcu_dereference(vif->link_conf[link_id]);
-
- if (WARN_ON_ONCE(!link_conf))
- continue;
-
- channels[n_channels++] = link_conf->chanreq.oper.chan;
- }
-
- rcu_read_unlock();
-
- if (!n_channels)
- return -EINVAL;
-
- return iwl_mvm_int_mlo_scan_start(mvm, vif, channels, n_channels);
-}
-
static int iwl_mvm_chanidx_from_phy(struct iwl_mvm *mvm,
enum nl80211_band band,
u16 phy_chan_num)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index 11c6b86db4ec..363232bb74fa 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -1835,18 +1835,6 @@ int iwl_mvm_sta_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
iwl_mvm_toggle_tx_ant(mvm, &mvm_sta->tx_ant);
- /* MPDUs are counted only when EMLSR is possible */
- if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p &&
- !sta->tdls && ieee80211_vif_is_mld(vif)) {
- mvm_sta->mpdu_counters =
- kcalloc(mvm->trans->info.num_rxqs,
- sizeof(*mvm_sta->mpdu_counters),
- GFP_KERNEL);
- if (mvm_sta->mpdu_counters)
- for (int q = 0; q < mvm->trans->info.num_rxqs; q++)
- spin_lock_init(&mvm_sta->mpdu_counters[q].lock);
- }
-
return 0;
}
@@ -4328,80 +4316,3 @@ void iwl_mvm_cancel_channel_switch(struct iwl_mvm *mvm,
if (ret)
IWL_ERR(mvm, "Failed to cancel the channel switch\n");
}
-
-static int iwl_mvm_fw_sta_id_to_fw_link_id(struct iwl_mvm_vif *mvmvif,
- u8 fw_sta_id)
-{
- struct ieee80211_link_sta *link_sta =
- rcu_dereference(mvmvif->mvm->fw_id_to_link_sta[fw_sta_id]);
- struct iwl_mvm_vif_link_info *link;
-
- if (WARN_ON_ONCE(!link_sta))
- return -EINVAL;
-
- link = mvmvif->link[link_sta->link_id];
-
- if (WARN_ON_ONCE(!link))
- return -EINVAL;
-
- return link->fw_link_id;
-}
-
-#define IWL_MVM_TPT_COUNT_WINDOW (IWL_MVM_TPT_COUNT_WINDOW_SEC * HZ)
-
-void iwl_mvm_count_mpdu(struct iwl_mvm_sta *mvm_sta, u8 fw_sta_id, u32 count,
- bool tx, int queue)
-{
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(mvm_sta->vif);
- struct iwl_mvm *mvm = mvmvif->mvm;
- struct iwl_mvm_tpt_counter *queue_counter;
- struct iwl_mvm_mpdu_counter *link_counter;
- u32 total_mpdus = 0;
- int fw_link_id;
-
- /* Count only for a BSS sta, and only when EMLSR is possible */
- if (!mvm_sta->mpdu_counters)
- return;
-
- /* Map sta id to link id */
- fw_link_id = iwl_mvm_fw_sta_id_to_fw_link_id(mvmvif, fw_sta_id);
- if (fw_link_id < 0)
- return;
-
- queue_counter = &mvm_sta->mpdu_counters[queue];
- link_counter = &queue_counter->per_link[fw_link_id];
-
- spin_lock_bh(&queue_counter->lock);
-
- if (tx)
- link_counter->tx += count;
- else
- link_counter->rx += count;
-
- /*
- * When not in EMLSR, the window and the decision to enter EMLSR are
- * handled during counting, when in EMLSR - in the statistics flow
- */
- if (mvmvif->esr_active)
- goto out;
-
- if (time_is_before_jiffies(queue_counter->window_start +
- IWL_MVM_TPT_COUNT_WINDOW)) {
- memset(queue_counter->per_link, 0,
- sizeof(queue_counter->per_link));
- queue_counter->window_start = jiffies;
-
- IWL_DEBUG_INFO(mvm, "MPDU counters are cleared\n");
- }
-
- for (int i = 0; i < IWL_FW_MAX_LINK_ID; i++)
- total_mpdus += tx ? queue_counter->per_link[i].tx :
- queue_counter->per_link[i].rx;
-
- if (total_mpdus > IWL_MVM_ENTER_ESR_TPT_THRESH)
- wiphy_work_queue(mvmvif->mvm->hw->wiphy,
- &mvmvif->unblock_esr_tpt_wk);
-
-out:
- spin_unlock_bh(&queue_counter->lock);
-}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
index f6906061510b..c25edc7c1813 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
@@ -344,24 +344,6 @@ struct iwl_mvm_link_sta {
u8 avg_energy;
};
-struct iwl_mvm_mpdu_counter {
- u32 tx;
- u32 rx;
-};
-
-/**
- * struct iwl_mvm_tpt_counter - per-queue MPDU counter
- *
- * @lock: Needed to protect the counters when modified from statistics.
- * @per_link: per-link counters.
- * @window_start: timestamp of the counting-window start
- */
-struct iwl_mvm_tpt_counter {
- spinlock_t lock;
- struct iwl_mvm_mpdu_counter per_link[IWL_FW_MAX_LINK_ID];
- unsigned long window_start;
-} ____cacheline_aligned_in_smp;
-
/**
* struct iwl_mvm_sta - representation of a station in the driver
* @vif: the interface the station belongs to
@@ -409,7 +391,6 @@ struct iwl_mvm_tpt_counter {
* @link: per link sta entries. For non-MLO only link[0] holds data. For MLO,
* link[0] points to deflink and link[link_id] is allocated when new link
* sta is added.
- * @mpdu_counters: RX/TX MPDUs counters for each queue.
*
* When mac80211 creates a station it reserves some space (hw->sta_data_size)
* in the structure for use by driver. This structure is placed in that
@@ -449,8 +430,6 @@ struct iwl_mvm_sta {
struct iwl_mvm_link_sta deflink;
struct iwl_mvm_link_sta __rcu *link[IEEE80211_MLD_MAX_NUM_LINKS];
-
- struct iwl_mvm_tpt_counter *mpdu_counters;
};
u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data);
@@ -533,9 +512,6 @@ void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
-void iwl_mvm_count_mpdu(struct iwl_mvm_sta *mvm_sta, u8 fw_sta_id, u32 count,
- bool tx, int queue);
-
/* AMPDU */
int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
int tid, u16 ssn, bool start, u16 buf_size, u16 timeout);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tests/Makefile b/drivers/net/wireless/intel/iwlwifi/mvm/tests/Makefile
index bb33f4a06f1c..2267be4cfb44 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tests/Makefile
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tests/Makefile
@@ -1,3 +1,3 @@
-iwlmvm-tests-y += module.o links.o hcmd.o
+iwlmvm-tests-y += module.o hcmd.o
obj-$(CONFIG_IWLWIFI_KUNIT_TESTS) += iwlmvm-tests.o
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tests/links.c b/drivers/net/wireless/intel/iwlwifi/mvm/tests/links.c
deleted file mode 100644
index d692f1813d44..000000000000
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tests/links.c
+++ /dev/null
@@ -1,433 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * KUnit tests for channel helper functions
- *
- * Copyright (C) 2024 Intel Corporation
- */
-#include <net/mac80211.h>
-#include "../mvm.h"
-#include <kunit/test.h>
-
-MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING");
-
-static struct wiphy wiphy = {
- .mtx = __MUTEX_INITIALIZER(wiphy.mtx),
-};
-
-static struct ieee80211_hw hw = {
- .wiphy = &wiphy,
-};
-
-static struct ieee80211_channel chan_5ghz = {
- .band = NL80211_BAND_5GHZ,
-};
-
-static struct ieee80211_channel chan_6ghz = {
- .band = NL80211_BAND_6GHZ,
-};
-
-static struct ieee80211_channel chan_2ghz = {
- .band = NL80211_BAND_2GHZ,
-};
-
-static struct cfg80211_chan_def chandef_a = {};
-
-static struct cfg80211_chan_def chandef_b = {};
-
-static struct iwl_mvm_phy_ctxt ctx = {};
-
-static struct iwl_mvm_vif_link_info mvm_link = {
- .phy_ctxt = &ctx,
- .active = true
-};
-
-static struct cfg80211_bss bss = {};
-
-static struct ieee80211_bss_conf link_conf = {.bss = &bss};
-
-static const struct iwl_fw_cmd_version entry = {
- .group = LEGACY_GROUP,
- .cmd = BT_PROFILE_NOTIFICATION,
- .notif_ver = 4
-};
-
-static struct iwl_fw fw = {
- .ucode_capa = {
- .n_cmd_versions = 1,
- .cmd_versions = &entry,
- },
-};
-
-static struct iwl_mvm mvm = {
- .hw = &hw,
- .fw = &fw,
-};
-
-static const struct link_grading_case {
- const char *desc;
- const struct cfg80211_chan_def chandef;
- s32 signal;
- s16 channel_util;
- int chan_load_by_us;
- unsigned int grade;
-} link_grading_cases[] = {
- {
- .desc = "UHB, RSSI below range, no factors",
- .chandef = {
- .chan = &chan_6ghz,
- .width = NL80211_CHAN_WIDTH_20,
- },
- .signal = -100,
- .grade = 177,
- },
- {
- .desc = "LB, RSSI in range, no factors",
- .chandef = {
- .chan = &chan_2ghz,
- .width = NL80211_CHAN_WIDTH_20,
- },
- .signal = -84,
- .grade = 344,
- },
- {
- .desc = "HB, RSSI above range, no factors",
- .chandef = {
- .chan = &chan_5ghz,
- .width = NL80211_CHAN_WIDTH_20,
- },
- .signal = -50,
- .grade = 3442,
- },
- {
- .desc = "HB, BSS Load IE (20 percent), inactive link, no puncturing factor",
- .chandef = {
- .chan = &chan_5ghz,
- .width = NL80211_CHAN_WIDTH_20,
- },
- .signal = -66,
- .channel_util = 51,
- .grade = 1836,
- },
- {
- .desc = "LB, BSS Load IE (20 percent), active link, chan_load_by_us=10 percent. No puncturing factor",
- .chandef = {
- .chan = &chan_2ghz,
- .width = NL80211_CHAN_WIDTH_20,
- },
- .signal = -61,
- .channel_util = 51,
- .chan_load_by_us = 10,
- .grade = 2061,
- },
- {
- .desc = "UHB, BSS Load IE (40 percent), active link, chan_load_by_us=50 (invalid) percent. No puncturing factor",
- .chandef = {
- .chan = &chan_6ghz,
- .width = NL80211_CHAN_WIDTH_20,
- },
- .signal = -66,
- .channel_util = 102,
- .chan_load_by_us = 50,
- .grade = 1552,
- },
- { .desc = "HB, 80 MHz, no channel load factor, punctured percentage 0",
- .chandef = {
- .chan = &chan_5ghz,
- .width = NL80211_CHAN_WIDTH_80,
- .punctured = 0x0000
- },
- .signal = -72,
- .grade = 1750,
- },
- { .desc = "HB, 160 MHz, no channel load factor, punctured percentage 25",
- .chandef = {
- .chan = &chan_5ghz,
- .width = NL80211_CHAN_WIDTH_160,
- .punctured = 0x3
- },
- .signal = -72,
- .grade = 1312,
- },
- { .desc = "UHB, 320 MHz, no channel load factor, punctured percentage 12.5 (2/16)",
- .chandef = {
- .chan = &chan_6ghz,
- .width = NL80211_CHAN_WIDTH_320,
- .punctured = 0x3
- },
- .signal = -72,
- .grade = 1806,
- },
- { .desc = "HB, 160 MHz, channel load 20, channel load by us 10, punctured percentage 25",
- .chandef = {
- .chan = &chan_5ghz,
- .width = NL80211_CHAN_WIDTH_160,
- .punctured = 0x3
- },
- .channel_util = 51,
- .chan_load_by_us = 10,
- .signal = -72,
- .grade = 1179,
- },
-};
-
-KUNIT_ARRAY_PARAM_DESC(link_grading, link_grading_cases, desc)
-
-static void setup_link_conf(struct kunit *test)
-{
- const struct link_grading_case *params = test->param_value;
- size_t vif_size = sizeof(struct ieee80211_vif) +
- sizeof(struct iwl_mvm_vif);
- struct ieee80211_vif *vif = kunit_kzalloc(test, vif_size, GFP_KERNEL);
- struct ieee80211_bss_load_elem *bss_load;
- struct element *element;
- size_t ies_size = sizeof(struct cfg80211_bss_ies) + sizeof(*bss_load) + sizeof(element);
- struct cfg80211_bss_ies *ies;
- struct iwl_mvm_vif *mvmvif;
-
- KUNIT_ASSERT_NOT_NULL(test, vif);
-
- mvmvif = iwl_mvm_vif_from_mac80211(vif);
- if (params->chan_load_by_us > 0) {
- ctx.channel_load_by_us = params->chan_load_by_us;
- mvmvif->link[0] = &mvm_link;
- }
-
- link_conf.vif = vif;
- link_conf.chanreq.oper = params->chandef;
- bss.signal = DBM_TO_MBM(params->signal);
-
- ies = kunit_kzalloc(test, ies_size, GFP_KERNEL);
- KUNIT_ASSERT_NOT_NULL(test, ies);
- ies->len = sizeof(*bss_load) + sizeof(struct element);
-
- element = (void *)ies->data;
- element->datalen = sizeof(*bss_load);
- element->id = 11;
-
- bss_load = (void *)element->data;
- bss_load->channel_util = params->channel_util;
-
- rcu_assign_pointer(bss.ies, ies);
- rcu_assign_pointer(bss.beacon_ies, ies);
-}
-
-static void test_link_grading(struct kunit *test)
-{
- const struct link_grading_case *params = test->param_value;
- unsigned int ret;
-
- setup_link_conf(test);
-
- rcu_read_lock();
- ret = iwl_mvm_get_link_grade(&link_conf);
- rcu_read_unlock();
-
- KUNIT_EXPECT_EQ(test, ret, params->grade);
-
- kunit_kfree(test, link_conf.vif);
- RCU_INIT_POINTER(bss.ies, NULL);
-}
-
-static struct kunit_case link_grading_test_cases[] = {
- KUNIT_CASE_PARAM(test_link_grading, link_grading_gen_params),
- {}
-};
-
-static struct kunit_suite link_grading = {
- .name = "iwlmvm-link-grading",
- .test_cases = link_grading_test_cases,
-};
-
-kunit_test_suite(link_grading);
-
-static const struct valid_link_pair_case {
- const char *desc;
- bool bt;
- struct ieee80211_channel *chan_a;
- struct ieee80211_channel *chan_b;
- enum nl80211_chan_width cw_a;
- enum nl80211_chan_width cw_b;
- s32 sig_a;
- s32 sig_b;
- bool csa_a;
- bool valid;
-} valid_link_pair_cases[] = {
- {
- .desc = "HB + UHB, valid.",
- .chan_a = &chan_6ghz,
- .chan_b = &chan_5ghz,
- .valid = true,
- },
- {
- .desc = "LB + HB, no BT.",
- .chan_a = &chan_2ghz,
- .chan_b = &chan_5ghz,
- .valid = true,
- },
- {
- .desc = "LB + HB, with BT.",
- .bt = true,
- .chan_a = &chan_2ghz,
- .chan_b = &chan_5ghz,
- .valid = false,
- },
- {
- .desc = "Same band",
- .chan_a = &chan_2ghz,
- .chan_b = &chan_2ghz,
- .valid = false,
- },
- {
- .desc = "RSSI: LB, 20 MHz, low",
- .chan_a = &chan_2ghz,
- .cw_a = NL80211_CHAN_WIDTH_20,
- .sig_a = -68,
- .chan_b = &chan_5ghz,
- .valid = false,
- },
- {
- .desc = "RSSI: UHB, 20 MHz, high",
- .chan_a = &chan_6ghz,
- .cw_a = NL80211_CHAN_WIDTH_20,
- .sig_a = -66,
- .chan_b = &chan_5ghz,
- .cw_b = NL80211_CHAN_WIDTH_20,
- .valid = true,
- },
- {
- .desc = "RSSI: UHB, 40 MHz, low",
- .chan_a = &chan_6ghz,
- .cw_a = NL80211_CHAN_WIDTH_40,
- .sig_a = -65,
- .chan_b = &chan_5ghz,
- .cw_b = NL80211_CHAN_WIDTH_40,
- .valid = false,
- },
- {
- .desc = "RSSI: UHB, 40 MHz, high",
- .chan_a = &chan_6ghz,
- .cw_a = NL80211_CHAN_WIDTH_40,
- .sig_a = -63,
- .chan_b = &chan_5ghz,
- .cw_b = NL80211_CHAN_WIDTH_40,
- .valid = true,
- },
- {
- .desc = "RSSI: UHB, 80 MHz, low",
- .chan_a = &chan_6ghz,
- .cw_a = NL80211_CHAN_WIDTH_80,
- .sig_a = -62,
- .chan_b = &chan_5ghz,
- .cw_b = NL80211_CHAN_WIDTH_80,
- .valid = false,
- },
- {
- .desc = "RSSI: UHB, 80 MHz, high",
- .chan_a = &chan_6ghz,
- .cw_a = NL80211_CHAN_WIDTH_80,
- .sig_a = -60,
- .chan_b = &chan_5ghz,
- .cw_b = NL80211_CHAN_WIDTH_80,
- .valid = true,
- },
- {
- .desc = "RSSI: UHB, 160 MHz, low",
- .chan_a = &chan_6ghz,
- .cw_a = NL80211_CHAN_WIDTH_160,
- .sig_a = -59,
- .chan_b = &chan_5ghz,
- .cw_b = NL80211_CHAN_WIDTH_160,
- .valid = false,
- },
- {
- .desc = "RSSI: HB, 160 MHz, high",
- .chan_a = &chan_6ghz,
- .cw_a = NL80211_CHAN_WIDTH_160,
- .sig_a = -5,
- .chan_b = &chan_5ghz,
- .cw_b = NL80211_CHAN_WIDTH_160,
- .valid = true,
- },
- {
- .desc = "CSA active",
- .chan_a = &chan_6ghz,
- .cw_a = NL80211_CHAN_WIDTH_160,
- .sig_a = -5,
- .chan_b = &chan_5ghz,
- .cw_b = NL80211_CHAN_WIDTH_160,
- .valid = false,
- /* same as previous entry with valid=true except for CSA */
- .csa_a = true,
- },
-};
-
-KUNIT_ARRAY_PARAM_DESC(valid_link_pair, valid_link_pair_cases, desc)
-
-static void test_valid_link_pair(struct kunit *test)
-{
- const struct valid_link_pair_case *params = test->param_value;
- size_t vif_size = sizeof(struct ieee80211_vif) +
- sizeof(struct iwl_mvm_vif);
- struct ieee80211_vif *vif = kunit_kzalloc(test, vif_size, GFP_KERNEL);
- struct iwl_trans *trans = kunit_kzalloc(test, sizeof(struct iwl_trans),
- GFP_KERNEL);
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_mvm_link_sel_data link_a = {
- .chandef = &chandef_a,
- .link_id = 1,
- .signal = params->sig_a,
- };
- struct iwl_mvm_link_sel_data link_b = {
- .chandef = &chandef_b,
- .link_id = 5,
- .signal = params->sig_b,
- };
- struct ieee80211_bss_conf *conf;
- bool result;
-
- KUNIT_ASSERT_NOT_NULL(test, vif);
- KUNIT_ASSERT_NOT_NULL(test, trans);
-
- chandef_a.chan = params->chan_a;
- chandef_b.chan = params->chan_b;
-
- chandef_a.width = params->cw_a ?: NL80211_CHAN_WIDTH_20;
- chandef_b.width = params->cw_b ?: NL80211_CHAN_WIDTH_20;
-
- mvm.trans = trans;
-
- mvm.last_bt_notif.wifi_loss_low_rssi = params->bt;
- mvmvif->mvm = &mvm;
-
- conf = kunit_kzalloc(test, sizeof(*vif->link_conf[0]), GFP_KERNEL);
- KUNIT_ASSERT_NOT_NULL(test, conf);
- conf->chanreq.oper = chandef_a;
- conf->csa_active = params->csa_a;
- vif->link_conf[link_a.link_id] = (void __rcu *)conf;
-
- conf = kunit_kzalloc(test, sizeof(*vif->link_conf[0]), GFP_KERNEL);
- KUNIT_ASSERT_NOT_NULL(test, conf);
- conf->chanreq.oper = chandef_b;
- vif->link_conf[link_b.link_id] = (void __rcu *)conf;
-
- wiphy_lock(&wiphy);
- result = iwl_mvm_mld_valid_link_pair(vif, &link_a, &link_b);
- wiphy_unlock(&wiphy);
-
- KUNIT_EXPECT_EQ(test, result, params->valid);
-
- kunit_kfree(test, vif);
- kunit_kfree(test, trans);
-}
-
-static struct kunit_case valid_link_pair_test_cases[] = {
- KUNIT_CASE_PARAM(test_valid_link_pair, valid_link_pair_gen_params),
- {},
-};
-
-static struct kunit_suite valid_link_pair = {
- .name = "iwlmvm-valid-link-pair",
- .test_cases = valid_link_pair_test_cases,
-};
-
-kunit_test_suite(valid_link_pair);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
index aa653782d6d7..0c9c2492d8a7 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
@@ -47,7 +47,6 @@ void iwl_mvm_te_clear_data(struct iwl_mvm *mvm,
static void iwl_mvm_cleanup_roc(struct iwl_mvm *mvm)
{
- struct ieee80211_vif *bss_vif = iwl_mvm_get_bss_vif(mvm);
struct ieee80211_vif *vif = mvm->p2p_device_vif;
lockdep_assert_held(&mvm->mutex);
@@ -125,8 +124,6 @@ static void iwl_mvm_cleanup_roc(struct iwl_mvm *mvm)
iwl_mvm_rm_aux_sta(mvm);
}
- if (!IS_ERR_OR_NULL(bss_vif))
- iwl_mvm_unblock_esr(mvm, bss_vif, IWL_MVM_ESR_BLOCKED_ROC);
mutex_unlock(&mvm->mutex);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index 25d1a882a6a0..bb97837baeda 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -1769,9 +1769,6 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
IWL_DEBUG_TX_REPLY(mvm,
"Next reclaimed packet:%d\n",
next_reclaimed);
- if (tid < IWL_MAX_TID_COUNT)
- iwl_mvm_count_mpdu(mvmsta, sta_id, 1,
- true, 0);
} else {
IWL_DEBUG_TX_REPLY(mvm,
"NDP - don't update next_reclaimed\n");
@@ -2150,13 +2147,10 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
ba_res->tx_rate, false);
}
- if (mvmsta) {
+ if (mvmsta)
iwl_mvm_tx_airtime(mvm, mvmsta,
le32_to_cpu(ba_res->wireless_time));
- iwl_mvm_count_mpdu(mvmsta, sta_id,
- le16_to_cpu(ba_res->txed), true, 0);
- }
rcu_read_unlock();
return;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
index 62da0132f383..22602c32faa5 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
@@ -22,11 +22,6 @@ int iwl_mvm_send_cmd(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd)
{
int ret;
-#if defined(CONFIG_IWLWIFI_DEBUGFS) && defined(CONFIG_PM_SLEEP)
- if (WARN_ON(mvm->d3_test_active))
- return -EIO;
-#endif
-
/*
* Synchronous commands from this op-mode must hold
* the mutex, this ensures we don't try to send two
@@ -79,11 +74,6 @@ int iwl_mvm_send_cmd_status(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd,
lockdep_assert_held(&mvm->mutex);
-#if defined(CONFIG_IWLWIFI_DEBUGFS) && defined(CONFIG_PM_SLEEP)
- if (WARN_ON(mvm->d3_test_active))
- return -EIO;
-#endif
-
/*
* Only synchronous commands can wait for status,
* we use WANT_SKB so the caller can't.
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index f9e2095d6490..b21a4d8eb105 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -124,13 +124,13 @@ VISIBLE_IF_IWLWIFI_KUNIT const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x0082, 0x1304, iwl6005_mac_cfg)},/* low 5GHz active */
{IWL_PCI_DEVICE(0x0082, 0x1305, iwl6005_mac_cfg)},/* high 5GHz active */
-/* 6x30 Series */
- {IWL_PCI_DEVICE(0x008A, 0x5305, iwl1000_mac_cfg)},
- {IWL_PCI_DEVICE(0x008A, 0x5307, iwl1000_mac_cfg)},
- {IWL_PCI_DEVICE(0x008A, 0x5325, iwl1000_mac_cfg)},
- {IWL_PCI_DEVICE(0x008A, 0x5327, iwl1000_mac_cfg)},
- {IWL_PCI_DEVICE(0x008B, 0x5315, iwl1000_mac_cfg)},
- {IWL_PCI_DEVICE(0x008B, 0x5317, iwl1000_mac_cfg)},
+/* 1030/6x30 Series */
+ {IWL_PCI_DEVICE(0x008A, 0x5305, iwl6030_mac_cfg)},
+ {IWL_PCI_DEVICE(0x008A, 0x5307, iwl6030_mac_cfg)},
+ {IWL_PCI_DEVICE(0x008A, 0x5325, iwl6030_mac_cfg)},
+ {IWL_PCI_DEVICE(0x008A, 0x5327, iwl6030_mac_cfg)},
+ {IWL_PCI_DEVICE(0x008B, 0x5315, iwl6030_mac_cfg)},
+ {IWL_PCI_DEVICE(0x008B, 0x5317, iwl6030_mac_cfg)},
{IWL_PCI_DEVICE(0x0090, 0x5211, iwl6030_mac_cfg)},
{IWL_PCI_DEVICE(0x0090, 0x5215, iwl6030_mac_cfg)},
{IWL_PCI_DEVICE(0x0090, 0x5216, iwl6030_mac_cfg)},
@@ -181,12 +181,12 @@ VISIBLE_IF_IWLWIFI_KUNIT const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x08AE, 0x1027, iwl1000_mac_cfg)},
/* 130 Series WiFi */
- {IWL_PCI_DEVICE(0x0896, 0x5005, iwl1000_mac_cfg)},
- {IWL_PCI_DEVICE(0x0896, 0x5007, iwl1000_mac_cfg)},
- {IWL_PCI_DEVICE(0x0897, 0x5015, iwl1000_mac_cfg)},
- {IWL_PCI_DEVICE(0x0897, 0x5017, iwl1000_mac_cfg)},
- {IWL_PCI_DEVICE(0x0896, 0x5025, iwl1000_mac_cfg)},
- {IWL_PCI_DEVICE(0x0896, 0x5027, iwl1000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0896, 0x5005, iwl6030_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0896, 0x5007, iwl6030_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0897, 0x5015, iwl6030_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0897, 0x5017, iwl6030_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0896, 0x5025, iwl6030_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0896, 0x5027, iwl6030_mac_cfg)},
/* 2x00 Series */
{IWL_PCI_DEVICE(0x0890, 0x4022, iwl2000_mac_cfg)},
@@ -1179,16 +1179,11 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
static void iwl_pci_remove(struct pci_dev *pdev)
{
struct iwl_trans *trans = pci_get_drvdata(pdev);
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
if (!trans)
return;
- cancel_delayed_work_sync(&trans_pcie->me_recheck_wk);
-
- iwl_drv_stop(trans->drv);
-
- iwl_trans_pcie_free(trans);
+ iwl_pcie_gen1_2_remove(trans);
}
#ifdef CONFIG_PM_SLEEP
@@ -1260,7 +1255,7 @@ static int _iwl_pci_resume(struct device *device, bool restore)
* won't really know how to recover.
*/
iwl_pcie_prepare_card_hw(trans);
- iwl_finish_nic_init(trans);
+ iwl_trans_activate_nic(trans);
iwl_op_mode_device_powered_off(trans->op_mode);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/internal.h
index f48aeebb151c..207c56e338dd 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/internal.h
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/internal.h
@@ -400,6 +400,11 @@ struct iwl_pcie_txqs {
* @me_recheck_wk: worker to recheck WiAMT/CSME presence
* @invalid_tx_cmd: invalid TX command buffer
* @wait_command_queue: wait queue for sync commands
+ * @dev_cmd_pool: pool for Tx cmd allocation - for internal use only.
+ * The user should use iwl_trans_{alloc,free}_tx_cmd.
+ * @dev_cmd_pool_name: name for the TX command allocation pool
+ * @pm_support: set to true in start_hw if link pm is supported
+ * @ltr_enabled: set to true if the LTR is enabled
*/
struct iwl_trans_pcie {
struct iwl_rxq *rxq;
@@ -506,6 +511,12 @@ struct iwl_trans_pcie {
struct iwl_dma_ptr invalid_tx_cmd;
wait_queue_head_t wait_command_queue;
+
+ struct kmem_cache *dev_cmd_pool;
+ char dev_cmd_pool_name[50];
+
+ bool pm_support;
+ bool ltr_enabled;
};
static inline struct iwl_trans_pcie *
@@ -783,6 +794,23 @@ static inline u16 iwl_txq_gen1_tfd_tb_get_len(struct iwl_trans *trans,
return le16_to_cpu(tb->hi_n_len) >> 4;
}
+static inline struct iwl_device_tx_cmd *
+iwl_pcie_gen1_2_alloc_tx_cmd(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ return kmem_cache_zalloc(trans_pcie->dev_cmd_pool, GFP_ATOMIC);
+}
+
+static inline void
+iwl_pcie_gen1_2_free_tx_cmd(struct iwl_trans *trans,
+ struct iwl_device_tx_cmd *dev_cmd)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ kmem_cache_free(trans_pcie->dev_cmd_pool, dev_cmd);
+}
+
void iwl_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
struct sk_buff_head *skbs, bool is_flush);
void iwl_pcie_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr);
@@ -818,6 +846,8 @@ static inline void _iwl_disable_interrupts(struct iwl_trans *trans)
trans_pcie->fh_init_mask);
iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
trans_pcie->hw_init_mask);
+ trans_pcie->fh_mask = 0;
+ trans_pcie->hw_mask = 0;
}
IWL_DEBUG_ISR(trans, "Disabled interrupts\n");
}
@@ -1000,6 +1030,7 @@ static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
} else {
iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
trans_pcie->fh_init_mask);
+ trans_pcie->fh_mask = 0;
iwl_enable_hw_int_msk_msix(trans,
MSIX_HW_INT_CAUSES_REG_RF_KILL);
}
@@ -1047,7 +1078,7 @@ static inline void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans) { }
void iwl_pcie_rx_allocator_work(struct work_struct *data);
/* common trans ops for all generations transports */
-void iwl_trans_pcie_op_mode_enter(struct iwl_trans *trans);
+void iwl_pcie_gen1_2_op_mode_enter(struct iwl_trans *trans);
int _iwl_trans_pcie_start_hw(struct iwl_trans *trans);
int iwl_trans_pcie_start_hw(struct iwl_trans *trans);
void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans);
@@ -1064,9 +1095,8 @@ iwl_trans_pcie_dump_data(struct iwl_trans *trans, u32 dump_mask,
const struct iwl_dump_sanitize_ops *sanitize_ops,
void *sanitize_ctx);
int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
- enum iwl_d3_status *status,
- bool test, bool reset);
-int iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test, bool reset);
+ bool reset);
+int iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool reset);
void iwl_trans_pci_interrupts(struct iwl_trans *trans, bool enable);
void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans);
void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg,
@@ -1081,6 +1111,7 @@ int iwl_pci_gen1_2_probe(struct pci_dev *pdev,
const struct pci_device_id *ent,
const struct iwl_mac_cfg *mac_cfg,
u8 __iomem *hw_base, u32 hw_rev);
+void iwl_pcie_gen1_2_remove(struct iwl_trans *trans);
/* transport gen 1 exported functions */
void iwl_trans_pcie_fw_alive(struct iwl_trans *trans);
@@ -1105,6 +1136,7 @@ int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
struct iwl_dma_ptr *ptr, size_t size);
void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr);
void iwl_pcie_apply_destination(struct iwl_trans *trans);
+int iwl_pcie_gen1_2_activate_nic(struct iwl_trans *trans);
/* transport gen 2 exported functions */
int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
@@ -1124,4 +1156,17 @@ int iwl_trans_pcie_copy_imr(struct iwl_trans *trans,
int iwl_trans_pcie_rxq_dma_data(struct iwl_trans *trans, int queue,
struct iwl_trans_rxq_dma_data *data);
+static inline bool iwl_pcie_gen1_is_pm_supported(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ return trans_pcie->pm_support;
+}
+
+static inline bool iwl_pcie_gen1_2_is_ltr_enabled(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ return trans_pcie->ltr_enabled;
+}
#endif /* __iwl_trans_int_pcie_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans-gen2.c
index 1951be3a30b7..b15c5d486527 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans-gen2.c
@@ -47,7 +47,7 @@ int iwl_pcie_gen2_apm_init(struct iwl_trans *trans)
iwl_pcie_apm_config(trans);
- ret = iwl_finish_nic_init(trans);
+ ret = iwl_trans_activate_nic(trans);
if (ret)
return ret;
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans.c
index 327366bf87de..59307b5df441 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans.c
@@ -25,13 +25,52 @@
#include "fw/dbg.h"
#include "fw/api/tx.h"
#include "fw/acpi.h"
-#include "fw/api/tx.h"
#include "mei/iwl-mei.h"
#include "internal.h"
#include "iwl-fh.h"
#include "pcie/iwl-context-info-v2.h"
#include "pcie/utils.h"
+#define IWL_HOST_MON_BLOCK_PEMON 0x00
+#define IWL_HOST_MON_BLOCK_HIPM 0x22
+
+#define IWL_HOST_MON_BLOCK_PEMON_VEC0 0x00
+#define IWL_HOST_MON_BLOCK_PEMON_VEC1 0x01
+#define IWL_HOST_MON_BLOCK_PEMON_WFPM 0x06
+
+static void iwl_dump_host_monitor_block(struct iwl_trans *trans,
+ u32 block, u32 vec, u32 iter)
+{
+ int i;
+
+ IWL_ERR(trans, "Host monitor block 0x%x vector 0x%x\n", block, vec);
+ iwl_write32(trans, CSR_MONITOR_CFG_REG, (block << 8) | vec);
+ for (i = 0; i < iter; i++)
+ IWL_ERR(trans, " value [iter %d]: 0x%08x\n",
+ i, iwl_read32(trans, CSR_MONITOR_STATUS_REG));
+}
+
+static void iwl_pcie_dump_host_monitor(struct iwl_trans *trans)
+{
+ switch (trans->mac_cfg->device_family) {
+ case IWL_DEVICE_FAMILY_22000:
+ case IWL_DEVICE_FAMILY_AX210:
+ IWL_ERR(trans, "CSR_RESET = 0x%x\n",
+ iwl_read32(trans, CSR_RESET));
+ iwl_dump_host_monitor_block(trans, IWL_HOST_MON_BLOCK_PEMON,
+ IWL_HOST_MON_BLOCK_PEMON_VEC0, 15);
+ iwl_dump_host_monitor_block(trans, IWL_HOST_MON_BLOCK_PEMON,
+ IWL_HOST_MON_BLOCK_PEMON_VEC1, 15);
+ iwl_dump_host_monitor_block(trans, IWL_HOST_MON_BLOCK_PEMON,
+ IWL_HOST_MON_BLOCK_PEMON_WFPM, 15);
+ iwl_dump_host_monitor_block(trans, IWL_HOST_MON_BLOCK_HIPM,
+ IWL_HOST_MON_BLOCK_PEMON_VEC0, 1);
+ break;
+ default:
+ return;
+ }
+}
+
/* extended range in FW SRAM */
#define IWL_FW_MEM_EXTENDED_START 0x40000
#define IWL_FW_MEM_EXTENDED_END 0x57FFF
@@ -175,13 +214,13 @@ void iwl_pcie_apm_config(struct iwl_trans *trans)
iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_DISABLED);
pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL, &lctl);
- trans->pm_support = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S);
+ trans_pcie->pm_support = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S);
pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_DEVCTL2, &cap);
- trans->ltr_enabled = cap & PCI_EXP_DEVCTL2_LTR_EN;
+ trans_pcie->ltr_enabled = cap & PCI_EXP_DEVCTL2_LTR_EN;
IWL_DEBUG_POWER(trans, "L1 %sabled - LTR %sabled\n",
(lctl & PCI_EXP_LNKCTL_ASPM_L1) ? "En" : "Dis",
- trans->ltr_enabled ? "En" : "Dis");
+ trans_pcie->ltr_enabled ? "En" : "Dis");
}
/*
@@ -228,7 +267,7 @@ static int iwl_pcie_apm_init(struct iwl_trans *trans)
if (trans->mac_cfg->base->pll_cfg)
iwl_set_bit(trans, CSR_ANA_PLL_CFG, CSR50_ANA_PLL_CFG_VAL);
- ret = iwl_finish_nic_init(trans);
+ ret = iwl_trans_activate_nic(trans);
if (ret)
return ret;
@@ -301,7 +340,7 @@ static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans)
ret = iwl_trans_pcie_sw_reset(trans, true);
if (!ret)
- ret = iwl_finish_nic_init(trans);
+ ret = iwl_trans_activate_nic(trans);
if (WARN_ON(ret)) {
/* Release XTAL ON request */
@@ -1397,17 +1436,10 @@ void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state, bool from_irq)
}
static void iwl_pcie_d3_complete_suspend(struct iwl_trans *trans,
- bool test, bool reset)
+ bool reset)
{
iwl_disable_interrupts(trans);
- /*
- * in testing mode, the host stays awake and the
- * hardware won't be reset (not even partially)
- */
- if (test)
- return;
-
iwl_pcie_disable_ict(trans);
iwl_pcie_synchronize_irqs(trans);
@@ -1478,7 +1510,7 @@ static int iwl_pcie_d3_handshake(struct iwl_trans *trans, bool suspend)
return ret;
}
-int iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test, bool reset)
+int iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool reset)
{
int ret;
@@ -1491,26 +1523,18 @@ int iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test, bool reset)
if (ret)
return ret;
- iwl_pcie_d3_complete_suspend(trans, test, reset);
+ iwl_pcie_d3_complete_suspend(trans, reset);
return 0;
}
int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
- enum iwl_d3_status *status,
- bool test, bool reset)
+ bool reset)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
u32 val;
int ret;
- if (test) {
- iwl_enable_interrupts(trans);
- *status = IWL_D3_STATUS_ALIVE;
- ret = 0;
- goto out;
- }
-
if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
iwl_set_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ);
@@ -1518,9 +1542,12 @@ int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
iwl_set_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
- ret = iwl_finish_nic_init(trans);
- if (ret)
+ ret = iwl_trans_activate_nic(trans);
+ if (ret) {
+ IWL_ERR(trans, "Failed to init nic upon resume. err = %d\n",
+ ret);
return ret;
+ }
/*
* Reconfigure IVAR table in case of MSIX or reset ict table in
@@ -1554,18 +1581,13 @@ int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
iwl_read_umac_prph(trans, WFPM_GP2));
val = iwl_read32(trans, CSR_RESET);
- if (val & CSR_RESET_REG_FLAG_NEVO_RESET)
- *status = IWL_D3_STATUS_RESET;
- else
- *status = IWL_D3_STATUS_ALIVE;
-
-out:
- if (*status == IWL_D3_STATUS_ALIVE)
- ret = iwl_pcie_d3_handshake(trans, false);
- else
+ if (val & CSR_RESET_REG_FLAG_NEVO_RESET) {
+ IWL_INFO(trans, "Device was reset during suspend\n");
trans->state = IWL_TRANS_NO_FW;
+ return -ENOENT;
+ }
- return ret;
+ return iwl_pcie_d3_handshake(trans, false);
}
static void
@@ -1744,7 +1766,7 @@ static int iwl_pcie_gen2_force_power_gating(struct iwl_trans *trans)
{
int ret;
- ret = iwl_finish_nic_init(trans);
+ ret = iwl_trans_activate_nic(trans);
if (ret < 0)
return ret;
@@ -1882,7 +1904,7 @@ void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr, u32 val)
iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val);
}
-void iwl_trans_pcie_op_mode_enter(struct iwl_trans *trans)
+void iwl_pcie_gen1_2_op_mode_enter(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -2000,6 +2022,7 @@ void iwl_trans_pcie_free(struct iwl_trans *trans)
free_percpu(trans_pcie->txqs.tso_hdr_page);
}
+ kmem_cache_destroy(trans_pcie->dev_cmd_pool);
iwl_trans_free(trans);
}
@@ -3516,7 +3539,7 @@ iwl_trans_pcie_dump_data(struct iwl_trans *trans, u32 dump_mask,
struct iwl_trans_dump_data *dump_data;
u32 len, num_rbs = 0, monitor_len = 0;
int i, ptr;
- bool dump_rbs = test_bit(STATUS_FW_ERROR, &trans->status) &&
+ bool dump_rbs = iwl_trans_is_fw_error(trans) &&
!trans->mac_cfg->mq_rx_supported &&
dump_mask & BIT(IWL_FW_ERROR_DUMP_RB);
@@ -3684,28 +3707,40 @@ void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans)
iwl_trans_sync_nmi_with_addr(trans, inta_addr, sw_err_bit);
}
-static int iwl_trans_pcie_set_txcmd_info(const struct iwl_mac_cfg *mac_cfg,
- unsigned int *txcmd_size,
- unsigned int *txcmd_align)
+static int iwl_trans_pcie_alloc_txcmd_pool(struct iwl_trans *trans)
{
- if (!mac_cfg->gen2) {
- *txcmd_size = sizeof(struct iwl_tx_cmd_v6);
- *txcmd_align = sizeof(void *);
- } else if (mac_cfg->device_family < IWL_DEVICE_FAMILY_AX210) {
- *txcmd_size = sizeof(struct iwl_tx_cmd_v9);
- *txcmd_align = 64;
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ unsigned int txcmd_size, txcmd_align;
+
+ if (!trans->mac_cfg->gen2) {
+ txcmd_size = sizeof(struct iwl_tx_cmd_v6);
+ txcmd_align = sizeof(void *);
+ } else if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_AX210) {
+ txcmd_size = sizeof(struct iwl_tx_cmd_v9);
+ txcmd_align = 64;
} else {
- *txcmd_size = sizeof(struct iwl_tx_cmd);
- *txcmd_align = 128;
+ txcmd_size = sizeof(struct iwl_tx_cmd);
+ txcmd_align = 128;
}
- *txcmd_size += sizeof(struct iwl_cmd_header);
- *txcmd_size += 36; /* biggest possible 802.11 header */
+ txcmd_size += sizeof(struct iwl_cmd_header);
+ txcmd_size += 36; /* biggest possible 802.11 header */
/* Ensure device TX cmd cannot reach/cross a page boundary in gen2 */
- if (WARN_ON((mac_cfg->gen2 && *txcmd_size >= *txcmd_align)))
+ if (WARN_ON((trans->mac_cfg->gen2 && txcmd_size >= txcmd_align)))
return -EINVAL;
+ snprintf(trans_pcie->dev_cmd_pool_name,
+ sizeof(trans_pcie->dev_cmd_pool_name),
+ "iwl_cmd_pool:%s", dev_name(trans->dev));
+
+ trans_pcie->dev_cmd_pool =
+ kmem_cache_create(trans_pcie->dev_cmd_pool_name,
+ txcmd_size, txcmd_align,
+ SLAB_HWCACHE_ALIGN, NULL);
+ if (!trans_pcie->dev_cmd_pool)
+ return -ENOMEM;
+
return 0;
}
@@ -3715,18 +3750,12 @@ iwl_trans_pcie_alloc(struct pci_dev *pdev,
struct iwl_trans_info *info, u8 __iomem *hw_base)
{
struct iwl_trans_pcie *trans_pcie, **priv;
- unsigned int txcmd_size, txcmd_align;
struct iwl_trans *trans;
unsigned int bc_tbl_n_entries;
int ret, addr_size;
- ret = iwl_trans_pcie_set_txcmd_info(mac_cfg, &txcmd_size,
- &txcmd_align);
- if (ret)
- return ERR_PTR(ret);
-
trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), &pdev->dev,
- mac_cfg, txcmd_size, txcmd_align);
+ mac_cfg);
if (!trans)
return ERR_PTR(-ENOMEM);
@@ -3737,6 +3766,10 @@ iwl_trans_pcie_alloc(struct pci_dev *pdev,
/* Initialize the wait queue for commands */
init_waitqueue_head(&trans_pcie->wait_command_queue);
+ ret = iwl_trans_pcie_alloc_txcmd_pool(trans);
+ if (ret)
+ goto out_free_trans;
+
if (trans->mac_cfg->gen2) {
trans_pcie->txqs.tfd.addr_size = 64;
trans_pcie->txqs.tfd.max_tbs = IWL_TFH_NUM_TBS;
@@ -3756,7 +3789,7 @@ iwl_trans_pcie_alloc(struct pci_dev *pdev,
trans_pcie->txqs.tso_hdr_page = alloc_percpu(struct iwl_tso_hdr_page);
if (!trans_pcie->txqs.tso_hdr_page) {
ret = -ENOMEM;
- goto out_free_trans;
+ goto out_free_txcmd_pool;
}
if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
@@ -3906,6 +3939,8 @@ out_free_ndev:
free_netdev(trans_pcie->napi_dev);
out_free_tso:
free_percpu(trans_pcie->txqs.tso_hdr_page);
+out_free_txcmd_pool:
+ kmem_cache_destroy(trans_pcie->dev_cmd_pool);
out_free_trans:
iwl_trans_free(trans);
return ERR_PTR(ret);
@@ -3999,6 +4034,7 @@ static void get_crf_id(struct iwl_trans *iwl_trans,
/* Read cdb info (also contains the jacket info if needed in the future */
hw_wfpm_id = iwl_read_umac_prph_no_grab(iwl_trans, WFPM_OTP_CFG1_ADDR);
+
IWL_INFO(iwl_trans, "Detected crf-id 0x%x, cnv-id 0x%x wfpm id 0x%x\n",
info->hw_crf_id, info->hw_cnv_id, hw_wfpm_id);
}
@@ -4014,10 +4050,8 @@ static int map_crf_id(struct iwl_trans *iwl_trans,
u32 val = info->hw_crf_id;
u32 step_id = REG_CRF_ID_STEP(val);
u32 slave_id = REG_CRF_ID_SLAVE(val);
- u32 jacket_id_cnv = REG_CRF_ID_SLAVE(info->hw_cnv_id);
u32 hw_wfpm_id = iwl_read_umac_prph_no_grab(iwl_trans,
WFPM_OTP_CFG1_ADDR);
- u32 jacket_id_wfpm = WFPM_OTP_CFG1_IS_JACKET(hw_wfpm_id);
u32 cdb_id_wfpm = WFPM_OTP_CFG1_IS_CDB(hw_wfpm_id);
/* Map between crf id to rf id */
@@ -4066,21 +4100,12 @@ static int map_crf_id(struct iwl_trans *iwl_trans,
IWL_INFO(iwl_trans, "Adding cdb to rf id\n");
}
- /* Set Jacket capabilities */
- if (jacket_id_wfpm || jacket_id_cnv) {
- info->hw_rf_id += BIT(29);
- IWL_INFO(iwl_trans, "Adding jacket to rf id\n");
- }
-
IWL_INFO(iwl_trans,
"Detected rf-type 0x%x step-id 0x%x slave-id 0x%x from crf id 0x%x\n",
REG_CRF_ID_TYPE(val), step_id, slave_id, info->hw_rf_id);
IWL_INFO(iwl_trans,
- "Detected cdb-id 0x%x jacket-id 0x%x from wfpm id 0x%x\n",
- cdb_id_wfpm, jacket_id_wfpm, hw_wfpm_id);
- IWL_INFO(iwl_trans, "Detected jacket-id 0x%x from cnvi id 0x%x\n",
- jacket_id_cnv, info->hw_cnv_id);
-
+ "Detected cdb-id 0x%x from wfpm id 0x%x\n",
+ cdb_id_wfpm, hw_wfpm_id);
out:
return ret;
}
@@ -4163,7 +4188,7 @@ int iwl_pci_gen1_2_probe(struct pci_dev *pdev,
*/
ret = iwl_pcie_prepare_card_hw(iwl_trans);
if (!ret) {
- ret = iwl_finish_nic_init(iwl_trans);
+ ret = iwl_trans_activate_nic(iwl_trans);
if (ret)
goto out_free_trans;
if (iwl_trans_grab_nic_access(iwl_trans)) {
@@ -4271,3 +4296,63 @@ out_free_trans:
iwl_trans_pcie_free(iwl_trans);
return ret;
}
+
+void iwl_pcie_gen1_2_remove(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ cancel_delayed_work_sync(&trans_pcie->me_recheck_wk);
+
+ iwl_drv_stop(trans->drv);
+
+ iwl_trans_pcie_free(trans);
+}
+
+int iwl_pcie_gen1_2_activate_nic(struct iwl_trans *trans)
+{
+ const struct iwl_mac_cfg *mac_cfg = trans->mac_cfg;
+ u32 poll_ready;
+ int err;
+
+ if (mac_cfg->bisr_workaround) {
+ /* ensure the TOP FSM isn't still in previous reset */
+ mdelay(2);
+ }
+
+ /*
+ * Set "initialization complete" bit to move adapter from
+ * D0U* --> D0A* (powered-up active) state.
+ */
+ if (mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
+ iwl_set_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ |
+ CSR_GP_CNTRL_REG_FLAG_MAC_INIT);
+ poll_ready = CSR_GP_CNTRL_REG_FLAG_MAC_STATUS;
+ } else {
+ iwl_set_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+ poll_ready = CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY;
+ }
+
+ if (mac_cfg->device_family == IWL_DEVICE_FAMILY_8000)
+ udelay(2);
+
+ /*
+ * Wait for clock stabilization; once stabilized, access to
+ * device-internal resources is supported, e.g. iwl_write_prph()
+ * and accesses to uCode SRAM.
+ */
+ err = iwl_poll_bits(trans, CSR_GP_CNTRL, poll_ready, 25000);
+ if (err < 0) {
+ IWL_DEBUG_INFO(trans, "Failed to wake NIC\n");
+
+ iwl_pcie_dump_host_monitor(trans);
+ }
+
+ if (mac_cfg->bisr_workaround) {
+ /* ensure BISR shift has finished */
+ udelay(200);
+ }
+
+ return err;
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/tx.c
index d912e709a92c..2201c148c9dd 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/tx.c
@@ -2602,8 +2602,9 @@ static int iwl_trans_pcie_send_hcmd_sync(struct iwl_trans *trans,
}
if (test_bit(STATUS_FW_ERROR, &trans->status)) {
- if (!test_and_clear_bit(STATUS_SUPPRESS_CMD_ERROR_ONCE,
- &trans->status)) {
+ if (trans->suppress_cmd_error_once) {
+ trans->suppress_cmd_error_once = false;
+ } else {
IWL_ERR(trans, "FW error in SYNC CMD %s\n", cmd_str);
dump_stack();
}
diff --git a/drivers/net/wireless/intel/iwlwifi/tests/Makefile b/drivers/net/wireless/intel/iwlwifi/tests/Makefile
index 1b49241c578f..b996c45d43e7 100644
--- a/drivers/net/wireless/intel/iwlwifi/tests/Makefile
+++ b/drivers/net/wireless/intel/iwlwifi/tests/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
-iwlwifi-tests-y += module.o devinfo.o utils.o
+iwlwifi-tests-y += module.o devinfo.o utils.o nvm_parse.o
ccflags-y += -I$(src)/../
diff --git a/drivers/net/wireless/intel/iwlwifi/tests/nvm_parse.c b/drivers/net/wireless/intel/iwlwifi/tests/nvm_parse.c
new file mode 100644
index 000000000000..853911900bfd
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/tests/nvm_parse.c
@@ -0,0 +1,72 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * KUnit tests for NVM parse
+ *
+ * Copyright (C) 2025 Intel Corporation
+ */
+#include <kunit/static_stub.h>
+#include <kunit/test.h>
+#include <iwl-nvm-parse.h>
+
+MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING");
+
+static const struct nvm_flag_case {
+ const char *desc;
+ u16 nvm_flags;
+ u32 reg_rule_flags;
+ u32 set_reg_rule_flags;
+ u32 clear_reg_rule_flags;
+} nvm_flag_cases[] = {
+ {
+ .desc = "Restricting VLP client and AP access",
+ .nvm_flags = 0,
+ .set_reg_rule_flags = NL80211_RRF_NO_6GHZ_VLP_CLIENT,
+ .clear_reg_rule_flags = NL80211_RRF_ALLOW_6GHZ_VLP_AP,
+ },
+ {
+ .desc = "Allow VLP client and AP access",
+ .nvm_flags = NVM_CHANNEL_VLP,
+ .set_reg_rule_flags = NL80211_RRF_ALLOW_6GHZ_VLP_AP,
+ .clear_reg_rule_flags = NL80211_RRF_NO_6GHZ_VLP_CLIENT,
+ },
+ {
+ .desc = "Allow VLP client access, while restricting AP access",
+ .nvm_flags = NVM_CHANNEL_VLP | NVM_CHANNEL_VLP_AP_NOT_ALLOWED,
+ .set_reg_rule_flags = 0,
+ .clear_reg_rule_flags = NL80211_RRF_ALLOW_6GHZ_VLP_AP |
+ NL80211_RRF_NO_6GHZ_VLP_CLIENT,
+ },
+};
+
+KUNIT_ARRAY_PARAM_DESC(nvm_flag, nvm_flag_cases, desc)
+
+static void test_nvm_flags(struct kunit *test)
+{
+ const struct nvm_flag_case *params = test->param_value;
+ struct iwl_reg_capa reg_capa = {};
+ u32 flags = 0;
+
+ flags = iwl_nvm_get_regdom_bw_flags(NULL, 0, params->nvm_flags,
+ reg_capa);
+
+ if ((params->set_reg_rule_flags & flags) != params->set_reg_rule_flags)
+ KUNIT_FAIL(test, "Expected set bits:0x%08x flags:0x%08x\n",
+ params->set_reg_rule_flags, flags);
+
+ if (params->clear_reg_rule_flags & flags)
+ KUNIT_FAIL(test, "Expected clear bits:0x%08x flags:0x%08x\n",
+ params->clear_reg_rule_flags, flags);
+}
+
+static struct kunit_case nvm_flags_test_cases[] = {
+ KUNIT_CASE_PARAM(test_nvm_flags,
+ nvm_flag_gen_params),
+ {},
+};
+
+static struct kunit_suite nvm_flags_suite = {
+ .name = "iwlwifi-nvm_flags",
+ .test_cases = nvm_flags_test_cases,
+};
+
+kunit_test_suite(nvm_flags_suite);
diff --git a/drivers/net/wireless/intersil/p54/txrx.c b/drivers/net/wireless/intersil/p54/txrx.c
index 2deb1bb54f24..1294a1d6528e 100644
--- a/drivers/net/wireless/intersil/p54/txrx.c
+++ b/drivers/net/wireless/intersil/p54/txrx.c
@@ -317,7 +317,7 @@ static void p54_pspoll_workaround(struct p54_common *priv, struct sk_buff *skb)
tim_len = tim[1];
tim_ie = (struct ieee80211_tim_ie *) &tim[2];
- new_psm = ieee80211_check_tim(tim_ie, tim_len, priv->aid);
+ new_psm = ieee80211_check_tim(tim_ie, tim_len, priv->aid, false);
if (new_psm != priv->powersave_override) {
priv->powersave_override = new_psm;
p54_set_ps(priv);
diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
index 4c8c7a5fdf23..be23a29e7de0 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
@@ -686,10 +686,9 @@ static void mwifiex_reg_notifier(struct wiphy *wiphy,
return;
}
- /* Don't send world or same regdom info to firmware */
- if (strncmp(request->alpha2, "00", 2) &&
- strncmp(request->alpha2, adapter->country_code,
- sizeof(request->alpha2))) {
+ /* Don't send same regdom info to firmware */
+ if (strncmp(request->alpha2, adapter->country_code,
+ sizeof(request->alpha2)) != 0) {
memcpy(adapter->country_code, request->alpha2,
sizeof(request->alpha2));
mwifiex_send_domain_info_cmd_fw(wiphy);
diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c
index 1ec069bc8ea1..ff177b06f42d 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.c
+++ b/drivers/net/wireless/marvell/mwifiex/main.c
@@ -494,6 +494,11 @@ static void mwifiex_free_adapter(struct mwifiex_adapter *adapter)
return;
}
+ if (adapter->rgpower_data) {
+ release_firmware(adapter->rgpower_data);
+ adapter->rgpower_data = NULL;
+ }
+
mwifiex_unregister(adapter);
pr_debug("info: %s: free adapter\n", __func__);
}
diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h
index 9ac36bef980e..27559e2ddc31 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.h
+++ b/drivers/net/wireless/marvell/mwifiex/main.h
@@ -982,6 +982,7 @@ struct mwifiex_adapter {
u8 country_code[IEEE80211_COUNTRY_STRING_LEN];
u16 max_mgmt_ie_index;
const struct firmware *cal_data;
+ const struct firmware *rgpower_data;
struct device_node *dt_node;
/* 11AC */
@@ -1579,6 +1580,8 @@ int mwifiex_11h_handle_event_chanswann(struct mwifiex_private *priv);
int mwifiex_dnld_dt_cfgdata(struct mwifiex_private *priv,
struct device_node *node, const char *prefix);
void mwifiex_dnld_txpwr_table(struct mwifiex_private *priv);
+int mwifiex_send_rgpower_table(struct mwifiex_private *priv, const u8 *data,
+ const size_t size);
extern const struct ethtool_ops mwifiex_ethtool_ops;
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c
index c93281f5a47c..dcca71158fc6 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c
@@ -1483,6 +1483,119 @@ int mwifiex_dnld_dt_cfgdata(struct mwifiex_private *priv,
return 0;
}
+static int mwifiex_rgpower_table_advance_to_content(u8 **pos, const u8 *data,
+ const size_t size)
+{
+ while (*pos - data < size) {
+ /* skip spaces, tabs and empty lines */
+ if (**pos == '\r' || **pos == '\n' || **pos == '\0' ||
+ isspace(**pos)) {
+ (*pos)++;
+ continue;
+ }
+ /* skip line comments */
+ if (**pos == '#') {
+ *pos = strchr(*pos, '\n');
+ if (!*pos)
+ return -EINVAL;
+ (*pos)++;
+ continue;
+ }
+ return 0;
+ }
+ return 0;
+}
+
+int mwifiex_send_rgpower_table(struct mwifiex_private *priv, const u8 *data,
+ const size_t size)
+{
+ int ret = 0;
+ bool start_raw = false;
+ u8 *ptr, *token, *pos = NULL;
+ u8 *_data __free(kfree) = NULL;
+ struct mwifiex_adapter *adapter = priv->adapter;
+ struct mwifiex_ds_misc_cmd *hostcmd __free(kfree) = NULL;
+
+ hostcmd = kzalloc(sizeof(*hostcmd), GFP_KERNEL);
+ if (!hostcmd)
+ return -ENOMEM;
+
+ _data = kmemdup(data, size, GFP_KERNEL);
+ if (!_data)
+ return -ENOMEM;
+
+ pos = _data;
+ ptr = hostcmd->cmd;
+ while ((pos - _data) < size) {
+ ret = mwifiex_rgpower_table_advance_to_content(&pos, _data, size);
+ if (ret) {
+ mwifiex_dbg(
+ adapter, ERROR,
+ "%s: failed to advance to content in rgpower table\n",
+ __func__);
+ return ret;
+ }
+
+ if (*pos == '}' && start_raw) {
+ hostcmd->len = get_unaligned_le16(&hostcmd->cmd[2]);
+ ret = mwifiex_send_cmd(priv, 0, 0, 0, hostcmd, false);
+ if (ret) {
+ mwifiex_dbg(adapter, ERROR,
+ "%s: failed to send hostcmd %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ memset(hostcmd->cmd, 0, MWIFIEX_SIZE_OF_CMD_BUFFER);
+ ptr = hostcmd->cmd;
+ start_raw = false;
+ pos++;
+ continue;
+ }
+
+ if (!start_raw) {
+ pos = strchr(pos, '=');
+ if (pos) {
+ pos = strchr(pos, '{');
+ if (pos) {
+ start_raw = true;
+ pos++;
+ continue;
+ }
+ }
+ mwifiex_dbg(adapter, ERROR,
+ "%s: syntax error in hostcmd\n", __func__);
+ return -EINVAL;
+ }
+
+ if (start_raw) {
+ while ((*pos != '\r' && *pos != '\n') &&
+ (token = strsep((char **)&pos, " "))) {
+ if (ptr - hostcmd->cmd >=
+ MWIFIEX_SIZE_OF_CMD_BUFFER) {
+ mwifiex_dbg(
+ adapter, ERROR,
+ "%s: hostcmd is larger than %d, aborting\n",
+ __func__, MWIFIEX_SIZE_OF_CMD_BUFFER);
+ return -ENOMEM;
+ }
+
+ ret = kstrtou8(token, 16, ptr);
+ if (ret < 0) {
+ mwifiex_dbg(
+ adapter, ERROR,
+ "%s: failed to parse hostcmd %d token: %s\n",
+ __func__, ret, token);
+ return ret;
+ }
+ ptr++;
+ }
+ }
+ }
+
+ return ret;
+}
+
/* This function prepares command of set_cfg_data. */
static int mwifiex_cmd_cfg_data(struct mwifiex_private *priv,
struct host_cmd_ds_command *cmd, void *data_buf)
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
index f79589cafe57..ef6722ffdc74 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
@@ -180,7 +180,7 @@ int mwifiex_fill_new_bss_desc(struct mwifiex_private *priv,
return mwifiex_update_bss_desc_with_ie(priv->adapter, bss_desc);
}
-void mwifiex_dnld_txpwr_table(struct mwifiex_private *priv)
+static void mwifiex_dnld_dt_txpwr_table(struct mwifiex_private *priv)
{
if (priv->adapter->dt_node) {
char txpwr[] = {"marvell,00_txpwrlimit"};
@@ -190,6 +190,62 @@ void mwifiex_dnld_txpwr_table(struct mwifiex_private *priv)
}
}
+static int mwifiex_request_rgpower_table(struct mwifiex_private *priv)
+{
+ struct mwifiex_802_11d_domain_reg *domain_info = &priv->adapter->domain_reg;
+ struct mwifiex_adapter *adapter = priv->adapter;
+ char rgpower_table_name[30];
+ char country_code[3];
+
+ strscpy(country_code, domain_info->country_code, sizeof(country_code));
+
+ /* World regulatory domain "00" has WW as country code */
+ if (strncmp(country_code, "00", 2) == 0)
+ strscpy(country_code, "WW", sizeof(country_code));
+
+ snprintf(rgpower_table_name, sizeof(rgpower_table_name),
+ "nxp/rgpower_%s.bin", country_code);
+
+ mwifiex_dbg(adapter, INFO, "info: %s: requesting regulatory power table %s\n",
+ __func__, rgpower_table_name);
+
+ if (adapter->rgpower_data) {
+ release_firmware(adapter->rgpower_data);
+ adapter->rgpower_data = NULL;
+ }
+
+ if ((request_firmware(&adapter->rgpower_data, rgpower_table_name,
+ adapter->dev))) {
+ mwifiex_dbg(
+ adapter, INFO,
+ "info: %s: failed to request regulatory power table\n",
+ __func__);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int mwifiex_dnld_rgpower_table(struct mwifiex_private *priv)
+{
+ int ret;
+
+ ret = mwifiex_request_rgpower_table(priv);
+ if (ret)
+ return ret;
+
+ return mwifiex_send_rgpower_table(priv, priv->adapter->rgpower_data->data,
+ priv->adapter->rgpower_data->size);
+}
+
+void mwifiex_dnld_txpwr_table(struct mwifiex_private *priv)
+{
+ if (mwifiex_dnld_rgpower_table(priv) == 0)
+ return;
+
+ mwifiex_dnld_dt_txpwr_table(priv);
+}
+
static int mwifiex_process_country_ie(struct mwifiex_private *priv,
struct cfg80211_bss *bss)
{
diff --git a/drivers/net/wireless/microchip/wilc1000/cfg80211.c b/drivers/net/wireless/microchip/wilc1000/cfg80211.c
index a395829ebadf..c39e7f313ea1 100644
--- a/drivers/net/wireless/microchip/wilc1000/cfg80211.c
+++ b/drivers/net/wireless/microchip/wilc1000/cfg80211.c
@@ -794,12 +794,6 @@ static int get_station(struct wiphy *wiphy, struct net_device *dev,
return 0;
}
-static int change_bss(struct wiphy *wiphy, struct net_device *dev,
- struct bss_parameters *params)
-{
- return 0;
-}
-
static int set_wiphy_params(struct wiphy *wiphy, int radio_idx, u32 changed)
{
int ret = -EINVAL;
@@ -1709,7 +1703,6 @@ static const struct cfg80211_ops wilc_cfg80211_ops = {
.change_station = change_station,
.get_station = get_station,
.dump_station = dump_station,
- .change_bss = change_bss,
.set_wiphy_params = set_wiphy_params,
.external_auth = external_auth,
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
index 7db29e90eb4f..f8a6f9c968a1 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
@@ -679,7 +679,7 @@ static void rt2x00lib_rxdone_check_ps(struct rt2x00_dev *rt2x00dev,
/* Check whenever the PHY can be turned off again. */
/* 1. What about buffered unicast traffic for our AID? */
- cam = ieee80211_check_tim(tim_ie, tim_len, rt2x00dev->aid);
+ cam = ieee80211_check_tim(tim_ie, tim_len, rt2x00dev->aid, false);
/* 2. Maybe the AP wants to send multicast/broadcast data? */
cam |= (tim_ie->bitmap_ctrl & 0x01);
diff --git a/drivers/net/wireless/realtek/rtlwifi/ps.c b/drivers/net/wireless/realtek/rtlwifi/ps.c
index 6241e4fed4f6..bcab12c3b4c1 100644
--- a/drivers/net/wireless/realtek/rtlwifi/ps.c
+++ b/drivers/net/wireless/realtek/rtlwifi/ps.c
@@ -519,7 +519,7 @@ void rtl_swlps_beacon(struct ieee80211_hw *hw, void *data, unsigned int len)
/* 1. What about buffered unicast traffic for our AID? */
u_buffed = ieee80211_check_tim(tim_ie, tim_len,
- rtlpriv->mac80211.assoc_id);
+ rtlpriv->mac80211.assoc_id, false);
/* 2. Maybe the AP wants to send multicast/broadcast data? */
m_buffed = tim_ie->bitmap_ctrl & 0x01;
diff --git a/drivers/net/wireless/virtual/virt_wifi.c b/drivers/net/wireless/virtual/virt_wifi.c
index 1fffeff2190c..4eae89376feb 100644
--- a/drivers/net/wireless/virtual/virt_wifi.c
+++ b/drivers/net/wireless/virtual/virt_wifi.c
@@ -277,7 +277,9 @@ static void virt_wifi_connect_complete(struct work_struct *work)
priv->is_connected = true;
/* Schedules an event that acquires the rtnl lock. */
- cfg80211_connect_result(priv->upperdev, requested_bss, NULL, 0, NULL, 0,
+ cfg80211_connect_result(priv->upperdev,
+ priv->is_connected ? fake_router_bssid : NULL,
+ NULL, 0, NULL, 0,
status, GFP_KERNEL);
netif_carrier_on(priv->upperdev);
}
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 812c1565114f..6b7493934535 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -903,6 +903,15 @@ static void nvme_set_ref_tag(struct nvme_ns *ns, struct nvme_command *cmnd,
u32 upper, lower;
u64 ref48;
+ /* only type1 and type 2 PI formats have a reftag */
+ switch (ns->head->pi_type) {
+ case NVME_NS_DPS_PI_TYPE1:
+ case NVME_NS_DPS_PI_TYPE2:
+ break;
+ default:
+ return;
+ }
+
/* both rw and write zeroes share the same reftag format */
switch (ns->head->guard_type) {
case NVME_NVM_NS_16B_GUARD:
@@ -942,13 +951,7 @@ static inline blk_status_t nvme_setup_write_zeroes(struct nvme_ns *ns,
if (nvme_ns_has_pi(ns->head)) {
cmnd->write_zeroes.control |= cpu_to_le16(NVME_RW_PRINFO_PRACT);
-
- switch (ns->head->pi_type) {
- case NVME_NS_DPS_PI_TYPE1:
- case NVME_NS_DPS_PI_TYPE2:
- nvme_set_ref_tag(ns, cmnd, req);
- break;
- }
+ nvme_set_ref_tag(ns, cmnd, req);
}
return BLK_STS_OK;
@@ -1039,6 +1042,7 @@ static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
if (WARN_ON_ONCE(!nvme_ns_has_pi(ns->head)))
return BLK_STS_NOTSUPP;
control |= NVME_RW_PRINFO_PRACT;
+ nvme_set_ref_tag(ns, cmnd, req);
}
if (bio_integrity_flagged(req->bio, BIP_CHECK_GUARD))
diff --git a/drivers/pcmcia/Kconfig b/drivers/pcmcia/Kconfig
index dddb235dd020..660a95805524 100644
--- a/drivers/pcmcia/Kconfig
+++ b/drivers/pcmcia/Kconfig
@@ -250,7 +250,4 @@ config ELECTRA_CF
config PCCARD_NONSTATIC
bool
-config PCCARD_IODYN
- bool
-
endif # PCCARD
diff --git a/drivers/pcmcia/Makefile b/drivers/pcmcia/Makefile
index c9d51b150682..d16a0317ce43 100644
--- a/drivers/pcmcia/Makefile
+++ b/drivers/pcmcia/Makefile
@@ -12,7 +12,6 @@ obj-$(CONFIG_PCMCIA) += pcmcia.o
pcmcia_rsrc-y += rsrc_mgr.o
pcmcia_rsrc-$(CONFIG_PCCARD_NONSTATIC) += rsrc_nonstatic.o
-pcmcia_rsrc-$(CONFIG_PCCARD_IODYN) += rsrc_iodyn.o
obj-$(CONFIG_PCCARD) += pcmcia_rsrc.o
diff --git a/drivers/pcmcia/cs.c b/drivers/pcmcia/cs.c
index c75f55e1250a..adbc486af2ea 100644
--- a/drivers/pcmcia/cs.c
+++ b/drivers/pcmcia/cs.c
@@ -229,23 +229,6 @@ void pcmcia_unregister_socket(struct pcmcia_socket *socket)
EXPORT_SYMBOL(pcmcia_unregister_socket);
-struct pcmcia_socket *pcmcia_get_socket_by_nr(unsigned int nr)
-{
- struct pcmcia_socket *s;
-
- down_read(&pcmcia_socket_list_rwsem);
- list_for_each_entry(s, &pcmcia_socket_list, socket_list)
- if (s->sock == nr) {
- up_read(&pcmcia_socket_list_rwsem);
- return s;
- }
- up_read(&pcmcia_socket_list_rwsem);
-
- return NULL;
-
-}
-EXPORT_SYMBOL(pcmcia_get_socket_by_nr);
-
static int socket_reset(struct pcmcia_socket *skt)
{
int status, i;
diff --git a/drivers/pcmcia/cs_internal.h b/drivers/pcmcia/cs_internal.h
index 02a83ca44e77..5ac810ffda31 100644
--- a/drivers/pcmcia/cs_internal.h
+++ b/drivers/pcmcia/cs_internal.h
@@ -116,7 +116,6 @@ extern struct list_head pcmcia_socket_list;
extern const struct class pcmcia_socket_class;
int pccard_register_pcmcia(struct pcmcia_socket *s, struct pcmcia_callback *c);
-struct pcmcia_socket *pcmcia_get_socket_by_nr(unsigned int nr);
void pcmcia_parse_uevents(struct pcmcia_socket *socket, unsigned int events);
#define PCMCIA_UEVENT_EJECT 0x0001
diff --git a/drivers/pcmcia/ds.c b/drivers/pcmcia/ds.c
index da6f66f357cc..18f4eef28dbc 100644
--- a/drivers/pcmcia/ds.c
+++ b/drivers/pcmcia/ds.c
@@ -1308,7 +1308,7 @@ static int pcmcia_bus_early_resume(struct pcmcia_socket *skt)
* physically present, even if the call to this function returns
* non-NULL. Furthermore, the device driver most likely is unbound
* almost immediately, so the timeframe where pcmcia_dev_present
- * returns NULL is probably really really small.
+ * returns NULL is probably really, really small.
*/
struct pcmcia_device *pcmcia_dev_present(struct pcmcia_device *_p_dev)
{
diff --git a/drivers/pcmcia/omap_cf.c b/drivers/pcmcia/omap_cf.c
index 1b1dff56ec7b..d6f24c7d1562 100644
--- a/drivers/pcmcia/omap_cf.c
+++ b/drivers/pcmcia/omap_cf.c
@@ -215,6 +215,8 @@ static int __init omap_cf_probe(struct platform_device *pdev)
return -EINVAL;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -EINVAL;
cf = kzalloc(sizeof *cf, GFP_KERNEL);
if (!cf)
@@ -302,7 +304,13 @@ static void __exit omap_cf_remove(struct platform_device *pdev)
kfree(cf);
}
-static struct platform_driver omap_cf_driver = {
+/*
+ * omap_cf_remove() lives in .exit.text. For drivers registered via
+ * platform_driver_probe() this is OK because they cannot be unbound at
+ * runtime. So mark the driver struct with __refdata to prevent modpost
+ * from triggering a section mismatch warning.
+ */
+static struct platform_driver omap_cf_driver __refdata = {
.driver = {
.name = driver_name,
},
diff --git a/drivers/pcmcia/rsrc_iodyn.c b/drivers/pcmcia/rsrc_iodyn.c
deleted file mode 100644
index b04b16496b0c..000000000000
--- a/drivers/pcmcia/rsrc_iodyn.c
+++ /dev/null
@@ -1,168 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * rsrc_iodyn.c -- Resource management routines for MEM-static sockets.
- *
- * The initial developer of the original code is David A. Hinds
- * <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
- * are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
- *
- * (C) 1999 David A. Hinds
- */
-
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-
-#include <pcmcia/ss.h>
-#include <pcmcia/cistpl.h>
-#include "cs_internal.h"
-
-
-struct pcmcia_align_data {
- unsigned long mask;
- unsigned long offset;
-};
-
-static resource_size_t pcmcia_align(void *align_data,
- const struct resource *res,
- resource_size_t size, resource_size_t align)
-{
- struct pcmcia_align_data *data = align_data;
- resource_size_t start;
-
- start = (res->start & ~data->mask) + data->offset;
- if (start < res->start)
- start += data->mask + 1;
-
-#ifdef CONFIG_X86
- if (res->flags & IORESOURCE_IO) {
- if (start & 0x300)
- start = (start + 0x3ff) & ~0x3ff;
- }
-#endif
-
-#ifdef CONFIG_M68K
- if (res->flags & IORESOURCE_IO) {
- if ((res->start + size - 1) >= 1024)
- start = res->end;
- }
-#endif
-
- return start;
-}
-
-
-static struct resource *__iodyn_find_io_region(struct pcmcia_socket *s,
- unsigned long base, int num,
- unsigned long align)
-{
- struct resource *res = pcmcia_make_resource(0, num, IORESOURCE_IO,
- dev_name(&s->dev));
- struct pcmcia_align_data data;
- unsigned long min = base;
- int ret;
-
- data.mask = align - 1;
- data.offset = base & data.mask;
-
-#ifdef CONFIG_PCI
- if (s->cb_dev) {
- ret = pci_bus_alloc_resource(s->cb_dev->bus, res, num, 1,
- min, 0, pcmcia_align, &data);
- } else
-#endif
- ret = allocate_resource(&ioport_resource, res, num, min, ~0UL,
- 1, pcmcia_align, &data);
-
- if (ret != 0) {
- kfree(res);
- res = NULL;
- }
- return res;
-}
-
-static int iodyn_find_io(struct pcmcia_socket *s, unsigned int attr,
- unsigned int *base, unsigned int num,
- unsigned int align, struct resource **parent)
-{
- int i, ret = 0;
-
- /* Check for an already-allocated window that must conflict with
- * what was asked for. It is a hack because it does not catch all
- * potential conflicts, just the most obvious ones.
- */
- for (i = 0; i < MAX_IO_WIN; i++) {
- if (!s->io[i].res)
- continue;
-
- if (!*base)
- continue;
-
- if ((s->io[i].res->start & (align-1)) == *base)
- return -EBUSY;
- }
-
- for (i = 0; i < MAX_IO_WIN; i++) {
- struct resource *res = s->io[i].res;
- unsigned int try;
-
- if (res && (res->flags & IORESOURCE_BITS) !=
- (attr & IORESOURCE_BITS))
- continue;
-
- if (!res) {
- if (align == 0)
- align = 0x10000;
-
- res = s->io[i].res = __iodyn_find_io_region(s, *base,
- num, align);
- if (!res)
- return -EINVAL;
-
- *base = res->start;
- s->io[i].res->flags =
- ((res->flags & ~IORESOURCE_BITS) |
- (attr & IORESOURCE_BITS));
- s->io[i].InUse = num;
- *parent = res;
- return 0;
- }
-
- /* Try to extend top of window */
- try = res->end + 1;
- if ((*base == 0) || (*base == try)) {
- if (adjust_resource(s->io[i].res, res->start,
- resource_size(res) + num))
- continue;
- *base = try;
- s->io[i].InUse += num;
- *parent = res;
- return 0;
- }
-
- /* Try to extend bottom of window */
- try = res->start - num;
- if ((*base == 0) || (*base == try)) {
- if (adjust_resource(s->io[i].res,
- res->start - num,
- resource_size(res) + num))
- continue;
- *base = try;
- s->io[i].InUse += num;
- *parent = res;
- return 0;
- }
- }
-
- return -EINVAL;
-}
-
-
-struct pccard_resource_ops pccard_iodyn_ops = {
- .validate_mem = NULL,
- .find_io = iodyn_find_io,
- .find_mem = NULL,
- .init = static_init,
- .exit = NULL,
-};
-EXPORT_SYMBOL(pccard_iodyn_ops);
diff --git a/drivers/pcmcia/rsrc_nonstatic.c b/drivers/pcmcia/rsrc_nonstatic.c
index bf9d070a4496..da494fe451ba 100644
--- a/drivers/pcmcia/rsrc_nonstatic.c
+++ b/drivers/pcmcia/rsrc_nonstatic.c
@@ -375,7 +375,9 @@ static int do_validate_mem(struct pcmcia_socket *s,
if (validate && !s->fake_cis) {
/* move it to the validated data set */
- add_interval(&s_data->mem_db_valid, base, size);
+ ret = add_interval(&s_data->mem_db_valid, base, size);
+ if (ret)
+ return ret;
sub_interval(&s_data->mem_db, base, size);
}
diff --git a/drivers/pcmcia/socket_sysfs.c b/drivers/pcmcia/socket_sysfs.c
index c7a906664c36..4eadd0485066 100644
--- a/drivers/pcmcia/socket_sysfs.c
+++ b/drivers/pcmcia/socket_sysfs.c
@@ -10,6 +10,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/string.h>
+#include <linux/string_choices.h>
#include <linux/major.h>
#include <linux/errno.h>
#include <linux/mm.h>
@@ -98,7 +99,7 @@ static ssize_t pccard_show_card_pm_state(struct device *dev,
char *buf)
{
struct pcmcia_socket *s = to_socket(dev);
- return sysfs_emit(buf, "%s\n", s->state & SOCKET_SUSPEND ? "off" : "on");
+ return sysfs_emit(buf, "%s\n", str_off_on(s->state & SOCKET_SUSPEND));
}
static ssize_t pccard_store_card_pm_state(struct device *dev,
@@ -177,7 +178,7 @@ static ssize_t pccard_show_resource(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct pcmcia_socket *s = to_socket(dev);
- return sysfs_emit(buf, "%s\n", s->resource_setup_done ? "yes" : "no");
+ return sysfs_emit(buf, "%s\n", str_yes_no(s->resource_setup_done));
}
static ssize_t pccard_store_resource(struct device *dev,
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index 69336bd778ee..13eb22b35aa8 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -129,6 +129,7 @@ enum acer_wmi_predator_v4_oc {
enum acer_wmi_gaming_misc_setting {
ACER_WMID_MISC_SETTING_OC_1 = 0x0005,
ACER_WMID_MISC_SETTING_OC_2 = 0x0007,
+ /* Unreliable on some models */
ACER_WMID_MISC_SETTING_SUPPORTED_PROFILES = 0x000A,
ACER_WMID_MISC_SETTING_PLATFORM_PROFILE = 0x000B,
};
@@ -794,9 +795,6 @@ static bool platform_profile_support;
*/
static int last_non_turbo_profile = INT_MIN;
-/* The most performant supported profile */
-static int acer_predator_v4_max_perf;
-
enum acer_predator_v4_thermal_profile {
ACER_PREDATOR_V4_THERMAL_PROFILE_QUIET = 0x00,
ACER_PREDATOR_V4_THERMAL_PROFILE_BALANCED = 0x01,
@@ -2014,7 +2012,7 @@ acer_predator_v4_platform_profile_set(struct device *dev,
if (err)
return err;
- if (tp != acer_predator_v4_max_perf)
+ if (tp != ACER_PREDATOR_V4_THERMAL_PROFILE_TURBO)
last_non_turbo_profile = tp;
return 0;
@@ -2023,55 +2021,14 @@ acer_predator_v4_platform_profile_set(struct device *dev,
static int
acer_predator_v4_platform_profile_probe(void *drvdata, unsigned long *choices)
{
- unsigned long supported_profiles;
- int err;
+ set_bit(PLATFORM_PROFILE_PERFORMANCE, choices);
+ set_bit(PLATFORM_PROFILE_BALANCED_PERFORMANCE, choices);
+ set_bit(PLATFORM_PROFILE_BALANCED, choices);
+ set_bit(PLATFORM_PROFILE_QUIET, choices);
+ set_bit(PLATFORM_PROFILE_LOW_POWER, choices);
- err = WMID_gaming_get_misc_setting(ACER_WMID_MISC_SETTING_SUPPORTED_PROFILES,
- (u8 *)&supported_profiles);
- if (err)
- return err;
-
- /* Iterate through supported profiles in order of increasing performance */
- if (test_bit(ACER_PREDATOR_V4_THERMAL_PROFILE_ECO, &supported_profiles)) {
- set_bit(PLATFORM_PROFILE_LOW_POWER, choices);
- acer_predator_v4_max_perf = ACER_PREDATOR_V4_THERMAL_PROFILE_ECO;
- last_non_turbo_profile = ACER_PREDATOR_V4_THERMAL_PROFILE_ECO;
- }
-
- if (test_bit(ACER_PREDATOR_V4_THERMAL_PROFILE_QUIET, &supported_profiles)) {
- set_bit(PLATFORM_PROFILE_QUIET, choices);
- acer_predator_v4_max_perf = ACER_PREDATOR_V4_THERMAL_PROFILE_QUIET;
- last_non_turbo_profile = ACER_PREDATOR_V4_THERMAL_PROFILE_QUIET;
- }
-
- if (test_bit(ACER_PREDATOR_V4_THERMAL_PROFILE_BALANCED, &supported_profiles)) {
- set_bit(PLATFORM_PROFILE_BALANCED, choices);
- acer_predator_v4_max_perf = ACER_PREDATOR_V4_THERMAL_PROFILE_BALANCED;
- last_non_turbo_profile = ACER_PREDATOR_V4_THERMAL_PROFILE_BALANCED;
- }
-
- if (test_bit(ACER_PREDATOR_V4_THERMAL_PROFILE_PERFORMANCE, &supported_profiles)) {
- set_bit(PLATFORM_PROFILE_BALANCED_PERFORMANCE, choices);
- acer_predator_v4_max_perf = ACER_PREDATOR_V4_THERMAL_PROFILE_PERFORMANCE;
-
- /* We only use this profile as a fallback option in case no prior
- * profile is supported.
- */
- if (last_non_turbo_profile < 0)
- last_non_turbo_profile = ACER_PREDATOR_V4_THERMAL_PROFILE_PERFORMANCE;
- }
-
- if (test_bit(ACER_PREDATOR_V4_THERMAL_PROFILE_TURBO, &supported_profiles)) {
- set_bit(PLATFORM_PROFILE_PERFORMANCE, choices);
- acer_predator_v4_max_perf = ACER_PREDATOR_V4_THERMAL_PROFILE_TURBO;
-
- /* We need to handle the hypothetical case where only the turbo profile
- * is supported. In this case the turbo toggle will essentially be a
- * no-op.
- */
- if (last_non_turbo_profile < 0)
- last_non_turbo_profile = ACER_PREDATOR_V4_THERMAL_PROFILE_TURBO;
- }
+ /* Set default non-turbo profile */
+ last_non_turbo_profile = ACER_PREDATOR_V4_THERMAL_PROFILE_BALANCED;
return 0;
}
@@ -2108,19 +2065,15 @@ static int acer_thermal_profile_change(void)
if (cycle_gaming_thermal_profile) {
platform_profile_cycle();
} else {
- /* Do nothing if no suitable platform profiles where found */
- if (last_non_turbo_profile < 0)
- return 0;
-
err = WMID_gaming_get_misc_setting(
ACER_WMID_MISC_SETTING_PLATFORM_PROFILE, &current_tp);
if (err)
return err;
- if (current_tp == acer_predator_v4_max_perf)
+ if (current_tp == ACER_PREDATOR_V4_THERMAL_PROFILE_TURBO)
tp = last_non_turbo_profile;
else
- tp = acer_predator_v4_max_perf;
+ tp = ACER_PREDATOR_V4_THERMAL_PROFILE_TURBO;
err = WMID_gaming_set_misc_setting(
ACER_WMID_MISC_SETTING_PLATFORM_PROFILE, tp);
@@ -2128,7 +2081,7 @@ static int acer_thermal_profile_change(void)
return err;
/* Store last profile for toggle */
- if (current_tp != acer_predator_v4_max_perf)
+ if (current_tp != ACER_PREDATOR_V4_THERMAL_PROFILE_TURBO)
last_non_turbo_profile = current_tp;
platform_profile_notify(platform_profile_device);
diff --git a/drivers/platform/x86/amd/hfi/hfi.c b/drivers/platform/x86/amd/hfi/hfi.c
index 4f56149b3774..a465ac6f607e 100644
--- a/drivers/platform/x86/amd/hfi/hfi.c
+++ b/drivers/platform/x86/amd/hfi/hfi.c
@@ -385,12 +385,16 @@ static int amd_hfi_metadata_parser(struct platform_device *pdev,
amd_hfi_data->pcct_entry = pcct_entry;
pcct_ext = (struct acpi_pcct_ext_pcc_slave *)pcct_entry;
- if (pcct_ext->length <= 0)
- return -EINVAL;
+ if (pcct_ext->length <= 0) {
+ ret = -EINVAL;
+ goto out;
+ }
amd_hfi_data->shmem = devm_kzalloc(amd_hfi_data->dev, pcct_ext->length, GFP_KERNEL);
- if (!amd_hfi_data->shmem)
- return -ENOMEM;
+ if (!amd_hfi_data->shmem) {
+ ret = -ENOMEM;
+ goto out;
+ }
pcc_chan->shmem_base_addr = pcct_ext->base_address;
pcc_chan->shmem_size = pcct_ext->length;
@@ -398,6 +402,8 @@ static int amd_hfi_metadata_parser(struct platform_device *pdev,
/* parse the shared memory info from the PCCT table */
ret = amd_hfi_fill_metadata(amd_hfi_data);
+out:
+ /* Don't leak any ACPI memory */
acpi_put_table(pcct_tbl);
return ret;
diff --git a/drivers/platform/x86/amd/pmc/pmc-quirks.c b/drivers/platform/x86/amd/pmc/pmc-quirks.c
index 7ffc659b2794..18fb44139de2 100644
--- a/drivers/platform/x86/amd/pmc/pmc-quirks.c
+++ b/drivers/platform/x86/amd/pmc/pmc-quirks.c
@@ -248,6 +248,20 @@ static const struct dmi_system_id fwbug_list[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "Lafite Pro V 14M"),
}
},
+ {
+ .ident = "TUXEDO InfinityBook Pro 14/15 AMD Gen10",
+ .driver_data = &quirk_spurious_8042,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "XxHP4NAx"),
+ }
+ },
+ {
+ .ident = "TUXEDO InfinityBook Pro 14/15 AMD Gen10",
+ .driver_data = &quirk_spurious_8042,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "XxKK4NAx_XxSP4NAx"),
+ }
+ },
{}
};
diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
index f84c3d03c1de..3a488cf9ca06 100644
--- a/drivers/platform/x86/asus-nb-wmi.c
+++ b/drivers/platform/x86/asus-nb-wmi.c
@@ -147,7 +147,12 @@ static struct quirk_entry quirk_asus_ignore_fan = {
};
static struct quirk_entry quirk_asus_zenbook_duo_kbd = {
- .ignore_key_wlan = true,
+ .key_wlan_event = ASUS_WMI_KEY_IGNORE,
+};
+
+static struct quirk_entry quirk_asus_z13 = {
+ .key_wlan_event = ASUS_WMI_KEY_ARMOURY,
+ .tablet_switch_mode = asus_wmi_kbd_dock_devid,
};
static int dmi_matched(const struct dmi_system_id *dmi)
@@ -539,6 +544,15 @@ static const struct dmi_system_id asus_quirks[] = {
},
.driver_data = &quirk_asus_zenbook_duo_kbd,
},
+ {
+ .callback = dmi_matched,
+ .ident = "ASUS ROG Z13",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ROG Flow Z13"),
+ },
+ .driver_data = &quirk_asus_z13,
+ },
{},
};
@@ -618,6 +632,7 @@ static const struct key_entry asus_nb_wmi_keymap[] = {
{ KE_KEY, 0x93, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + CRT + TV + DVI */
{ KE_KEY, 0x95, { KEY_MEDIA } },
{ KE_KEY, 0x99, { KEY_PHONE } }, /* Conflicts with fan mode switch */
+ { KE_KEY, 0x9D, { KEY_FN_F } },
{ KE_KEY, 0xA0, { KEY_SWITCHVIDEOMODE } }, /* SDSP HDMI only */
{ KE_KEY, 0xA1, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + HDMI */
{ KE_KEY, 0xA2, { KEY_SWITCHVIDEOMODE } }, /* SDSP CRT + HDMI */
@@ -632,10 +647,13 @@ static const struct key_entry asus_nb_wmi_keymap[] = {
{ KE_IGNORE, 0xC0, }, /* External display connect/disconnect notification */
{ KE_KEY, 0xC4, { KEY_KBDILLUMUP } },
{ KE_KEY, 0xC5, { KEY_KBDILLUMDOWN } },
+ { KE_KEY, 0xCA, { KEY_F13 } }, /* Noise cancelling on Expertbook B9 */
+ { KE_KEY, 0xCB, { KEY_F14 } }, /* Fn+noise-cancel */
{ KE_IGNORE, 0xC6, }, /* Ambient Light Sensor notification */
{ KE_IGNORE, 0xCF, }, /* AC mode */
{ KE_KEY, 0xFA, { KEY_PROG2 } }, /* Lid flip action */
{ KE_KEY, 0xBD, { KEY_PROG2 } }, /* Lid flip action on ROG xflow laptops */
+ { KE_KEY, ASUS_WMI_KEY_ARMOURY, { KEY_PROG3 } },
{ KE_END, 0},
};
@@ -655,11 +673,9 @@ static void asus_nb_wmi_key_filter(struct asus_wmi_driver *asus_wmi, int *code,
if (atkbd_reports_vol_keys)
*code = ASUS_WMI_KEY_IGNORE;
break;
- case 0x5D: /* Wireless console Toggle */
- case 0x5E: /* Wireless console Enable */
- case 0x5F: /* Wireless console Disable */
- if (quirks->ignore_key_wlan)
- *code = ASUS_WMI_KEY_IGNORE;
+ case 0x5F: /* Wireless console Disable / Special Key */
+ if (quirks->key_wlan_event)
+ *code = quirks->key_wlan_event;
break;
}
}
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index f7191fdded14..e72a2b5d158e 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -5088,16 +5088,22 @@ static int asus_wmi_probe(struct platform_device *pdev)
asus_s2idle_check_register();
- return asus_wmi_add(pdev);
+ ret = asus_wmi_add(pdev);
+ if (ret)
+ asus_s2idle_check_unregister();
+
+ return ret;
}
static bool used;
+static DEFINE_MUTEX(register_mutex);
int __init_or_module asus_wmi_register_driver(struct asus_wmi_driver *driver)
{
struct platform_driver *platform_driver;
struct platform_device *platform_device;
+ guard(mutex)(&register_mutex);
if (used)
return -EBUSY;
@@ -5120,6 +5126,7 @@ EXPORT_SYMBOL_GPL(asus_wmi_register_driver);
void asus_wmi_unregister_driver(struct asus_wmi_driver *driver)
{
+ guard(mutex)(&register_mutex);
asus_s2idle_check_unregister();
platform_device_unregister(driver->platform_device);
diff --git a/drivers/platform/x86/asus-wmi.h b/drivers/platform/x86/asus-wmi.h
index 018dfde4025e..5cd4392b964e 100644
--- a/drivers/platform/x86/asus-wmi.h
+++ b/drivers/platform/x86/asus-wmi.h
@@ -18,6 +18,7 @@
#include <linux/i8042.h>
#define ASUS_WMI_KEY_IGNORE (-1)
+#define ASUS_WMI_KEY_ARMOURY 0xffff01
#define ASUS_WMI_BRN_DOWN 0x2e
#define ASUS_WMI_BRN_UP 0x2f
@@ -40,7 +41,7 @@ struct quirk_entry {
bool wmi_force_als_set;
bool wmi_ignore_fan;
bool filter_i8042_e1_extended_codes;
- bool ignore_key_wlan;
+ int key_wlan_event;
enum asus_wmi_tablet_switch_mode tablet_switch_mode;
int wapf;
/*
diff --git a/drivers/platform/x86/hp/hp-wmi.c b/drivers/platform/x86/hp/hp-wmi.c
index 60c8ac8d902c..8b3533d6ba09 100644
--- a/drivers/platform/x86/hp/hp-wmi.c
+++ b/drivers/platform/x86/hp/hp-wmi.c
@@ -122,6 +122,7 @@ enum hp_wmi_event_ids {
HPWMI_BATTERY_CHARGE_PERIOD = 0x10,
HPWMI_SANITIZATION_MODE = 0x17,
HPWMI_CAMERA_TOGGLE = 0x1A,
+ HPWMI_FN_P_HOTKEY = 0x1B,
HPWMI_OMEN_KEY = 0x1D,
HPWMI_SMART_EXPERIENCE_APP = 0x21,
};
@@ -981,6 +982,9 @@ static void hp_wmi_notify(union acpi_object *obj, void *context)
key_code, 1, true))
pr_info("Unknown key code - 0x%x\n", key_code);
break;
+ case HPWMI_FN_P_HOTKEY:
+ platform_profile_cycle();
+ break;
case HPWMI_OMEN_KEY:
if (event_data) /* Only should be true for HP Omen */
key_code = event_data;
diff --git a/drivers/platform/x86/intel/pmc/core.c b/drivers/platform/x86/intel/pmc/core.c
index 540cd2fb0673..d040290e80ff 100644
--- a/drivers/platform/x86/intel/pmc/core.c
+++ b/drivers/platform/x86/intel/pmc/core.c
@@ -1625,6 +1625,7 @@ static const struct x86_cpu_id intel_pmc_core_ids[] = {
X86_MATCH_VFM(INTEL_RAPTORLAKE_P, &tgl_l_pmc_dev),
X86_MATCH_VFM(INTEL_RAPTORLAKE, &adl_pmc_dev),
X86_MATCH_VFM(INTEL_RAPTORLAKE_S, &adl_pmc_dev),
+ X86_MATCH_VFM(INTEL_BARTLETTLAKE, &adl_pmc_dev),
X86_MATCH_VFM(INTEL_METEORLAKE_L, &mtl_pmc_dev),
X86_MATCH_VFM(INTEL_ARROWLAKE, &arl_pmc_dev),
X86_MATCH_VFM(INTEL_ARROWLAKE_H, &arl_h_pmc_dev),
diff --git a/drivers/platform/x86/intel/tpmi_power_domains.c b/drivers/platform/x86/intel/tpmi_power_domains.c
index 9d8247bb9cfa..8641353b2e06 100644
--- a/drivers/platform/x86/intel/tpmi_power_domains.c
+++ b/drivers/platform/x86/intel/tpmi_power_domains.c
@@ -178,7 +178,7 @@ static int tpmi_get_logical_id(unsigned int cpu, struct tpmi_cpu_info *info)
info->punit_thread_id = FIELD_GET(LP_ID_MASK, data);
info->punit_core_id = FIELD_GET(MODULE_ID_MASK, data);
- info->pkg_id = topology_physical_package_id(cpu);
+ info->pkg_id = topology_logical_package_id(cpu);
info->linux_cpu = cpu;
return 0;
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index fba2e62027b7..4cfc928bcf2d 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -1243,7 +1243,7 @@ lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,
struct lpfc_nvmet_tgtport *tgtp;
struct lpfc_async_xchg_ctx *ctxp =
container_of(rsp, struct lpfc_async_xchg_ctx, hdlrctx.fcp_req);
- struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer;
+ struct rqb_dmabuf *nvmebuf;
struct lpfc_hba *phba = ctxp->phba;
unsigned long iflag;
@@ -1251,13 +1251,18 @@ lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,
lpfc_nvmeio_data(phba, "NVMET DEFERRCV: xri x%x sz %d CPU %02x\n",
ctxp->oxid, ctxp->size, raw_smp_processor_id());
+ spin_lock_irqsave(&ctxp->ctxlock, iflag);
+ nvmebuf = ctxp->rqb_buffer;
if (!nvmebuf) {
+ spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
"6425 Defer rcv: no buffer oxid x%x: "
"flg %x ste %x\n",
ctxp->oxid, ctxp->flag, ctxp->state);
return;
}
+ ctxp->rqb_buffer = NULL;
+ spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
tgtp = phba->targetport->private;
if (tgtp)
@@ -1265,9 +1270,6 @@ lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,
/* Free the nvmebuf since a new buffer already replaced it */
nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
- spin_lock_irqsave(&ctxp->ctxlock, iflag);
- ctxp->rqb_buffer = NULL;
- spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
}
/**
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index b17796d5ee66..add13e306898 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -475,13 +475,21 @@ static blk_status_t sr_init_command(struct scsi_cmnd *SCpnt)
static int sr_revalidate_disk(struct scsi_cd *cd)
{
+ struct request_queue *q = cd->device->request_queue;
struct scsi_sense_hdr sshdr;
+ struct queue_limits lim;
+ int sector_size;
/* if the unit is not ready, nothing more to do */
if (scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES, &sshdr))
return 0;
sr_cd_check(&cd->cdi);
- return get_sectorsize(cd);
+ sector_size = get_sectorsize(cd);
+
+ lim = queue_limits_start_update(q);
+ lim.logical_block_size = sector_size;
+ lim.features |= BLK_FEAT_ROTATIONAL;
+ return queue_limits_commit_update_frozen(q, &lim);
}
static int sr_block_open(struct gendisk *disk, blk_mode_t mode)
@@ -721,10 +729,8 @@ fail:
static int get_sectorsize(struct scsi_cd *cd)
{
- struct request_queue *q = cd->device->request_queue;
static const u8 cmd[10] = { READ_CAPACITY };
unsigned char buffer[8] = { };
- struct queue_limits lim;
int err;
int sector_size;
struct scsi_failure failure_defs[] = {
@@ -795,9 +801,7 @@ static int get_sectorsize(struct scsi_cd *cd)
set_capacity(cd->disk, cd->capacity);
}
- lim = queue_limits_start_update(q);
- lim.logical_block_size = sector_size;
- return queue_limits_commit_update_frozen(q, &lim);
+ return sector_size;
}
static int get_capabilities(struct scsi_cd *cd)
diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c
index 177f9a33f3a2..9bf823348cd3 100644
--- a/drivers/spi/spi-cadence-quadspi.c
+++ b/drivers/spi/spi-cadence-quadspi.c
@@ -108,6 +108,8 @@ struct cqspi_st {
bool is_jh7110; /* Flag for StarFive JH7110 SoC */
bool disable_stig_mode;
+ refcount_t refcount;
+ refcount_t inflight_ops;
const struct cqspi_driver_platdata *ddata;
};
@@ -735,6 +737,9 @@ static int cqspi_indirect_read_execute(struct cqspi_flash_pdata *f_pdata,
u8 *rxbuf_end = rxbuf + n_rx;
int ret = 0;
+ if (!refcount_read(&cqspi->refcount))
+ return -ENODEV;
+
writel(from_addr, reg_base + CQSPI_REG_INDIRECTRDSTARTADDR);
writel(remaining, reg_base + CQSPI_REG_INDIRECTRDBYTES);
@@ -1071,6 +1076,9 @@ static int cqspi_indirect_write_execute(struct cqspi_flash_pdata *f_pdata,
unsigned int write_bytes;
int ret;
+ if (!refcount_read(&cqspi->refcount))
+ return -ENODEV;
+
writel(to_addr, reg_base + CQSPI_REG_INDIRECTWRSTARTADDR);
writel(remaining, reg_base + CQSPI_REG_INDIRECTWRBYTES);
@@ -1461,12 +1469,26 @@ static int cqspi_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op *op)
struct cqspi_st *cqspi = spi_controller_get_devdata(mem->spi->controller);
struct device *dev = &cqspi->pdev->dev;
+ if (refcount_read(&cqspi->inflight_ops) == 0)
+ return -ENODEV;
+
ret = pm_runtime_resume_and_get(dev);
if (ret) {
dev_err(&mem->spi->dev, "resume failed with %d\n", ret);
return ret;
}
+ if (!refcount_read(&cqspi->refcount))
+ return -EBUSY;
+
+ refcount_inc(&cqspi->inflight_ops);
+
+ if (!refcount_read(&cqspi->refcount)) {
+ if (refcount_read(&cqspi->inflight_ops))
+ refcount_dec(&cqspi->inflight_ops);
+ return -EBUSY;
+ }
+
ret = cqspi_mem_process(mem, op);
pm_runtime_put_autosuspend(dev);
@@ -1474,6 +1496,9 @@ static int cqspi_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op *op)
if (ret)
dev_err(&mem->spi->dev, "operation failed with %d\n", ret);
+ if (refcount_read(&cqspi->inflight_ops) > 1)
+ refcount_dec(&cqspi->inflight_ops);
+
return ret;
}
@@ -1925,6 +1950,9 @@ static int cqspi_probe(struct platform_device *pdev)
}
}
+ refcount_set(&cqspi->refcount, 1);
+ refcount_set(&cqspi->inflight_ops, 1);
+
ret = devm_request_irq(dev, irq, cqspi_irq_handler, 0,
pdev->name, cqspi);
if (ret) {
@@ -1987,6 +2015,11 @@ static void cqspi_remove(struct platform_device *pdev)
{
struct cqspi_st *cqspi = platform_get_drvdata(pdev);
+ refcount_set(&cqspi->refcount, 0);
+
+ if (!refcount_dec_and_test(&cqspi->inflight_ops))
+ cqspi_wait_idle(cqspi);
+
spi_unregister_controller(cqspi->host);
cqspi_controller_enable(cqspi, 0);
diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c
index 313e444a34f3..431439d4cdda 100644
--- a/drivers/spi/spi-fsl-lpspi.c
+++ b/drivers/spi/spi-fsl-lpspi.c
@@ -3,8 +3,9 @@
// Freescale i.MX7ULP LPSPI driver
//
// Copyright 2016 Freescale Semiconductor, Inc.
-// Copyright 2018 NXP Semiconductors
+// Copyright 2018, 2023, 2025 NXP
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
@@ -70,7 +71,7 @@
#define DER_TDDE BIT(0)
#define CFGR1_PCSCFG BIT(27)
#define CFGR1_PINCFG (BIT(24)|BIT(25))
-#define CFGR1_PCSPOL BIT(8)
+#define CFGR1_PCSPOL_MASK GENMASK(11, 8)
#define CFGR1_NOSTALL BIT(3)
#define CFGR1_HOST BIT(0)
#define FSR_TXCOUNT (0xFF)
@@ -82,8 +83,11 @@
#define TCR_RXMSK BIT(19)
#define TCR_TXMSK BIT(18)
+#define SR_CLEAR_MASK GENMASK(13, 8)
+
struct fsl_lpspi_devtype_data {
- u8 prescale_max;
+ u8 prescale_max : 3; /* 0 == no limit */
+ bool query_hw_for_num_cs : 1;
};
struct lpspi_config {
@@ -129,20 +133,26 @@ struct fsl_lpspi_data {
};
/*
- * ERR051608 fixed or not:
- * https://www.nxp.com/docs/en/errata/i.MX93_1P87f.pdf
+ * Devices with ERR051608 have a max TCR_PRESCALE value of 1; otherwise there is
+ * no prescale limit: https://www.nxp.com/docs/en/errata/i.MX93_1P87f.pdf
*/
-static struct fsl_lpspi_devtype_data imx93_lpspi_devtype_data = {
+static const struct fsl_lpspi_devtype_data imx93_lpspi_devtype_data = {
.prescale_max = 1,
+ .query_hw_for_num_cs = true,
+};
+
+static const struct fsl_lpspi_devtype_data imx7ulp_lpspi_devtype_data = {
+ /* All defaults */
};
-static struct fsl_lpspi_devtype_data imx7ulp_lpspi_devtype_data = {
- .prescale_max = 7,
+static const struct fsl_lpspi_devtype_data s32g_lpspi_devtype_data = {
+ .query_hw_for_num_cs = true,
};
static const struct of_device_id fsl_lpspi_dt_ids[] = {
{ .compatible = "fsl,imx7ulp-spi", .data = &imx7ulp_lpspi_devtype_data,},
{ .compatible = "fsl,imx93-spi", .data = &imx93_lpspi_devtype_data,},
+ { .compatible = "nxp,s32g2-lpspi", .data = &s32g_lpspi_devtype_data,},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fsl_lpspi_dt_ids);
@@ -321,7 +331,7 @@ static int fsl_lpspi_set_bitrate(struct fsl_lpspi_data *fsl_lpspi)
int scldiv;
perclk_rate = clk_get_rate(fsl_lpspi->clk_per);
- prescale_max = fsl_lpspi->devtype_data->prescale_max;
+ prescale_max = fsl_lpspi->devtype_data->prescale_max ?: 7;
if (!config.speed_hz) {
dev_err(fsl_lpspi->dev,
@@ -423,7 +433,9 @@ static int fsl_lpspi_config(struct fsl_lpspi_data *fsl_lpspi)
else
temp = CFGR1_PINCFG;
if (fsl_lpspi->config.mode & SPI_CS_HIGH)
- temp |= CFGR1_PCSPOL;
+ temp |= FIELD_PREP(CFGR1_PCSPOL_MASK,
+ BIT(fsl_lpspi->config.chip_select));
+
writel(temp, fsl_lpspi->base + IMX7ULP_CFGR1);
temp = readl(fsl_lpspi->base + IMX7ULP_CR);
@@ -532,14 +544,13 @@ static int fsl_lpspi_reset(struct fsl_lpspi_data *fsl_lpspi)
fsl_lpspi_intctrl(fsl_lpspi, 0);
}
- /* W1C for all flags in SR */
- temp = 0x3F << 8;
- writel(temp, fsl_lpspi->base + IMX7ULP_SR);
-
/* Clear FIFO and disable module */
temp = CR_RRF | CR_RTF;
writel(temp, fsl_lpspi->base + IMX7ULP_CR);
+ /* W1C for all flags in SR */
+ writel(SR_CLEAR_MASK, fsl_lpspi->base + IMX7ULP_SR);
+
return 0;
}
@@ -730,12 +741,10 @@ static int fsl_lpspi_pio_transfer(struct spi_controller *controller,
fsl_lpspi_write_tx_fifo(fsl_lpspi);
ret = fsl_lpspi_wait_for_completion(controller);
- if (ret)
- return ret;
fsl_lpspi_reset(fsl_lpspi);
- return 0;
+ return ret;
}
static int fsl_lpspi_transfer_one(struct spi_controller *controller,
@@ -785,7 +794,7 @@ static irqreturn_t fsl_lpspi_isr(int irq, void *dev_id)
if (temp_SR & SR_MBF ||
readl(fsl_lpspi->base + IMX7ULP_FSR) & FSR_TXCOUNT) {
writel(SR_FCF, fsl_lpspi->base + IMX7ULP_SR);
- fsl_lpspi_intctrl(fsl_lpspi, IER_FCIE);
+ fsl_lpspi_intctrl(fsl_lpspi, IER_FCIE | (temp_IER & IER_TDIE));
return IRQ_HANDLED;
}
@@ -930,7 +939,7 @@ static int fsl_lpspi_probe(struct platform_device *pdev)
fsl_lpspi->rxfifosize = 1 << ((temp >> 8) & 0x0f);
if (of_property_read_u32((&pdev->dev)->of_node, "num-cs",
&num_cs)) {
- if (of_device_is_compatible(pdev->dev.of_node, "fsl,imx93-spi"))
+ if (devtype_data->query_hw_for_num_cs)
num_cs = ((temp >> 16) & 0xf);
else
num_cs = 1;
diff --git a/drivers/spi/spi-microchip-core-qspi.c b/drivers/spi/spi-microchip-core-qspi.c
index d13a9b755c7f..8dc98b17f77b 100644
--- a/drivers/spi/spi-microchip-core-qspi.c
+++ b/drivers/spi/spi-microchip-core-qspi.c
@@ -531,10 +531,6 @@ error:
static bool mchp_coreqspi_supports_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
- struct mchp_coreqspi *qspi = spi_controller_get_devdata(mem->spi->controller);
- unsigned long clk_hz;
- u32 baud_rate_val;
-
if (!spi_mem_default_supports_op(mem, op))
return false;
@@ -557,14 +553,6 @@ static bool mchp_coreqspi_supports_op(struct spi_mem *mem, const struct spi_mem_
return false;
}
- clk_hz = clk_get_rate(qspi->clk);
- if (!clk_hz)
- return false;
-
- baud_rate_val = DIV_ROUND_UP(clk_hz, 2 * op->max_freq);
- if (baud_rate_val > MAX_DIVIDER || baud_rate_val < MIN_DIVIDER)
- return false;
-
return true;
}
diff --git a/drivers/spi/spi-qpic-snand.c b/drivers/spi/spi-qpic-snand.c
index 0ceaad7dba3c..780abb967822 100644
--- a/drivers/spi/spi-qpic-snand.c
+++ b/drivers/spi/spi-qpic-snand.c
@@ -1615,11 +1615,13 @@ static int qcom_spi_probe(struct platform_device *pdev)
ret = spi_register_controller(ctlr);
if (ret) {
dev_err(&pdev->dev, "spi_register_controller failed.\n");
- goto err_spi_init;
+ goto err_register_controller;
}
return 0;
+err_register_controller:
+ nand_ecc_unregister_on_host_hw_engine(&snandc->qspi->ecc_eng);
err_spi_init:
qcom_nandc_unalloc(snandc);
err_snand_alloc:
@@ -1641,7 +1643,7 @@ static void qcom_spi_remove(struct platform_device *pdev)
struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
spi_unregister_controller(ctlr);
-
+ nand_ecc_unregister_on_host_hw_engine(&snandc->qspi->ecc_eng);
qcom_nandc_unalloc(snandc);
clk_disable_unprepare(snandc->aon_clk);
diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
index ac3d085808e9..315bab373729 100644
--- a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
+++ b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
@@ -2441,13 +2441,6 @@ exit:
return ret;
}
-static int cfg80211_rtw_change_bss(struct wiphy *wiphy,
- struct net_device *ndev,
- struct bss_parameters *params)
-{
- return 0;
-}
-
void rtw_cfg80211_rx_action(struct adapter *adapter, u8 *frame, uint frame_len, const char *msg)
{
s32 freq;
@@ -2704,7 +2697,6 @@ static struct cfg80211_ops rtw_cfg80211_ops = {
.del_station = cfg80211_rtw_del_station,
.change_station = cfg80211_rtw_change_station,
.dump_station = cfg80211_rtw_dump_station,
- .change_bss = cfg80211_rtw_change_bss,
.mgmt_tx = cfg80211_rtw_mgmt_tx,
};
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index c953297aa89a..b21cb72835cc 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -111,6 +111,24 @@ struct btrfs_bio_ctrl {
*/
unsigned long submit_bitmap;
struct readahead_control *ractl;
+
+ /*
+ * The start offset of the extent map last used by a read operation.
+ *
+ * This is for proper compressed read merge.
+ * U64_MAX means we are starting the read and have made no progress yet.
+ *
+ * The current btrfs_bio_is_contig() only uses disk_bytenr as
+ * the condition to check whether the read can be merged with the
+ * previous bio, which is not correct. E.g. two file extents can
+ * point to the same extent but at different offsets.
+ *
+ * So here we need to do extra checks to only merge reads that are
+ * covered by the same extent map.
+ * Just extent_map::start will be enough, as they are unique
+ * inside the same inode.
+ */
+ u64 last_em_start;
};
static void submit_one_bio(struct btrfs_bio_ctrl *bio_ctrl)
@@ -909,7 +927,7 @@ static void btrfs_readahead_expand(struct readahead_control *ractl,
* return 0 on success, otherwise return error
*/
static int btrfs_do_readpage(struct folio *folio, struct extent_map **em_cached,
- struct btrfs_bio_ctrl *bio_ctrl, u64 *prev_em_start)
+ struct btrfs_bio_ctrl *bio_ctrl)
{
struct inode *inode = folio->mapping->host;
struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
@@ -1019,12 +1037,11 @@ static int btrfs_do_readpage(struct folio *folio, struct extent_map **em_cached,
* non-optimal behavior (submitting 2 bios for the same extent).
*/
if (compress_type != BTRFS_COMPRESS_NONE &&
- prev_em_start && *prev_em_start != (u64)-1 &&
- *prev_em_start != em->start)
+ bio_ctrl->last_em_start != U64_MAX &&
+ bio_ctrl->last_em_start != em->start)
force_bio_submit = true;
- if (prev_em_start)
- *prev_em_start = em->start;
+ bio_ctrl->last_em_start = em->start;
btrfs_free_extent_map(em);
em = NULL;
@@ -1238,12 +1255,15 @@ int btrfs_read_folio(struct file *file, struct folio *folio)
const u64 start = folio_pos(folio);
const u64 end = start + folio_size(folio) - 1;
struct extent_state *cached_state = NULL;
- struct btrfs_bio_ctrl bio_ctrl = { .opf = REQ_OP_READ };
+ struct btrfs_bio_ctrl bio_ctrl = {
+ .opf = REQ_OP_READ,
+ .last_em_start = U64_MAX,
+ };
struct extent_map *em_cached = NULL;
int ret;
lock_extents_for_read(inode, start, end, &cached_state);
- ret = btrfs_do_readpage(folio, &em_cached, &bio_ctrl, NULL);
+ ret = btrfs_do_readpage(folio, &em_cached, &bio_ctrl);
btrfs_unlock_extent(&inode->io_tree, start, end, &cached_state);
btrfs_free_extent_map(em_cached);
@@ -2583,7 +2603,8 @@ void btrfs_readahead(struct readahead_control *rac)
{
struct btrfs_bio_ctrl bio_ctrl = {
.opf = REQ_OP_READ | REQ_RAHEAD,
- .ractl = rac
+ .ractl = rac,
+ .last_em_start = U64_MAX,
};
struct folio *folio;
struct btrfs_inode *inode = BTRFS_I(rac->mapping->host);
@@ -2591,12 +2612,11 @@ void btrfs_readahead(struct readahead_control *rac)
const u64 end = start + readahead_length(rac) - 1;
struct extent_state *cached_state = NULL;
struct extent_map *em_cached = NULL;
- u64 prev_em_start = (u64)-1;
lock_extents_for_read(inode, start, end, &cached_state);
while ((folio = readahead_folio(rac)) != NULL)
- btrfs_do_readpage(folio, &em_cached, &bio_ctrl, &prev_em_start);
+ btrfs_do_readpage(folio, &em_cached, &bio_ctrl);
btrfs_unlock_extent(&inode->io_tree, start, end, &cached_state);
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index dd82dcc7b2b7..e7218e78bff4 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -5696,7 +5696,17 @@ static void btrfs_del_inode_from_root(struct btrfs_inode *inode)
bool empty = false;
xa_lock(&root->inodes);
- entry = __xa_erase(&root->inodes, btrfs_ino(inode));
+ /*
+ * This btrfs_inode is being freed and has already been unhashed at this
+ * point. It's possible that another btrfs_inode has already been
+ * allocated for the same inode and inserted itself into the root, so
+ * don't delete it in that case.
+ *
+ * Note that this shouldn't need to allocate memory, so the gfp flags
+ * don't really matter.
+ */
+ entry = __xa_cmpxchg(&root->inodes, btrfs_ino(inode), inode, NULL,
+ GFP_ATOMIC);
if (entry == inode)
empty = xa_empty(&root->inodes);
xa_unlock(&root->inodes);
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index ccaa9a3cf1ce..da102da169fd 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -1455,6 +1455,7 @@ static int __qgroup_excl_accounting(struct btrfs_fs_info *fs_info, u64 ref_root,
struct btrfs_qgroup *qgroup;
LIST_HEAD(qgroup_list);
u64 num_bytes = src->excl;
+ u64 num_bytes_cmpr = src->excl_cmpr;
int ret = 0;
qgroup = find_qgroup_rb(fs_info, ref_root);
@@ -1466,11 +1467,12 @@ static int __qgroup_excl_accounting(struct btrfs_fs_info *fs_info, u64 ref_root,
struct btrfs_qgroup_list *glist;
qgroup->rfer += sign * num_bytes;
- qgroup->rfer_cmpr += sign * num_bytes;
+ qgroup->rfer_cmpr += sign * num_bytes_cmpr;
WARN_ON(sign < 0 && qgroup->excl < num_bytes);
+ WARN_ON(sign < 0 && qgroup->excl_cmpr < num_bytes_cmpr);
qgroup->excl += sign * num_bytes;
- qgroup->excl_cmpr += sign * num_bytes;
+ qgroup->excl_cmpr += sign * num_bytes_cmpr;
if (sign > 0)
qgroup_rsv_add_by_qgroup(fs_info, qgroup, src);
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index a262b494a89f..df1f6cc3fe21 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -299,9 +299,12 @@ static int btrfs_parse_compress(struct btrfs_fs_context *ctx,
btrfs_set_opt(ctx->mount_opt, COMPRESS);
btrfs_clear_opt(ctx->mount_opt, NODATACOW);
btrfs_clear_opt(ctx->mount_opt, NODATASUM);
- } else if (btrfs_match_compress_type(string, "lzo", false)) {
+ } else if (btrfs_match_compress_type(string, "lzo", true)) {
ctx->compress_type = BTRFS_COMPRESS_LZO;
- ctx->compress_level = 0;
+ ctx->compress_level = btrfs_compress_str2level(BTRFS_COMPRESS_LZO,
+ string + 3);
+ if (string[3] == ':' && string[4])
+ btrfs_warn(NULL, "Compression level ignored for LZO");
btrfs_set_opt(ctx->mount_opt, COMPRESS);
btrfs_clear_opt(ctx->mount_opt, NODATACOW);
btrfs_clear_opt(ctx->mount_opt, NODATASUM);
@@ -1079,7 +1082,7 @@ static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry)
seq_printf(seq, ",compress-force=%s", compress_type);
else
seq_printf(seq, ",compress=%s", compress_type);
- if (info->compress_level)
+ if (info->compress_level && info->compress_type != BTRFS_COMPRESS_LZO)
seq_printf(seq, ":%d", info->compress_level);
}
if (btrfs_test_opt(info, NOSSD))
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index fa7a929a0461..c6e3efd6f602 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -2722,6 +2722,11 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path
goto error;
}
+ if (bdev_nr_bytes(file_bdev(bdev_file)) <= BTRFS_DEVICE_RANGE_RESERVED) {
+ ret = -EINVAL;
+ goto error;
+ }
+
if (fs_devices->seeding) {
seeding_dev = true;
down_write(&sb->s_umount);
diff --git a/fs/coredump.c b/fs/coredump.c
index 5dce257c67fc..60bc9685e149 100644
--- a/fs/coredump.c
+++ b/fs/coredump.c
@@ -1466,11 +1466,15 @@ static int proc_dostring_coredump(const struct ctl_table *table, int write,
ssize_t retval;
char old_core_pattern[CORENAME_MAX_SIZE];
+ if (write)
+ return proc_dostring(table, write, buffer, lenp, ppos);
+
retval = strscpy(old_core_pattern, core_pattern, CORENAME_MAX_SIZE);
error = proc_dostring(table, write, buffer, lenp, ppos);
if (error)
return error;
+
if (!check_coredump_socket()) {
strscpy(core_pattern, old_core_pattern, retval + 1);
return -EINVAL;
diff --git a/fs/exec.c b/fs/exec.c
index 2a1e5e4042a1..e861a4b7ffda 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -2048,7 +2048,7 @@ static int proc_dointvec_minmax_coredump(const struct ctl_table *table, int writ
{
int error = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
- if (!error)
+ if (!error && !write)
validate_coredump_safety();
return error;
}
diff --git a/fs/fhandle.c b/fs/fhandle.c
index 68a7d2861c58..a907ddfac4d5 100644
--- a/fs/fhandle.c
+++ b/fs/fhandle.c
@@ -208,6 +208,14 @@ static int vfs_dentry_acceptable(void *context, struct dentry *dentry)
return 1;
/*
+ * Verify that the decoded dentry itself has a valid id mapping.
+ * In case the decoded dentry is the mountfd root itself, this
+ * verifies that the mountfd inode itself has a valid id mapping.
+ */
+ if (!privileged_wrt_inode_uidgid(user_ns, idmap, d_inode(dentry)))
+ return 0;
+
+ /*
* It's racy as we're not taking rename_lock but we're able to ignore
* permissions and we just need an approximation whether we were able
* to follow a path to the file.
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index e80cd8f2c049..5150aa25e64b 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -1893,7 +1893,7 @@ static int fuse_retrieve(struct fuse_mount *fm, struct inode *inode,
index = outarg->offset >> PAGE_SHIFT;
- while (num) {
+ while (num && ap->num_folios < num_pages) {
struct folio *folio;
unsigned int folio_offset;
unsigned int nr_bytes;
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 2d817d7cab26..5c569c3cb53f 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -1199,7 +1199,7 @@ static void fuse_fillattr(struct mnt_idmap *idmap, struct inode *inode,
if (attr->blksize != 0)
blkbits = ilog2(attr->blksize);
else
- blkbits = inode->i_sb->s_blocksize_bits;
+ blkbits = fc->blkbits;
stat->blksize = 1 << blkbits;
}
@@ -1377,6 +1377,7 @@ retry:
generic_fillattr(idmap, request_mask, inode, stat);
stat->mode = fi->orig_i_mode;
stat->ino = fi->orig_ino;
+ stat->blksize = 1 << fi->cached_i_blkbits;
if (test_bit(FUSE_I_BTIME, &fi->state)) {
stat->btime = fi->i_btime;
stat->result_mask |= STATX_BTIME;
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 5525a4520b0f..4adcf09d4b01 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -2960,7 +2960,7 @@ static ssize_t __fuse_copy_file_range(struct file *file_in, loff_t pos_in,
.nodeid_out = ff_out->nodeid,
.fh_out = ff_out->fh,
.off_out = pos_out,
- .len = len,
+ .len = min_t(size_t, len, UINT_MAX & PAGE_MASK),
.flags = flags
};
struct fuse_write_out outarg;
@@ -3026,6 +3026,9 @@ static ssize_t __fuse_copy_file_range(struct file *file_in, loff_t pos_in,
fc->no_copy_file_range = 1;
err = -EOPNOTSUPP;
}
+ if (!err && outarg.size > len)
+ err = -EIO;
+
if (err)
goto out;
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index ec248d13c8bf..cc428d04be3e 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -210,6 +210,12 @@ struct fuse_inode {
/** Reference to backing file in passthrough mode */
struct fuse_backing *fb;
#endif
+
+ /*
+ * The underlying inode->i_blkbits value will not be modified,
+ * so preserve the blocksize specified by the server.
+ */
+ u8 cached_i_blkbits;
};
/** FUSE inode state bits */
@@ -969,6 +975,14 @@ struct fuse_conn {
/* Request timeout (in jiffies). 0 = no timeout */
unsigned int req_timeout;
} timeout;
+
+ /*
+ * This is a workaround until fuse uses iomap for reads.
+ * For fuseblk servers, this represents the blocksize passed in at
+ * mount time; for regular fuse servers, it is equivalent to
+ * inode->i_blkbits.
+ */
+ u8 blkbits;
};
/*
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 67c2318bfc42..7ddfd2b3cc9c 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -289,6 +289,11 @@ void fuse_change_attributes_common(struct inode *inode, struct fuse_attr *attr,
}
}
+ if (attr->blksize)
+ fi->cached_i_blkbits = ilog2(attr->blksize);
+ else
+ fi->cached_i_blkbits = fc->blkbits;
+
/*
* Don't set the sticky bit in i_mode, unless we want the VFS
* to check permissions. This prevents failures due to the
@@ -1805,10 +1810,21 @@ int fuse_fill_super_common(struct super_block *sb, struct fuse_fs_context *ctx)
err = -EINVAL;
if (!sb_set_blocksize(sb, ctx->blksize))
goto err;
+ /*
+ * This is a workaround until fuse hooks into iomap for reads.
+ * Use PAGE_SIZE for the blocksize; otherwise, if the writeback
+ * cache is enabled, buffered writes go through iomap and a read
+ * may overwrite partially written data if blocksize < PAGE_SIZE.
+ */
+ fc->blkbits = sb->s_blocksize_bits;
+ if (ctx->blksize != PAGE_SIZE &&
+ !sb_set_blocksize(sb, PAGE_SIZE))
+ goto err;
#endif
} else {
sb->s_blocksize = PAGE_SIZE;
sb->s_blocksize_bits = PAGE_SHIFT;
+ fc->blkbits = sb->s_blocksize_bits;
}
sb->s_subtype = ctx->subtype;
diff --git a/fs/fuse/passthrough.c b/fs/fuse/passthrough.c
index 607ef735ad4a..eb97ac009e75 100644
--- a/fs/fuse/passthrough.c
+++ b/fs/fuse/passthrough.c
@@ -237,6 +237,11 @@ int fuse_backing_open(struct fuse_conn *fc, struct fuse_backing_map *map)
if (!file)
goto out;
+ /* read/write/splice/mmap passthrough only relevant for regular files */
+ res = d_is_dir(file->f_path.dentry) ? -EISDIR : -EINVAL;
+ if (!d_is_reg(file->f_path.dentry))
+ goto out_fput;
+
backing_sb = file_inode(file)->i_sb;
res = -ELOOP;
if (backing_sb->s_stack_depth >= fc->max_stack_depth)
diff --git a/fs/fuse/virtio_fs.c b/fs/fuse/virtio_fs.c
index c826e7ca49f5..76c8fd0bfc75 100644
--- a/fs/fuse/virtio_fs.c
+++ b/fs/fuse/virtio_fs.c
@@ -1016,7 +1016,7 @@ static long virtio_fs_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
if (kaddr)
*kaddr = fs->window_kaddr + offset;
if (pfn)
- *pfn = fs->window_phys_addr + offset;
+ *pfn = PHYS_PFN(fs->window_phys_addr + offset);
return nr_pages > max_nr_pages ? max_nr_pages : nr_pages;
}
diff --git a/fs/namespace.c b/fs/namespace.c
index ae6d1312b184..51f77c65c0c6 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -2455,7 +2455,7 @@ struct vfsmount *clone_private_mount(const struct path *path)
return ERR_PTR(-EINVAL);
}
- if (!ns_capable(old_mnt->mnt_ns->user_ns, CAP_SYS_ADMIN))
+ if (!ns_capable(old_mnt->mnt_ns->user_ns, CAP_SYS_ADMIN))
return ERR_PTR(-EPERM);
if (__has_locked_children(old_mnt, path->dentry))
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 8fb4a950dd55..4e3dcc157a83 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -888,6 +888,8 @@ static void nfs_server_set_fsinfo(struct nfs_server *server,
if (fsinfo->xattr_support)
server->caps |= NFS_CAP_XATTR;
+ else
+ server->caps &= ~NFS_CAP_XATTR;
#endif
}
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index 86e36c630f09..8059ece82468 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -28,6 +28,7 @@
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/gfp.h>
+#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/compaction.h>
@@ -280,6 +281,37 @@ nfs_file_fsync(struct file *file, loff_t start, loff_t end, int datasync)
}
EXPORT_SYMBOL_GPL(nfs_file_fsync);
+void nfs_truncate_last_folio(struct address_space *mapping, loff_t from,
+ loff_t to)
+{
+ struct folio *folio;
+
+ if (from >= to)
+ return;
+
+ folio = filemap_lock_folio(mapping, from >> PAGE_SHIFT);
+ if (IS_ERR(folio))
+ return;
+
+ if (folio_mkclean(folio))
+ folio_mark_dirty(folio);
+
+ if (folio_test_uptodate(folio)) {
+ loff_t fpos = folio_pos(folio);
+ size_t offset = from - fpos;
+ size_t end = folio_size(folio);
+
+ if (to - fpos < end)
+ end = to - fpos;
+ folio_zero_segment(folio, offset, end);
+ trace_nfs_size_truncate_folio(mapping->host, to);
+ }
+
+ folio_unlock(folio);
+ folio_put(folio);
+}
+EXPORT_SYMBOL_GPL(nfs_truncate_last_folio);
+
/*
* Decide whether a read/modify/write cycle may be more efficient
* then a modify/write/read cycle when writing to a page in the
@@ -356,6 +388,7 @@ static int nfs_write_begin(const struct kiocb *iocb,
dfprintk(PAGECACHE, "NFS: write_begin(%pD2(%lu), %u@%lld)\n",
file, mapping->host->i_ino, len, (long long) pos);
+ nfs_truncate_last_folio(mapping, i_size_read(mapping->host), pos);
fgp |= fgf_set_order(len);
start:
@@ -442,10 +475,11 @@ static void nfs_invalidate_folio(struct folio *folio, size_t offset,
dfprintk(PAGECACHE, "NFS: invalidate_folio(%lu, %zu, %zu)\n",
folio->index, offset, length);
- if (offset != 0 || length < folio_size(folio))
- return;
/* Cancel any unstarted writes on this page */
- nfs_wb_folio_cancel(inode, folio);
+ if (offset != 0 || length < folio_size(folio))
+ nfs_wb_folio(inode, folio);
+ else
+ nfs_wb_folio_cancel(inode, folio);
folio_wait_private_2(folio); /* [DEPRECATED] */
trace_nfs_invalidate_folio(inode, folio_pos(folio) + offset, length);
}
diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
index 8dc921d83538..9edb5f9b0c4e 100644
--- a/fs/nfs/flexfilelayout/flexfilelayout.c
+++ b/fs/nfs/flexfilelayout/flexfilelayout.c
@@ -293,7 +293,7 @@ ff_lseg_match_mirrors(struct pnfs_layout_segment *l1,
struct pnfs_layout_segment *l2)
{
const struct nfs4_ff_layout_segment *fl1 = FF_LAYOUT_LSEG(l1);
- const struct nfs4_ff_layout_segment *fl2 = FF_LAYOUT_LSEG(l1);
+ const struct nfs4_ff_layout_segment *fl2 = FF_LAYOUT_LSEG(l2);
u32 i;
if (fl1->mirror_array_cnt != fl2->mirror_array_cnt)
@@ -773,8 +773,11 @@ ff_layout_choose_ds_for_read(struct pnfs_layout_segment *lseg,
continue;
if (check_device &&
- nfs4_test_deviceid_unavailable(&mirror->mirror_ds->id_node))
+ nfs4_test_deviceid_unavailable(&mirror->mirror_ds->id_node)) {
+ // reinitialize the error state in case this is the last iteration
+ ds = ERR_PTR(-EINVAL);
continue;
+ }
*best_idx = idx;
break;
@@ -804,7 +807,7 @@ ff_layout_choose_best_ds_for_read(struct pnfs_layout_segment *lseg,
struct nfs4_pnfs_ds *ds;
ds = ff_layout_choose_valid_ds_for_read(lseg, start_idx, best_idx);
- if (ds)
+ if (!IS_ERR(ds))
return ds;
return ff_layout_choose_any_ds_for_read(lseg, start_idx, best_idx);
}
@@ -818,7 +821,7 @@ ff_layout_get_ds_for_read(struct nfs_pageio_descriptor *pgio,
ds = ff_layout_choose_best_ds_for_read(lseg, pgio->pg_mirror_idx,
best_idx);
- if (ds || !pgio->pg_mirror_idx)
+ if (!IS_ERR(ds) || !pgio->pg_mirror_idx)
return ds;
return ff_layout_choose_best_ds_for_read(lseg, 0, best_idx);
}
@@ -868,7 +871,7 @@ retry:
req->wb_nio = 0;
ds = ff_layout_get_ds_for_read(pgio, &ds_idx);
- if (!ds) {
+ if (IS_ERR(ds)) {
if (!ff_layout_no_fallback_to_mds(pgio->pg_lseg))
goto out_mds;
pnfs_generic_pg_cleanup(pgio);
@@ -1072,11 +1075,13 @@ static void ff_layout_resend_pnfs_read(struct nfs_pgio_header *hdr)
{
u32 idx = hdr->pgio_mirror_idx + 1;
u32 new_idx = 0;
+ struct nfs4_pnfs_ds *ds;
- if (ff_layout_choose_any_ds_for_read(hdr->lseg, idx, &new_idx))
- ff_layout_send_layouterror(hdr->lseg);
- else
+ ds = ff_layout_choose_any_ds_for_read(hdr->lseg, idx, &new_idx);
+ if (IS_ERR(ds))
pnfs_error_mark_layout_for_return(hdr->inode, hdr->lseg);
+ else
+ ff_layout_send_layouterror(hdr->lseg);
pnfs_read_resend_pnfs(hdr, new_idx);
}
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 338ef77ae423..49df9debb1a6 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -716,6 +716,7 @@ nfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
{
struct inode *inode = d_inode(dentry);
struct nfs_fattr *fattr;
+ loff_t oldsize = i_size_read(inode);
int error = 0;
nfs_inc_stats(inode, NFSIOS_VFSSETATTR);
@@ -731,7 +732,7 @@ nfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
if (error)
return error;
- if (attr->ia_size == i_size_read(inode))
+ if (attr->ia_size == oldsize)
attr->ia_valid &= ~ATTR_SIZE;
}
@@ -767,8 +768,10 @@ nfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
trace_nfs_setattr_enter(inode);
/* Write all dirty data */
- if (S_ISREG(inode->i_mode))
+ if (S_ISREG(inode->i_mode)) {
+ nfs_file_block_o_direct(NFS_I(inode));
nfs_sync_inode(inode);
+ }
fattr = nfs_alloc_fattr_with_label(NFS_SERVER(inode));
if (fattr == NULL) {
@@ -777,8 +780,12 @@ nfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
}
error = NFS_PROTO(inode)->setattr(dentry, fattr, attr);
- if (error == 0)
+ if (error == 0) {
+ if (attr->ia_valid & ATTR_SIZE)
+ nfs_truncate_last_folio(inode->i_mapping, oldsize,
+ attr->ia_size);
error = nfs_refresh_inode(inode, fattr);
+ }
nfs_free_fattr(fattr);
out:
trace_nfs_setattr_exit(inode, error);
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 74d712b58423..c0a44f389f8f 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -437,6 +437,8 @@ int nfs_file_release(struct inode *, struct file *);
int nfs_lock(struct file *, int, struct file_lock *);
int nfs_flock(struct file *, int, struct file_lock *);
int nfs_check_flags(int);
+void nfs_truncate_last_folio(struct address_space *mapping, loff_t from,
+ loff_t to);
/* inode.c */
extern struct workqueue_struct *nfsiod_workqueue;
@@ -530,6 +532,16 @@ static inline bool nfs_file_io_is_buffered(struct nfs_inode *nfsi)
return test_bit(NFS_INO_ODIRECT, &nfsi->flags) == 0;
}
+/* Must be called with exclusively locked inode->i_rwsem */
+static inline void nfs_file_block_o_direct(struct nfs_inode *nfsi)
+{
+ if (test_bit(NFS_INO_ODIRECT, &nfsi->flags)) {
+ clear_bit(NFS_INO_ODIRECT, &nfsi->flags);
+ inode_dio_wait(&nfsi->vfs_inode);
+ }
+}
+
+
/* namespace.c */
#define NFS_PATH_CANONICAL 1
extern char *nfs_path(char **p, struct dentry *dentry,
diff --git a/fs/nfs/io.c b/fs/nfs/io.c
index 3388faf2acb9..d275b0a250bf 100644
--- a/fs/nfs/io.c
+++ b/fs/nfs/io.c
@@ -14,15 +14,6 @@
#include "internal.h"
-/* Call with exclusively locked inode->i_rwsem */
-static void nfs_block_o_direct(struct nfs_inode *nfsi, struct inode *inode)
-{
- if (test_bit(NFS_INO_ODIRECT, &nfsi->flags)) {
- clear_bit(NFS_INO_ODIRECT, &nfsi->flags);
- inode_dio_wait(inode);
- }
-}
-
/**
* nfs_start_io_read - declare the file is being used for buffered reads
* @inode: file inode
@@ -57,7 +48,7 @@ nfs_start_io_read(struct inode *inode)
err = down_write_killable(&inode->i_rwsem);
if (err)
return err;
- nfs_block_o_direct(nfsi, inode);
+ nfs_file_block_o_direct(nfsi);
downgrade_write(&inode->i_rwsem);
return 0;
@@ -90,7 +81,7 @@ nfs_start_io_write(struct inode *inode)
err = down_write_killable(&inode->i_rwsem);
if (!err)
- nfs_block_o_direct(NFS_I(inode), inode);
+ nfs_file_block_o_direct(NFS_I(inode));
return err;
}
diff --git a/fs/nfs/localio.c b/fs/nfs/localio.c
index bd5fca285899..97abf62f109d 100644
--- a/fs/nfs/localio.c
+++ b/fs/nfs/localio.c
@@ -180,10 +180,8 @@ static void nfs_local_probe(struct nfs_client *clp)
return;
}
- if (nfs_client_is_local(clp)) {
- /* If already enabled, disable and re-enable */
- nfs_localio_disable_client(clp);
- }
+ if (nfs_client_is_local(clp))
+ return;
if (!nfs_uuid_begin(&clp->cl_uuid))
return;
@@ -244,7 +242,8 @@ __nfs_local_open_fh(struct nfs_client *clp, const struct cred *cred,
case -ENOMEM:
case -ENXIO:
case -ENOENT:
- /* Revalidate localio, will disable if unsupported */
+ /* Revalidate localio */
+ nfs_localio_disable_client(clp);
nfs_local_probe(clp);
}
}
@@ -453,12 +452,13 @@ static void nfs_local_call_read(struct work_struct *work)
nfs_local_iter_init(&iter, iocb, READ);
status = filp->f_op->read_iter(&iocb->kiocb, &iter);
+
+ revert_creds(save_cred);
+
if (status != -EIOCBQUEUED) {
nfs_local_read_done(iocb, status);
nfs_local_pgio_release(iocb);
}
-
- revert_creds(save_cred);
}
static int
@@ -648,14 +648,15 @@ static void nfs_local_call_write(struct work_struct *work)
file_start_write(filp);
status = filp->f_op->write_iter(&iocb->kiocb, &iter);
file_end_write(filp);
+
+ revert_creds(save_cred);
+ current->flags = old_flags;
+
if (status != -EIOCBQUEUED) {
nfs_local_write_done(iocb, status);
nfs_local_vfs_getattr(iocb);
nfs_local_pgio_release(iocb);
}
-
- revert_creds(save_cred);
- current->flags = old_flags;
}
static int
diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c
index 01c01f45358b..6a0b5871ba3b 100644
--- a/fs/nfs/nfs42proc.c
+++ b/fs/nfs/nfs42proc.c
@@ -114,6 +114,7 @@ static int nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
exception.inode = inode;
exception.state = lock->open_context->state;
+ nfs_file_block_o_direct(NFS_I(inode));
err = nfs_sync_inode(inode);
if (err)
goto out;
@@ -137,6 +138,7 @@ int nfs42_proc_allocate(struct file *filep, loff_t offset, loff_t len)
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ALLOCATE],
};
struct inode *inode = file_inode(filep);
+ loff_t oldsize = i_size_read(inode);
int err;
if (!nfs_server_capable(inode, NFS_CAP_ALLOCATE))
@@ -145,7 +147,11 @@ int nfs42_proc_allocate(struct file *filep, loff_t offset, loff_t len)
inode_lock(inode);
err = nfs42_proc_fallocate(&msg, filep, offset, len);
- if (err == -EOPNOTSUPP)
+
+ if (err == 0)
+ nfs_truncate_last_folio(inode->i_mapping, oldsize,
+ offset + len);
+ else if (err == -EOPNOTSUPP)
NFS_SERVER(inode)->caps &= ~(NFS_CAP_ALLOCATE |
NFS_CAP_ZERO_RANGE);
@@ -183,6 +189,7 @@ int nfs42_proc_zero_range(struct file *filep, loff_t offset, loff_t len)
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ZERO_RANGE],
};
struct inode *inode = file_inode(filep);
+ loff_t oldsize = i_size_read(inode);
int err;
if (!nfs_server_capable(inode, NFS_CAP_ZERO_RANGE))
@@ -191,9 +198,11 @@ int nfs42_proc_zero_range(struct file *filep, loff_t offset, loff_t len)
inode_lock(inode);
err = nfs42_proc_fallocate(&msg, filep, offset, len);
- if (err == 0)
+ if (err == 0) {
+ nfs_truncate_last_folio(inode->i_mapping, oldsize,
+ offset + len);
truncate_pagecache_range(inode, offset, (offset + len) -1);
- if (err == -EOPNOTSUPP)
+ } else if (err == -EOPNOTSUPP)
NFS_SERVER(inode)->caps &= ~NFS_CAP_ZERO_RANGE;
inode_unlock(inode);
@@ -354,22 +363,27 @@ out:
/**
* nfs42_copy_dest_done - perform inode cache updates after clone/copy offload
- * @inode: pointer to destination inode
+ * @file: pointer to destination file
* @pos: destination offset
* @len: copy length
+ * @oldsize: length of the file prior to clone/copy
*
* Punch a hole in the inode page cache, so that the NFS client will
* know to retrieve new data.
* Update the file size if necessary, and then mark the inode as having
* invalid cached values for change attribute, ctime, mtime and space used.
*/
-static void nfs42_copy_dest_done(struct inode *inode, loff_t pos, loff_t len)
+static void nfs42_copy_dest_done(struct file *file, loff_t pos, loff_t len,
+ loff_t oldsize)
{
+ struct inode *inode = file_inode(file);
+ struct address_space *mapping = file->f_mapping;
loff_t newsize = pos + len;
loff_t end = newsize - 1;
- WARN_ON_ONCE(invalidate_inode_pages2_range(inode->i_mapping,
- pos >> PAGE_SHIFT, end >> PAGE_SHIFT));
+ nfs_truncate_last_folio(mapping, oldsize, pos);
+ WARN_ON_ONCE(invalidate_inode_pages2_range(mapping, pos >> PAGE_SHIFT,
+ end >> PAGE_SHIFT));
spin_lock(&inode->i_lock);
if (newsize > i_size_read(inode))
@@ -402,6 +416,7 @@ static ssize_t _nfs42_proc_copy(struct file *src,
struct nfs_server *src_server = NFS_SERVER(src_inode);
loff_t pos_src = args->src_pos;
loff_t pos_dst = args->dst_pos;
+ loff_t oldsize_dst = i_size_read(dst_inode);
size_t count = args->count;
ssize_t status;
@@ -430,6 +445,7 @@ static ssize_t _nfs42_proc_copy(struct file *src,
return status;
}
+ nfs_file_block_o_direct(NFS_I(dst_inode));
status = nfs_sync_inode(dst_inode);
if (status)
return status;
@@ -475,7 +491,7 @@ static ssize_t _nfs42_proc_copy(struct file *src,
goto out;
}
- nfs42_copy_dest_done(dst_inode, pos_dst, res->write_res.count);
+ nfs42_copy_dest_done(dst, pos_dst, res->write_res.count, oldsize_dst);
nfs_invalidate_atime(src_inode);
status = res->write_res.count;
out:
@@ -1242,6 +1258,7 @@ static int _nfs42_proc_clone(struct rpc_message *msg, struct file *src_f,
struct nfs42_clone_res res = {
.server = server,
};
+ loff_t oldsize_dst = i_size_read(dst_inode);
int status;
msg->rpc_argp = &args;
@@ -1276,7 +1293,7 @@ static int _nfs42_proc_clone(struct rpc_message *msg, struct file *src_f,
/* a zero-length count means clone to EOF in src */
if (count == 0 && res.dst_fattr->valid & NFS_ATTR_FATTR_SIZE)
count = nfs_size_to_loff_t(res.dst_fattr->size) - dst_offset;
- nfs42_copy_dest_done(dst_inode, dst_offset, count);
+ nfs42_copy_dest_done(dst_f, dst_offset, count, oldsize_dst);
status = nfs_post_op_update_inode(dst_inode, res.dst_fattr);
}
diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c
index 1d6b5f4230c9..c9a0d1e420c6 100644
--- a/fs/nfs/nfs4file.c
+++ b/fs/nfs/nfs4file.c
@@ -278,9 +278,11 @@ static loff_t nfs42_remap_file_range(struct file *src_file, loff_t src_off,
lock_two_nondirectories(src_inode, dst_inode);
/* flush all pending writes on both src and dst so that server
* has the latest data */
+ nfs_file_block_o_direct(NFS_I(src_inode));
ret = nfs_sync_inode(src_inode);
if (ret)
goto out_unlock;
+ nfs_file_block_o_direct(NFS_I(dst_inode));
ret = nfs_sync_inode(dst_inode);
if (ret)
goto out_unlock;
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 7d2b67e06cc3..ce61253efd45 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -4013,8 +4013,10 @@ static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *f
res.attr_bitmask[2];
}
memcpy(server->attr_bitmask, res.attr_bitmask, sizeof(server->attr_bitmask));
- server->caps &= ~(NFS_CAP_ACLS | NFS_CAP_HARDLINKS |
- NFS_CAP_SYMLINKS| NFS_CAP_SECURITY_LABEL);
+ server->caps &=
+ ~(NFS_CAP_ACLS | NFS_CAP_HARDLINKS | NFS_CAP_SYMLINKS |
+ NFS_CAP_SECURITY_LABEL | NFS_CAP_FS_LOCATIONS |
+ NFS_CAP_OPEN_XOR | NFS_CAP_DELEGTIME);
server->fattr_valid = NFS_ATTR_FATTR_V4;
if (res.attr_bitmask[0] & FATTR4_WORD0_ACL &&
res.acl_bitmask & ACL4_SUPPORT_ALLOW_ACL)
@@ -4092,7 +4094,6 @@ int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
};
int err;
- nfs_server_set_init_caps(server);
do {
err = nfs4_handle_exception(server,
_nfs4_server_capabilities(server, fhandle),
diff --git a/fs/nfs/nfstrace.h b/fs/nfs/nfstrace.h
index 96b1323318c2..627115179795 100644
--- a/fs/nfs/nfstrace.h
+++ b/fs/nfs/nfstrace.h
@@ -272,6 +272,7 @@ DECLARE_EVENT_CLASS(nfs_update_size_class,
TP_ARGS(inode, new_size))
DEFINE_NFS_UPDATE_SIZE_EVENT(truncate);
+DEFINE_NFS_UPDATE_SIZE_EVENT(truncate_folio);
DEFINE_NFS_UPDATE_SIZE_EVENT(wcc);
DEFINE_NFS_UPDATE_SIZE_EVENT(update);
DEFINE_NFS_UPDATE_SIZE_EVENT(grow);
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 8b7c04737967..647c53d1418a 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -237,59 +237,17 @@ static void nfs_mapping_set_error(struct folio *folio, int error)
}
/*
- * nfs_page_group_search_locked
- * @head - head request of page group
- * @page_offset - offset into page
+ * nfs_page_covers_folio
+ * @req: struct nfs_page
*
- * Search page group with head @head to find a request that contains the
- * page offset @page_offset.
- *
- * Returns a pointer to the first matching nfs request, or NULL if no
- * match is found.
- *
- * Must be called with the page group lock held
- */
-static struct nfs_page *
-nfs_page_group_search_locked(struct nfs_page *head, unsigned int page_offset)
-{
- struct nfs_page *req;
-
- req = head;
- do {
- if (page_offset >= req->wb_pgbase &&
- page_offset < (req->wb_pgbase + req->wb_bytes))
- return req;
-
- req = req->wb_this_page;
- } while (req != head);
-
- return NULL;
-}
-
-/*
- * nfs_page_group_covers_page
- * @head - head request of page group
- *
- * Return true if the page group with head @head covers the whole page,
- * returns false otherwise
+ * Return true if the request covers the whole folio.
+ * Note that the caller should ensure all subrequests have been joined.
*/
static bool nfs_page_group_covers_page(struct nfs_page *req)
{
unsigned int len = nfs_folio_length(nfs_page_to_folio(req));
- struct nfs_page *tmp;
- unsigned int pos = 0;
-
- nfs_page_group_lock(req);
- for (;;) {
- tmp = nfs_page_group_search_locked(req->wb_head, pos);
- if (!tmp)
- break;
- pos = tmp->wb_pgbase + tmp->wb_bytes;
- }
-
- nfs_page_group_unlock(req);
- return pos >= len;
+ return req->wb_pgbase == 0 && req->wb_bytes == len;
}
/* We can set the PG_uptodate flag if we see that a write request
@@ -2045,6 +2003,7 @@ int nfs_wb_folio_cancel(struct inode *inode, struct folio *folio)
* release it */
nfs_inode_remove_request(req);
nfs_unlock_and_release_request(req);
+ folio_cancel_dirty(folio);
}
return ret;
diff --git a/fs/ocfs2/extent_map.c b/fs/ocfs2/extent_map.c
index 930150ed5db1..ef147e8b3271 100644
--- a/fs/ocfs2/extent_map.c
+++ b/fs/ocfs2/extent_map.c
@@ -706,6 +706,8 @@ out:
* it not only handles the fiemap for inlined files, but also deals
* with the fast symlink, cause they have no difference for extent
* mapping per se.
+ *
+ * Must be called with ip_alloc_sem semaphore held.
*/
static int ocfs2_fiemap_inline(struct inode *inode, struct buffer_head *di_bh,
struct fiemap_extent_info *fieinfo,
@@ -717,6 +719,7 @@ static int ocfs2_fiemap_inline(struct inode *inode, struct buffer_head *di_bh,
u64 phys;
u32 flags = FIEMAP_EXTENT_DATA_INLINE|FIEMAP_EXTENT_LAST;
struct ocfs2_inode_info *oi = OCFS2_I(inode);
+ lockdep_assert_held_read(&oi->ip_alloc_sem);
di = (struct ocfs2_dinode *)di_bh->b_data;
if (ocfs2_inode_is_fast_symlink(inode))
@@ -732,8 +735,11 @@ static int ocfs2_fiemap_inline(struct inode *inode, struct buffer_head *di_bh,
phys += offsetof(struct ocfs2_dinode,
id2.i_data.id_data);
+ /* Release the ip_alloc_sem to prevent deadlock on page fault */
+ up_read(&OCFS2_I(inode)->ip_alloc_sem);
ret = fiemap_fill_next_extent(fieinfo, 0, phys, id_count,
flags);
+ down_read(&OCFS2_I(inode)->ip_alloc_sem);
if (ret < 0)
return ret;
}
@@ -802,9 +808,11 @@ int ocfs2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
len_bytes = (u64)le16_to_cpu(rec.e_leaf_clusters) << osb->s_clustersize_bits;
phys_bytes = le64_to_cpu(rec.e_blkno) << osb->sb->s_blocksize_bits;
virt_bytes = (u64)le32_to_cpu(rec.e_cpos) << osb->s_clustersize_bits;
-
+ /* Release the ip_alloc_sem to prevent deadlock on page fault */
+ up_read(&OCFS2_I(inode)->ip_alloc_sem);
ret = fiemap_fill_next_extent(fieinfo, virt_bytes, phys_bytes,
len_bytes, fe_flags);
+ down_read(&OCFS2_I(inode)->ip_alloc_sem);
if (ret)
break;
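
The two ocfs2 hunks above share one pattern: fiemap_fill_next_extent() may copy the extent record to user space and fault, and a page fault can re-enter ocfs2 and try to take ip_alloc_sem again, so the semaphore is dropped around the call. A minimal sketch of that drop-and-reacquire pattern, assuming the caller already holds ip_alloc_sem for read and has snapshotted the extent fields into locals before the lock is released (the helper name is illustrative, not ocfs2 API):

static int ocfs2_fill_extent_unlocked(struct inode *inode,
				      struct fiemap_extent_info *fieinfo,
				      u64 virt, u64 phys, u64 len, u32 flags)
{
	int ret;

	/* Drop the lock so a fault in the user buffer cannot deadlock. */
	up_read(&OCFS2_I(inode)->ip_alloc_sem);
	ret = fiemap_fill_next_extent(fieinfo, virt, phys, len, flags);
	/* Retake it before the caller touches extent state again. */
	down_read(&OCFS2_I(inode)->ip_alloc_sem);

	return ret;	/* 1 means "last extent filled", < 0 is an error */
}
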
diff --git a/fs/proc/generic.c b/fs/proc/generic.c
index bd0c099cfdd2..176281112273 100644
--- a/fs/proc/generic.c
+++ b/fs/proc/generic.c
@@ -393,7 +393,8 @@ struct proc_dir_entry *proc_register(struct proc_dir_entry *dir,
if (proc_alloc_inum(&dp->low_ino))
goto out_free_entry;
- pde_set_flags(dp);
+ if (!S_ISDIR(dp->mode))
+ pde_set_flags(dp);
write_lock(&proc_subdir_lock);
dp->parent = dir;
diff --git a/fs/smb/client/cifs_debug.c b/fs/smb/client/cifs_debug.c
index beb4f18f05ef..2337cf795db3 100644
--- a/fs/smb/client/cifs_debug.c
+++ b/fs/smb/client/cifs_debug.c
@@ -304,6 +304,8 @@ static int cifs_debug_dirs_proc_show(struct seq_file *m, void *v)
list_for_each(tmp1, &ses->tcon_list) {
tcon = list_entry(tmp1, struct cifs_tcon, tcon_list);
cfids = tcon->cfids;
+ if (!cfids)
+ continue;
spin_lock(&cfids->cfid_list_lock); /* check lock ordering */
seq_printf(m, "Num entries: %d\n", cfids->num_entries);
list_for_each_entry(cfid, &cfids->entries, entry) {
@@ -319,8 +321,6 @@ static int cifs_debug_dirs_proc_show(struct seq_file *m, void *v)
seq_printf(m, "\n");
}
spin_unlock(&cfids->cfid_list_lock);
-
-
}
}
}
@@ -347,6 +347,22 @@ static __always_inline const char *compression_alg_str(__le16 alg)
}
}
+static __always_inline const char *cipher_alg_str(__le16 cipher)
+{
+ switch (cipher) {
+ case SMB2_ENCRYPTION_AES128_CCM:
+ return "AES128-CCM";
+ case SMB2_ENCRYPTION_AES128_GCM:
+ return "AES128-GCM";
+ case SMB2_ENCRYPTION_AES256_CCM:
+ return "AES256-CCM";
+ case SMB2_ENCRYPTION_AES256_GCM:
+ return "AES256-GCM";
+ default:
+ return "UNKNOWN";
+ }
+}
+
static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
{
struct mid_q_entry *mid_entry;
@@ -539,6 +555,11 @@ skip_rdma:
else
seq_puts(m, "disabled (not supported by this server)");
+ /* Show negotiated encryption cipher, even if not required */
+ seq_puts(m, "\nEncryption: ");
+ if (server->cipher_type)
+ seq_printf(m, "Negotiated cipher (%s)", cipher_alg_str(server->cipher_type));
+
seq_printf(m, "\n\n\tSessions: ");
i = 0;
list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
@@ -576,12 +597,8 @@ skip_rdma:
/* dump session id helpful for use with network trace */
seq_printf(m, " SessionId: 0x%llx", ses->Suid);
- if (ses->session_flags & SMB2_SESSION_FLAG_ENCRYPT_DATA) {
+ if (ses->session_flags & SMB2_SESSION_FLAG_ENCRYPT_DATA)
seq_puts(m, " encrypted");
- /* can help in debugging to show encryption type */
- if (server->cipher_type == SMB2_ENCRYPTION_AES256_GCM)
- seq_puts(m, "(gcm256)");
- }
if (ses->sign)
seq_puts(m, " signed");
diff --git a/fs/smb/client/cifs_unicode.c b/fs/smb/client/cifs_unicode.c
index 4cc6e0896fad..f8659d36793f 100644
--- a/fs/smb/client/cifs_unicode.c
+++ b/fs/smb/client/cifs_unicode.c
@@ -629,6 +629,9 @@ cifs_strndup_to_utf16(const char *src, const int maxlen, int *utf16_len,
int len;
__le16 *dst;
+ if (!src)
+ return NULL;
+
len = cifs_local_to_utf16_bytes(src, maxlen, cp);
len += 2; /* NULL */
dst = kmalloc(len, GFP_KERNEL);
diff --git a/fs/smb/client/reparse.c b/fs/smb/client/reparse.c
index 7869cec58f52..10c84c095fe7 100644
--- a/fs/smb/client/reparse.c
+++ b/fs/smb/client/reparse.c
@@ -278,7 +278,7 @@ static int detect_directory_symlink_target(struct cifs_sb_info *cifs_sb,
}
/*
- * For absolute symlinks it is not possible to determinate
+ * For absolute symlinks it is not possible to determine
* if it should point to directory or file.
*/
if (symname[0] == '/') {
diff --git a/fs/smb/client/smb1ops.c b/fs/smb/client/smb1ops.c
index 893a1ea8c000..a02d41d1ce4a 100644
--- a/fs/smb/client/smb1ops.c
+++ b/fs/smb/client/smb1ops.c
@@ -1005,7 +1005,7 @@ smb_set_file_info(struct inode *inode, const char *full_path,
rc = -EOPNOTSUPP;
}
- /* Fallback to SMB_COM_SETATTR command when absolutelty needed. */
+ /* Fallback to SMB_COM_SETATTR command when absolutely needed. */
if (rc == -EOPNOTSUPP) {
cifs_dbg(FYI, "calling SetInformation since SetPathInfo for attrs/times not supported by this server\n");
rc = SMBSetInformation(xid, tcon, full_path,
@@ -1039,7 +1039,7 @@ set_via_filehandle:
cifsFileInfo_put(open_file);
/*
- * Setting the read-only bit is not honered on non-NT servers when done
+ * Setting the read-only bit is not honored on non-NT servers when done
* via open-semantics. So for setting it, use SMB_COM_SETATTR command.
* This command works only after the file is closed, so use it only when
* operation was called without the filehandle.
diff --git a/fs/smb/client/smb2misc.c b/fs/smb/client/smb2misc.c
index cddf273c14ae..89d933b4a8bc 100644
--- a/fs/smb/client/smb2misc.c
+++ b/fs/smb/client/smb2misc.c
@@ -614,6 +614,15 @@ smb2_is_valid_lease_break(char *buffer, struct TCP_Server_Info *server)
struct cifs_tcon *tcon;
struct cifs_pending_open *open;
+ /* Trace receipt of lease break request from server */
+ trace_smb3_lease_break_enter(le32_to_cpu(rsp->CurrentLeaseState),
+ le32_to_cpu(rsp->Flags),
+ le16_to_cpu(rsp->Epoch),
+ le32_to_cpu(rsp->hdr.Id.SyncId.TreeId),
+ le64_to_cpu(rsp->hdr.SessionId),
+ *((u64 *)rsp->LeaseKey),
+ *((u64 *)&rsp->LeaseKey[8]));
+
cifs_dbg(FYI, "Checking for lease break\n");
/* If server is a channel, select the primary channel */
@@ -660,10 +669,12 @@ smb2_is_valid_lease_break(char *buffer, struct TCP_Server_Info *server)
spin_unlock(&cifs_tcp_ses_lock);
cifs_dbg(FYI, "Can not process lease break - no lease matched\n");
trace_smb3_lease_not_found(le32_to_cpu(rsp->CurrentLeaseState),
- le32_to_cpu(rsp->hdr.Id.SyncId.TreeId),
- le64_to_cpu(rsp->hdr.SessionId),
- *((u64 *)rsp->LeaseKey),
- *((u64 *)&rsp->LeaseKey[8]));
+ le32_to_cpu(rsp->Flags),
+ le16_to_cpu(rsp->Epoch),
+ le32_to_cpu(rsp->hdr.Id.SyncId.TreeId),
+ le64_to_cpu(rsp->hdr.SessionId),
+ *((u64 *)rsp->LeaseKey),
+ *((u64 *)&rsp->LeaseKey[8]));
return false;
}
diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c
index 2df93a75e3b8..c3b9d3f6210f 100644
--- a/fs/smb/client/smb2pdu.c
+++ b/fs/smb/client/smb2pdu.c
@@ -6192,11 +6192,11 @@ SMB2_lease_break(const unsigned int xid, struct cifs_tcon *tcon,
please_key_high = (__u64 *)(lease_key+8);
if (rc) {
cifs_stats_fail_inc(tcon, SMB2_OPLOCK_BREAK_HE);
- trace_smb3_lease_err(le32_to_cpu(lease_state), tcon->tid,
+ trace_smb3_lease_ack_err(le32_to_cpu(lease_state), tcon->tid,
ses->Suid, *please_key_low, *please_key_high, rc);
cifs_dbg(FYI, "Send error in Lease Break = %d\n", rc);
} else
- trace_smb3_lease_done(le32_to_cpu(lease_state), tcon->tid,
+ trace_smb3_lease_ack_done(le32_to_cpu(lease_state), tcon->tid,
ses->Suid, *please_key_low, *please_key_high);
return rc;
diff --git a/fs/smb/client/trace.h b/fs/smb/client/trace.h
index 93e5b2bb9f28..fe0e075bc63c 100644
--- a/fs/smb/client/trace.h
+++ b/fs/smb/client/trace.h
@@ -1171,8 +1171,54 @@ DEFINE_EVENT(smb3_lease_done_class, smb3_##name, \
__u64 lease_key_high), \
TP_ARGS(lease_state, tid, sesid, lease_key_low, lease_key_high))
-DEFINE_SMB3_LEASE_DONE_EVENT(lease_done);
-DEFINE_SMB3_LEASE_DONE_EVENT(lease_not_found);
+DEFINE_SMB3_LEASE_DONE_EVENT(lease_ack_done);
+/* Tracepoint when a lease break request is received/entered (includes epoch and flags) */
+DECLARE_EVENT_CLASS(smb3_lease_enter_class,
+ TP_PROTO(__u32 lease_state,
+ __u32 flags,
+ __u16 epoch,
+ __u32 tid,
+ __u64 sesid,
+ __u64 lease_key_low,
+ __u64 lease_key_high),
+ TP_ARGS(lease_state, flags, epoch, tid, sesid, lease_key_low, lease_key_high),
+ TP_STRUCT__entry(
+ __field(__u32, lease_state)
+ __field(__u32, flags)
+ __field(__u16, epoch)
+ __field(__u32, tid)
+ __field(__u64, sesid)
+ __field(__u64, lease_key_low)
+ __field(__u64, lease_key_high)
+ ),
+ TP_fast_assign(
+ __entry->lease_state = lease_state;
+ __entry->flags = flags;
+ __entry->epoch = epoch;
+ __entry->tid = tid;
+ __entry->sesid = sesid;
+ __entry->lease_key_low = lease_key_low;
+ __entry->lease_key_high = lease_key_high;
+ ),
+ TP_printk("sid=0x%llx tid=0x%x lease_key=0x%llx%llx lease_state=0x%x flags=0x%x epoch=%u",
+ __entry->sesid, __entry->tid, __entry->lease_key_high,
+ __entry->lease_key_low, __entry->lease_state, __entry->flags, __entry->epoch)
+)
+
+#define DEFINE_SMB3_LEASE_ENTER_EVENT(name) \
+DEFINE_EVENT(smb3_lease_enter_class, smb3_##name, \
+ TP_PROTO(__u32 lease_state, \
+ __u32 flags, \
+ __u16 epoch, \
+ __u32 tid, \
+ __u64 sesid, \
+ __u64 lease_key_low, \
+ __u64 lease_key_high), \
+ TP_ARGS(lease_state, flags, epoch, tid, sesid, lease_key_low, lease_key_high))
+
+DEFINE_SMB3_LEASE_ENTER_EVENT(lease_break_enter);
+/* Lease not found: reuse lease_enter payload (includes epoch and flags) */
+DEFINE_SMB3_LEASE_ENTER_EVENT(lease_not_found);
DECLARE_EVENT_CLASS(smb3_lease_err_class,
TP_PROTO(__u32 lease_state,
@@ -1213,7 +1259,7 @@ DEFINE_EVENT(smb3_lease_err_class, smb3_##name, \
int rc), \
TP_ARGS(lease_state, tid, sesid, lease_key_low, lease_key_high, rc))
-DEFINE_SMB3_LEASE_ERR_EVENT(lease_err);
+DEFINE_SMB3_LEASE_ERR_EVENT(lease_ack_err);
DECLARE_EVENT_CLASS(smb3_connect_class,
TP_PROTO(char *hostname,
diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h
index fa4ffe037bc7..8720a0705900 100644
--- a/include/linux/compiler-clang.h
+++ b/include/linux/compiler-clang.h
@@ -18,23 +18,42 @@
#define KASAN_ABI_VERSION 5
/*
+ * Clang 22 added preprocessor macros to match GCC, in hopes of eventually
+ * dropping __has_feature support for sanitizers:
+ * https://github.com/llvm/llvm-project/commit/568c23bbd3303518c5056d7f03444dae4fdc8a9c
+ * Create these macros for older versions of clang so that it is easy to clean
+ * up once the minimum supported version of LLVM for building the kernel always
+ * creates these macros.
+ *
* Note: Checking __has_feature(*_sanitizer) is only true if the feature is
* enabled. Therefore it is not required to additionally check defined(CONFIG_*)
* to avoid adding redundant attributes in other configurations.
*/
+#if __has_feature(address_sanitizer) && !defined(__SANITIZE_ADDRESS__)
+#define __SANITIZE_ADDRESS__
+#endif
+#if __has_feature(hwaddress_sanitizer) && !defined(__SANITIZE_HWADDRESS__)
+#define __SANITIZE_HWADDRESS__
+#endif
+#if __has_feature(thread_sanitizer) && !defined(__SANITIZE_THREAD__)
+#define __SANITIZE_THREAD__
+#endif
-#if __has_feature(address_sanitizer) || __has_feature(hwaddress_sanitizer)
-/* Emulate GCC's __SANITIZE_ADDRESS__ flag */
+/*
+ * Treat __SANITIZE_HWADDRESS__ the same as __SANITIZE_ADDRESS__ in the kernel.
+ */
+#ifdef __SANITIZE_HWADDRESS__
#define __SANITIZE_ADDRESS__
+#endif
+
+#ifdef __SANITIZE_ADDRESS__
#define __no_sanitize_address \
__attribute__((no_sanitize("address", "hwaddress")))
#else
#define __no_sanitize_address
#endif
-#if __has_feature(thread_sanitizer)
-/* emulate gcc's __SANITIZE_THREAD__ flag */
-#define __SANITIZE_THREAD__
+#ifdef __SANITIZE_THREAD__
#define __no_sanitize_thread \
__attribute__((no_sanitize("thread")))
#else
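
With the fallbacks above in place, kernel code can key off the GCC-style macros directly instead of repeating __has_feature() checks, regardless of whether clang is new enough to define them itself. A small sketch of the consumer side, assuming only the normalized macros (the helper name is made up for illustration):

/* KASAN and HWASAN builds both end up defining __SANITIZE_ADDRESS__. */
#ifdef __SANITIZE_ADDRESS__
static inline void my_poison_redzone(void *p, size_t size)
{
	/* sanitizer-specific bookkeeping would go here */
}
#else
static inline void my_poison_redzone(void *p, size_t size) { }
#endif
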
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index b91b993f58ee..487b3bf2e1ea 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -83,6 +83,7 @@ extern ssize_t cpu_show_old_microcode(struct device *dev,
extern ssize_t cpu_show_indirect_target_selection(struct device *dev,
struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_tsa(struct device *dev, struct device_attribute *attr, char *buf);
+extern ssize_t cpu_show_vmscape(struct device *dev, struct device_attribute *attr, char *buf);
extern __printf(4, 5)
struct device *cpu_device_create(struct device *parent, void *drvdata,
diff --git a/include/linux/energy_model.h b/include/linux/energy_model.h
index 7fa1eb3cc823..61d50571ad88 100644
--- a/include/linux/energy_model.h
+++ b/include/linux/energy_model.h
@@ -171,6 +171,9 @@ int em_dev_update_perf_domain(struct device *dev,
int em_dev_register_perf_domain(struct device *dev, unsigned int nr_states,
const struct em_data_callback *cb,
const cpumask_t *cpus, bool microwatts);
+int em_dev_register_pd_no_update(struct device *dev, unsigned int nr_states,
+ const struct em_data_callback *cb,
+ const cpumask_t *cpus, bool microwatts);
void em_dev_unregister_perf_domain(struct device *dev);
struct em_perf_table *em_table_alloc(struct em_perf_domain *pd);
void em_table_free(struct em_perf_table *table);
@@ -350,6 +353,13 @@ int em_dev_register_perf_domain(struct device *dev, unsigned int nr_states,
{
return -EINVAL;
}
+static inline
+int em_dev_register_pd_no_update(struct device *dev, unsigned int nr_states,
+ const struct em_data_callback *cb,
+ const cpumask_t *cpus, bool microwatts)
+{
+ return -EINVAL;
+}
static inline void em_dev_unregister_perf_domain(struct device *dev)
{
}
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index de5bd76a400c..d7d757e72554 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -856,8 +856,8 @@ struct kernel_ethtool_ts_info {
enum hwtstamp_provider_qualifier phc_qualifier;
enum hwtstamp_source phc_source;
int phc_phyindex;
- enum hwtstamp_tx_types tx_types;
- enum hwtstamp_rx_filters rx_filters;
+ u32 tx_types;
+ u32 rx_filters;
};
/**
diff --git a/include/linux/fs.h b/include/linux/fs.h
index d7ab4f96d705..601d036a6c78 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -149,7 +149,8 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
/* Expect random access pattern */
#define FMODE_RANDOM ((__force fmode_t)(1 << 12))
-/* FMODE_* bit 13 */
+/* Supports IOCB_HAS_METADATA */
+#define FMODE_HAS_METADATA ((__force fmode_t)(1 << 13))
/* File is opened with O_PATH; almost nothing can be done with it */
#define FMODE_PATH ((__force fmode_t)(1 << 14))
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index e5a2096e022e..d350263f23f3 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -220,6 +220,12 @@ static inline u16 ieee80211_sn_sub(u16 sn1, u16 sn2)
#define IEEE80211_MAX_AID_S1G 8191
#define IEEE80211_MAX_TIM_LEN 251
#define IEEE80211_MAX_MESH_PEERINGS 63
+
+/* S1G encoding types */
+#define IEEE80211_S1G_TIM_ENC_MODE_BLOCK 0
+#define IEEE80211_S1G_TIM_ENC_MODE_SINGLE 1
+#define IEEE80211_S1G_TIM_ENC_MODE_OLB 2
+
/* Maximum size for the MA-UNITDATA primitive, 802.11 standard section
6.2.1.1.2.
@@ -4757,15 +4763,8 @@ static inline unsigned long ieee80211_tu_to_usec(unsigned long tu)
return 1024 * tu;
}
-/**
- * ieee80211_check_tim - check if AID bit is set in TIM
- * @tim: the TIM IE
- * @tim_len: length of the TIM IE
- * @aid: the AID to look for
- * Return: whether or not traffic is indicated in the TIM for the given AID
- */
-static inline bool ieee80211_check_tim(const struct ieee80211_tim_ie *tim,
- u8 tim_len, u16 aid)
+static inline bool __ieee80211_check_tim(const struct ieee80211_tim_ie *tim,
+ u8 tim_len, u16 aid)
{
u8 mask;
u8 index, indexn1, indexn2;
@@ -4788,6 +4787,254 @@ static inline bool ieee80211_check_tim(const struct ieee80211_tim_ie *tim,
return !!(tim->virtual_map[index] & mask);
}
+struct s1g_tim_aid {
+ u16 aid;
+ u8 target_blk; /* Target block index */
+ u8 target_subblk; /* Target subblock index */
+ u8 target_subblk_bit; /* Target subblock bit */
+};
+
+struct s1g_tim_enc_block {
+ u8 enc_mode;
+ bool inverse;
+ const u8 *ptr;
+ u8 len;
+
+ /*
+ * For an OLB encoded block that spans multiple blocks, this
+ * is the offset into the span described by that encoded block.
+ */
+ u8 olb_blk_offset;
+};
+
+/*
+ * Helper routines to quickly extract the length of an encoded block. Validation
+ * is also performed to ensure the length extracted lies within the TIM.
+ */
+
+static inline int ieee80211_s1g_len_bitmap(const u8 *ptr, const u8 *end)
+{
+ u8 blkmap;
+ u8 n_subblks;
+
+ if (ptr >= end)
+ return -EINVAL;
+
+ blkmap = *ptr;
+ n_subblks = hweight8(blkmap);
+
+ if (ptr + 1 + n_subblks > end)
+ return -EINVAL;
+
+ return 1 + n_subblks;
+}
+
+static inline int ieee80211_s1g_len_single(const u8 *ptr, const u8 *end)
+{
+ return (ptr + 1 > end) ? -EINVAL : 1;
+}
+
+static inline int ieee80211_s1g_len_olb(const u8 *ptr, const u8 *end)
+{
+ if (ptr >= end)
+ return -EINVAL;
+
+ return (ptr + 1 + *ptr > end) ? -EINVAL : 1 + *ptr;
+}
+
+/*
+ * Enumerate all encoded blocks until we find the encoded block that describes
+ * our target AID. OLB is a special case, as a single encoded block can
+ * describe multiple blocks.
+ */
+static inline int ieee80211_s1g_find_target_block(struct s1g_tim_enc_block *enc,
+ const struct s1g_tim_aid *aid,
+ const u8 *ptr, const u8 *end)
+{
+ /* need at least block-control octet */
+ while (ptr + 1 <= end) {
+ u8 ctrl = *ptr++;
+ u8 mode = ctrl & 0x03;
+ bool contains, inverse = ctrl & BIT(2);
+ u8 span, blk_off = ctrl >> 3;
+ int len;
+
+ switch (mode) {
+ case IEEE80211_S1G_TIM_ENC_MODE_BLOCK:
+ len = ieee80211_s1g_len_bitmap(ptr, end);
+ contains = blk_off == aid->target_blk;
+ break;
+ case IEEE80211_S1G_TIM_ENC_MODE_SINGLE:
+ len = ieee80211_s1g_len_single(ptr, end);
+ contains = blk_off == aid->target_blk;
+ break;
+ case IEEE80211_S1G_TIM_ENC_MODE_OLB:
+ len = ieee80211_s1g_len_olb(ptr, end);
+ /*
+ * An OLB encoded block can describe more than one
+ * block, meaning an encoded OLB block can span more
+ * than a single block.
+ */
+ if (len > 0) {
+ /* Minus one for the length octet */
+ span = DIV_ROUND_UP(len - 1, 8);
+ /*
+ * Check if our target block lies within the
+ * block span described by this encoded block.
+ */
+ contains = (aid->target_blk >= blk_off) &&
+ (aid->target_blk < blk_off + span);
+ }
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ if (len < 0)
+ return len;
+
+ if (contains) {
+ enc->enc_mode = mode;
+ enc->inverse = inverse;
+ enc->ptr = ptr;
+ enc->len = (u8)len;
+ enc->olb_blk_offset = blk_off;
+ return 0;
+ }
+
+ ptr += len;
+ }
+
+ return -ENOENT;
+}
+
+static inline bool ieee80211_s1g_parse_bitmap(struct s1g_tim_enc_block *enc,
+ struct s1g_tim_aid *aid)
+{
+ const u8 *ptr = enc->ptr;
+ u8 blkmap = *ptr++;
+
+ /*
+ * If our block bitmap does not contain a set bit that corresponds
+ * to our AID, it could mean a variety of things depending on whether
+ * the encoding mode is inverted.
+ *
+ * 1. If inverted, it means the entire subblock is present and hence
+ * our AID has been set.
+ * 2. If not inverted, it means our subblock is not present and hence
+ * it is all zero meaning our AID is not set.
+ */
+ if (!(blkmap & BIT(aid->target_subblk)))
+ return enc->inverse;
+
+ /*
+ * Increment ptr by the number of set subblocks that appear before our
+ * target subblock. If our target subblock is 0, do nothing as ptr
+ * already points to our target subblock.
+ */
+ if (aid->target_subblk)
+ ptr += hweight8(blkmap & GENMASK(aid->target_subblk - 1, 0));
+
+ return !!(*ptr & BIT(aid->target_subblk_bit)) ^ enc->inverse;
+}
+
+static inline bool ieee80211_s1g_parse_single(struct s1g_tim_enc_block *enc,
+ struct s1g_tim_aid *aid)
+{
+ /*
+ * Single AID mode describes, as the name suggests, a single AID
+ * within the block described by the encoded block. The octet
+ * contains the 6 LSBs of the AID described in the block. The other
+ * 2 bits are reserved. When inverted, every AID described
+ * by the current block has buffered traffic except for the AID
+ * described in the single AID octet.
+ */
+ return ((*enc->ptr & 0x3f) == (aid->aid & 0x3f)) ^ enc->inverse;
+}
+
+static inline bool ieee80211_s1g_parse_olb(struct s1g_tim_enc_block *enc,
+ struct s1g_tim_aid *aid)
+{
+ const u8 *ptr = enc->ptr;
+ u8 blk_len = *ptr++;
+ /*
+ * Given an OLB encoded block that describes multiple blocks,
+ * calculate the offset into the span. Then calculate the
+ * subblock location normally.
+ */
+ u16 span_offset = aid->target_blk - enc->olb_blk_offset;
+ u16 subblk_idx = span_offset * 8 + aid->target_subblk;
+
+ if (subblk_idx >= blk_len)
+ return enc->inverse;
+
+ return !!(ptr[subblk_idx] & BIT(aid->target_subblk_bit)) ^ enc->inverse;
+}
+
+/*
+ * An S1G PVB has 3 non-optional encoding types, each of which can be inverted.
+ * An S1G PVB is constructed with zero or more encoded block subfields. Each
+ * encoded block represents a single "block" of AIDs (64), and each encoded
+ * block can contain one of the 3 encoding types alongside a single bit for
+ * whether the bits should be inverted.
+ *
+ * As the standard makes no guarantee about the ordering of encoded blocks,
+ * we must, in the worst case, parse every encoded block when the target
+ * AID lies within the last block.
+ */
+static inline bool ieee80211_s1g_check_tim(const struct ieee80211_tim_ie *tim,
+ u8 tim_len, u16 aid)
+{
+ int err;
+ struct s1g_tim_aid target_aid;
+ struct s1g_tim_enc_block enc_blk;
+
+ if (tim_len < 3)
+ return false;
+
+ target_aid.aid = aid;
+ target_aid.target_blk = (aid >> 6) & 0x1f;
+ target_aid.target_subblk = (aid >> 3) & 0x7;
+ target_aid.target_subblk_bit = aid & 0x7;
+
+ /*
+ * Find our AID's target encoded block and fill &enc_blk with the
+ * encoded block's information. If no entry is found or an error
+ * occurs, return false.
+ */
+ err = ieee80211_s1g_find_target_block(&enc_blk, &target_aid,
+ tim->virtual_map,
+ (const u8 *)tim + tim_len + 2);
+ if (err)
+ return false;
+
+ switch (enc_blk.enc_mode) {
+ case IEEE80211_S1G_TIM_ENC_MODE_BLOCK:
+ return ieee80211_s1g_parse_bitmap(&enc_blk, &target_aid);
+ case IEEE80211_S1G_TIM_ENC_MODE_SINGLE:
+ return ieee80211_s1g_parse_single(&enc_blk, &target_aid);
+ case IEEE80211_S1G_TIM_ENC_MODE_OLB:
+ return ieee80211_s1g_parse_olb(&enc_blk, &target_aid);
+ default:
+ return false;
+ }
+}
+
+/**
+ * ieee80211_check_tim - check if AID bit is set in TIM
+ * @tim: the TIM IE
+ * @tim_len: length of the TIM IE
+ * @aid: the AID to look for
+ * @s1g: whether the TIM is from an S1G PPDU
+ * Return: whether or not traffic is indicated in the TIM for the given AID
+ */
+static inline bool ieee80211_check_tim(const struct ieee80211_tim_ie *tim,
+ u8 tim_len, u16 aid, bool s1g)
+{
+ return s1g ? ieee80211_s1g_check_tim(tim, tim_len, aid) :
+ __ieee80211_check_tim(tim, tim_len, aid);
+}
+
/**
* ieee80211_get_tdls_action - get TDLS action code
* @skb: the skb containing the frame, length will not be checked
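
The decomposition in ieee80211_s1g_check_tim() treats the AID as three packed fields: a block index (bits 6-10), a subblock index (bits 3-5) and a bit position within the subblock (bits 0-2). A standalone arithmetic check of that split (plain userspace C, purely illustrative):

#include <stdio.h>

int main(void)
{
	unsigned int aid = 677;			/* arbitrary example AID */
	unsigned int blk = (aid >> 6) & 0x1f;	/* 64-AID block        -> 10 */
	unsigned int subblk = (aid >> 3) & 0x7;	/* 8-AID subblock      -> 4  */
	unsigned int bit = aid & 0x7;		/* bit inside subblock -> 5  */

	/* Recombining the three fields gives back the low bits of the AID. */
	printf("aid %u -> blk %u, subblk %u, bit %u (recombined %u)\n",
	       aid, blk, subblk, bit, (blk << 6) | (subblk << 3) | bit);
	return 0;
}
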
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 890011071f2b..fe5ce9215821 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -562,7 +562,7 @@ static inline void kasan_init_hw_tags(void) { }
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
void kasan_populate_early_vm_area_shadow(void *start, unsigned long size);
-int kasan_populate_vmalloc(unsigned long addr, unsigned long size);
+int kasan_populate_vmalloc(unsigned long addr, unsigned long size, gfp_t gfp_mask);
void kasan_release_vmalloc(unsigned long start, unsigned long end,
unsigned long free_region_start,
unsigned long free_region_end,
@@ -574,7 +574,7 @@ static inline void kasan_populate_early_vm_area_shadow(void *start,
unsigned long size)
{ }
static inline int kasan_populate_vmalloc(unsigned long start,
- unsigned long size)
+ unsigned long size, gfp_t gfp_mask)
{
return 0;
}
@@ -610,7 +610,7 @@ static __always_inline void kasan_poison_vmalloc(const void *start,
static inline void kasan_populate_early_vm_area_shadow(void *start,
unsigned long size) { }
static inline int kasan_populate_vmalloc(unsigned long start,
- unsigned long size)
+ unsigned long size, gfp_t gfp_mask)
{
return 0;
}
diff --git a/include/linux/mmc/sdio_ids.h b/include/linux/mmc/sdio_ids.h
index fe3d6d98f8da..673cbdf43453 100644
--- a/include/linux/mmc/sdio_ids.h
+++ b/include/linux/mmc/sdio_ids.h
@@ -77,7 +77,7 @@
#define SDIO_DEVICE_ID_BROADCOM_43439 0xa9af
#define SDIO_DEVICE_ID_BROADCOM_43455 0xa9bf
#define SDIO_DEVICE_ID_BROADCOM_43751 0xaae7
-#define SDIO_DEVICE_ID_BROADCOM_CYPRESS_43752 0xaae8
+#define SDIO_DEVICE_ID_BROADCOM_43752 0xaae8
#define SDIO_VENDOR_ID_CYPRESS 0x04b4
#define SDIO_DEVICE_ID_BROADCOM_CYPRESS_43439 0xbd3d
diff --git a/include/linux/phy_fixed.h b/include/linux/phy_fixed.h
index 6227a1bdefec..d17ff750c708 100644
--- a/include/linux/phy_fixed.h
+++ b/include/linux/phy_fixed.h
@@ -37,16 +37,6 @@ fixed_phy_register(const struct fixed_phy_status *status,
static inline void fixed_phy_unregister(struct phy_device *phydev)
{
}
-static inline int fixed_phy_set_link_update(struct phy_device *phydev,
- int (*link_update)(struct net_device *,
- struct fixed_phy_status *))
-{
- return -ENODEV;
-}
-static inline int fixed_phy_change_carrier(struct net_device *dev, bool new_carrier)
-{
- return -EINVAL;
-}
#endif /* CONFIG_FIXED_PHY */
#endif /* __PHY_FIXED_H */
diff --git a/include/linux/timekeeper_internal.h b/include/linux/timekeeper_internal.h
index c27aac67cb3f..b8ae89ea28ab 100644
--- a/include/linux/timekeeper_internal.h
+++ b/include/linux/timekeeper_internal.h
@@ -76,6 +76,7 @@ struct tk_read_base {
* @cs_was_changed_seq: The sequence number of clocksource change events
* @clock_valid: Indicator for valid clock
* @monotonic_to_boot: CLOCK_MONOTONIC to CLOCK_BOOTTIME offset
+ * @monotonic_to_aux: CLOCK_MONOTONIC to CLOCK_AUX offset
* @cycle_interval: Number of clock cycles in one NTP interval
* @xtime_interval: Number of clock shifted nano seconds in one NTP
* interval.
@@ -117,6 +118,9 @@ struct tk_read_base {
* @offs_aux is used by the auxiliary timekeepers which do not utilize any
* of the regular timekeeper offset fields.
*
+ * @monotonic_to_aux is a timespec64 representation of @offs_aux to
+ * accelerate the VDSO update for CLOCK_AUX.
+ *
* The cacheline ordering of the structure is optimized for in kernel usage of
* the ktime_get() and ktime_get_ts64() family of time accessors. Struct
* timekeeper is prepended in the core timekeeping code with a sequence count,
@@ -159,7 +163,10 @@ struct timekeeper {
u8 cs_was_changed_seq;
u8 clock_valid;
- struct timespec64 monotonic_to_boot;
+ union {
+ struct timespec64 monotonic_to_boot;
+ struct timespec64 monotonic_to_aux;
+ };
u64 cycle_interval;
u64 xtime_interval;
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 406626ff6cc8..4072a67c9cc9 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -841,9 +841,12 @@ struct cfg80211_bitrate_mask {
u8 ht_mcs[IEEE80211_HT_MCS_MASK_LEN];
u16 vht_mcs[NL80211_VHT_NSS_MAX];
u16 he_mcs[NL80211_HE_NSS_MAX];
+ u16 eht_mcs[NL80211_EHT_NSS_MAX];
enum nl80211_txrate_gi gi;
enum nl80211_he_gi he_gi;
+ enum nl80211_eht_gi eht_gi;
enum nl80211_he_ltf he_ltf;
+ enum nl80211_eht_ltf eht_ltf;
} control[NUM_NL80211_BANDS];
};
@@ -2457,6 +2460,29 @@ struct mpath_info {
};
/**
+ * enum wiphy_bss_param_flags - bit positions for supported bss parameters.
+ *
+ * @WIPHY_BSS_PARAM_CTS_PROT: support changing CTS protection.
+ * @WIPHY_BSS_PARAM_SHORT_PREAMBLE: support changing short preamble usage.
+ * @WIPHY_BSS_PARAM_SHORT_SLOT_TIME: support changing short slot time usage.
+ * @WIPHY_BSS_PARAM_BASIC_RATES: support reconfiguring basic rates.
+ * @WIPHY_BSS_PARAM_AP_ISOLATE: support changing AP isolation.
+ * @WIPHY_BSS_PARAM_HT_OPMODE: support changing HT operating mode.
+ * @WIPHY_BSS_PARAM_P2P_CTWINDOW: support reconfiguring ctwindow.
+ * @WIPHY_BSS_PARAM_P2P_OPPPS: support changing P2P opportunistic power-save.
+ */
+enum wiphy_bss_param_flags {
+ WIPHY_BSS_PARAM_CTS_PROT = BIT(0),
+ WIPHY_BSS_PARAM_SHORT_PREAMBLE = BIT(1),
+ WIPHY_BSS_PARAM_SHORT_SLOT_TIME = BIT(2),
+ WIPHY_BSS_PARAM_BASIC_RATES = BIT(3),
+ WIPHY_BSS_PARAM_AP_ISOLATE = BIT(4),
+ WIPHY_BSS_PARAM_HT_OPMODE = BIT(5),
+ WIPHY_BSS_PARAM_P2P_CTWINDOW = BIT(6),
+ WIPHY_BSS_PARAM_P2P_OPPPS = BIT(7),
+};
+
+/**
* struct bss_parameters - BSS parameters
*
* Used to change BSS parameters (mainly for AP mode).
@@ -5782,6 +5808,11 @@ struct wiphy_radio {
* and probe responses. This value should be set if the driver
* wishes to limit the number of csa counters. Default (0) means
* infinite.
+ * @bss_param_support: bitmask indicating which bss_parameters as defined in
+ * &struct bss_parameters the driver can actually handle in the
+ * .change_bss() callback. The bit positions are defined in &enum
+ * wiphy_bss_param_flags.
+ *
* @bss_select_support: bitmask indicating the BSS selection criteria supported
* by the driver in the .connect() callback. The bit position maps to the
* attribute indices defined in &enum nl80211_bss_select_attr.
@@ -5967,6 +5998,7 @@ struct wiphy {
u8 max_num_csa_counters;
+ u32 bss_param_support;
u32 bss_select_support;
u8 nan_supported_bands;
@@ -9548,7 +9580,7 @@ int cfg80211_iter_combinations(struct wiphy *wiphy,
* @wiphy: the wiphy
* @chan: channel for which the supported radio index is required
*
- * Return: radio index on success or a negative error code
+ * Return: radio index on success or -EINVAL otherwise
*/
int cfg80211_get_radio_idx_by_chan(struct wiphy *wiphy,
const struct ieee80211_channel *chan);
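
A hedged sketch of the driver side of the new capability mask: a driver whose .change_bss() only handles CTS protection and basic rates would advertise just those bits before wiphy registration (the function name is illustrative):

static void my_driver_init_wiphy(struct wiphy *wiphy)
{
	/* Only advertise what .change_bss() actually implements. */
	wiphy->bss_param_support = WIPHY_BSS_PARAM_CTS_PROT |
				   WIPHY_BSS_PARAM_BASIC_RATES;
}
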
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index e2128663b160..fab7dc73f738 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -1914,7 +1914,6 @@ struct nftables_pernet {
struct mutex commit_mutex;
u64 table_handle;
u64 tstamp;
- unsigned int base_seq;
unsigned int gc_seq;
u8 validate_state;
struct work_struct destroy_work;
diff --git a/include/net/netfilter/nf_tables_core.h b/include/net/netfilter/nf_tables_core.h
index 7644cfe9267d..b8df5acbb723 100644
--- a/include/net/netfilter/nf_tables_core.h
+++ b/include/net/netfilter/nf_tables_core.h
@@ -109,17 +109,11 @@ nft_hash_lookup_fast(const struct net *net, const struct nft_set *set,
const struct nft_set_ext *
nft_hash_lookup(const struct net *net, const struct nft_set *set,
const u32 *key);
+#endif
+
const struct nft_set_ext *
nft_set_do_lookup(const struct net *net, const struct nft_set *set,
const u32 *key);
-#else
-static inline const struct nft_set_ext *
-nft_set_do_lookup(const struct net *net, const struct nft_set *set,
- const u32 *key)
-{
- return set->ops->lookup(net, set, key);
-}
-#endif
/* called from nft_pipapo_avx2.c */
const struct nft_set_ext *
diff --git a/include/net/netns/nftables.h b/include/net/netns/nftables.h
index cc8060c017d5..99dd166c5d07 100644
--- a/include/net/netns/nftables.h
+++ b/include/net/netns/nftables.h
@@ -3,6 +3,7 @@
#define _NETNS_NFTABLES_H_
struct netns_nftables {
+ unsigned int base_seq;
u8 gencursor;
};
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 0fb7923b8367..277914c4d067 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -1941,6 +1941,7 @@ tcp_md5_do_lookup_any_l3index(const struct sock *sk,
}
#define tcp_twsk_md5_key(twsk) ((twsk)->tw_md5_key)
+void tcp_md5_destruct_sock(struct sock *sk);
#else
static inline struct tcp_md5sig_key *
tcp_md5_do_lookup(const struct sock *sk, int l3index,
@@ -1957,6 +1958,9 @@ tcp_md5_do_lookup_any_l3index(const struct sock *sk,
}
#define tcp_twsk_md5_key(twsk) NULL
+static inline void tcp_md5_destruct_sock(struct sock *sk)
+{
+}
#endif
int tcp_md5_alloc_sigpool(void);
diff --git a/include/net/tcp_ao.h b/include/net/tcp_ao.h
index df655ce6987d..1e9e27d6e06b 100644
--- a/include/net/tcp_ao.h
+++ b/include/net/tcp_ao.h
@@ -130,7 +130,6 @@ struct tcp_ao_info {
u32 snd_sne;
u32 rcv_sne;
refcount_t refcnt; /* Protects twsk destruction */
- struct rcu_head rcu;
};
#ifdef CONFIG_TCP_MD5SIG
diff --git a/include/pcmcia/ss.h b/include/pcmcia/ss.h
index 7cf7dbbfa131..89aed99bfeae 100644
--- a/include/pcmcia/ss.h
+++ b/include/pcmcia/ss.h
@@ -227,12 +227,8 @@ struct pcmcia_socket {
/* socket drivers must define the resource operations type they use. There
- * are three options:
+ * are two options:
* - pccard_static_ops iomem and ioport areas are assigned statically
- * - pccard_iodyn_ops iomem areas is assigned statically, ioport
- * areas dynamically
- * If this option is selected, use
- * "select PCCARD_IODYN" in Kconfig.
* - pccard_nonstatic_ops iomem and ioport areas are assigned dynamically.
* If this option is selected, use
* "select PCCARD_NONSTATIC" in Kconfig.
@@ -240,13 +236,11 @@ struct pcmcia_socket {
*/
extern struct pccard_resource_ops pccard_static_ops;
#if defined(CONFIG_PCMCIA) || defined(CONFIG_PCMCIA_MODULE)
-extern struct pccard_resource_ops pccard_iodyn_ops;
extern struct pccard_resource_ops pccard_nonstatic_ops;
#else
/* If PCMCIA is not used, but only CARDBUS, these functions are not used
* at all. Therefore, do not use the large (240K!) rsrc_nonstatic module
*/
-#define pccard_iodyn_ops pccard_static_ops
#define pccard_nonstatic_ops pccard_static_ops
#endif
diff --git a/include/uapi/linux/if_bridge.h b/include/uapi/linux/if_bridge.h
index 73876c0e2bba..e52f8207ab27 100644
--- a/include/uapi/linux/if_bridge.h
+++ b/include/uapi/linux/if_bridge.h
@@ -823,6 +823,8 @@ struct br_mcast_stats {
/* bridge boolean options
* BR_BOOLOPT_NO_LL_LEARN - disable learning from link-local packets
* BR_BOOLOPT_MCAST_VLAN_SNOOPING - control vlan multicast snooping
+ * BR_BOOLOPT_FDB_LOCAL_VLAN_0 - local FDB entries installed by the bridge
+ * driver itself should only be added on VLAN 0
*
* IMPORTANT: if adding a new option do not forget to handle
* it in br_boolopt_toggle/get and bridge sysfs
@@ -832,6 +834,7 @@ enum br_boolopt_id {
BR_BOOLOPT_MCAST_VLAN_SNOOPING,
BR_BOOLOPT_MST_ENABLE,
BR_BOOLOPT_MDB_OFFLOAD_FAIL_NOTIFICATION,
+ BR_BOOLOPT_FDB_LOCAL_VLAN_0,
BR_BOOLOPT_MAX
};
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index d1a14f2892d9..aed0b4c5d5e8 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -1943,8 +1943,9 @@ enum nl80211_commands {
* The driver must also specify support for this with the extended
* features NL80211_EXT_FEATURE_BEACON_RATE_LEGACY,
* NL80211_EXT_FEATURE_BEACON_RATE_HT,
- * NL80211_EXT_FEATURE_BEACON_RATE_VHT and
- * NL80211_EXT_FEATURE_BEACON_RATE_HE.
+ * NL80211_EXT_FEATURE_BEACON_RATE_VHT,
+ * NL80211_EXT_FEATURE_BEACON_RATE_HE and
+ * NL80211_EXT_FEATURE_BEACON_RATE_EHT.
*
* @NL80211_ATTR_FRAME_MATCH: A binary attribute which typically must contain
* at least one byte, currently used with @NL80211_CMD_REGISTER_FRAME.
@@ -2283,7 +2284,8 @@ enum nl80211_commands {
* @NL80211_ATTR_PEER_AID: Association ID for the peer TDLS station (u16).
* This is similar to @NL80211_ATTR_STA_AID but with a difference of being
* allowed to be used with the first @NL80211_CMD_SET_STATION command to
- * update a TDLS peer STA entry.
+ * update a TDLS peer STA entry. For S1G interfaces, this is limited to
+ * 1600 for the current mac80211 implementation.
*
* @NL80211_ATTR_COALESCE_RULE: Coalesce rule information.
*
@@ -2928,6 +2930,12 @@ enum nl80211_commands {
* required alongside this attribute. Refer to
* @enum nl80211_s1g_short_beacon_attrs for the attribute definitions.
*
+ * @NL80211_ATTR_BSS_PARAM: nested attribute used with %NL80211_CMD_GET_WIPHY
+ * which indicates which BSS parameters can be modified. The attribute can
+ * also be used as a flag attribute by user-space in %NL80211_CMD_SET_BSS to
+ * indicate that it wants strict checking on the BSS parameters to be
+ * modified.
+ *
* @NUM_NL80211_ATTR: total number of nl80211_attrs available
* @NL80211_ATTR_MAX: highest attribute number currently defined
* @__NL80211_ATTR_AFTER_LAST: internal use
@@ -3489,6 +3497,7 @@ enum nl80211_attrs {
NL80211_ATTR_S1G_LONG_BEACON_PERIOD,
NL80211_ATTR_S1G_SHORT_BEACON,
+ NL80211_ATTR_BSS_PARAM,
/* add attributes here, update the policy in nl80211.c */
@@ -3736,6 +3745,22 @@ enum nl80211_eht_gi {
};
/**
+ * enum nl80211_eht_ltf - EHT long training field
+ * @NL80211_RATE_INFO_EHT_1XLTF: 3.2 usec
+ * @NL80211_RATE_INFO_EHT_2XLTF: 6.4 usec
+ * @NL80211_RATE_INFO_EHT_4XLTF: 12.8 usec
+ * @NL80211_RATE_INFO_EHT_6XLTF: 19.2 usec
+ * @NL80211_RATE_INFO_EHT_8XLTF: 25.6 usec
+ */
+enum nl80211_eht_ltf {
+ NL80211_RATE_INFO_EHT_1XLTF,
+ NL80211_RATE_INFO_EHT_2XLTF,
+ NL80211_RATE_INFO_EHT_4XLTF,
+ NL80211_RATE_INFO_EHT_6XLTF,
+ NL80211_RATE_INFO_EHT_8XLTF,
+};
+
+/**
* enum nl80211_eht_ru_alloc - EHT RU allocation values
* @NL80211_RATE_INFO_EHT_RU_ALLOC_26: 26-tone RU allocation
* @NL80211_RATE_INFO_EHT_RU_ALLOC_52: 52-tone RU allocation
@@ -5481,6 +5506,10 @@ enum nl80211_key_attributes {
* see &struct nl80211_txrate_he
* @NL80211_TXRATE_HE_GI: configure HE GI, 0.8us, 1.6us and 3.2us.
* @NL80211_TXRATE_HE_LTF: configure HE LTF, 1XLTF, 2XLTF and 4XLTF.
+ * @NL80211_TXRATE_EHT: EHT rates allowed for TX rate selection,
+ * see &struct nl80211_txrate_eht
+ * @NL80211_TXRATE_EHT_GI: configure EHT GI, (u8, see &enum nl80211_eht_gi)
+ * @NL80211_TXRATE_EHT_LTF: configure EHT LTF, (u8, see &enum nl80211_eht_ltf)
* @__NL80211_TXRATE_AFTER_LAST: internal
* @NL80211_TXRATE_MAX: highest TX rate attribute
*/
@@ -5493,6 +5522,9 @@ enum nl80211_tx_rate_attributes {
NL80211_TXRATE_HE,
NL80211_TXRATE_HE_GI,
NL80211_TXRATE_HE_LTF,
+ NL80211_TXRATE_EHT,
+ NL80211_TXRATE_EHT_GI,
+ NL80211_TXRATE_EHT_LTF,
/* keep last */
__NL80211_TXRATE_AFTER_LAST,
@@ -5525,6 +5557,15 @@ enum nl80211_txrate_gi {
NL80211_TXRATE_FORCE_LGI,
};
+#define NL80211_EHT_NSS_MAX 16
+/**
+ * struct nl80211_txrate_eht - EHT MCS/NSS txrate bitmap
+ * @mcs: MCS bitmap table for each NSS (array index 0 for 1 stream, etc.)
+ */
+struct nl80211_txrate_eht {
+ __u16 mcs[NL80211_EHT_NSS_MAX];
+};
+
/**
* enum nl80211_band - Frequency band
* @NL80211_BAND_2GHZ: 2.4 GHz ISM band
@@ -6649,6 +6690,9 @@ enum nl80211_feature_flags {
* (signaling and payload protected) A-MSDUs and this shall be advertised
* in the RSNXE.
*
+ * @NL80211_EXT_FEATURE_BEACON_RATE_EHT: Driver supports beacon rate
+ * configuration (AP/mesh) with EHT rates.
+ *
* @NUM_NL80211_EXT_FEATURES: number of extended features.
* @MAX_NL80211_EXT_FEATURES: highest extended feature index.
*/
@@ -6724,6 +6768,7 @@ enum nl80211_ext_feature_index {
NL80211_EXT_FEATURE_OWE_OFFLOAD_AP,
NL80211_EXT_FEATURE_DFS_CONCURRENT,
NL80211_EXT_FEATURE_SPP_AMSDU_SUPPORT,
+ NL80211_EXT_FEATURE_BEACON_RATE_EHT,
/* add new features before the definition below */
NUM_NL80211_EXT_FEATURES,
diff --git a/init/Kconfig b/init/Kconfig
index d811cad02a75..e3eb63eadc87 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -146,6 +146,9 @@ config RUSTC_HAS_UNNECESSARY_TRANSMUTES
config RUSTC_HAS_FILE_WITH_NUL
def_bool RUSTC_VERSION >= 108900
+config RUSTC_HAS_FILE_AS_C_STR
+ def_bool RUSTC_VERSION >= 109100
+
config PAHOLE_VERSION
int
default $(shell,$(srctree)/scripts/pahole-version.sh $(PAHOLE))
diff --git a/init/main.c b/init/main.c
index 0ee0ee7b7c2c..5753e9539ae6 100644
--- a/init/main.c
+++ b/init/main.c
@@ -956,6 +956,7 @@ void start_kernel(void)
sort_main_extable();
trap_init();
mm_core_init();
+ maple_tree_init();
poking_init();
ftrace_init();
@@ -973,7 +974,6 @@ void start_kernel(void)
"Interrupts were enabled *very* early, fixing it\n"))
local_irq_disable();
radix_tree_init();
- maple_tree_init();
/*
* Set up housekeeping before setting up workqueues to allow the unbound
diff --git a/io_uring/rw.c b/io_uring/rw.c
index 52a5b950b2e5..af5a54b5db12 100644
--- a/io_uring/rw.c
+++ b/io_uring/rw.c
@@ -886,6 +886,9 @@ static int io_rw_init_file(struct io_kiocb *req, fmode_t mode, int rw_type)
if (req->flags & REQ_F_HAS_METADATA) {
struct io_async_rw *io = req->async_data;
+ if (!(file->f_mode & FMODE_HAS_METADATA))
+ return -EINVAL;
+
/*
* We have a union of meta fields with wpq used for buffered-io
* in io_async_rw, so fail it here.
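
For the new check in io_rw_init_file() to pass, the opener of the file has to set the FMODE_HAS_METADATA bit introduced above. A hypothetical driver-side sketch, assuming a my_supports_integrity() capability helper that is not part of this series:

static int my_open(struct inode *inode, struct file *filp)
{
	if (my_supports_integrity(inode))
		filp->f_mode |= FMODE_HAS_METADATA;	/* allow IOCB_HAS_METADATA I/O */
	return 0;
}
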
diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
index e3f42018ed46..f7708fe2c457 100644
--- a/kernel/auditfilter.c
+++ b/kernel/auditfilter.c
@@ -1326,7 +1326,7 @@ int audit_compare_dname_path(const struct qstr *dname, const char *path, int par
/* handle trailing slashes */
pathlen -= parentlen;
- while (p[pathlen - 1] == '/')
+ while (pathlen > 0 && p[pathlen - 1] == '/')
pathlen--;
if (pathlen != dlen)
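
The extra pathlen > 0 test above guards against a path that, past the parent component, is nothing but slashes: without it the loop would read p[-1]. A tiny userspace illustration of the fixed loop (not the audit code itself):

#include <stdio.h>

int main(void)
{
	const char *p = "///";		/* what is left after the parent part */
	int pathlen = 3;

	while (pathlen > 0 && p[pathlen - 1] == '/')
		pathlen--;

	printf("length after stripping trailing slashes: %d\n", pathlen); /* 0 */
	return 0;
}
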
diff --git a/kernel/bpf/Makefile b/kernel/bpf/Makefile
index 269c04a24664..f6cf8c2af5f7 100644
--- a/kernel/bpf/Makefile
+++ b/kernel/bpf/Makefile
@@ -62,3 +62,4 @@ CFLAGS_REMOVE_bpf_lru_list.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_queue_stack_maps.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_lpm_trie.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_ringbuf.o = $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_rqspinlock.o = $(CC_FLAGS_FTRACE)
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 5d1650af899d..e4568d44e827 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -2366,8 +2366,7 @@ static unsigned int __bpf_prog_ret0_warn(const void *ctx,
const struct bpf_insn *insn)
{
/* If this handler ever gets executed, then BPF_JIT_ALWAYS_ON
- * is not working properly, or interpreter is being used when
- * prog->jit_requested is not 0, so warn about it!
+ * is not working properly, so warn about it!
*/
WARN_ON_ONCE(1);
return 0;
@@ -2468,8 +2467,9 @@ out:
return ret;
}
-static void bpf_prog_select_func(struct bpf_prog *fp)
+static bool bpf_prog_select_interpreter(struct bpf_prog *fp)
{
+ bool select_interpreter = false;
#ifndef CONFIG_BPF_JIT_ALWAYS_ON
u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);
u32 idx = (round_up(stack_depth, 32) / 32) - 1;
@@ -2478,15 +2478,16 @@ static void bpf_prog_select_func(struct bpf_prog *fp)
* But for non-JITed programs, we don't need bpf_func, so no bounds
* check needed.
*/
- if (!fp->jit_requested &&
- !WARN_ON_ONCE(idx >= ARRAY_SIZE(interpreters))) {
+ if (idx < ARRAY_SIZE(interpreters)) {
fp->bpf_func = interpreters[idx];
+ select_interpreter = true;
} else {
fp->bpf_func = __bpf_prog_ret0_warn;
}
#else
fp->bpf_func = __bpf_prog_ret0_warn;
#endif
+ return select_interpreter;
}
/**
@@ -2505,7 +2506,7 @@ struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
/* In case of BPF to BPF calls, verifier did all the prep
* work with regards to JITing, etc.
*/
- bool jit_needed = fp->jit_requested;
+ bool jit_needed = false;
if (fp->bpf_func)
goto finalize;
@@ -2514,7 +2515,8 @@ struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
bpf_prog_has_kfunc_call(fp))
jit_needed = true;
- bpf_prog_select_func(fp);
+ if (!bpf_prog_select_interpreter(fp))
+ jit_needed = true;
/* eBPF JITs can rewrite the program in case constant
* blinding is active. However, in case of error during
@@ -3024,7 +3026,10 @@ EXPORT_SYMBOL_GPL(bpf_event_output);
/* Always built-in helper functions. */
const struct bpf_func_proto bpf_tail_call_proto = {
- .func = NULL,
+ /* func is unused for tail_call; set it to BPF_PTR_POISON so it passes
+ * the get_helper_proto() check
+ */
+ .func = BPF_PTR_POISON,
.gpl_only = false,
.ret_type = RET_VOID,
.arg1_type = ARG_PTR_TO_CTX,
diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c
index b2b7b8ec2c2a..c46360b27871 100644
--- a/kernel/bpf/cpumap.c
+++ b/kernel/bpf/cpumap.c
@@ -186,7 +186,6 @@ static int cpu_map_bpf_prog_run_xdp(struct bpf_cpu_map_entry *rcpu,
struct xdp_buff xdp;
int i, nframes = 0;
- xdp_set_return_frame_no_direct();
xdp.rxq = &rxq;
for (i = 0; i < n; i++) {
@@ -231,7 +230,6 @@ static int cpu_map_bpf_prog_run_xdp(struct bpf_cpu_map_entry *rcpu,
}
}
- xdp_clear_return_frame_no_direct();
stats->pass += nframes;
return nframes;
@@ -255,6 +253,7 @@ static void cpu_map_bpf_prog_run(struct bpf_cpu_map_entry *rcpu, void **frames,
rcu_read_lock();
bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
+ xdp_set_return_frame_no_direct();
ret->xdp_n = cpu_map_bpf_prog_run_xdp(rcpu, frames, ret->xdp_n, stats);
if (unlikely(ret->skb_n))
@@ -264,6 +263,7 @@ static void cpu_map_bpf_prog_run(struct bpf_cpu_map_entry *rcpu, void **frames,
if (stats->redirect)
xdp_do_flush();
+ xdp_clear_return_frame_no_direct();
bpf_net_ctx_clear(bpf_net_ctx);
rcu_read_unlock();
diff --git a/kernel/bpf/crypto.c b/kernel/bpf/crypto.c
index 94854cd9c4cc..83c4d9943084 100644
--- a/kernel/bpf/crypto.c
+++ b/kernel/bpf/crypto.c
@@ -278,7 +278,7 @@ static int bpf_crypto_crypt(const struct bpf_crypto_ctx *ctx,
siv_len = siv ? __bpf_dynptr_size(siv) : 0;
src_len = __bpf_dynptr_size(src);
dst_len = __bpf_dynptr_size(dst);
- if (!src_len || !dst_len)
+ if (!src_len || !dst_len || src_len > dst_len)
return -EINVAL;
if (siv_len != ctx->siv_len)
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index cdffd74ddbe6..67d49059aebb 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -1274,8 +1274,11 @@ static int __bpf_async_init(struct bpf_async_kern *async, struct bpf_map *map, u
goto out;
}
- /* allocate hrtimer via map_kmalloc to use memcg accounting */
- cb = bpf_map_kmalloc_node(map, size, GFP_ATOMIC, map->numa_node);
+ /* Allocate via bpf_map_kmalloc_node() for memcg accounting. Until
+ * kmalloc_nolock() is available, avoid locking issues by using
+ * __GFP_HIGH (GFP_ATOMIC & ~__GFP_RECLAIM).
+ */
+ cb = bpf_map_kmalloc_node(map, size, __GFP_HIGH, map->numa_node);
if (!cb) {
ret = -ENOMEM;
goto out;
@@ -3675,10 +3678,17 @@ __bpf_kfunc int bpf_strnstr(const char *s1__ign, const char *s2__ign, size_t len
guard(pagefault)();
for (i = 0; i < XATTR_SIZE_MAX; i++) {
- for (j = 0; i + j < len && j < XATTR_SIZE_MAX; j++) {
+ for (j = 0; i + j <= len && j < XATTR_SIZE_MAX; j++) {
__get_kernel_nofault(&c2, s2__ign + j, char, err_out);
if (c2 == '\0')
return i;
+ /*
+ * We allow reading an extra byte from s2 (note the
+ * `i + j <= len` above) to cover the case when s2 is
+ * a suffix of the first len chars of s1.
+ */
+ if (i + j == len)
+ break;
__get_kernel_nofault(&c1, s1__ign + j, char, err_out);
if (c1 == '\0')
return -ENOENT;
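
The off-by-one being fixed above is easiest to see with a suffix match: with len == 3, searching for "bc" in "abcdef" has to read s2[2] (the NUL) at position i + j == len to report success. A plain userspace model of the corrected loop bounds (it mirrors the shape of the helper, not its exact BPF implementation):

#include <stdio.h>

/* Return the match offset of s2 within the first len chars of s1, or -1. */
static int strnstr_index(const char *s1, const char *s2, size_t len)
{
	for (size_t i = 0; i <= len; i++) {
		for (size_t j = 0; i + j <= len; j++) {
			if (s2[j] == '\0')
				return (int)i;	/* all of s2 matched */
			if (i + j == len)
				break;		/* window exhausted, s2 continues */
			if (s1[i + j] != s2[j])
				break;
		}
	}
	return -1;
}

int main(void)
{
	printf("%d\n", strnstr_index("abcdef", "bc", 3));	/* prints 1 */
	printf("%d\n", strnstr_index("abcdef", "cd", 3));	/* prints -1 */
	return 0;
}
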
diff --git a/kernel/bpf/rqspinlock.c b/kernel/bpf/rqspinlock.c
index 5ab354d55d82..a00561b1d3e5 100644
--- a/kernel/bpf/rqspinlock.c
+++ b/kernel/bpf/rqspinlock.c
@@ -471,7 +471,7 @@ queue:
* any MCS node. This is not the most elegant solution, but is
* simple enough.
*/
- if (unlikely(idx >= _Q_MAX_NODES)) {
+ if (unlikely(idx >= _Q_MAX_NODES || in_nmi())) {
lockevent_inc(lock_no_node);
RES_RESET_TIMEOUT(ts, RES_DEF_TIMEOUT);
while (!queued_spin_trylock(lock)) {
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 5964bed40ffb..27f0bc28ee12 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -8552,6 +8552,10 @@ static int process_timer_func(struct bpf_verifier_env *env, int regno,
verifier_bug(env, "Two map pointers in a timer helper");
return -EFAULT;
}
+ if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
+ verbose(env, "bpf_timer cannot be used for PREEMPT_RT.\n");
+ return -EOPNOTSUPP;
+ }
meta->map_uid = reg->map_uid;
meta->map_ptr = map;
return 0;
@@ -11359,7 +11363,7 @@ static int get_helper_proto(struct bpf_verifier_env *env, int func_id,
return -EINVAL;
*ptr = env->ops->get_func_proto(func_id, env->prog);
- return *ptr ? 0 : -EINVAL;
+ return *ptr && (*ptr)->func ? 0 : -EINVAL;
}
static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
diff --git a/kernel/dma/debug.c b/kernel/dma/debug.c
index e43c6de2bce4..b82399437db0 100644
--- a/kernel/dma/debug.c
+++ b/kernel/dma/debug.c
@@ -39,6 +39,7 @@ enum {
dma_debug_sg,
dma_debug_coherent,
dma_debug_resource,
+ dma_debug_noncoherent,
};
enum map_err_types {
@@ -141,6 +142,7 @@ static const char *type2name[] = {
[dma_debug_sg] = "scatter-gather",
[dma_debug_coherent] = "coherent",
[dma_debug_resource] = "resource",
+ [dma_debug_noncoherent] = "noncoherent",
};
static const char *dir2name[] = {
@@ -993,7 +995,8 @@ static void check_unmap(struct dma_debug_entry *ref)
"[mapped as %s] [unmapped as %s]\n",
ref->dev_addr, ref->size,
type2name[entry->type], type2name[ref->type]);
- } else if (entry->type == dma_debug_coherent &&
+ } else if ((entry->type == dma_debug_coherent ||
+ entry->type == dma_debug_noncoherent) &&
ref->paddr != entry->paddr) {
err_printk(ref->dev, entry, "device driver frees "
"DMA memory with different CPU address "
@@ -1581,6 +1584,49 @@ void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
}
}
+void debug_dma_alloc_pages(struct device *dev, struct page *page,
+ size_t size, int direction,
+ dma_addr_t dma_addr,
+ unsigned long attrs)
+{
+ struct dma_debug_entry *entry;
+
+ if (unlikely(dma_debug_disabled()))
+ return;
+
+ entry = dma_entry_alloc();
+ if (!entry)
+ return;
+
+ entry->type = dma_debug_noncoherent;
+ entry->dev = dev;
+ entry->paddr = page_to_phys(page);
+ entry->size = size;
+ entry->dev_addr = dma_addr;
+ entry->direction = direction;
+
+ add_dma_entry(entry, attrs);
+}
+
+void debug_dma_free_pages(struct device *dev, struct page *page,
+ size_t size, int direction,
+ dma_addr_t dma_addr)
+{
+ struct dma_debug_entry ref = {
+ .type = dma_debug_noncoherent,
+ .dev = dev,
+ .paddr = page_to_phys(page),
+ .dev_addr = dma_addr,
+ .size = size,
+ .direction = direction,
+ };
+
+ if (unlikely(dma_debug_disabled()))
+ return;
+
+ check_unmap(&ref);
+}
+
static int __init dma_debug_driver_setup(char *str)
{
int i;
diff --git a/kernel/dma/debug.h b/kernel/dma/debug.h
index f525197d3cae..48757ca13f31 100644
--- a/kernel/dma/debug.h
+++ b/kernel/dma/debug.h
@@ -54,6 +54,13 @@ extern void debug_dma_sync_sg_for_cpu(struct device *dev,
extern void debug_dma_sync_sg_for_device(struct device *dev,
struct scatterlist *sg,
int nelems, int direction);
+extern void debug_dma_alloc_pages(struct device *dev, struct page *page,
+ size_t size, int direction,
+ dma_addr_t dma_addr,
+ unsigned long attrs);
+extern void debug_dma_free_pages(struct device *dev, struct page *page,
+ size_t size, int direction,
+ dma_addr_t dma_addr);
#else /* CONFIG_DMA_API_DEBUG */
static inline void debug_dma_map_page(struct device *dev, struct page *page,
size_t offset, size_t size,
@@ -126,5 +133,18 @@ static inline void debug_dma_sync_sg_for_device(struct device *dev,
int nelems, int direction)
{
}
+
+static inline void debug_dma_alloc_pages(struct device *dev, struct page *page,
+ size_t size, int direction,
+ dma_addr_t dma_addr,
+ unsigned long attrs)
+{
+}
+
+static inline void debug_dma_free_pages(struct device *dev, struct page *page,
+ size_t size, int direction,
+ dma_addr_t dma_addr)
+{
+}
#endif /* CONFIG_DMA_API_DEBUG */
#endif /* _KERNEL_DMA_DEBUG_H */
diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c
index 107e4a4d251d..56de28a3b179 100644
--- a/kernel/dma/mapping.c
+++ b/kernel/dma/mapping.c
@@ -712,7 +712,7 @@ struct page *dma_alloc_pages(struct device *dev, size_t size,
if (page) {
trace_dma_alloc_pages(dev, page_to_virt(page), *dma_handle,
size, dir, gfp, 0);
- debug_dma_map_page(dev, page, 0, size, dir, *dma_handle, 0);
+ debug_dma_alloc_pages(dev, page, size, dir, *dma_handle, 0);
} else {
trace_dma_alloc_pages(dev, NULL, 0, size, dir, gfp, 0);
}
@@ -738,7 +738,7 @@ void dma_free_pages(struct device *dev, size_t size, struct page *page,
dma_addr_t dma_handle, enum dma_data_direction dir)
{
trace_dma_free_pages(dev, page_to_virt(page), dma_handle, size, dir, 0);
- debug_dma_unmap_page(dev, dma_handle, size, dir);
+ debug_dma_free_pages(dev, page, size, dir, dma_handle);
__dma_free_pages(dev, size, page, dma_handle, dir);
}
EXPORT_SYMBOL_GPL(dma_free_pages);
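
The two mapping.c hunks above move dma_alloc_pages()/dma_free_pages() onto the new dma_debug_noncoherent bookkeeping, so a free with a mismatched page, size or handle is reported against the allocation entry. A minimal sketch of a hypothetical driver exercising that path (the example_ name and the DMA_FROM_DEVICE choice are illustrative, not from the patch):

#include <linux/dma-mapping.h>
#include <linux/device.h>

/* Hypothetical helper: allocate a streaming, non-coherent buffer and
 * release it again. With CONFIG_DMA_API_DEBUG the allocation is now
 * recorded as a "noncoherent" entry and checked on dma_free_pages().
 */
static int example_noncoherent_roundtrip(struct device *dev, size_t size)
{
	dma_addr_t dma;
	struct page *page;

	page = dma_alloc_pages(dev, size, &dma, DMA_FROM_DEVICE, GFP_KERNEL);
	if (!page)
		return -ENOMEM;

	/* Device writes into the buffer; sync before the CPU reads it. */
	dma_sync_single_for_cpu(dev, dma, size, DMA_FROM_DEVICE);

	/* Page, size and dma address must match the allocation above. */
	dma_free_pages(dev, size, page, dma, DMA_FROM_DEVICE);
	return 0;
}
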
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 872122e074e5..820127536e62 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -10330,6 +10330,7 @@ static int __perf_event_overflow(struct perf_event *event,
ret = 1;
event->pending_kill = POLL_HUP;
perf_event_disable_inatomic(event);
+ event->pmu->stop(event, 0);
}
if (event->attr.sigtrap) {
diff --git a/kernel/fork.c b/kernel/fork.c
index af673856499d..c4ada32598bd 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -689,7 +689,6 @@ void __mmdrop(struct mm_struct *mm)
mm_pasid_drop(mm);
mm_destroy_cid(mm);
percpu_counter_destroy_many(mm->rss_stat, NR_MM_COUNTERS);
- futex_hash_free(mm);
free_mm(mm);
}
@@ -1138,6 +1137,7 @@ static inline void __mmput(struct mm_struct *mm)
if (mm->binfmt)
module_put(mm->binfmt->module);
lru_gen_del_mm(mm);
+ futex_hash_free(mm);
mmdrop(mm);
}
diff --git a/kernel/futex/core.c b/kernel/futex/core.c
index d9bb5567af0c..125804fbb5cb 100644
--- a/kernel/futex/core.c
+++ b/kernel/futex/core.c
@@ -1722,12 +1722,9 @@ int futex_mm_init(struct mm_struct *mm)
RCU_INIT_POINTER(mm->futex_phash, NULL);
mm->futex_phash_new = NULL;
/* futex-ref */
+ mm->futex_ref = NULL;
atomic_long_set(&mm->futex_atomic, 0);
mm->futex_batches = get_state_synchronize_rcu();
- mm->futex_ref = alloc_percpu(unsigned int);
- if (!mm->futex_ref)
- return -ENOMEM;
- this_cpu_inc(*mm->futex_ref); /* 0 -> 1 */
return 0;
}
@@ -1801,6 +1798,17 @@ static int futex_hash_allocate(unsigned int hash_slots, unsigned int flags)
}
}
+ if (!mm->futex_ref) {
+ /*
+ * This will always be allocated by the first thread and
+ * therefore requires no locking.
+ */
+ mm->futex_ref = alloc_percpu(unsigned int);
+ if (!mm->futex_ref)
+ return -ENOMEM;
+ this_cpu_inc(*mm->futex_ref); /* 0 -> 1 */
+ }
+
fph = kvzalloc(struct_size(fph, queues, hash_slots),
GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
if (!fph)
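
The futex hunks above make mm->futex_ref lazily allocated: futex_mm_init() only clears the pointer, and the per-CPU counter is created in futex_hash_allocate(), which the new comment notes is first reached while the process is still single-threaded. A rough sketch of that lazy-init shape, with example_state standing in for mm_struct:

#include <linux/percpu.h>
#include <linux/errno.h>

struct example_state {
	unsigned int __percpu *ref;
};

/* The first caller is guaranteed to be single-threaded, so the NULL
 * check needs no locking; later callers see the pointer already set.
 */
static int example_get_ref(struct example_state *st)
{
	if (!st->ref) {
		st->ref = alloc_percpu(unsigned int);
		if (!st->ref)
			return -ENOMEM;
		this_cpu_inc(*st->ref);	/* 0 -> 1, matching the hunk above */
	}
	return 0;
}
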
diff --git a/kernel/power/energy_model.c b/kernel/power/energy_model.c
index ea7995a25780..8df55397414a 100644
--- a/kernel/power/energy_model.c
+++ b/kernel/power/energy_model.c
@@ -553,6 +553,30 @@ int em_dev_register_perf_domain(struct device *dev, unsigned int nr_states,
const struct em_data_callback *cb,
const cpumask_t *cpus, bool microwatts)
{
+ int ret = em_dev_register_pd_no_update(dev, nr_states, cb, cpus, microwatts);
+
+ if (_is_cpu_device(dev))
+ em_check_capacity_update();
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(em_dev_register_perf_domain);
+
+/**
+ * em_dev_register_pd_no_update() - Register a perf domain for a device
+ * @dev : Device to register the PD for
+ * @nr_states : Number of performance states in the new PD
+ * @cb : Callback functions for populating the energy model
+ * @cpus : CPUs to include in the new PD (mandatory if @dev is a CPU device)
+ * @microwatts : Whether or not the power values in the EM will be in uW
+ *
+ * Like em_dev_register_perf_domain(), but does not trigger a CPU capacity
+ * update after registering the PD, even if @dev is a CPU device.
+ */
+int em_dev_register_pd_no_update(struct device *dev, unsigned int nr_states,
+ const struct em_data_callback *cb,
+ const cpumask_t *cpus, bool microwatts)
+{
struct em_perf_table *em_table;
unsigned long cap, prev_cap = 0;
unsigned long flags = 0;
@@ -636,12 +660,9 @@ int em_dev_register_perf_domain(struct device *dev, unsigned int nr_states,
unlock:
mutex_unlock(&em_pd_mutex);
- if (_is_cpu_device(dev))
- em_check_capacity_update();
-
return ret;
}
-EXPORT_SYMBOL_GPL(em_dev_register_perf_domain);
+EXPORT_SYMBOL_GPL(em_dev_register_pd_no_update);
/**
* em_dev_unregister_perf_domain() - Unregister Energy Model (EM) for a device
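
After the split above, em_dev_register_perf_domain() behaves as before while em_dev_register_pd_no_update() registers a perf domain without scheduling the CPU capacity update. A hedged usage sketch; the callback table and the state count are placeholders a real driver would fill in:

#include <linux/energy_model.h>
#include <linux/device.h>
#include <linux/cpumask.h>

static struct em_data_callback example_em_cb;	/* .active_power etc. omitted */

static int example_register_quietly(struct device *cpu_dev,
				    const struct cpumask *cpus)
{
	/* Same arguments as em_dev_register_perf_domain(); the capacity
	 * update is left for the caller to trigger once all domains exist.
	 */
	return em_dev_register_pd_no_update(cpu_dev, 4, &example_em_cb,
					    cpus, true);
}
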
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index 1f1f30cca573..2f66ab453823 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -449,6 +449,7 @@ int hibernation_snapshot(int platform_mode)
shrink_shmem_memory();
console_suspend_all();
+ pm_restrict_gfp_mask();
error = dpm_suspend(PMSG_FREEZE);
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 059fa8b79be6..b6974fce800c 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -83,6 +83,12 @@ static inline bool tk_is_aux(const struct timekeeper *tk)
}
#endif
+static inline void tk_update_aux_offs(struct timekeeper *tk, ktime_t offs)
+{
+ tk->offs_aux = offs;
+ tk->monotonic_to_aux = ktime_to_timespec64(offs);
+}
+
/* flag for if timekeeping is suspended */
int __read_mostly timekeeping_suspended;
@@ -1506,7 +1512,7 @@ static int __timekeeping_inject_offset(struct tk_data *tkd, const struct timespe
timekeeping_restore_shadow(tkd);
return -EINVAL;
}
- tks->offs_aux = offs;
+ tk_update_aux_offs(tks, offs);
}
timekeeping_update_from_shadow(tkd, TK_UPDATE_ALL);
@@ -2937,7 +2943,7 @@ static int aux_clock_set(const clockid_t id, const struct timespec64 *tnew)
* xtime ("realtime") is not applicable for auxiliary clocks and
* kept in sync with "monotonic".
*/
- aux_tks->offs_aux = ktime_sub(timespec64_to_ktime(*tnew), tnow);
+ tk_update_aux_offs(aux_tks, ktime_sub(timespec64_to_ktime(*tnew), tnow));
timekeeping_update_from_shadow(aux_tkd, TK_UPDATE_ALL);
return 0;
diff --git a/kernel/time/vsyscall.c b/kernel/time/vsyscall.c
index 8ba8b0d8a387..aa59919b8f2c 100644
--- a/kernel/time/vsyscall.c
+++ b/kernel/time/vsyscall.c
@@ -159,10 +159,10 @@ void vdso_time_update_aux(struct timekeeper *tk)
if (clock_mode != VDSO_CLOCKMODE_NONE) {
fill_clock_configuration(vc, &tk->tkr_mono);
- vdso_ts->sec = tk->xtime_sec;
+ vdso_ts->sec = tk->xtime_sec + tk->monotonic_to_aux.tv_sec;
nsec = tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift;
- nsec += tk->offs_aux;
+ nsec += tk->monotonic_to_aux.tv_nsec;
vdso_ts->sec += __iter_div_u64_rem(nsec, NSEC_PER_SEC, &nsec);
nsec = nsec << tk->tkr_mono.shift;
vdso_ts->nsec = nsec;
diff --git a/kernel/trace/fgraph.c b/kernel/trace/fgraph.c
index 2a42c1036ea8..1e3b32b1e82c 100644
--- a/kernel/trace/fgraph.c
+++ b/kernel/trace/fgraph.c
@@ -1397,7 +1397,8 @@ error:
ftrace_graph_active--;
gops->saved_func = NULL;
fgraph_lru_release_index(i);
- unregister_pm_notifier(&ftrace_suspend_notifier);
+ if (!ftrace_graph_active)
+ unregister_pm_notifier(&ftrace_suspend_notifier);
}
return ret;
}
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 1b7db732c0b1..b3c94fbaf002 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -834,7 +834,10 @@ int trace_pid_write(struct trace_pid_list *filtered_pids,
/* copy the current bits to the new max */
ret = trace_pid_list_first(filtered_pids, &pid);
while (!ret) {
- trace_pid_list_set(pid_list, pid);
+ ret = trace_pid_list_set(pid_list, pid);
+ if (ret < 0)
+ goto out;
+
ret = trace_pid_list_next(filtered_pids, pid + 1, &pid);
nr_pids++;
}
@@ -871,6 +874,7 @@ int trace_pid_write(struct trace_pid_list *filtered_pids,
trace_parser_clear(&parser);
ret = 0;
}
+ out:
trace_parser_put(&parser);
if (ret < 0) {
@@ -7209,7 +7213,7 @@ static ssize_t write_marker_to_buffer(struct trace_array *tr, const char __user
entry = ring_buffer_event_data(event);
entry->ip = ip;
- len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
+ len = copy_from_user_nofault(&entry->buf, ubuf, cnt);
if (len) {
memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
cnt = FAULTED_SIZE;
@@ -7306,7 +7310,7 @@ static ssize_t write_raw_marker_to_buffer(struct trace_array *tr,
entry = ring_buffer_event_data(event);
- len = __copy_from_user_inatomic(&entry->id, ubuf, cnt);
+ len = copy_from_user_nofault(&entry->id, ubuf, cnt);
if (len) {
entry->id = -1;
memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
diff --git a/kernel/trace/trace_events_user.c b/kernel/trace/trace_events_user.c
index af42aaa3d172..2ab283fd3032 100644
--- a/kernel/trace/trace_events_user.c
+++ b/kernel/trace/trace_events_user.c
@@ -496,7 +496,7 @@ static bool user_event_enabler_queue_fault(struct user_event_mm *mm,
{
struct user_event_enabler_fault *fault;
- fault = kmem_cache_zalloc(fault_cache, GFP_NOWAIT | __GFP_NOWARN);
+ fault = kmem_cache_zalloc(fault_cache, GFP_NOWAIT);
if (!fault)
return false;
diff --git a/kernel/trace/trace_osnoise.c b/kernel/trace/trace_osnoise.c
index fd259da0aa64..337bc0eb5d71 100644
--- a/kernel/trace/trace_osnoise.c
+++ b/kernel/trace/trace_osnoise.c
@@ -2322,6 +2322,9 @@ osnoise_cpus_write(struct file *filp, const char __user *ubuf, size_t count,
int running, err;
char *buf __free(kfree) = NULL;
+ if (count < 1)
+ return 0;
+
buf = kmalloc(count, GFP_KERNEL);
if (!buf)
return -ENOMEM;
diff --git a/mm/damon/core.c b/mm/damon/core.c
index 106ee8b0f2d5..c2e0b469fd43 100644
--- a/mm/damon/core.c
+++ b/mm/damon/core.c
@@ -2111,6 +2111,10 @@ static void damos_adjust_quota(struct damon_ctx *c, struct damos *s)
if (!quota->ms && !quota->sz && list_empty(&quota->goals))
return;
+ /* First charge window */
+ if (!quota->total_charged_sz && !quota->charged_from)
+ quota->charged_from = jiffies;
+
/* New charge window starts */
if (time_after_eq(jiffies, quota->charged_from +
msecs_to_jiffies(quota->reset_interval))) {
diff --git a/mm/damon/lru_sort.c b/mm/damon/lru_sort.c
index 151a9de5ad8b..b5a5ed16a7a5 100644
--- a/mm/damon/lru_sort.c
+++ b/mm/damon/lru_sort.c
@@ -198,6 +198,11 @@ static int damon_lru_sort_apply_parameters(void)
if (err)
return err;
+ if (!damon_lru_sort_mon_attrs.sample_interval) {
+ err = -EINVAL;
+ goto out;
+ }
+
err = damon_set_attrs(ctx, &damon_lru_sort_mon_attrs);
if (err)
goto out;
diff --git a/mm/damon/reclaim.c b/mm/damon/reclaim.c
index 3c71b4596676..fb7c982a0018 100644
--- a/mm/damon/reclaim.c
+++ b/mm/damon/reclaim.c
@@ -194,6 +194,11 @@ static int damon_reclaim_apply_parameters(void)
if (err)
return err;
+ if (!damon_reclaim_mon_attrs.aggr_interval) {
+ err = -EINVAL;
+ goto out;
+ }
+
err = damon_set_attrs(param_ctx, &damon_reclaim_mon_attrs);
if (err)
goto out;
diff --git a/mm/damon/sysfs.c b/mm/damon/sysfs.c
index 6d2b0dab50cb..7b9254cadd5f 100644
--- a/mm/damon/sysfs.c
+++ b/mm/damon/sysfs.c
@@ -1260,14 +1260,18 @@ static ssize_t state_show(struct kobject *kobj, struct kobj_attribute *attr,
{
struct damon_sysfs_kdamond *kdamond = container_of(kobj,
struct damon_sysfs_kdamond, kobj);
- struct damon_ctx *ctx = kdamond->damon_ctx;
- bool running;
+ struct damon_ctx *ctx;
+ bool running = false;
- if (!ctx)
- running = false;
- else
+ if (!mutex_trylock(&damon_sysfs_lock))
+ return -EBUSY;
+
+ ctx = kdamond->damon_ctx;
+ if (ctx)
running = damon_is_running(ctx);
+ mutex_unlock(&damon_sysfs_lock);
+
return sysfs_emit(buf, "%s\n", running ?
damon_sysfs_cmd_strs[DAMON_SYSFS_CMD_ON] :
damon_sysfs_cmd_strs[DAMON_SYSFS_CMD_OFF]);
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 753f99b4c718..eed59cfb5d21 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -5851,7 +5851,7 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
spinlock_t *ptl;
struct hstate *h = hstate_vma(vma);
unsigned long sz = huge_page_size(h);
- bool adjust_reservation = false;
+ bool adjust_reservation;
unsigned long last_addr_mask;
bool force_flush = false;
@@ -5944,6 +5944,7 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
sz);
hugetlb_count_sub(pages_per_huge_page(h), mm);
hugetlb_remove_rmap(folio);
+ spin_unlock(ptl);
/*
* Restore the reservation for anonymous page, otherwise the
@@ -5951,14 +5952,16 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
* If there we are freeing a surplus, do not set the restore
* reservation bit.
*/
+ adjust_reservation = false;
+
+ spin_lock_irq(&hugetlb_lock);
if (!h->surplus_huge_pages && __vma_private_lock(vma) &&
folio_test_anon(folio)) {
folio_set_hugetlb_restore_reserve(folio);
/* Reservation to be adjusted after the spin lock */
adjust_reservation = true;
}
-
- spin_unlock(ptl);
+ spin_unlock_irq(&hugetlb_lock);
/*
* Adjust the reservation for the region that will have the
diff --git a/mm/kasan/shadow.c b/mm/kasan/shadow.c
index e2ceebf737ef..11d472a5c4e8 100644
--- a/mm/kasan/shadow.c
+++ b/mm/kasan/shadow.c
@@ -336,13 +336,13 @@ static void ___free_pages_bulk(struct page **pages, int nr_pages)
}
}
-static int ___alloc_pages_bulk(struct page **pages, int nr_pages)
+static int ___alloc_pages_bulk(struct page **pages, int nr_pages, gfp_t gfp_mask)
{
unsigned long nr_populated, nr_total = nr_pages;
struct page **page_array = pages;
while (nr_pages) {
- nr_populated = alloc_pages_bulk(GFP_KERNEL, nr_pages, pages);
+ nr_populated = alloc_pages_bulk(gfp_mask, nr_pages, pages);
if (!nr_populated) {
___free_pages_bulk(page_array, nr_total - nr_pages);
return -ENOMEM;
@@ -354,25 +354,42 @@ static int ___alloc_pages_bulk(struct page **pages, int nr_pages)
return 0;
}
-static int __kasan_populate_vmalloc(unsigned long start, unsigned long end)
+static int __kasan_populate_vmalloc(unsigned long start, unsigned long end, gfp_t gfp_mask)
{
unsigned long nr_pages, nr_total = PFN_UP(end - start);
struct vmalloc_populate_data data;
+ unsigned int flags;
int ret = 0;
- data.pages = (struct page **)__get_free_page(GFP_KERNEL | __GFP_ZERO);
+ data.pages = (struct page **)__get_free_page(gfp_mask | __GFP_ZERO);
if (!data.pages)
return -ENOMEM;
while (nr_total) {
nr_pages = min(nr_total, PAGE_SIZE / sizeof(data.pages[0]));
- ret = ___alloc_pages_bulk(data.pages, nr_pages);
+ ret = ___alloc_pages_bulk(data.pages, nr_pages, gfp_mask);
if (ret)
break;
data.start = start;
+
+ /*
+ * page table allocations ignore the external gfp mask; enforce it
+ * by the scope API
+ */
+ if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO)
+ flags = memalloc_nofs_save();
+ else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0)
+ flags = memalloc_noio_save();
+
ret = apply_to_page_range(&init_mm, start, nr_pages * PAGE_SIZE,
kasan_populate_vmalloc_pte, &data);
+
+ if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO)
+ memalloc_nofs_restore(flags);
+ else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0)
+ memalloc_noio_restore(flags);
+
___free_pages_bulk(data.pages, nr_pages);
if (ret)
break;
@@ -386,7 +403,7 @@ static int __kasan_populate_vmalloc(unsigned long start, unsigned long end)
return ret;
}
-int kasan_populate_vmalloc(unsigned long addr, unsigned long size)
+int kasan_populate_vmalloc(unsigned long addr, unsigned long size, gfp_t gfp_mask)
{
unsigned long shadow_start, shadow_end;
int ret;
@@ -415,7 +432,7 @@ int kasan_populate_vmalloc(unsigned long addr, unsigned long size)
shadow_start = PAGE_ALIGN_DOWN(shadow_start);
shadow_end = PAGE_ALIGN(shadow_end);
- ret = __kasan_populate_vmalloc(shadow_start, shadow_end);
+ ret = __kasan_populate_vmalloc(shadow_start, shadow_end, gfp_mask);
if (ret)
return ret;
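
The new comment in the hunk above states the general rule: page table allocations inside apply_to_page_range() ignore the caller's gfp mask, so the restriction has to be imposed with the memalloc scope API for the whole section. A generic sketch of that pattern, where example_inner() is a placeholder for any helper that allocates internally:

#include <linux/sched/mm.h>
#include <linux/gfp.h>

static int example_inner(void)
{
	return 0;	/* stands in for code that allocates with GFP_KERNEL */
}

static int example_restricted_section(gfp_t gfp_mask)
{
	unsigned int flags = 0;
	bool nofs = (gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO;
	bool noio = (gfp_mask & (__GFP_FS | __GFP_IO)) == 0;
	int ret;

	if (nofs)
		flags = memalloc_nofs_save();	/* GFP_NOFS context */
	else if (noio)
		flags = memalloc_noio_save();	/* GFP_NOIO context */

	ret = example_inner();

	if (nofs)
		memalloc_nofs_restore(flags);
	else if (noio)
		memalloc_noio_restore(flags);

	return ret;
}
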
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 6b40bdfd224c..b486c1d19b2d 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1417,8 +1417,8 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
*/
if (cc->is_khugepaged &&
(pte_young(pteval) || folio_test_young(folio) ||
- folio_test_referenced(folio) || mmu_notifier_test_young(vma->vm_mm,
- address)))
+ folio_test_referenced(folio) ||
+ mmu_notifier_test_young(vma->vm_mm, _address)))
referenced++;
}
if (!writable) {
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index fc30ca4804bf..df6ee59527dd 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -956,7 +956,7 @@ static const char * const action_page_types[] = {
[MF_MSG_BUDDY] = "free buddy page",
[MF_MSG_DAX] = "dax page",
[MF_MSG_UNSPLIT_THP] = "unsplit thp",
- [MF_MSG_ALREADY_POISONED] = "already poisoned",
+ [MF_MSG_ALREADY_POISONED] = "already poisoned page",
[MF_MSG_UNKNOWN] = "unknown page",
};
@@ -1349,9 +1349,10 @@ static int action_result(unsigned long pfn, enum mf_action_page_type type,
{
trace_memory_failure_event(pfn, type, result);
- num_poisoned_pages_inc(pfn);
-
- update_per_node_mf_stats(pfn, result);
+ if (type != MF_MSG_ALREADY_POISONED) {
+ num_poisoned_pages_inc(pfn);
+ update_per_node_mf_stats(pfn, result);
+ }
pr_err("%#lx: recovery action for %s: %s\n",
pfn, action_page_types[type], action_name[result]);
@@ -2094,12 +2095,11 @@ retry:
*hugetlb = 0;
return 0;
} else if (res == -EHWPOISON) {
- pr_err("%#lx: already hardware poisoned\n", pfn);
if (flags & MF_ACTION_REQUIRED) {
folio = page_folio(p);
res = kill_accessing_process(current, folio_pfn(folio), flags);
- action_result(pfn, MF_MSG_ALREADY_POISONED, MF_FAILED);
}
+ action_result(pfn, MF_MSG_ALREADY_POISONED, MF_FAILED);
return res;
} else if (res == -EBUSY) {
if (!(flags & MF_NO_RETRY)) {
@@ -2285,7 +2285,6 @@ try_again:
goto unlock_mutex;
if (TestSetPageHWPoison(p)) {
- pr_err("%#lx: already hardware poisoned\n", pfn);
res = -EHWPOISON;
if (flags & MF_ACTION_REQUIRED)
res = kill_accessing_process(current, pfn, flags);
@@ -2569,10 +2568,9 @@ int unpoison_memory(unsigned long pfn)
static DEFINE_RATELIMIT_STATE(unpoison_rs, DEFAULT_RATELIMIT_INTERVAL,
DEFAULT_RATELIMIT_BURST);
- if (!pfn_valid(pfn))
- return -ENXIO;
-
- p = pfn_to_page(pfn);
+ p = pfn_to_online_page(pfn);
+ if (!p)
+ return -EIO;
folio = page_folio(p);
mutex_lock(&mf_mutex);
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 1f15af712bc3..74318c787715 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1815,8 +1815,14 @@ static void do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
pfn = folio_pfn(folio) + folio_nr_pages(folio) - 1;
if (folio_contain_hwpoisoned_page(folio)) {
- if (WARN_ON(folio_test_lru(folio)))
- folio_isolate_lru(folio);
+ /*
+ * unmap_poisoned_folio() cannot handle large folios
+ * in all cases yet.
+ */
+ if (folio_test_large(folio) && !folio_test_hugetlb(folio))
+ goto put_folio;
+ if (folio_test_lru(folio) && !folio_isolate_lru(folio))
+ goto put_folio;
if (folio_mapped(folio)) {
folio_lock(folio);
unmap_poisoned_folio(folio, pfn, false);
diff --git a/mm/mremap.c b/mm/mremap.c
index e618a706aff5..35de0a7b910e 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -1774,15 +1774,18 @@ static unsigned long check_mremap_params(struct vma_remap_struct *vrm)
if (!vrm->new_len)
return -EINVAL;
- /* Is the new length or address silly? */
- if (vrm->new_len > TASK_SIZE ||
- vrm->new_addr > TASK_SIZE - vrm->new_len)
+ /* Is the new length silly? */
+ if (vrm->new_len > TASK_SIZE)
return -EINVAL;
/* Remainder of checks are for cases with specific new_addr. */
if (!vrm_implies_new_addr(vrm))
return 0;
+ /* Is the new address silly? */
+ if (vrm->new_addr > TASK_SIZE - vrm->new_len)
+ return -EINVAL;
+
/* The new address must be page-aligned. */
if (offset_in_page(vrm->new_addr))
return -EINVAL;
diff --git a/mm/percpu.c b/mm/percpu.c
index a56f35dcc417..81462ce5866e 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1734,7 +1734,7 @@ void __percpu *pcpu_alloc_noprof(size_t size, size_t align, bool reserved,
bool is_atomic;
bool do_warn;
struct obj_cgroup *objcg = NULL;
- static int warn_limit = 10;
+ static atomic_t warn_limit = ATOMIC_INIT(10);
struct pcpu_chunk *chunk, *next;
const char *err;
int slot, off, cpu, ret;
@@ -1904,13 +1904,17 @@ fail_unlock:
fail:
trace_percpu_alloc_percpu_fail(reserved, is_atomic, size, align);
- if (do_warn && warn_limit) {
- pr_warn("allocation failed, size=%zu align=%zu atomic=%d, %s\n",
- size, align, is_atomic, err);
- if (!is_atomic)
- dump_stack();
- if (!--warn_limit)
- pr_info("limit reached, disable warning\n");
+ if (do_warn) {
+ int remaining = atomic_dec_if_positive(&warn_limit);
+
+ if (remaining >= 0) {
+ pr_warn("allocation failed, size=%zu align=%zu atomic=%d, %s\n",
+ size, align, is_atomic, err);
+ if (!is_atomic)
+ dump_stack();
+ if (remaining == 0)
+ pr_info("limit reached, disable warning\n");
+ }
}
if (is_atomic) {
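
The rewrite above replaces the racy "static int warn_limit" decrement with atomic_dec_if_positive(), which decrements only while the counter is positive and returns the decremented value, so exactly one caller observes zero. The same shape as a stand-alone helper (all example_ names are illustrative):

#include <linux/atomic.h>
#include <linux/printk.h>

static atomic_t example_warn_budget = ATOMIC_INIT(10);

/* Emit at most ten warnings, race-free: only callers that win a
 * decrement (result >= 0) print, and exactly one of them sees 0.
 */
static void example_limited_warn(const char *what)
{
	int remaining = atomic_dec_if_positive(&example_warn_budget);

	if (remaining < 0)
		return;

	pr_warn("example: %s\n", what);
	if (remaining == 0)
		pr_info("example: warning limit reached, disabling\n");
}
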
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 6dbcdceecae1..5edd536ba9d2 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -2026,6 +2026,8 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
if (unlikely(!vmap_initialized))
return ERR_PTR(-EBUSY);
+ /* Only reclaim behaviour flags are relevant. */
+ gfp_mask = gfp_mask & GFP_RECLAIM_MASK;
might_sleep();
/*
@@ -2038,8 +2040,6 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
*/
va = node_alloc(size, align, vstart, vend, &addr, &vn_id);
if (!va) {
- gfp_mask = gfp_mask & GFP_RECLAIM_MASK;
-
va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);
if (unlikely(!va))
return ERR_PTR(-ENOMEM);
@@ -2089,7 +2089,7 @@ retry:
BUG_ON(va->va_start < vstart);
BUG_ON(va->va_end > vend);
- ret = kasan_populate_vmalloc(addr, size);
+ ret = kasan_populate_vmalloc(addr, size, gfp_mask);
if (ret) {
free_vmap_area(va);
return ERR_PTR(ret);
@@ -4826,7 +4826,7 @@ retry:
/* populate the kasan shadow space */
for (area = 0; area < nr_vms; area++) {
- if (kasan_populate_vmalloc(vas[area]->va_start, sizes[area]))
+ if (kasan_populate_vmalloc(vas[area]->va_start, sizes[area], GFP_KERNEL))
goto err_free_shadow;
}
diff --git a/net/bridge/br.c b/net/bridge/br.c
index 1885d0c315f0..512872a2ef81 100644
--- a/net/bridge/br.c
+++ b/net/bridge/br.c
@@ -259,6 +259,23 @@ static struct notifier_block br_switchdev_blocking_notifier = {
.notifier_call = br_switchdev_blocking_event,
};
+static int
+br_toggle_fdb_local_vlan_0(struct net_bridge *br, bool on,
+ struct netlink_ext_ack *extack)
+{
+ int err;
+
+ if (br_opt_get(br, BROPT_FDB_LOCAL_VLAN_0) == on)
+ return 0;
+
+ err = br_fdb_toggle_local_vlan_0(br, on, extack);
+ if (err)
+ return err;
+
+ br_opt_toggle(br, BROPT_FDB_LOCAL_VLAN_0, on);
+ return 0;
+}
+
/* br_boolopt_toggle - change user-controlled boolean option
*
* @br: bridge device
@@ -287,6 +304,9 @@ int br_boolopt_toggle(struct net_bridge *br, enum br_boolopt_id opt, bool on,
case BR_BOOLOPT_MDB_OFFLOAD_FAIL_NOTIFICATION:
br_opt_toggle(br, BROPT_MDB_OFFLOAD_FAIL_NOTIFICATION, on);
break;
+ case BR_BOOLOPT_FDB_LOCAL_VLAN_0:
+ err = br_toggle_fdb_local_vlan_0(br, on, extack);
+ break;
default:
/* shouldn't be called with unsupported options */
WARN_ON(1);
@@ -307,6 +327,8 @@ int br_boolopt_get(const struct net_bridge *br, enum br_boolopt_id opt)
return br_opt_get(br, BROPT_MST_ENABLED);
case BR_BOOLOPT_MDB_OFFLOAD_FAIL_NOTIFICATION:
return br_opt_get(br, BROPT_MDB_OFFLOAD_FAIL_NOTIFICATION);
+ case BR_BOOLOPT_FDB_LOCAL_VLAN_0:
+ return br_opt_get(br, BROPT_FDB_LOCAL_VLAN_0);
default:
/* shouldn't be called with unsupported options */
WARN_ON(1);
@@ -324,6 +346,13 @@ int br_boolopt_multi_toggle(struct net_bridge *br,
int err = 0;
int opt_id;
+ opt_id = find_next_bit(&bitmap, BITS_PER_LONG, BR_BOOLOPT_MAX);
+ if (opt_id != BITS_PER_LONG) {
+ NL_SET_ERR_MSG_FMT_MOD(extack, "Unknown boolean option %d",
+ opt_id);
+ return -EINVAL;
+ }
+
for_each_set_bit(opt_id, &bitmap, BR_BOOLOPT_MAX) {
bool on = !!(bm->optval & BIT(opt_id));
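
The check added to br_boolopt_multi_toggle() above rejects masks that set any bit at or above BR_BOOLOPT_MAX instead of silently ignoring them. The same validation idiom in isolation, with EXAMPLE_OPT_MAX standing in for the subsystem's maximum:

#include <linux/bitops.h>
#include <linux/errno.h>

#define EXAMPLE_OPT_MAX	3	/* first unsupported option bit */

static int example_validate_optmask(unsigned long bitmap)
{
	/* find_next_bit() returns BITS_PER_LONG when no bit at or above
	 * EXAMPLE_OPT_MAX is set; anything else is an unknown option.
	 */
	if (find_next_bit(&bitmap, BITS_PER_LONG, EXAMPLE_OPT_MAX) != BITS_PER_LONG)
		return -EINVAL;
	return 0;
}
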
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index 902694c0ce64..58d22e2b85fc 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -459,6 +459,9 @@ void br_fdb_changeaddr(struct net_bridge_port *p, const unsigned char *newaddr)
struct net_bridge_fdb_entry *f;
struct net_bridge *br = p->br;
struct net_bridge_vlan *v;
+ bool local_vlan_0;
+
+ local_vlan_0 = br_opt_get(br, BROPT_FDB_LOCAL_VLAN_0);
spin_lock_bh(&br->hash_lock);
vg = nbp_vlan_group(p);
@@ -468,11 +471,11 @@ void br_fdb_changeaddr(struct net_bridge_port *p, const unsigned char *newaddr)
/* delete old one */
fdb_delete_local(br, p, f);
- /* if this port has no vlan information
- * configured, we can safely be done at
- * this point.
+ /* if this port has no vlan information configured, or
+ * local entries are only kept on VLAN 0, we can safely
+ * be done at this point.
*/
- if (!vg || !vg->num_vlans)
+ if (!vg || !vg->num_vlans || local_vlan_0)
goto insert;
}
}
@@ -481,7 +484,7 @@ insert:
/* insert new address, may fail if invalid address or dup. */
fdb_add_local(br, p, newaddr, 0);
- if (!vg || !vg->num_vlans)
+ if (!vg || !vg->num_vlans || local_vlan_0)
goto done;
/* Now add entries for every VLAN configured on the port.
@@ -500,6 +503,9 @@ void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr)
struct net_bridge_vlan_group *vg;
struct net_bridge_fdb_entry *f;
struct net_bridge_vlan *v;
+ bool local_vlan_0;
+
+ local_vlan_0 = br_opt_get(br, BROPT_FDB_LOCAL_VLAN_0);
spin_lock_bh(&br->hash_lock);
@@ -511,7 +517,7 @@ void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr)
fdb_add_local(br, NULL, newaddr, 0);
vg = br_vlan_group(br);
- if (!vg || !vg->num_vlans)
+ if (!vg || !vg->num_vlans || local_vlan_0)
goto out;
/* Now remove and add entries for every VLAN configured on the
* bridge. This function runs under RTNL so the bitmap will not
@@ -576,6 +582,102 @@ void br_fdb_cleanup(struct work_struct *work)
mod_delayed_work(system_long_wq, &br->gc_work, work_delay);
}
+static void br_fdb_delete_locals_per_vlan_port(struct net_bridge *br,
+ struct net_bridge_port *p)
+{
+ struct net_bridge_vlan_group *vg;
+ struct net_bridge_vlan *v;
+ struct net_device *dev;
+
+ if (p) {
+ vg = nbp_vlan_group(p);
+ dev = p->dev;
+ } else {
+ vg = br_vlan_group(br);
+ dev = br->dev;
+ }
+
+ list_for_each_entry(v, &vg->vlan_list, vlist)
+ br_fdb_find_delete_local(br, p, dev->dev_addr, v->vid);
+}
+
+static void br_fdb_delete_locals_per_vlan(struct net_bridge *br)
+{
+ struct net_bridge_port *p;
+
+ ASSERT_RTNL();
+
+ list_for_each_entry(p, &br->port_list, list)
+ br_fdb_delete_locals_per_vlan_port(br, p);
+
+ br_fdb_delete_locals_per_vlan_port(br, NULL);
+}
+
+static int br_fdb_insert_locals_per_vlan_port(struct net_bridge *br,
+ struct net_bridge_port *p,
+ struct netlink_ext_ack *extack)
+{
+ struct net_bridge_vlan_group *vg;
+ struct net_bridge_vlan *v;
+ struct net_device *dev;
+ int err;
+
+ if (p) {
+ vg = nbp_vlan_group(p);
+ dev = p->dev;
+ } else {
+ vg = br_vlan_group(br);
+ dev = br->dev;
+ }
+
+ list_for_each_entry(v, &vg->vlan_list, vlist) {
+ if (!br_vlan_should_use(v))
+ continue;
+
+ err = br_fdb_add_local(br, p, dev->dev_addr, v->vid);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int br_fdb_insert_locals_per_vlan(struct net_bridge *br,
+ struct netlink_ext_ack *extack)
+{
+ struct net_bridge_port *p;
+ int err;
+
+ ASSERT_RTNL();
+
+ list_for_each_entry(p, &br->port_list, list) {
+ err = br_fdb_insert_locals_per_vlan_port(br, p, extack);
+ if (err)
+ goto rollback;
+ }
+
+ err = br_fdb_insert_locals_per_vlan_port(br, NULL, extack);
+ if (err)
+ goto rollback;
+
+ return 0;
+
+rollback:
+ NL_SET_ERR_MSG_MOD(extack, "fdb_local_vlan_0 toggle: FDB entry insertion failed");
+ br_fdb_delete_locals_per_vlan(br);
+ return err;
+}
+
+int br_fdb_toggle_local_vlan_0(struct net_bridge *br, bool on,
+ struct netlink_ext_ack *extack)
+{
+ if (!on)
+ return br_fdb_insert_locals_per_vlan(br, extack);
+
+ br_fdb_delete_locals_per_vlan(br);
+ return 0;
+}
+
static bool __fdb_flush_matches(const struct net_bridge *br,
const struct net_bridge_fdb_entry *f,
const struct net_bridge_fdb_flush_desc *desc)
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 5f6ac9bf1527..67b4c905e49a 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -202,6 +202,14 @@ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb
break;
case BR_PKT_UNICAST:
dst = br_fdb_find_rcu(br, eth_hdr(skb)->h_dest, vid);
+ if (unlikely(!dst && vid &&
+ br_opt_get(br, BROPT_FDB_LOCAL_VLAN_0))) {
+ dst = br_fdb_find_rcu(br, eth_hdr(skb)->h_dest, 0);
+ if (dst &&
+ (!test_bit(BR_FDB_LOCAL, &dst->flags) ||
+ test_bit(BR_FDB_ADDED_BY_USER, &dst->flags)))
+ dst = NULL;
+ }
break;
default:
break;
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 8de0904b9627..16be5d250402 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -487,6 +487,7 @@ enum net_bridge_opts {
BROPT_MCAST_VLAN_SNOOPING_ENABLED,
BROPT_MST_ENABLED,
BROPT_MDB_OFFLOAD_FAIL_NOTIFICATION,
+ BROPT_FDB_LOCAL_VLAN_0,
};
struct net_bridge {
@@ -843,6 +844,8 @@ void br_fdb_find_delete_local(struct net_bridge *br,
void br_fdb_changeaddr(struct net_bridge_port *p, const unsigned char *newaddr);
void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr);
void br_fdb_cleanup(struct work_struct *work);
+int br_fdb_toggle_local_vlan_0(struct net_bridge *br, bool on,
+ struct netlink_ext_ack *extack);
void br_fdb_delete_by_port(struct net_bridge *br,
const struct net_bridge_port *p, u16 vid, int do_all);
struct net_bridge_fdb_entry *br_fdb_find_rcu(struct net_bridge *br,
diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
index 939a3aa78d5c..ae911220cb3c 100644
--- a/net/bridge/br_vlan.c
+++ b/net/bridge/br_vlan.c
@@ -331,10 +331,12 @@ static int __vlan_add(struct net_bridge_vlan *v, u16 flags,
/* Add the dev mac and count the vlan only if it's usable */
if (br_vlan_should_use(v)) {
- err = br_fdb_add_local(br, p, dev->dev_addr, v->vid);
- if (err) {
- br_err(br, "failed insert local address into bridge forwarding table\n");
- goto out_filt;
+ if (!br_opt_get(br, BROPT_FDB_LOCAL_VLAN_0)) {
+ err = br_fdb_add_local(br, p, dev->dev_addr, v->vid);
+ if (err) {
+ br_err(br, "failed insert local address into bridge forwarding table\n");
+ goto out_filt;
+ }
}
vg->num_vlans++;
}
diff --git a/net/can/j1939/bus.c b/net/can/j1939/bus.c
index 39844f14eed8..797719cb227e 100644
--- a/net/can/j1939/bus.c
+++ b/net/can/j1939/bus.c
@@ -290,8 +290,11 @@ int j1939_local_ecu_get(struct j1939_priv *priv, name_t name, u8 sa)
if (!ecu)
ecu = j1939_ecu_create_locked(priv, name);
err = PTR_ERR_OR_ZERO(ecu);
- if (err)
+ if (err) {
+ if (j1939_address_is_unicast(sa))
+ priv->ents[sa].nusers--;
goto done;
+ }
ecu->nusers++;
/* TODO: do we care if ecu->addr != sa? */
diff --git a/net/can/j1939/j1939-priv.h b/net/can/j1939/j1939-priv.h
index 31a93cae5111..81f58924b4ac 100644
--- a/net/can/j1939/j1939-priv.h
+++ b/net/can/j1939/j1939-priv.h
@@ -212,6 +212,7 @@ void j1939_priv_get(struct j1939_priv *priv);
/* notify/alert all j1939 sockets bound to ifindex */
void j1939_sk_netdev_event_netdown(struct j1939_priv *priv);
+void j1939_sk_netdev_event_unregister(struct j1939_priv *priv);
int j1939_cancel_active_session(struct j1939_priv *priv, struct sock *sk);
void j1939_tp_init(struct j1939_priv *priv);
diff --git a/net/can/j1939/main.c b/net/can/j1939/main.c
index 7e8a20f2fc42..3706a872ecaf 100644
--- a/net/can/j1939/main.c
+++ b/net/can/j1939/main.c
@@ -377,6 +377,9 @@ static int j1939_netdev_notify(struct notifier_block *nb,
j1939_sk_netdev_event_netdown(priv);
j1939_ecu_unmap_all(priv);
break;
+ case NETDEV_UNREGISTER:
+ j1939_sk_netdev_event_unregister(priv);
+ break;
}
j1939_priv_put(priv);
diff --git a/net/can/j1939/socket.c b/net/can/j1939/socket.c
index 3d8b588822f9..88e7160d4248 100644
--- a/net/can/j1939/socket.c
+++ b/net/can/j1939/socket.c
@@ -521,6 +521,9 @@ static int j1939_sk_bind(struct socket *sock, struct sockaddr *uaddr, int len)
ret = j1939_local_ecu_get(priv, jsk->addr.src_name, jsk->addr.sa);
if (ret) {
j1939_netdev_stop(priv);
+ jsk->priv = NULL;
+ synchronize_rcu();
+ j1939_priv_put(priv);
goto out_release_sock;
}
@@ -1300,6 +1303,55 @@ void j1939_sk_netdev_event_netdown(struct j1939_priv *priv)
read_unlock_bh(&priv->j1939_socks_lock);
}
+void j1939_sk_netdev_event_unregister(struct j1939_priv *priv)
+{
+ struct sock *sk;
+ struct j1939_sock *jsk;
+ bool wait_rcu = false;
+
+rescan: /* The caller is holding a ref on this "priv" via j1939_priv_get_by_ndev(). */
+ read_lock_bh(&priv->j1939_socks_lock);
+ list_for_each_entry(jsk, &priv->j1939_socks, list) {
+ /* Skip if j1939_jsk_add() is not called on this socket. */
+ if (!(jsk->state & J1939_SOCK_BOUND))
+ continue;
+ sk = &jsk->sk;
+ sock_hold(sk);
+ read_unlock_bh(&priv->j1939_socks_lock);
+ /* Check that j1939_jsk_del() has not yet been called on this socket now
+ * that the socket's lock is held: both j1939_sk_bind() and
+ * j1939_sk_release() call j1939_jsk_del() with the socket's lock held.
+ */
+ lock_sock(sk);
+ if (jsk->state & J1939_SOCK_BOUND) {
+ /* Neither j1939_sk_bind() nor j1939_sk_release() called j1939_jsk_del().
+ * Make this socket no longer bound, by pretending as if j1939_sk_bind()
+ * dropped old references but did not get new references.
+ */
+ j1939_jsk_del(priv, jsk);
+ j1939_local_ecu_put(priv, jsk->addr.src_name, jsk->addr.sa);
+ j1939_netdev_stop(priv);
+ /* Call j1939_priv_put() now and prevent j1939_sk_sock_destruct() from
+ * calling the corresponding j1939_priv_put().
+ *
+ * j1939_sk_sock_destruct() is supposed to call j1939_priv_put() after
+ * an RCU grace period. But since the caller is holding a ref on this
+ * "priv", we can defer synchronize_rcu() until immediately before
+ * the caller calls j1939_priv_put().
+ */
+ j1939_priv_put(priv);
+ jsk->priv = NULL;
+ wait_rcu = true;
+ }
+ release_sock(sk);
+ sock_put(sk);
+ goto rescan;
+ }
+ read_unlock_bh(&priv->j1939_socks_lock);
+ if (wait_rcu)
+ synchronize_rcu();
+}
+
static int j1939_sk_no_ioctlcmd(struct socket *sock, unsigned int cmd,
unsigned long arg)
{
diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
index 9c0ad7f4b5d8..ad54b12d4b4c 100644
--- a/net/core/dev_ioctl.c
+++ b/net/core/dev_ioctl.c
@@ -464,8 +464,15 @@ int generic_hwtstamp_get_lower(struct net_device *dev,
if (!netif_device_present(dev))
return -ENODEV;
- if (ops->ndo_hwtstamp_get)
- return dev_get_hwtstamp_phylib(dev, kernel_cfg);
+ if (ops->ndo_hwtstamp_get) {
+ int err;
+
+ netdev_lock_ops(dev);
+ err = dev_get_hwtstamp_phylib(dev, kernel_cfg);
+ netdev_unlock_ops(dev);
+
+ return err;
+ }
/* Legacy path: unconverted lower driver */
return generic_hwtstamp_ioctl_lower(dev, SIOCGHWTSTAMP, kernel_cfg);
@@ -481,8 +488,15 @@ int generic_hwtstamp_set_lower(struct net_device *dev,
if (!netif_device_present(dev))
return -ENODEV;
- if (ops->ndo_hwtstamp_set)
- return dev_set_hwtstamp_phylib(dev, kernel_cfg, extack);
+ if (ops->ndo_hwtstamp_set) {
+ int err;
+
+ netdev_lock_ops(dev);
+ err = dev_set_hwtstamp_phylib(dev, kernel_cfg, extack);
+ netdev_unlock_ops(dev);
+
+ return err;
+ }
/* Legacy path: unconverted lower driver */
return generic_hwtstamp_ioctl_lower(dev, SIOCSHWTSTAMP, kernel_cfg);
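
Both hunks above wrap the phylib hwtstamp helpers in netdev_lock_ops()/netdev_unlock_ops(), since those helpers can call into driver ndo_hwtstamp_get/set implementations that rely on the per-device instance lock. Reduced to a sketch (assuming the declarations live in <net/netdev_lock.h>; example_call_locked() is not kernel API):

#include <linux/netdevice.h>
#include <net/netdev_lock.h>

static int example_call_locked(struct net_device *dev,
			       int (*op)(struct net_device *dev))
{
	int err;

	netdev_lock_ops(dev);	/* instance lock the callback expects */
	err = op(dev);
	netdev_unlock_ops(dev);

	return err;
}
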
diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c
index 88657255fec1..fbbc3ccf9df6 100644
--- a/net/hsr/hsr_device.c
+++ b/net/hsr/hsr_device.c
@@ -49,7 +49,7 @@ static bool hsr_check_carrier(struct hsr_port *master)
ASSERT_RTNL();
- hsr_for_each_port(master->hsr, port) {
+ hsr_for_each_port_rtnl(master->hsr, port) {
if (port->type != HSR_PT_MASTER && is_slave_up(port->dev)) {
netif_carrier_on(master->dev);
return true;
@@ -105,7 +105,7 @@ int hsr_get_max_mtu(struct hsr_priv *hsr)
struct hsr_port *port;
mtu_max = ETH_DATA_LEN;
- hsr_for_each_port(hsr, port)
+ hsr_for_each_port_rtnl(hsr, port)
if (port->type != HSR_PT_MASTER)
mtu_max = min(port->dev->mtu, mtu_max);
@@ -139,7 +139,7 @@ static int hsr_dev_open(struct net_device *dev)
hsr = netdev_priv(dev);
- hsr_for_each_port(hsr, port) {
+ hsr_for_each_port_rtnl(hsr, port) {
if (port->type == HSR_PT_MASTER)
continue;
switch (port->type) {
@@ -172,7 +172,7 @@ static int hsr_dev_close(struct net_device *dev)
struct hsr_priv *hsr;
hsr = netdev_priv(dev);
- hsr_for_each_port(hsr, port) {
+ hsr_for_each_port_rtnl(hsr, port) {
if (port->type == HSR_PT_MASTER)
continue;
switch (port->type) {
@@ -205,7 +205,7 @@ static netdev_features_t hsr_features_recompute(struct hsr_priv *hsr,
* may become enabled.
*/
features &= ~NETIF_F_ONE_FOR_ALL;
- hsr_for_each_port(hsr, port)
+ hsr_for_each_port_rtnl(hsr, port)
features = netdev_increment_features(features,
port->dev->features,
mask);
@@ -226,6 +226,7 @@ static netdev_tx_t hsr_dev_xmit(struct sk_buff *skb, struct net_device *dev)
struct hsr_priv *hsr = netdev_priv(dev);
struct hsr_port *master;
+ rcu_read_lock();
master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
if (master) {
skb->dev = master->dev;
@@ -238,6 +239,8 @@ static netdev_tx_t hsr_dev_xmit(struct sk_buff *skb, struct net_device *dev)
dev_core_stats_tx_dropped_inc(dev);
dev_kfree_skb_any(skb);
}
+ rcu_read_unlock();
+
return NETDEV_TX_OK;
}
@@ -484,7 +487,7 @@ static void hsr_set_rx_mode(struct net_device *dev)
hsr = netdev_priv(dev);
- hsr_for_each_port(hsr, port) {
+ hsr_for_each_port_rtnl(hsr, port) {
if (port->type == HSR_PT_MASTER)
continue;
switch (port->type) {
@@ -506,7 +509,7 @@ static void hsr_change_rx_flags(struct net_device *dev, int change)
hsr = netdev_priv(dev);
- hsr_for_each_port(hsr, port) {
+ hsr_for_each_port_rtnl(hsr, port) {
if (port->type == HSR_PT_MASTER)
continue;
switch (port->type) {
@@ -534,7 +537,7 @@ static int hsr_ndo_vlan_rx_add_vid(struct net_device *dev,
hsr = netdev_priv(dev);
- hsr_for_each_port(hsr, port) {
+ hsr_for_each_port_rtnl(hsr, port) {
if (port->type == HSR_PT_MASTER ||
port->type == HSR_PT_INTERLINK)
continue;
@@ -580,7 +583,7 @@ static int hsr_ndo_vlan_rx_kill_vid(struct net_device *dev,
hsr = netdev_priv(dev);
- hsr_for_each_port(hsr, port) {
+ hsr_for_each_port_rtnl(hsr, port) {
switch (port->type) {
case HSR_PT_SLAVE_A:
case HSR_PT_SLAVE_B:
@@ -672,9 +675,14 @@ struct net_device *hsr_get_port_ndev(struct net_device *ndev,
struct hsr_priv *hsr = netdev_priv(ndev);
struct hsr_port *port;
+ rcu_read_lock();
hsr_for_each_port(hsr, port)
- if (port->type == pt)
+ if (port->type == pt) {
+ dev_hold(port->dev);
+ rcu_read_unlock();
return port->dev;
+ }
+ rcu_read_unlock();
return NULL;
}
EXPORT_SYMBOL(hsr_get_port_ndev);
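
The hsr_get_port_ndev() change above follows a general RCU rule: a pointer found during an RCU-protected walk may only be handed to the caller after a reference is taken inside the read-side section. A minimal sketch of that rule (example_get_dev() is hypothetical):

#include <linux/netdevice.h>
#include <linux/rcupdate.h>

static struct net_device *example_get_dev(struct net_device __rcu **slot)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = rcu_dereference(*slot);
	if (dev)
		dev_hold(dev);	/* pin before leaving the RCU section */
	rcu_read_unlock();

	return dev;		/* caller drops it with dev_put() */
}
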
diff --git a/net/hsr/hsr_main.c b/net/hsr/hsr_main.c
index 192893c3f2ec..bc94b07101d8 100644
--- a/net/hsr/hsr_main.c
+++ b/net/hsr/hsr_main.c
@@ -22,7 +22,7 @@ static bool hsr_slave_empty(struct hsr_priv *hsr)
{
struct hsr_port *port;
- hsr_for_each_port(hsr, port)
+ hsr_for_each_port_rtnl(hsr, port)
if (port->type != HSR_PT_MASTER)
return false;
return true;
@@ -134,7 +134,7 @@ struct hsr_port *hsr_port_get_hsr(struct hsr_priv *hsr, enum hsr_port_type pt)
{
struct hsr_port *port;
- hsr_for_each_port(hsr, port)
+ hsr_for_each_port_rtnl(hsr, port)
if (port->type == pt)
return port;
return NULL;
diff --git a/net/hsr/hsr_main.h b/net/hsr/hsr_main.h
index 135ec5fce019..33b0d2460c9b 100644
--- a/net/hsr/hsr_main.h
+++ b/net/hsr/hsr_main.h
@@ -224,6 +224,9 @@ struct hsr_priv {
#define hsr_for_each_port(hsr, port) \
list_for_each_entry_rcu((port), &(hsr)->ports, port_list)
+#define hsr_for_each_port_rtnl(hsr, port) \
+ list_for_each_entry_rcu((port), &(hsr)->ports, port_list, lockdep_rtnl_is_held())
+
struct hsr_port *hsr_port_get_hsr(struct hsr_priv *hsr, enum hsr_port_type pt);
/* Caller must ensure skb is a valid HSR frame */
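
hsr_for_each_port_rtnl() above uses the optional fourth argument of list_for_each_entry_rcu() to tell lockdep that the traversal is also safe with RTNL held instead of rcu_read_lock(), on the premise that writers only change the port list under RTNL. The same annotation outside HSR might look like this (the example_ types are stand-ins):

#include <linux/rculist.h>
#include <linux/rtnetlink.h>

struct example_port {
	struct list_head list;
};

struct example_priv {
	struct list_head ports;	/* updated under RTNL, read under RCU */
};

static int example_count_ports(struct example_priv *priv)
{
	struct example_port *port;
	int n = 0;

	ASSERT_RTNL();
	list_for_each_entry_rcu(port, &priv->ports, list,
				lockdep_rtnl_is_held())
		n++;

	return n;
}
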
diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
index cc9915543637..2e61ac137128 100644
--- a/net/ipv4/ip_tunnel_core.c
+++ b/net/ipv4/ip_tunnel_core.c
@@ -206,6 +206,9 @@ static int iptunnel_pmtud_build_icmp(struct sk_buff *skb, int mtu)
if (!pskb_may_pull(skb, ETH_HLEN + sizeof(struct iphdr)))
return -EINVAL;
+ if (skb_is_gso(skb))
+ skb_gso_reset(skb);
+
skb_copy_bits(skb, skb_mac_offset(skb), &eh, ETH_HLEN);
pskb_pull(skb, ETH_HLEN);
skb_reset_network_header(skb);
@@ -300,6 +303,9 @@ static int iptunnel_pmtud_build_icmpv6(struct sk_buff *skb, int mtu)
if (!pskb_may_pull(skb, ETH_HLEN + sizeof(struct ipv6hdr)))
return -EINVAL;
+ if (skb_is_gso(skb))
+ skb_gso_reset(skb);
+
skb_copy_bits(skb, skb_mac_offset(skb), &eh, ETH_HLEN);
pskb_pull(skb, ETH_HLEN);
skb_reset_network_header(skb);
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 588932c3cf1d..7f9c671b1ee0 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -412,6 +412,22 @@ static u64 tcp_compute_delivery_rate(const struct tcp_sock *tp)
return rate64;
}
+#ifdef CONFIG_TCP_MD5SIG
+void tcp_md5_destruct_sock(struct sock *sk)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+
+ if (tp->md5sig_info) {
+
+ tcp_clear_md5_list(sk);
+ kfree(rcu_replace_pointer(tp->md5sig_info, NULL, 1));
+ static_branch_slow_dec_deferred(&tcp_md5_needed);
+ tcp_md5_release_sigpool();
+ }
+}
+EXPORT_IPV6_MOD_GPL(tcp_md5_destruct_sock);
+#endif
+
/* Address-family independent initialization for a tcp_sock.
*
* NOTE: A lot of things set to zero explicitly by call to
@@ -2818,9 +2834,9 @@ found_ok_skb:
err = tcp_recvmsg_dmabuf(sk, skb, offset, msg,
used);
- if (err <= 0) {
+ if (err < 0) {
if (!copied)
- copied = -EFAULT;
+ copied = err;
break;
}
diff --git a/net/ipv4/tcp_ao.c b/net/ipv4/tcp_ao.c
index bbb8d5f0eae7..31302be78bc4 100644
--- a/net/ipv4/tcp_ao.c
+++ b/net/ipv4/tcp_ao.c
@@ -268,9 +268,8 @@ static void tcp_ao_key_free_rcu(struct rcu_head *head)
kfree_sensitive(key);
}
-static void tcp_ao_info_free_rcu(struct rcu_head *head)
+static void tcp_ao_info_free(struct tcp_ao_info *ao)
{
- struct tcp_ao_info *ao = container_of(head, struct tcp_ao_info, rcu);
struct tcp_ao_key *key;
struct hlist_node *n;
@@ -310,7 +309,7 @@ void tcp_ao_destroy_sock(struct sock *sk, bool twsk)
if (!twsk)
tcp_ao_sk_omem_free(sk, ao);
- call_rcu(&ao->rcu, tcp_ao_info_free_rcu);
+ tcp_ao_info_free(ao);
}
void tcp_ao_time_wait(struct tcp_timewait_sock *tcptw, struct tcp_sock *tp)
diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c
index ba581785adb4..a268e1595b22 100644
--- a/net/ipv4/tcp_bpf.c
+++ b/net/ipv4/tcp_bpf.c
@@ -408,8 +408,11 @@ more_data:
if (!psock->cork) {
psock->cork = kzalloc(sizeof(*psock->cork),
GFP_ATOMIC | __GFP_NOWARN);
- if (!psock->cork)
+ if (!psock->cork) {
+ sk_msg_free(sk, msg);
+ *copied = 0;
return -ENOMEM;
+ }
}
memcpy(psock->cork, msg, sizeof(*msg));
return 0;
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 1e58a8a9ff7a..2a0602035729 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1503,9 +1503,9 @@ void tcp_clear_md5_list(struct sock *sk)
md5sig = rcu_dereference_protected(tp->md5sig_info, 1);
hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
- hlist_del_rcu(&key->node);
+ hlist_del(&key->node);
atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
- kfree_rcu(key, rcu);
+ kfree(key);
}
}
@@ -2494,6 +2494,13 @@ static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
.ao_calc_key_sk = tcp_v4_ao_calc_key_sk,
#endif
};
+
+static void tcp4_destruct_sock(struct sock *sk)
+{
+ tcp_md5_destruct_sock(sk);
+ tcp_ao_destroy_sock(sk, false);
+ inet_sock_destruct(sk);
+}
#endif
/* NOTE: A lot of things set to zero explicitly by call to
@@ -2509,23 +2516,12 @@ static int tcp_v4_init_sock(struct sock *sk)
#if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO)
tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
+ sk->sk_destruct = tcp4_destruct_sock;
#endif
return 0;
}
-#ifdef CONFIG_TCP_MD5SIG
-static void tcp_md5sig_info_free_rcu(struct rcu_head *head)
-{
- struct tcp_md5sig_info *md5sig;
-
- md5sig = container_of(head, struct tcp_md5sig_info, rcu);
- kfree(md5sig);
- static_branch_slow_dec_deferred(&tcp_md5_needed);
- tcp_md5_release_sigpool();
-}
-#endif
-
static void tcp_release_user_frags(struct sock *sk)
{
#ifdef CONFIG_PAGE_POOL
@@ -2562,19 +2558,6 @@ void tcp_v4_destroy_sock(struct sock *sk)
/* Cleans up our, hopefully empty, out_of_order_queue. */
skb_rbtree_purge(&tp->out_of_order_queue);
-#ifdef CONFIG_TCP_MD5SIG
- /* Clean up the MD5 key list, if any */
- if (tp->md5sig_info) {
- struct tcp_md5sig_info *md5sig;
-
- md5sig = rcu_dereference_protected(tp->md5sig_info, 1);
- tcp_clear_md5_list(sk);
- call_rcu(&md5sig->rcu, tcp_md5sig_info_free_rcu);
- rcu_assign_pointer(tp->md5sig_info, NULL);
- }
-#endif
- tcp_ao_destroy_sock(sk, false);
-
/* Clean up a referenced TCP bind bucket. */
if (inet_csk(sk)->icsk_bind_hash)
inet_put_port(sk);
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index d1c9e4088646..7c2ae07d8d5d 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -377,26 +377,17 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
}
EXPORT_SYMBOL(tcp_time_wait);
-#ifdef CONFIG_TCP_MD5SIG
-static void tcp_md5_twsk_free_rcu(struct rcu_head *head)
-{
- struct tcp_md5sig_key *key;
-
- key = container_of(head, struct tcp_md5sig_key, rcu);
- kfree(key);
- static_branch_slow_dec_deferred(&tcp_md5_needed);
- tcp_md5_release_sigpool();
-}
-#endif
-
void tcp_twsk_destructor(struct sock *sk)
{
#ifdef CONFIG_TCP_MD5SIG
if (static_branch_unlikely(&tcp_md5_needed.key)) {
struct tcp_timewait_sock *twsk = tcp_twsk(sk);
- if (twsk->tw_md5_key)
- call_rcu(&twsk->tw_md5_key->rcu, tcp_md5_twsk_free_rcu);
+ if (twsk->tw_md5_key) {
+ kfree(twsk->tw_md5_key);
+ static_branch_slow_dec_deferred(&tcp_md5_needed);
+ tcp_md5_release_sigpool();
+ }
}
#endif
tcp_ao_destroy_sock(sk, true);
diff --git a/net/ipv4/udp_tunnel_nic.c b/net/ipv4/udp_tunnel_nic.c
index ff66db48453c..944b3cf25468 100644
--- a/net/ipv4/udp_tunnel_nic.c
+++ b/net/ipv4/udp_tunnel_nic.c
@@ -930,7 +930,7 @@ udp_tunnel_nic_netdevice_event(struct notifier_block *unused,
err = udp_tunnel_nic_register(dev);
if (err)
- netdev_WARN(dev, "failed to register for UDP tunnel offloads: %d", err);
+ netdev_warn(dev, "failed to register for UDP tunnel offloads: %d", err);
return notifier_from_errno(err);
}
/* All other events will need the udp_tunnel_nic state */
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 0562e939b2e3..08dabc47a6e7 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -2110,6 +2110,13 @@ static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
.ao_calc_key_sk = tcp_v4_ao_calc_key_sk,
#endif
};
+
+static void tcp6_destruct_sock(struct sock *sk)
+{
+ tcp_md5_destruct_sock(sk);
+ tcp_ao_destroy_sock(sk, false);
+ inet6_sock_destruct(sk);
+}
#endif
/* NOTE: A lot of things set to zero explicitly by call to
@@ -2125,6 +2132,7 @@ static int tcp_v6_init_sock(struct sock *sk)
#if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO)
tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
+ sk->sk_destruct = tcp6_destruct_sock;
#endif
return 0;
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index a35ee6d693a8..b70369f3cd32 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -260,7 +260,7 @@ rescore:
/* compute_score is too long of a function to be
* inlined, and calling it again here yields
- * measureable overhead for some
+ * measurable overhead for some
* workloads. Work around it by jumping
* backwards to rescore 'result'.
*/
@@ -449,7 +449,7 @@ struct sock *udp6_lib_lookup(const struct net *net, const struct in6_addr *saddr
EXPORT_SYMBOL_GPL(udp6_lib_lookup);
#endif
-/* do not use the scratch area len for jumbogram: their length execeeds the
+/* do not use the scratch area len for jumbogram: their length exceeds the
* scratch area space; note that the IP6CB flags is still in the first
* cacheline, so checking for jumbograms is cheap
*/
@@ -1048,7 +1048,7 @@ static void udp6_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
sk->sk_rx_dst_cookie = rt6_get_cookie(dst_rt6_info(dst));
}
-/* wrapper for udp_queue_rcv_skb tacking care of csum conversion and
+/* wrapper for udp_queue_rcv_skb taking care of csum conversion and
* return code conversion for ip layer consumption
*/
static int udp6_unicast_rcv_skb(struct sock *sk, struct sk_buff *skb,
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 2ed07fa121ab..b26f61f13605 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -2171,10 +2171,16 @@ static int sta_apply_parameters(struct ieee80211_local *local,
/*
* cfg80211 validates this (1-2007) and allows setting the AID
- * only when creating a new station entry
+ * only when creating a new station entry. For S1G APs, the current
+ * implementation supports a maximum of 1600 AIDs.
*/
- if (params->aid)
+ if (params->aid) {
+ if (sdata->vif.cfg.s1g &&
+ params->aid > IEEE80211_MAX_SUPPORTED_S1G_AID)
+ return -EINVAL;
+
sta->sta.aid = params->aid;
+ }
/*
* Some of the following updates would be racy if called on an
@@ -3001,6 +3007,9 @@ static int ieee80211_scan(struct wiphy *wiphy,
struct cfg80211_scan_request *req)
{
struct ieee80211_sub_if_data *sdata;
+ struct ieee80211_link_data *link;
+ struct ieee80211_channel *chan;
+ int radio_idx;
sdata = IEEE80211_WDEV_TO_SUB_IF(req->wdev);
@@ -3028,10 +3037,20 @@ static int ieee80211_scan(struct wiphy *wiphy,
* the frames sent while scanning on other channel will be
* lost)
*/
- if (ieee80211_num_beaconing_links(sdata) &&
- (!(wiphy->features & NL80211_FEATURE_AP_SCAN) ||
- !(req->flags & NL80211_SCAN_FLAG_AP)))
- return -EOPNOTSUPP;
+ for_each_link_data(sdata, link) {
+ /* if the link is not beaconing, ignore it */
+ if (!sdata_dereference(link->u.ap.beacon, sdata))
+ continue;
+
+ chan = link->conf->chanreq.oper.chan;
+ radio_idx = cfg80211_get_radio_idx_by_chan(wiphy, chan);
+
+ if (ieee80211_is_radio_idx_in_scan_req(wiphy, req,
+ radio_idx) &&
+ (!(wiphy->features & NL80211_FEATURE_AP_SCAN) ||
+ !(req->flags & NL80211_SCAN_FLAG_AP)))
+ return -EOPNOTSUPP;
+ }
break;
case NL80211_IFTYPE_NAN:
default:
@@ -3677,12 +3696,7 @@ static bool ieee80211_is_scan_ongoing(struct wiphy *wiphy,
if (list_empty(&local->roc_list) && !local->scanning)
return false;
- if (wiphy->n_radio < 2)
- return true;
-
req_radio_idx = cfg80211_get_radio_idx_by_chan(wiphy, chandef->chan);
- if (req_radio_idx < 0)
- return true;
if (local->scanning) {
scan_req = wiphy_dereference(wiphy, local->scan_req);
@@ -3701,14 +3715,6 @@ static bool ieee80211_is_scan_ongoing(struct wiphy *wiphy,
list_for_each_entry(roc, &local->roc_list, list) {
chan_radio_idx = cfg80211_get_radio_idx_by_chan(wiphy,
roc->chan);
- /*
- * The roc work is added but chan_radio_idx is invalid.
- * Should not happen but if it does, let's not take
- * risk and return true.
- */
- if (chan_radio_idx < 0)
- return true;
-
if (chan_radio_idx == req_radio_idx)
return true;
}
diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c
index c9cea0e7ac16..57065714cf8c 100644
--- a/net/mac80211/chan.c
+++ b/net/mac80211/chan.c
@@ -659,19 +659,8 @@ bool ieee80211_is_radar_required(struct ieee80211_local *local,
for_each_sdata_link(local, link) {
if (link->radar_required) {
- if (wiphy->n_radio < 2)
- return true;
-
chan = link->conf->chanreq.oper.chan;
radio_idx = cfg80211_get_radio_idx_by_chan(wiphy, chan);
- /*
- * The radio index (radio_idx) is expected to be valid,
- * as it's derived from a channel tied to a link. If
- * it's invalid (i.e., negative), return true to avoid
- * potential issues with radar-sensitive operations.
- */
- if (radio_idx < 0)
- return true;
if (ieee80211_is_radio_idx_in_scan_req(wiphy, req,
radio_idx))
diff --git a/net/mac80211/ethtool.c b/net/mac80211/ethtool.c
index 0397755a3bd1..3d365626faa4 100644
--- a/net/mac80211/ethtool.c
+++ b/net/mac80211/ethtool.c
@@ -48,8 +48,8 @@ static const char ieee80211_gstrings_sta_stats[][ETH_GSTRING_LEN] = {
"rx_duplicates", "rx_fragments", "rx_dropped",
"tx_packets", "tx_bytes",
"tx_filtered", "tx_retry_failed", "tx_retries",
- "sta_state", "txrate", "rxrate", "signal",
- "channel", "noise", "ch_time", "ch_time_busy",
+ "tx_handlers_drop", "sta_state", "txrate", "rxrate",
+ "signal", "channel", "noise", "ch_time", "ch_time_busy",
"ch_time_ext_busy", "ch_time_rx", "ch_time_tx"
};
#define STA_STATS_LEN ARRAY_SIZE(ieee80211_gstrings_sta_stats)
@@ -120,6 +120,7 @@ static void ieee80211_get_stats(struct net_device *dev,
i = 0;
ADD_STA_STATS(&sta->deflink);
+ data[i++] = sdata->tx_handlers_drop;
data[i++] = sta->sta_state;
@@ -145,6 +146,7 @@ static void ieee80211_get_stats(struct net_device *dev,
sta_set_sinfo(sta, &sinfo, false);
i = 0;
ADD_STA_STATS(&sta->deflink);
+ data[i++] = sdata->tx_handlers_drop;
}
}
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 8afa2404eaa8..8a666faeb1ec 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -86,6 +86,14 @@ extern const u8 ieee80211_ac_to_qos_mask[IEEE80211_NUM_ACS];
#define IEEE80211_MAX_NAN_INSTANCE_ID 255
+/*
+ * Current mac80211 implementation supports a maximum of 1600 AIDS
+ * for S1G interfaces. With regards to an S1G TIM, this covers 25 blocks
+ * as each block is 64 AIDs.
+ */
+#define IEEE80211_MAX_SUPPORTED_S1G_AID 1600
+#define IEEE80211_MAX_SUPPORTED_S1G_TIM_BLOCKS 25
+
enum ieee80211_status_data {
IEEE80211_STATUS_TYPE_MASK = 0x00f,
IEEE80211_STATUS_TYPE_INVALID = 0,
@@ -1210,6 +1218,7 @@ struct ieee80211_sub_if_data {
} debugfs;
#endif
+ u32 tx_handlers_drop;
/* must be last, dynamically sized area in this! */
struct ieee80211_vif vif;
};
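
The comment above ties the two new constants together: each S1G TIM block addresses 64 AIDs, so 25 blocks give the 1600-AID ceiling. A build-time statement of that relationship (illustrative; the patch itself does not add such a check, and the constants come from the ieee80211_i.h hunk above):

#include <linux/build_bug.h>

/* 25 TIM blocks x 64 AIDs per block = 1600 supported S1G AIDs */
static_assert(IEEE80211_MAX_SUPPORTED_S1G_TIM_BLOCKS * 64 ==
	      IEEE80211_MAX_SUPPORTED_S1G_AID);
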
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 3ae6104e5cb2..437f1363c982 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -862,6 +862,14 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
if (emulate_chanctx || ops->remain_on_channel)
wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
+ wiphy->bss_param_support = WIPHY_BSS_PARAM_CTS_PROT |
+ WIPHY_BSS_PARAM_SHORT_PREAMBLE |
+ WIPHY_BSS_PARAM_SHORT_SLOT_TIME |
+ WIPHY_BSS_PARAM_BASIC_RATES |
+ WIPHY_BSS_PARAM_AP_ISOLATE |
+ WIPHY_BSS_PARAM_HT_OPMODE |
+ WIPHY_BSS_PARAM_P2P_CTWINDOW |
+ WIPHY_BSS_PARAM_P2P_OPPPS;
wiphy->features |= NL80211_FEATURE_SK_TX_STATUS |
NL80211_FEATURE_SAE |
NL80211_FEATURE_HT_IBSS |
@@ -1164,9 +1172,6 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
if (WARN_ON(!ieee80211_hw_check(hw, MFP_CAPABLE)))
return -EINVAL;
- if (WARN_ON(!ieee80211_hw_check(hw, CONNECTION_MONITOR)))
- return -EINVAL;
-
if (WARN_ON(ieee80211_hw_check(hw, NEED_DTIM_BEFORE_ASSOC)))
return -EINVAL;
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index a4a715f6f1c3..f37068a533f4 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -624,6 +624,9 @@ int mesh_add_he_6ghz_cap_ie(struct ieee80211_sub_if_data *sdata,
if (!sband)
return -EINVAL;
+ if (sband->band != NL80211_BAND_6GHZ)
+ return 0;
+
iftd = ieee80211_get_sband_iftype_data(sband,
NL80211_IFTYPE_MESH_POINT);
/* The device doesn't support HE in mesh mode or at all */
diff --git a/net/mac80211/mesh_ps.c b/net/mac80211/mesh_ps.c
index 20e022a03933..ebab1f0a0138 100644
--- a/net/mac80211/mesh_ps.c
+++ b/net/mac80211/mesh_ps.c
@@ -586,7 +586,7 @@ void ieee80211_mps_frame_release(struct sta_info *sta,
if (sta->mesh->plink_state == NL80211_PLINK_ESTAB)
has_buffered = ieee80211_check_tim(elems->tim, elems->tim_len,
- sta->mesh->aid);
+ sta->mesh->aid, false);
if (has_buffered)
mps_dbg(sta->sdata, "%pM indicates buffered frames\n",
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index dd650a127a31..0e12309accbe 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -1850,7 +1850,8 @@ ieee80211_add_link_elems(struct ieee80211_sub_if_data *sdata,
ieee80211_put_he_cap(skb, sdata, sband,
&assoc_data->link[link_id].conn);
ADD_PRESENT_EXT_ELEM(WLAN_EID_EXT_HE_CAPABILITY);
- ieee80211_put_he_6ghz_cap(skb, sdata, smps_mode);
+ if (sband->band == NL80211_BAND_6GHZ)
+ ieee80211_put_he_6ghz_cap(skb, sdata, smps_mode);
}
/*
@@ -2112,8 +2113,11 @@ ieee80211_link_common_elems_size(struct ieee80211_sub_if_data *sdata,
sizeof(struct ieee80211_he_mcs_nss_supp) +
IEEE80211_HE_PPE_THRES_MAX_LEN;
- if (sband->band == NL80211_BAND_6GHZ)
+ if (sband->band == NL80211_BAND_6GHZ) {
size += 2 + 1 + sizeof(struct ieee80211_he_6ghz_capa);
+ /* reg connection */
+ size += 4;
+ }
size += 2 + 1 + sizeof(struct ieee80211_eht_cap_elem) +
sizeof(struct ieee80211_eht_mcs_nss_supp) +
@@ -2187,11 +2191,7 @@ static int ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
2 + /* ext capa & op */
2; /* EML capa */
- /*
- * The capability elements were already considered above;
- * note this over-estimates a bit because there's no
- * STA profile for the assoc link.
- */
+ /* The capability elements were already considered above */
size += (n_links - 1) *
(1 + 1 + /* subelement ID/length */
2 + /* STA control */
@@ -5729,7 +5729,7 @@ static u8 ieee80211_max_rx_chains(struct ieee80211_link_data *link,
he_cap_elem = cfg80211_find_ext_elem(WLAN_EID_EXT_HE_CAPABILITY,
ies->data, ies->len);
- if (!he_cap_elem || he_cap_elem->datalen < sizeof(*he_cap))
+ if (!he_cap_elem || he_cap_elem->datalen < sizeof(*he_cap) + 1)
return chains;
/* skip one byte ext_tag_id */
@@ -6356,6 +6356,7 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
};
u8 ap_mld_addr[ETH_ALEN] __aligned(2);
unsigned int link_id;
+ u16 max_aid = IEEE80211_MAX_AID;
lockdep_assert_wiphy(sdata->local->hw.wiphy);
@@ -6382,10 +6383,12 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
reassoc = ieee80211_is_reassoc_resp(mgmt->frame_control);
capab_info = le16_to_cpu(mgmt->u.assoc_resp.capab_info);
status_code = le16_to_cpu(mgmt->u.assoc_resp.status_code);
- if (assoc_data->s1g)
+ if (assoc_data->s1g) {
elem_start = mgmt->u.s1g_assoc_resp.variable;
- else
+ max_aid = IEEE80211_MAX_SUPPORTED_S1G_AID;
+ } else {
elem_start = mgmt->u.assoc_resp.variable;
+ }
/*
* Note: this may not be perfect, AP might misbehave - if
@@ -6409,16 +6412,15 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
if (elems->aid_resp)
aid = le16_to_cpu(elems->aid_resp->aid);
- else if (assoc_data->s1g)
- aid = 0; /* TODO */
else
aid = le16_to_cpu(mgmt->u.assoc_resp.aid);
/*
- * The 5 MSB of the AID field are reserved
- * (802.11-2016 9.4.1.8 AID field)
+ * The 5 MSBs of the AID field are reserved for a non-S1G STA. For
+ * an S1G STA the 3 MSBs are reserved.
+ * (802.11-2016 9.4.1.8 AID field).
*/
- aid &= 0x7ff;
+ aid &= assoc_data->s1g ? 0x1fff : 0x7ff;
sdata_info(sdata,
"RX %sssocResp from %pM (capab=0x%x status=%d aid=%d)\n",
@@ -6455,7 +6457,7 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
event.u.mlme.reason = status_code;
drv_event_callback(sdata->local, sdata, &event);
} else {
- if (aid == 0 || aid > IEEE80211_MAX_AID) {
+ if (aid == 0 || aid > max_aid) {
sdata_info(sdata,
"invalid AID value %d (out of range), turn off PS\n",
aid);
@@ -6493,6 +6495,7 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
}
sdata->vif.cfg.aid = aid;
+ sdata->vif.cfg.s1g = assoc_data->s1g;
if (!ieee80211_assoc_success(sdata, mgmt, elems,
elem_start, elem_len)) {
@@ -7440,7 +7443,8 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_link_data *link,
ncrc = elems->crc;
if (ieee80211_hw_check(&local->hw, PS_NULLFUNC_STACK) &&
- ieee80211_check_tim(elems->tim, elems->tim_len, vif_cfg->aid)) {
+ ieee80211_check_tim(elems->tim, elems->tim_len, vif_cfg->aid,
+ vif_cfg->s1g)) {
if (local->hw.conf.dynamic_ps_timeout > 0) {
if (local->hw.conf.flags & IEEE80211_CONF_PS) {
local->hw.conf.flags &= ~IEEE80211_CONF_PS;
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 8c550aab9bdc..1bd75e0375a0 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -2962,7 +2962,7 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo,
struct ieee80211_sub_if_data *sdata = sta->sdata;
struct ieee80211_local *local = sdata->local;
u32 thr = 0;
- int i, ac, cpu, link_id;
+ int i, ac, cpu;
struct ieee80211_sta_rx_stats *last_rxstats;
last_rxstats = sta_get_last_rx_stats(sta, -1);
@@ -3204,18 +3204,23 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo,
if (sta->sta.valid_links) {
struct ieee80211_link_data *link;
struct link_sta_info *link_sta;
+ int link_id;
ether_addr_copy(sinfo->mld_addr, sta->addr);
+
+ /* assign valid links first for iteration */
+ sinfo->valid_links = sta->sta.valid_links;
+
for_each_valid_link(sinfo, link_id) {
link_sta = wiphy_dereference(sta->local->hw.wiphy,
sta->link[link_id]);
link = wiphy_dereference(sdata->local->hw.wiphy,
sdata->link[link_id]);
- if (!link_sta || !sinfo->links[link_id] || !link)
+ if (!link_sta || !sinfo->links[link_id] || !link) {
+ sinfo->valid_links &= ~BIT(link_id);
continue;
-
- sinfo->valid_links = sta->sta.valid_links;
+ }
sta_set_link_sinfo(sta, sinfo->links[link_id],
link, tidstats);
}
diff --git a/net/mac80211/tests/Makefile b/net/mac80211/tests/Makefile
index 3b0c08356fc5..3c7f874e5c41 100644
--- a/net/mac80211/tests/Makefile
+++ b/net/mac80211/tests/Makefile
@@ -1,3 +1,3 @@
-mac80211-tests-y += module.o util.o elems.o mfp.o tpe.o chan-mode.o
+mac80211-tests-y += module.o util.o elems.o mfp.o tpe.o chan-mode.o s1g_tim.o
obj-$(CONFIG_MAC80211_KUNIT_TEST) += mac80211-tests.o
diff --git a/net/mac80211/tests/s1g_tim.c b/net/mac80211/tests/s1g_tim.c
new file mode 100644
index 000000000000..642fa4ece89f
--- /dev/null
+++ b/net/mac80211/tests/s1g_tim.c
@@ -0,0 +1,356 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * KUnit tests for S1G TIM PVB decoding. This test suite covers
+ * IEEE80211-2024 Annex L figures 8, 9, 10, 12, 13, 14. ADE mode
+ * is not covered as it is an optional encoding format and is not
+ * currently supported by mac80211.
+ *
+ * Copyright (C) 2025 Morse Micro
+ */
+#include <linux/ieee80211.h>
+#include <kunit/test.h>
+#include <kunit/test-bug.h>
+
+#define MAX_AID 128
+
+#define BC(enc_mode, inverse, blk_off) \
+ ((((blk_off) & 0x1f) << 3) | ((inverse) ? BIT(2) : 0) | \
+ ((enc_mode) & 0x3))
+
+static void byte_to_bitstr(u8 v, char *out)
+{
+ for (int b = 7; b >= 0; b--)
+ *out++ = (v & BIT(b)) ? '1' : '0';
+ *out = '\0';
+}
+
+static void dump_tim_bits(struct kunit *test,
+ const struct ieee80211_tim_ie *tim, u8 tim_len)
+{
+ const u8 *ptr = tim->virtual_map;
+ const u8 *end = (const u8 *)tim + tim_len;
+ unsigned int oct = 1;
+ unsigned int blk = 0;
+ char bits[9];
+
+ while (ptr < end) {
+ u8 ctrl = *ptr++;
+ u8 mode = ctrl & 0x03;
+ bool inverse = ctrl & BIT(2);
+ u8 blk_off = ctrl >> 3;
+
+ kunit_info(
+ test, "Block %u (ENC=%s, blk_off=%u, inverse=%u)", blk,
+ (mode == IEEE80211_S1G_TIM_ENC_MODE_BLOCK) ? "BLOCK" :
+ (mode == IEEE80211_S1G_TIM_ENC_MODE_SINGLE) ? "SINGLE" :
+ "OLB",
+ blk_off, inverse);
+
+ byte_to_bitstr(ctrl, bits);
+ kunit_info(test, " octet %2u (ctrl) : %s (0x%02x)", oct,
+ bits, ctrl);
+ ++oct;
+
+ switch (mode) {
+ case IEEE80211_S1G_TIM_ENC_MODE_BLOCK: {
+ u8 blkmap = *ptr++;
+
+ byte_to_bitstr(blkmap, bits);
+ kunit_info(test, " octet %2u (blk-map) : %s (0x%02x)",
+ oct, bits, blkmap);
+ ++oct;
+
+ for (u8 sb = 0; sb < 8; sb++) {
+ if (!(blkmap & BIT(sb)))
+ continue;
+ u8 sub = *ptr++;
+
+ byte_to_bitstr(sub, bits);
+ kunit_info(
+ test,
+ " octet %2u (SB %2u) : %s (0x%02x)",
+ oct, sb, bits, sub);
+ ++oct;
+ }
+ break;
+ }
+ case IEEE80211_S1G_TIM_ENC_MODE_SINGLE: {
+ u8 single = *ptr++;
+
+ byte_to_bitstr(single, bits);
+ kunit_info(test, " octet %2u (single) : %s (0x%02x)",
+ oct, bits, single);
+ ++oct;
+ break;
+ }
+ case IEEE80211_S1G_TIM_ENC_MODE_OLB: {
+ u8 len = *ptr++;
+
+ byte_to_bitstr(len, bits);
+ kunit_info(test, " octet %2u (len=%2u) : %s (0x%02x)",
+ oct, len, bits, len);
+ ++oct;
+
+ for (u8 i = 0; i < len && ptr < end; i++) {
+ u8 sub = *ptr++;
+
+ byte_to_bitstr(sub, bits);
+ kunit_info(
+ test,
+ " octet %2u (SB %2u) : %s (0x%02x)",
+ oct, i, bits, sub);
+ ++oct;
+ }
+ break;
+ }
+ default:
+ kunit_info(test, " ** unknown encoding 0x%x **", mode);
+ return;
+ }
+ blk++;
+ }
+}
+
+static void tim_push(u8 **p, u8 v)
+{
+ *(*p)++ = v;
+}
+
+static void tim_begin(struct ieee80211_tim_ie *tim, u8 **p)
+{
+ tim->dtim_count = 0;
+ tim->dtim_period = 1;
+ tim->bitmap_ctrl = 0;
+ *p = tim->virtual_map;
+}
+
+static u8 tim_end(struct ieee80211_tim_ie *tim, u8 *tail)
+{
+ return tail - (u8 *)tim;
+}
+
+static void pvb_add_block_bitmap(u8 **p, u8 blk_off, bool inverse, u8 blk_bmap,
+ const u8 *subblocks)
+{
+ u8 enc = IEEE80211_S1G_TIM_ENC_MODE_BLOCK;
+ u8 n = hweight8(blk_bmap);
+
+ tim_push(p, BC(enc, inverse, blk_off));
+ tim_push(p, blk_bmap);
+
+ for (u8 i = 0; i < n; i++)
+ tim_push(p, subblocks[i]);
+}
+
+static void pvb_add_single_aid(u8 **p, u8 blk_off, bool inverse, u8 single6)
+{
+ u8 enc = IEEE80211_S1G_TIM_ENC_MODE_SINGLE;
+
+ tim_push(p, BC(enc, inverse, blk_off));
+ tim_push(p, single6 & GENMASK(5, 0));
+}
+
+static void pvb_add_olb(u8 **p, u8 blk_off, bool inverse, const u8 *subblocks,
+ u8 len)
+{
+ u8 enc = IEEE80211_S1G_TIM_ENC_MODE_OLB;
+
+ tim_push(p, BC(enc, inverse, blk_off));
+ tim_push(p, len);
+ for (u8 i = 0; i < len; i++)
+ tim_push(p, subblocks[i]);
+}
+
+static void check_all_aids(struct kunit *test,
+ const struct ieee80211_tim_ie *tim, u8 tim_len,
+ const unsigned long *expected)
+{
+ for (u16 aid = 1; aid <= MAX_AID; aid++) {
+ bool want = test_bit(aid, expected);
+ bool got = ieee80211_s1g_check_tim(tim, tim_len, aid);
+
+ KUNIT_ASSERT_EQ_MSG(test, got, want,
+ "AID %u mismatch (got=%d want=%d)", aid,
+ got, want);
+ }
+}
+
+static void fill_bitmap(unsigned long *bm, const u16 *list, size_t n)
+{
+ size_t i;
+
+ bitmap_zero(bm, MAX_AID + 1);
+ for (i = 0; i < n; i++)
+ __set_bit(list[i], bm);
+}
+
+static void fill_bitmap_inverse(unsigned long *bm, u16 max_aid,
+ const u16 *except, size_t n_except)
+{
+ bitmap_zero(bm, MAX_AID + 1);
+ for (u16 aid = 1; aid <= max_aid; aid++)
+ __set_bit(aid, bm);
+
+ for (size_t i = 0; i < n_except; i++)
+ if (except[i] <= max_aid)
+ __clear_bit(except[i], bm);
+}
+
+static void s1g_tim_block_test(struct kunit *test)
+{
+ u8 buf[256] = {};
+ struct ieee80211_tim_ie *tim = (void *)buf;
+ u8 *p, tim_len;
+ static const u8 subblocks[] = {
+ 0x42, /* SB m=0: AIDs 1,6 */
+ 0xA0, /* SB m=2: AIDs 21,23 */
+ };
+ u8 blk_bmap = 0x05; /* bits 0 and 2 set */
+ bool inverse = false;
+ static const u16 set_list[] = { 1, 6, 21, 23 };
+ DECLARE_BITMAP(exp, MAX_AID + 1);
+
+ tim_begin(tim, &p);
+ pvb_add_block_bitmap(&p, 0, inverse, blk_bmap, subblocks);
+ tim_len = tim_end(tim, p);
+
+ fill_bitmap(exp, set_list, ARRAY_SIZE(set_list));
+
+ dump_tim_bits(test, tim, tim_len);
+ check_all_aids(test, tim, tim_len, exp);
+}
+
+static void s1g_tim_single_test(struct kunit *test)
+{
+ u8 buf[256] = {};
+ struct ieee80211_tim_ie *tim = (void *)buf;
+ u8 *p, tim_len;
+ bool inverse = false;
+ u8 blk_off = 0;
+ u8 single6 = 0x1f; /* 31 */
+ static const u16 set_list[] = { 31 };
+ DECLARE_BITMAP(exp, MAX_AID + 1);
+
+ tim_begin(tim, &p);
+ pvb_add_single_aid(&p, blk_off, inverse, single6);
+ tim_len = tim_end(tim, p);
+
+ fill_bitmap(exp, set_list, ARRAY_SIZE(set_list));
+
+ dump_tim_bits(test, tim, tim_len);
+ check_all_aids(test, tim, tim_len, exp);
+}
+
+static void s1g_tim_olb_test(struct kunit *test)
+{
+ u8 buf[256] = {};
+ struct ieee80211_tim_ie *tim = (void *)buf;
+ u8 *p, tim_len;
+ bool inverse = false;
+ u8 blk_off = 0;
+ static const u16 set_list[] = { 1, 6, 13, 15, 17, 22, 29, 31, 33,
+ 38, 45, 47, 49, 54, 61, 63, 65, 70 };
+ static const u8 subblocks[] = { 0x42, 0xA0, 0x42, 0xA0, 0x42,
+ 0xA0, 0x42, 0xA0, 0x42 };
+ u8 len = ARRAY_SIZE(subblocks);
+ DECLARE_BITMAP(exp, MAX_AID + 1);
+
+ tim_begin(tim, &p);
+ pvb_add_olb(&p, blk_off, inverse, subblocks, len);
+ tim_len = tim_end(tim, p);
+
+ fill_bitmap(exp, set_list, ARRAY_SIZE(set_list));
+
+ dump_tim_bits(test, tim, tim_len);
+ check_all_aids(test, tim, tim_len, exp);
+}
+
+static void s1g_tim_inverse_block_test(struct kunit *test)
+{
+ u8 buf[256] = {};
+ struct ieee80211_tim_ie *tim = (void *)buf;
+ u8 *p, tim_len;
+ /* Same sub-block content as Figure L-8, but inverse = true */
+ static const u8 subblocks[] = {
+ 0x42, /* SB m=0: AIDs 1,6 */
+ 0xA0, /* SB m=2: AIDs 21,23 */
+ };
+ u8 blk_bmap = 0x05;
+ bool inverse = true;
+ /* All AIDs except 1,6,21,23 are set */
+ static const u16 except[] = { 1, 6, 21, 23 };
+ DECLARE_BITMAP(exp, MAX_AID + 1);
+
+ tim_begin(tim, &p);
+ pvb_add_block_bitmap(&p, 0, inverse, blk_bmap, subblocks);
+ tim_len = tim_end(tim, p);
+
+ fill_bitmap_inverse(exp, 63, except, ARRAY_SIZE(except));
+
+ dump_tim_bits(test, tim, tim_len);
+ check_all_aids(test, tim, tim_len, exp);
+}
+
+static void s1g_tim_inverse_single_test(struct kunit *test)
+{
+ u8 buf[256] = {};
+ struct ieee80211_tim_ie *tim = (void *)buf;
+ u8 *p, tim_len;
+ bool inverse = true;
+ u8 blk_off = 0;
+ u8 single6 = 0x1f; /* 31 */
+ /* All AIDs except 31 are set */
+ static const u16 except[] = { 31 };
+ DECLARE_BITMAP(exp, MAX_AID + 1);
+
+ tim_begin(tim, &p);
+ pvb_add_single_aid(&p, blk_off, inverse, single6);
+ tim_len = tim_end(tim, p);
+
+ fill_bitmap_inverse(exp, 63, except, ARRAY_SIZE(except));
+
+ dump_tim_bits(test, tim, tim_len);
+ check_all_aids(test, tim, tim_len, exp);
+}
+
+static void s1g_tim_inverse_olb_test(struct kunit *test)
+{
+ u8 buf[256] = {};
+ struct ieee80211_tim_ie *tim = (void *)buf;
+ u8 *p, tim_len;
+ bool inverse = true;
+ u8 blk_off = 0, len;
+ /* All AIDs except the list below are set */
+ static const u16 except[] = { 1, 6, 13, 15, 17, 22, 29, 31, 33,
+ 38, 45, 47, 49, 54, 61, 63, 65, 70 };
+ static const u8 subblocks[] = { 0x42, 0xA0, 0x42, 0xA0, 0x42,
+ 0xA0, 0x42, 0xA0, 0x42 };
+ len = ARRAY_SIZE(subblocks);
+ DECLARE_BITMAP(exp, MAX_AID + 1);
+
+ tim_begin(tim, &p);
+ pvb_add_olb(&p, blk_off, inverse, subblocks, len);
+ tim_len = tim_end(tim, p);
+
+ fill_bitmap_inverse(exp, 127, except, ARRAY_SIZE(except));
+
+ dump_tim_bits(test, tim, tim_len);
+ check_all_aids(test, tim, tim_len, exp);
+}
+
+static struct kunit_case s1g_tim_test_cases[] = {
+ KUNIT_CASE(s1g_tim_block_test),
+ KUNIT_CASE(s1g_tim_single_test),
+ KUNIT_CASE(s1g_tim_olb_test),
+ KUNIT_CASE(s1g_tim_inverse_block_test),
+ KUNIT_CASE(s1g_tim_inverse_single_test),
+ KUNIT_CASE(s1g_tim_inverse_olb_test),
+ {}
+};
+
+static struct kunit_suite s1g_tim = {
+ .name = "mac80211-s1g-tim",
+ .test_cases = s1g_tim_test_cases,
+};
+
+kunit_test_suite(s1g_tim);
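
As a quick illustration of the block bitmap layout exercised above (a minimal userspace sketch, not part of the patch; all names are made up): an AID maps to block = AID / 64, subblock = (AID % 64) / 8 and bit = AID % 8. The sketch decodes the same encoded block used in s1g_tim_block_test() and prints AIDs 1, 6, 21 and 23.

/* toy_s1g_tim_decode.c - illustrative only; walks one block-bitmap encoded
 * block: block control octet, block bitmap, then one subblock octet per bit
 * set in the block bitmap.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* ENC=BLOCK (0), not inverse, block offset 0; blkmap 0x05 means
	 * subblocks m=0 and m=2 are present; 0x42 -> AIDs 1,6; 0xA0 -> 21,23.
	 */
	const uint8_t pvb[] = { 0x00, 0x05, 0x42, 0xA0 };
	const uint8_t *p = pvb;
	uint8_t ctrl = *p++;
	uint8_t blk_off = ctrl >> 3;
	uint8_t blkmap = *p++;

	for (int sb = 0; sb < 8; sb++) {
		if (!(blkmap & (1 << sb)))
			continue;

		uint8_t sub = *p++;

		for (int bit = 0; bit < 8; bit++)
			if (sub & (1 << bit))
				printf("AID %u buffered\n",
				       blk_off * 64 + sb * 8 + bit);
	}
	return 0;
}
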
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 00671ae45b2f..a27e2af5d569 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -1814,6 +1814,7 @@ static int invoke_tx_handlers_early(struct ieee80211_tx_data *tx)
txh_done:
if (unlikely(res == TX_DROP)) {
+ tx->sdata->tx_handlers_drop++;
I802_DEBUG_INC(tx->local->tx_handlers_drop);
if (tx->skb)
ieee80211_free_txskb(&tx->local->hw, tx->skb);
@@ -1858,6 +1859,7 @@ static int invoke_tx_handlers_late(struct ieee80211_tx_data *tx)
txh_done:
if (unlikely(res == TX_DROP)) {
+ tx->sdata->tx_handlers_drop++;
I802_DEBUG_INC(tx->local->tx_handlers_drop);
if (tx->skb)
ieee80211_free_txskb(&tx->local->hw, tx->skb);
@@ -1942,6 +1944,7 @@ static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata,
if (unlikely(res_prepare == TX_DROP)) {
ieee80211_free_txskb(&local->hw, skb);
+ tx.sdata->tx_handlers_drop++;
return true;
} else if (unlikely(res_prepare == TX_QUEUED)) {
return true;
@@ -3728,8 +3731,10 @@ void __ieee80211_xmit_fast(struct ieee80211_sub_if_data *sdata,
r = ieee80211_xmit_fast_finish(sdata, sta, fast_tx->pn_offs,
fast_tx->key, &tx);
tx.skb = NULL;
- if (r == TX_DROP)
+ if (r == TX_DROP) {
+ tx.sdata->tx_handlers_drop++;
goto free;
+ }
if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
sdata = container_of(sdata->bss,
@@ -4882,15 +4887,114 @@ void ieee80211_tx_pending(struct tasklet_struct *t)
/* functions for drivers to get certain frames */
+static void ieee80211_beacon_add_tim_pvb(struct ps_data *ps,
+ struct sk_buff *skb,
+ bool mcast_traffic)
+{
+ int i, n1 = 0, n2;
+
+ /*
+ * Find largest even number N1 so that bits numbered 1 through
+ * (N1 x 8) - 1 in the bitmap are 0 and number N2 so that bits
+ * (N2 + 1) x 8 through 2007 are 0.
+ */
+ for (i = 0; i < IEEE80211_MAX_TIM_LEN; i++) {
+ if (ps->tim[i]) {
+ n1 = i & 0xfe;
+ break;
+ }
+ }
+ n2 = n1;
+ for (i = IEEE80211_MAX_TIM_LEN - 1; i >= n1; i--) {
+ if (ps->tim[i]) {
+ n2 = i;
+ break;
+ }
+ }
+
+ /* Bitmap control */
+ skb_put_u8(skb, n1 | mcast_traffic);
+ /* Part Virt Bitmap */
+ skb_put_data(skb, ps->tim + n1, n2 - n1 + 1);
+}
+
+/*
+ * mac80211 currently supports encoding using block bitmap mode,
+ * non-inverse. The current implementation supports up to 1600 AIDs.
+ *
+ * Block bitmap encoding breaks down the AID bitmap into blocks of 64
+ * AIDs. Each block contains between 0 and 8 subblocks. Each subblock
+ * describes 8 AIDs and the presence of a subblock is determined by
+ * the block bitmap.
+ */
+static void ieee80211_s1g_beacon_add_tim_pvb(struct ps_data *ps,
+ struct sk_buff *skb,
+ bool mcast_traffic)
+{
+ int blk;
+
+ /*
+ * Emit a bitmap control block with a page slice number of 31 and a
+ * page index of 0, which, as per IEEE80211-2024 9.4.2.5.1, indicates
+ * that the entire page (2048 bits) indicated by the page index
+ * is encoded in the partial virtual bitmap.
+ */
+ skb_put_u8(skb, mcast_traffic | (31 << 1));
+
+ /* Emit an encoded block for each non-zero sub-block */
+ for (blk = 0; blk < IEEE80211_MAX_SUPPORTED_S1G_TIM_BLOCKS; blk++) {
+ u8 blk_bmap = 0;
+ int sblk;
+
+ for (sblk = 0; sblk < 8; sblk++) {
+ int sblk_idx = blk * 8 + sblk;
+
+ /*
+ * If the current subblock is non-zero, mark it as
+ * present in the block bitmap for the current block.
+ */
+ if (ps->tim[sblk_idx])
+ blk_bmap |= BIT(sblk);
+ }
+
+ /* If the current block contains no non-zero subblocks */
+ if (!blk_bmap)
+ continue;
+
+ /*
+ * Emit a block control byte for the current encoded block
+ * with an encoding mode of block bitmap (0x0), not inverse
+ * (0x0) and the current block offset (5 bits)
+ */
+ skb_put_u8(skb, blk << 3);
+
+ /*
+ * Emit the block bitmap for the current encoded block which
+ * contains the present subblocks.
+ */
+ skb_put_u8(skb, blk_bmap);
+
+ /* Emit the present subblocks */
+ for (sblk = 0; sblk < 8; sblk++) {
+ int sblk_idx = blk * 8 + sblk;
+
+ if (!(blk_bmap & BIT(sblk)))
+ continue;
+
+ skb_put_u8(skb, ps->tim[sblk_idx]);
+ }
+ }
+}
+
static void __ieee80211_beacon_add_tim(struct ieee80211_sub_if_data *sdata,
struct ieee80211_link_data *link,
struct ps_data *ps, struct sk_buff *skb,
bool is_template)
{
- u8 *pos, *tim;
- int aid0 = 0;
- int i, have_bits = 0, n1, n2;
+ struct element *tim;
+ bool mcast_traffic = false, have_bits = false;
struct ieee80211_bss_conf *link_conf = link->conf;
+ bool s1g = ieee80211_get_link_sband(link)->band == NL80211_BAND_S1GHZ;
/* Generate bitmap for TIM only if there are any STAs in power save
* mode. */
@@ -4898,7 +5002,8 @@ static void __ieee80211_beacon_add_tim(struct ieee80211_sub_if_data *sdata,
/* in the hope that this is faster than
* checking byte-for-byte */
have_bits = !bitmap_empty((unsigned long *)ps->tim,
- IEEE80211_MAX_AID+1);
+ IEEE80211_MAX_AID + 1);
+
if (!is_template) {
if (ps->dtim_count == 0)
ps->dtim_count = link_conf->dtim_period - 1;
@@ -4906,51 +5011,39 @@ static void __ieee80211_beacon_add_tim(struct ieee80211_sub_if_data *sdata,
ps->dtim_count--;
}
- tim = pos = skb_put(skb, 5);
- *pos++ = WLAN_EID_TIM;
- *pos++ = 3;
- *pos++ = ps->dtim_count;
- *pos++ = link_conf->dtim_period;
+ /* Length is set after parsing the AID bitmap */
+ tim = skb_put(skb, sizeof(struct element));
+ tim->id = WLAN_EID_TIM;
+ skb_put_u8(skb, ps->dtim_count);
+ skb_put_u8(skb, link_conf->dtim_period);
if (ps->dtim_count == 0 && !skb_queue_empty(&ps->bc_buf))
- aid0 = 1;
+ mcast_traffic = true;
- ps->dtim_bc_mc = aid0 == 1;
+ ps->dtim_bc_mc = mcast_traffic;
if (have_bits) {
- /* Find largest even number N1 so that bits numbered 1 through
- * (N1 x 8) - 1 in the bitmap are 0 and number N2 so that bits
- * (N2 + 1) x 8 through 2007 are 0. */
- n1 = 0;
- for (i = 0; i < IEEE80211_MAX_TIM_LEN; i++) {
- if (ps->tim[i]) {
- n1 = i & 0xfe;
- break;
- }
- }
- n2 = n1;
- for (i = IEEE80211_MAX_TIM_LEN - 1; i >= n1; i--) {
- if (ps->tim[i]) {
- n2 = i;
- break;
- }
- }
-
- /* Bitmap control */
- *pos++ = n1 | aid0;
- /* Part Virt Bitmap */
- skb_put_data(skb, ps->tim + n1, n2 - n1 + 1);
-
- tim[1] = n2 - n1 + 4;
+ if (s1g)
+ ieee80211_s1g_beacon_add_tim_pvb(ps, skb,
+ mcast_traffic);
+ else
+ ieee80211_beacon_add_tim_pvb(ps, skb, mcast_traffic);
} else {
- *pos++ = aid0; /* Bitmap control */
-
- if (ieee80211_get_link_sband(link)->band != NL80211_BAND_S1GHZ) {
- tim[1] = 4;
+ /*
+ * If there is no buffered unicast traffic for an S1G
+ * interface, we can exclude the bitmap control. This is in
+ * contrast to other PHY types, which do include the bitmap
+ * control and PVB even when there is no buffered traffic.
+ */
+ if (!s1g) {
+ /* Bitmap control */
+ skb_put_u8(skb, mcast_traffic);
/* Part Virt Bitmap */
skb_put_u8(skb, 0);
}
}
+
+ tim->datalen = skb_tail_pointer(skb) - tim->data;
}
static int ieee80211_beacon_add_tim(struct ieee80211_sub_if_data *sdata,
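
As a quick illustration of the partial virtual bitmap trimming that ieee80211_beacon_add_tim_pvb() preserves (a minimal userspace sketch, not part of the patch): N1 is the first non-zero TIM octet rounded down to an even index, N2 is the last non-zero octet, and only tim[N1..N2] is emitted, with N1 carried in the bitmap control.

#include <stdint.h>
#include <stdio.h>

#define TIM_LEN 16	/* shortened stand-in for IEEE80211_MAX_TIM_LEN */

int main(void)
{
	uint8_t tim[TIM_LEN] = { 0 };
	int n1 = 0, n2, i, mcast = 0;

	tim[3] = 0x10;	/* e.g. AID 28 buffered */
	tim[9] = 0x02;	/* e.g. AID 73 buffered */

	for (i = 0; i < TIM_LEN; i++) {
		if (tim[i]) {
			n1 = i & 0xfe;	/* round down to an even offset */
			break;
		}
	}
	n2 = n1;
	for (i = TIM_LEN - 1; i >= n1; i--) {
		if (tim[i]) {
			n2 = i;
			break;
		}
	}

	/* Bitmap control carries N1 (even) plus the multicast bit */
	printf("bitmap ctrl = 0x%02x, emit octets %d..%d (%d bytes)\n",
	       n1 | mcast, n1, n2, n2 - n1 + 1);
	return 0;
}
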
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 32f1bc5908c5..9eb35e3b9e52 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -1756,7 +1756,6 @@ int ieee80211_reconfig(struct ieee80211_local *local)
bool sched_scan_stopped = false;
bool suspended = local->suspended;
bool in_reconfig = false;
- u32 rts_threshold;
lockdep_assert_wiphy(local->hw.wiphy);
@@ -1832,7 +1831,9 @@ int ieee80211_reconfig(struct ieee80211_local *local)
/* setup RTS threshold */
if (hw->wiphy->n_radio > 0) {
for (i = 0; i < hw->wiphy->n_radio; i++) {
- rts_threshold = hw->wiphy->radio_cfg[i].rts_threshold;
+ u32 rts_threshold =
+ hw->wiphy->radio_cfg[i].rts_threshold;
+
drv_set_rts_threshold(local, i, rts_threshold);
}
} else {
@@ -4022,16 +4023,13 @@ bool ieee80211_is_radio_idx_in_scan_req(struct wiphy *wiphy,
for (i = 0; i < scan_req->n_channels; i++) {
chan = scan_req->channels[i];
chan_radio_idx = cfg80211_get_radio_idx_by_chan(wiphy, chan);
- /*
- * The chan_radio_idx should be valid since it's taken from a
- * valid scan request.
- * However, if chan_radio_idx is unexpectedly invalid (negative),
- * we take a conservative approach and assume the scan request
- * might use the specified radio_idx. Hence, return true.
- */
- if (WARN_ON(chan_radio_idx < 0))
- return true;
+ /* The radio index either matched successfully, or an error
+ * occurred. For example, if radio-level information is
+ * missing, the same error value is returned. This
+ * typically implies a single-radio setup, in which case
+ * the operation should not be allowed.
+ */
if (chan_radio_idx == radio_idx)
return true;
}
diff --git a/net/mptcp/sockopt.c b/net/mptcp/sockopt.c
index 2c267aff95be..2abe6f1e9940 100644
--- a/net/mptcp/sockopt.c
+++ b/net/mptcp/sockopt.c
@@ -1532,13 +1532,12 @@ static void sync_socket_options(struct mptcp_sock *msk, struct sock *ssk)
{
static const unsigned int tx_rx_locks = SOCK_RCVBUF_LOCK | SOCK_SNDBUF_LOCK;
struct sock *sk = (struct sock *)msk;
+ bool keep_open;
- if (ssk->sk_prot->keepalive) {
- if (sock_flag(sk, SOCK_KEEPOPEN))
- ssk->sk_prot->keepalive(ssk, 1);
- else
- ssk->sk_prot->keepalive(ssk, 0);
- }
+ keep_open = sock_flag(sk, SOCK_KEEPOPEN);
+ if (ssk->sk_prot->keepalive)
+ ssk->sk_prot->keepalive(ssk, keep_open);
+ sock_valbool_flag(ssk, SOCK_KEEPOPEN, keep_open);
ssk->sk_priority = sk->sk_priority;
ssk->sk_bound_dev_if = sk->sk_bound_dev_if;
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 68e273d8821a..eed434e0a970 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -1123,11 +1123,14 @@ nf_tables_chain_type_lookup(struct net *net, const struct nlattr *nla,
return ERR_PTR(-ENOENT);
}
-static __be16 nft_base_seq(const struct net *net)
+static unsigned int nft_base_seq(const struct net *net)
{
- struct nftables_pernet *nft_net = nft_pernet(net);
+ return READ_ONCE(net->nft.base_seq);
+}
- return htons(nft_net->base_seq & 0xffff);
+static __be16 nft_base_seq_be16(const struct net *net)
+{
+ return htons(nft_base_seq(net) & 0xffff);
}
static const struct nla_policy nft_table_policy[NFTA_TABLE_MAX + 1] = {
@@ -1147,7 +1150,7 @@ static int nf_tables_fill_table_info(struct sk_buff *skb, struct net *net,
nlh = nfnl_msg_put(skb, portid, seq,
nfnl_msg_type(NFNL_SUBSYS_NFTABLES, event),
- flags, family, NFNETLINK_V0, nft_base_seq(net));
+ flags, family, NFNETLINK_V0, nft_base_seq_be16(net));
if (!nlh)
goto nla_put_failure;
@@ -1240,7 +1243,7 @@ static int nf_tables_dump_tables(struct sk_buff *skb,
rcu_read_lock();
nft_net = nft_pernet(net);
- cb->seq = READ_ONCE(nft_net->base_seq);
+ cb->seq = nft_base_seq(net);
list_for_each_entry_rcu(table, &nft_net->tables, list) {
if (family != NFPROTO_UNSPEC && family != table->family)
@@ -2022,7 +2025,7 @@ static int nf_tables_fill_chain_info(struct sk_buff *skb, struct net *net,
nlh = nfnl_msg_put(skb, portid, seq,
nfnl_msg_type(NFNL_SUBSYS_NFTABLES, event),
- flags, family, NFNETLINK_V0, nft_base_seq(net));
+ flags, family, NFNETLINK_V0, nft_base_seq_be16(net));
if (!nlh)
goto nla_put_failure;
@@ -2125,7 +2128,7 @@ static int nf_tables_dump_chains(struct sk_buff *skb,
rcu_read_lock();
nft_net = nft_pernet(net);
- cb->seq = READ_ONCE(nft_net->base_seq);
+ cb->seq = nft_base_seq(net);
list_for_each_entry_rcu(table, &nft_net->tables, list) {
if (family != NFPROTO_UNSPEC && family != table->family)
@@ -3663,7 +3666,7 @@ static int nf_tables_fill_rule_info(struct sk_buff *skb, struct net *net,
u16 type = nfnl_msg_type(NFNL_SUBSYS_NFTABLES, event);
nlh = nfnl_msg_put(skb, portid, seq, type, flags, family, NFNETLINK_V0,
- nft_base_seq(net));
+ nft_base_seq_be16(net));
if (!nlh)
goto nla_put_failure;
@@ -3831,7 +3834,7 @@ static int nf_tables_dump_rules(struct sk_buff *skb,
rcu_read_lock();
nft_net = nft_pernet(net);
- cb->seq = READ_ONCE(nft_net->base_seq);
+ cb->seq = nft_base_seq(net);
list_for_each_entry_rcu(table, &nft_net->tables, list) {
if (family != NFPROTO_UNSPEC && family != table->family)
@@ -4042,7 +4045,7 @@ static int nf_tables_getrule_reset(struct sk_buff *skb,
buf = kasprintf(GFP_ATOMIC, "%.*s:%u",
nla_len(nla[NFTA_RULE_TABLE]),
(char *)nla_data(nla[NFTA_RULE_TABLE]),
- nft_net->base_seq);
+ nft_base_seq(net));
audit_log_nfcfg(buf, info->nfmsg->nfgen_family, 1,
AUDIT_NFT_OP_RULE_RESET, GFP_ATOMIC);
kfree(buf);
@@ -4879,7 +4882,7 @@ static int nf_tables_fill_set(struct sk_buff *skb, const struct nft_ctx *ctx,
nlh = nfnl_msg_put(skb, portid, seq,
nfnl_msg_type(NFNL_SUBSYS_NFTABLES, event),
flags, ctx->family, NFNETLINK_V0,
- nft_base_seq(ctx->net));
+ nft_base_seq_be16(ctx->net));
if (!nlh)
goto nla_put_failure;
@@ -5024,7 +5027,7 @@ static int nf_tables_dump_sets(struct sk_buff *skb, struct netlink_callback *cb)
rcu_read_lock();
nft_net = nft_pernet(net);
- cb->seq = READ_ONCE(nft_net->base_seq);
+ cb->seq = nft_base_seq(net);
list_for_each_entry_rcu(table, &nft_net->tables, list) {
if (ctx->family != NFPROTO_UNSPEC &&
@@ -6201,7 +6204,7 @@ static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb)
rcu_read_lock();
nft_net = nft_pernet(net);
- cb->seq = READ_ONCE(nft_net->base_seq);
+ cb->seq = nft_base_seq(net);
list_for_each_entry_rcu(table, &nft_net->tables, list) {
if (dump_ctx->ctx.family != NFPROTO_UNSPEC &&
@@ -6230,7 +6233,7 @@ static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb)
seq = cb->nlh->nlmsg_seq;
nlh = nfnl_msg_put(skb, portid, seq, event, NLM_F_MULTI,
- table->family, NFNETLINK_V0, nft_base_seq(net));
+ table->family, NFNETLINK_V0, nft_base_seq_be16(net));
if (!nlh)
goto nla_put_failure;
@@ -6323,7 +6326,7 @@ static int nf_tables_fill_setelem_info(struct sk_buff *skb,
event = nfnl_msg_type(NFNL_SUBSYS_NFTABLES, event);
nlh = nfnl_msg_put(skb, portid, seq, event, flags, ctx->family,
- NFNETLINK_V0, nft_base_seq(ctx->net));
+ NFNETLINK_V0, nft_base_seq_be16(ctx->net));
if (!nlh)
goto nla_put_failure;
@@ -6622,7 +6625,7 @@ static int nf_tables_getsetelem_reset(struct sk_buff *skb,
}
nelems++;
}
- audit_log_nft_set_reset(dump_ctx.ctx.table, nft_net->base_seq, nelems);
+ audit_log_nft_set_reset(dump_ctx.ctx.table, nft_base_seq(info->net), nelems);
out_unlock:
rcu_read_unlock();
@@ -8372,7 +8375,7 @@ static int nf_tables_fill_obj_info(struct sk_buff *skb, struct net *net,
nlh = nfnl_msg_put(skb, portid, seq,
nfnl_msg_type(NFNL_SUBSYS_NFTABLES, event),
- flags, family, NFNETLINK_V0, nft_base_seq(net));
+ flags, family, NFNETLINK_V0, nft_base_seq_be16(net));
if (!nlh)
goto nla_put_failure;
@@ -8437,7 +8440,7 @@ static int nf_tables_dump_obj(struct sk_buff *skb, struct netlink_callback *cb)
rcu_read_lock();
nft_net = nft_pernet(net);
- cb->seq = READ_ONCE(nft_net->base_seq);
+ cb->seq = nft_base_seq(net);
list_for_each_entry_rcu(table, &nft_net->tables, list) {
if (family != NFPROTO_UNSPEC && family != table->family)
@@ -8471,7 +8474,7 @@ cont:
idx++;
}
if (ctx->reset && entries)
- audit_log_obj_reset(table, nft_net->base_seq, entries);
+ audit_log_obj_reset(table, nft_base_seq(net), entries);
if (rc < 0)
break;
}
@@ -8640,7 +8643,7 @@ static int nf_tables_getobj_reset(struct sk_buff *skb,
buf = kasprintf(GFP_ATOMIC, "%.*s:%u",
nla_len(nla[NFTA_OBJ_TABLE]),
(char *)nla_data(nla[NFTA_OBJ_TABLE]),
- nft_net->base_seq);
+ nft_base_seq(net));
audit_log_nfcfg(buf, info->nfmsg->nfgen_family, 1,
AUDIT_NFT_OP_OBJ_RESET, GFP_ATOMIC);
kfree(buf);
@@ -8745,9 +8748,8 @@ void nft_obj_notify(struct net *net, const struct nft_table *table,
struct nft_object *obj, u32 portid, u32 seq, int event,
u16 flags, int family, int report, gfp_t gfp)
{
- struct nftables_pernet *nft_net = nft_pernet(net);
char *buf = kasprintf(gfp, "%s:%u",
- table->name, nft_net->base_seq);
+ table->name, nft_base_seq(net));
audit_log_nfcfg(buf,
family,
@@ -9433,7 +9435,7 @@ static int nf_tables_fill_flowtable_info(struct sk_buff *skb, struct net *net,
nlh = nfnl_msg_put(skb, portid, seq,
nfnl_msg_type(NFNL_SUBSYS_NFTABLES, event),
- flags, family, NFNETLINK_V0, nft_base_seq(net));
+ flags, family, NFNETLINK_V0, nft_base_seq_be16(net));
if (!nlh)
goto nla_put_failure;
@@ -9502,7 +9504,7 @@ static int nf_tables_dump_flowtable(struct sk_buff *skb,
rcu_read_lock();
nft_net = nft_pernet(net);
- cb->seq = READ_ONCE(nft_net->base_seq);
+ cb->seq = nft_base_seq(net);
list_for_each_entry_rcu(table, &nft_net->tables, list) {
if (family != NFPROTO_UNSPEC && family != table->family)
@@ -9687,17 +9689,16 @@ static void nf_tables_flowtable_destroy(struct nft_flowtable *flowtable)
static int nf_tables_fill_gen_info(struct sk_buff *skb, struct net *net,
u32 portid, u32 seq)
{
- struct nftables_pernet *nft_net = nft_pernet(net);
struct nlmsghdr *nlh;
char buf[TASK_COMM_LEN];
int event = nfnl_msg_type(NFNL_SUBSYS_NFTABLES, NFT_MSG_NEWGEN);
nlh = nfnl_msg_put(skb, portid, seq, event, 0, AF_UNSPEC,
- NFNETLINK_V0, nft_base_seq(net));
+ NFNETLINK_V0, nft_base_seq_be16(net));
if (!nlh)
goto nla_put_failure;
- if (nla_put_be32(skb, NFTA_GEN_ID, htonl(nft_net->base_seq)) ||
+ if (nla_put_be32(skb, NFTA_GEN_ID, htonl(nft_base_seq(net))) ||
nla_put_be32(skb, NFTA_GEN_PROC_PID, htonl(task_pid_nr(current))) ||
nla_put_string(skb, NFTA_GEN_PROC_NAME, get_task_comm(buf, current)))
goto nla_put_failure;
@@ -10959,11 +10960,12 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
* Bump generation counter, invalidate any dump in progress.
* Cannot fail after this point.
*/
- base_seq = READ_ONCE(nft_net->base_seq);
+ base_seq = nft_base_seq(net);
while (++base_seq == 0)
;
- WRITE_ONCE(nft_net->base_seq, base_seq);
+ /* pairs with smp_load_acquire in nft_lookup_eval */
+ smp_store_release(&net->nft.base_seq, base_seq);
gc_seq = nft_gc_seq_begin(nft_net);
@@ -11172,7 +11174,7 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
nft_commit_notify(net, NETLINK_CB(skb).portid);
nf_tables_gen_notify(net, skb, NFT_MSG_NEWGEN);
- nf_tables_commit_audit_log(&adl, nft_net->base_seq);
+ nf_tables_commit_audit_log(&adl, nft_base_seq(net));
nft_gc_seq_end(nft_net, gc_seq);
nft_net->validate_state = NFT_VALIDATE_SKIP;
@@ -11497,7 +11499,7 @@ static bool nf_tables_valid_genid(struct net *net, u32 genid)
mutex_lock(&nft_net->commit_mutex);
nft_net->tstamp = get_jiffies_64();
- genid_ok = genid == 0 || nft_net->base_seq == genid;
+ genid_ok = genid == 0 || nft_base_seq(net) == genid;
if (!genid_ok)
mutex_unlock(&nft_net->commit_mutex);
@@ -12134,7 +12136,7 @@ static int __net_init nf_tables_init_net(struct net *net)
INIT_LIST_HEAD(&nft_net->module_list);
INIT_LIST_HEAD(&nft_net->notify_list);
mutex_init(&nft_net->commit_mutex);
- nft_net->base_seq = 1;
+ net->nft.base_seq = 1;
nft_net->gc_seq = 0;
nft_net->validate_state = NFT_VALIDATE_SKIP;
INIT_WORK(&nft_net->destroy_work, nf_tables_trans_destroy_work);
diff --git a/net/netfilter/nft_lookup.c b/net/netfilter/nft_lookup.c
index 40c602ffbcba..58c5b14889c4 100644
--- a/net/netfilter/nft_lookup.c
+++ b/net/netfilter/nft_lookup.c
@@ -24,11 +24,11 @@ struct nft_lookup {
struct nft_set_binding binding;
};
-#ifdef CONFIG_MITIGATION_RETPOLINE
-const struct nft_set_ext *
-nft_set_do_lookup(const struct net *net, const struct nft_set *set,
- const u32 *key)
+static const struct nft_set_ext *
+__nft_set_do_lookup(const struct net *net, const struct nft_set *set,
+ const u32 *key)
{
+#ifdef CONFIG_MITIGATION_RETPOLINE
if (set->ops == &nft_set_hash_fast_type.ops)
return nft_hash_lookup_fast(net, set, key);
if (set->ops == &nft_set_hash_type.ops)
@@ -51,10 +51,46 @@ nft_set_do_lookup(const struct net *net, const struct nft_set *set,
return nft_rbtree_lookup(net, set, key);
WARN_ON_ONCE(1);
+#endif
return set->ops->lookup(net, set, key);
}
+
+static unsigned int nft_base_seq(const struct net *net)
+{
+ /* pairs with smp_store_release() in nf_tables_commit() */
+ return smp_load_acquire(&net->nft.base_seq);
+}
+
+static bool nft_lookup_should_retry(const struct net *net, unsigned int seq)
+{
+ return unlikely(seq != nft_base_seq(net));
+}
+
+const struct nft_set_ext *
+nft_set_do_lookup(const struct net *net, const struct nft_set *set,
+ const u32 *key)
+{
+ const struct nft_set_ext *ext;
+ unsigned int base_seq;
+
+ do {
+ base_seq = nft_base_seq(net);
+
+ ext = __nft_set_do_lookup(net, set, key);
+ if (ext)
+ break;
+ /* No match? There is a small chance that lookup was
+ * performed in the old generation, but nf_tables_commit()
+ * already unlinked a (matching) element.
+ *
+ * We need to repeat the lookup to make sure that we didn't
+ * miss a matching element in the new generation.
+ */
+ } while (nft_lookup_should_retry(net, base_seq));
+
+ return ext;
+}
EXPORT_SYMBOL_GPL(nft_set_do_lookup);
-#endif
void nft_lookup_eval(const struct nft_expr *expr,
struct nft_regs *regs,
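
The retry loop above pairs an acquire load of net->nft.base_seq with the release store done at commit time. A rough userspace analogue using C11 atomics (a sketch under the assumption that C11 acquire/release ordering models the kernel's smp_load_acquire()/smp_store_release(); the lookup and all names are made up):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static _Atomic unsigned int base_seq = 1;

static bool do_lookup_once(int key)
{
	return key == 42;	/* stand-in for the real set lookup */
}

static bool lookup_with_retry(int key)
{
	unsigned int seq;
	bool found;

	do {
		seq = atomic_load_explicit(&base_seq, memory_order_acquire);
		found = do_lookup_once(key);
		/* retry only a miss that raced with a generation bump */
	} while (!found &&
		 seq != atomic_load_explicit(&base_seq, memory_order_acquire));

	return found;
}

int main(void)
{
	/* a committer would publish a new generation with:
	 * atomic_store_explicit(&base_seq, next, memory_order_release);
	 */
	printf("%d\n", lookup_with_retry(41));
	printf("%d\n", lookup_with_retry(42));
	return 0;
}
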
diff --git a/net/netfilter/nft_set_bitmap.c b/net/netfilter/nft_set_bitmap.c
index c24c922f895d..8d3f040a904a 100644
--- a/net/netfilter/nft_set_bitmap.c
+++ b/net/netfilter/nft_set_bitmap.c
@@ -226,7 +226,8 @@ static void nft_bitmap_walk(const struct nft_ctx *ctx,
const struct nft_bitmap *priv = nft_set_priv(set);
struct nft_bitmap_elem *be;
- list_for_each_entry_rcu(be, &priv->list, head) {
+ list_for_each_entry_rcu(be, &priv->list, head,
+ lockdep_is_held(&nft_pernet(ctx->net)->commit_mutex)) {
if (iter->count < iter->skip)
goto cont;
diff --git a/net/netfilter/nft_set_pipapo.c b/net/netfilter/nft_set_pipapo.c
index 4b64c3bd8e70..a7b8fa8cab7c 100644
--- a/net/netfilter/nft_set_pipapo.c
+++ b/net/netfilter/nft_set_pipapo.c
@@ -549,6 +549,23 @@ static struct nft_pipapo_elem *pipapo_get(const struct nft_pipapo_match *m,
*
* This function is called from the data path. It will search for
* an element matching the given key in the current active copy.
+ * Unlike other set types, this uses NFT_GENMASK_ANY instead of
+ * nft_genmask_cur().
+ *
+ * This is because new (future) elements are not reachable from
+ * priv->match; they get added to priv->clone instead.
+ * When the commit phase flips the generation bitmask, the
+ * 'now old' entries are skipped, but the 'now current'
+ * elements do not become visible. Using nft_genmask_cur() thus creates
+ * an inconsistent state: matching old entries get skipped while the
+ * newly matching entries are unreachable.
+ *
+ * NFT_GENMASK_ANY will still find the 'now old' entries, which ensures a
+ * consistent view of priv->match.
+ *
+ * nft_pipapo_commit swaps ->clone and ->match shortly after the
+ * genbit flip. As ->clone doesn't contain the old entries in the first
+ * place, lookup will only find the now-current ones.
*
* Return: nftables API extension pointer or NULL if no match.
*/
@@ -557,12 +574,11 @@ nft_pipapo_lookup(const struct net *net, const struct nft_set *set,
const u32 *key)
{
struct nft_pipapo *priv = nft_set_priv(set);
- u8 genmask = nft_genmask_cur(net);
const struct nft_pipapo_match *m;
const struct nft_pipapo_elem *e;
m = rcu_dereference(priv->match);
- e = pipapo_get_slow(m, (const u8 *)key, genmask, get_jiffies_64());
+ e = pipapo_get_slow(m, (const u8 *)key, NFT_GENMASK_ANY, get_jiffies_64());
return e ? &e->ext : NULL;
}
diff --git a/net/netfilter/nft_set_pipapo_avx2.c b/net/netfilter/nft_set_pipapo_avx2.c
index 7559306d0aed..27dab3667548 100644
--- a/net/netfilter/nft_set_pipapo_avx2.c
+++ b/net/netfilter/nft_set_pipapo_avx2.c
@@ -1275,7 +1275,6 @@ nft_pipapo_avx2_lookup(const struct net *net, const struct nft_set *set,
const u32 *key)
{
struct nft_pipapo *priv = nft_set_priv(set);
- u8 genmask = nft_genmask_cur(net);
const struct nft_pipapo_match *m;
const u8 *rp = (const u8 *)key;
const struct nft_pipapo_elem *e;
@@ -1293,7 +1292,7 @@ nft_pipapo_avx2_lookup(const struct net *net, const struct nft_set *set,
m = rcu_dereference(priv->match);
- e = pipapo_get_avx2(m, rp, genmask, get_jiffies_64());
+ e = pipapo_get_avx2(m, rp, NFT_GENMASK_ANY, get_jiffies_64());
local_bh_enable();
return e ? &e->ext : NULL;
diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c
index b311b66df3e9..ca594161b840 100644
--- a/net/netfilter/nft_set_rbtree.c
+++ b/net/netfilter/nft_set_rbtree.c
@@ -77,7 +77,9 @@ __nft_rbtree_lookup(const struct net *net, const struct nft_set *set,
nft_rbtree_interval_end(rbe) &&
nft_rbtree_interval_start(interval))
continue;
- interval = rbe;
+ if (nft_set_elem_active(&rbe->ext, genmask) &&
+ !nft_rbtree_elem_expired(rbe))
+ interval = rbe;
} else if (d > 0)
parent = rcu_dereference_raw(parent->rb_right);
else {
@@ -102,8 +104,6 @@ __nft_rbtree_lookup(const struct net *net, const struct nft_set *set,
}
if (set->flags & NFT_SET_INTERVAL && interval != NULL &&
- nft_set_elem_active(&interval->ext, genmask) &&
- !nft_rbtree_elem_expired(interval) &&
nft_rbtree_interval_start(interval))
return &interval->ext;
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index 104732d34543..978c129c6095 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -1836,6 +1836,9 @@ static int genl_bind(struct net *net, int group)
!ns_capable(net->user_ns, CAP_SYS_ADMIN))
ret = -EPERM;
+ if (ret)
+ break;
+
if (family->bind)
family->bind(i);
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 9d42c4bd6e39..173e6edda08f 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -203,8 +203,7 @@ static void prb_retire_current_block(struct tpacket_kbdq_core *,
static int prb_queue_frozen(struct tpacket_kbdq_core *);
static void prb_open_block(struct tpacket_kbdq_core *,
struct tpacket_block_desc *);
-static void prb_retire_rx_blk_timer_expired(struct timer_list *);
-static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *);
+static enum hrtimer_restart prb_retire_rx_blk_timer_expired(struct hrtimer *);
static void prb_fill_rxhash(struct tpacket_kbdq_core *, struct tpacket3_hdr *);
static void prb_clear_rxhash(struct tpacket_kbdq_core *,
struct tpacket3_hdr *);
@@ -579,33 +578,13 @@ static __be16 vlan_get_protocol_dgram(const struct sk_buff *skb)
return proto;
}
-static void prb_del_retire_blk_timer(struct tpacket_kbdq_core *pkc)
-{
- timer_delete_sync(&pkc->retire_blk_timer);
-}
-
static void prb_shutdown_retire_blk_timer(struct packet_sock *po,
struct sk_buff_head *rb_queue)
{
struct tpacket_kbdq_core *pkc;
pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
-
- spin_lock_bh(&rb_queue->lock);
- pkc->delete_blk_timer = 1;
- spin_unlock_bh(&rb_queue->lock);
-
- prb_del_retire_blk_timer(pkc);
-}
-
-static void prb_setup_retire_blk_timer(struct packet_sock *po)
-{
- struct tpacket_kbdq_core *pkc;
-
- pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
- timer_setup(&pkc->retire_blk_timer, prb_retire_rx_blk_timer_expired,
- 0);
- pkc->retire_blk_timer.expires = jiffies;
+ hrtimer_cancel(&pkc->retire_blk_timer);
}
static int prb_calc_retire_blk_tmo(struct packet_sock *po,
@@ -669,57 +648,36 @@ static void init_prb_bdqc(struct packet_sock *po,
p1->knum_blocks = req_u->req3.tp_block_nr;
p1->hdrlen = po->tp_hdrlen;
p1->version = po->tp_version;
- p1->last_kactive_blk_num = 0;
po->stats.stats3.tp_freeze_q_cnt = 0;
if (req_u->req3.tp_retire_blk_tov)
- p1->retire_blk_tov = req_u->req3.tp_retire_blk_tov;
+ p1->interval_ktime = ms_to_ktime(req_u->req3.tp_retire_blk_tov);
else
- p1->retire_blk_tov = prb_calc_retire_blk_tmo(po,
- req_u->req3.tp_block_size);
- p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov);
+ p1->interval_ktime = ms_to_ktime(prb_calc_retire_blk_tmo(po,
+ req_u->req3.tp_block_size));
p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv;
rwlock_init(&p1->blk_fill_in_prog_lock);
p1->max_frame_len = p1->kblk_size - BLK_PLUS_PRIV(p1->blk_sizeof_priv);
prb_init_ft_ops(p1, req_u);
- prb_setup_retire_blk_timer(po);
+ hrtimer_setup(&p1->retire_blk_timer, prb_retire_rx_blk_timer_expired,
+ CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
+ hrtimer_start(&p1->retire_blk_timer, p1->interval_ktime,
+ HRTIMER_MODE_REL_SOFT);
prb_open_block(p1, pbd);
}
-/* Do NOT update the last_blk_num first.
- * Assumes sk_buff_head lock is held.
- */
-static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *pkc)
-{
- mod_timer(&pkc->retire_blk_timer,
- jiffies + pkc->tov_in_jiffies);
- pkc->last_kactive_blk_num = pkc->kactive_blk_num;
-}
-
/*
- * Timer logic:
- * 1) We refresh the timer only when we open a block.
- * By doing this we don't waste cycles refreshing the timer
- * on packet-by-packet basis.
- *
* With a 1MB block-size, on a 1Gbps line, it will take
* i) ~8 ms to fill a block + ii) memcpy etc.
* In this cut we are not accounting for the memcpy time.
*
- * So, if the user sets the 'tmo' to 10ms then the timer
- * will never fire while the block is still getting filled
- * (which is what we want). However, the user could choose
- * to close a block early and that's fine.
- *
- * But when the timer does fire, we check whether or not to refresh it.
* Since the tmo granularity is in msecs, it is not too expensive
* to refresh the timer, lets say every '8' msecs.
* Either the user can set the 'tmo' or we can derive it based on
* a) line-speed and b) block-size.
* prb_calc_retire_blk_tmo() calculates the tmo.
- *
*/
-static void prb_retire_rx_blk_timer_expired(struct timer_list *t)
+static enum hrtimer_restart prb_retire_rx_blk_timer_expired(struct hrtimer *t)
{
struct packet_sock *po =
timer_container_of(po, t, rx_ring.prb_bdqc.retire_blk_timer);
@@ -732,9 +690,6 @@ static void prb_retire_rx_blk_timer_expired(struct timer_list *t)
frozen = prb_queue_frozen(pkc);
pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
- if (unlikely(pkc->delete_blk_timer))
- goto out;
-
/* We only need to plug the race when the block is partially filled.
* tpacket_rcv:
* lock(); increment BLOCK_NUM_PKTS; unlock()
@@ -750,46 +705,31 @@ static void prb_retire_rx_blk_timer_expired(struct timer_list *t)
write_unlock(&pkc->blk_fill_in_prog_lock);
}
- if (pkc->last_kactive_blk_num == pkc->kactive_blk_num) {
- if (!frozen) {
- if (!BLOCK_NUM_PKTS(pbd)) {
- /* An empty block. Just refresh the timer. */
- goto refresh_timer;
- }
+ if (!frozen) {
+ if (BLOCK_NUM_PKTS(pbd)) {
+ /* Not an empty block. Need to retire the block. */
prb_retire_current_block(pkc, po, TP_STATUS_BLK_TMO);
- if (!prb_dispatch_next_block(pkc, po))
- goto refresh_timer;
- else
- goto out;
- } else {
- /* Case 1. Queue was frozen because user-space was
- * lagging behind.
+ prb_dispatch_next_block(pkc, po);
+ }
+ } else {
+ /* Case 1. Queue was frozen because user-space was
+ * lagging behind.
+ */
+ if (!prb_curr_blk_in_use(pbd)) {
+ /* Case 2. Queue was frozen, user-space caught up,
+ * now the link went idle && the timer fired.
+ * We don't have a block to close, so we open this
+ * block and restart the timer.
+ * Opening a block thaws the queue and restarts the timer;
+ * thawing/timer-refresh is a side effect.
*/
- if (prb_curr_blk_in_use(pbd)) {
- /*
- * Ok, user-space is still behind.
- * So just refresh the timer.
- */
- goto refresh_timer;
- } else {
- /* Case 2. queue was frozen,user-space caught up,
- * now the link went idle && the timer fired.
- * We don't have a block to close.So we open this
- * block and restart the timer.
- * opening a block thaws the queue,restarts timer
- * Thawing/timer-refresh is a side effect.
- */
- prb_open_block(pkc, pbd);
- goto out;
- }
+ prb_open_block(pkc, pbd);
}
}
-refresh_timer:
- _prb_refresh_rx_retire_blk_timer(pkc);
-
-out:
+ hrtimer_forward_now(&pkc->retire_blk_timer, pkc->interval_ktime);
spin_unlock(&po->sk.sk_receive_queue.lock);
+ return HRTIMER_RESTART;
}
static void prb_flush_block(struct tpacket_kbdq_core *pkc1,
@@ -883,11 +823,18 @@ static void prb_thaw_queue(struct tpacket_kbdq_core *pkc)
}
/*
- * Side effect of opening a block:
+ * prb_open_block is called by tpacket_rcv or timer callback.
*
- * 1) prb_queue is thawed.
- * 2) retire_blk_timer is refreshed.
+ * Reasons why we do NOT update the hrtimer in prb_open_block:
+ * 1) It would add complexity to distinguish the two caller scenarios.
+ * 2) hrtimer_cancel() and hrtimer_start() would need to be called to update
+ *    the TMO of an already enqueued hrtimer, leading to complex shutdown logic.
 *
+ * One side effect of not updating the hrtimer when called from tpacket_rcv is
+ * that a newly opened block triggered by tpacket_rcv may be retired earlier
+ * than expected. On the other hand, if the timeout were updated in
+ * prb_open_block, frequent packet reception causing prb_open_block to be
+ * called could cause the hrtimer to be removed and re-enqueued repeatedly.
*/
static void prb_open_block(struct tpacket_kbdq_core *pkc1,
struct tpacket_block_desc *pbd1)
@@ -921,7 +868,6 @@ static void prb_open_block(struct tpacket_kbdq_core *pkc1,
pkc1->pkblk_end = pkc1->pkblk_start + pkc1->kblk_size;
prb_thaw_queue(pkc1);
- _prb_refresh_rx_retire_blk_timer(pkc1);
smp_wmb();
}
diff --git a/net/packet/diag.c b/net/packet/diag.c
index 6ce1dcc284d9..c8f43e0c1925 100644
--- a/net/packet/diag.c
+++ b/net/packet/diag.c
@@ -83,7 +83,7 @@ static int pdiag_put_ring(struct packet_ring_buffer *ring, int ver, int nl_type,
pdr.pdr_frame_nr = ring->frame_max + 1;
if (ver > TPACKET_V2) {
- pdr.pdr_retire_tmo = ring->prb_bdqc.retire_blk_tov;
+ pdr.pdr_retire_tmo = ktime_to_ms(ring->prb_bdqc.interval_ktime);
pdr.pdr_sizeof_priv = ring->prb_bdqc.blk_sizeof_priv;
pdr.pdr_features = ring->prb_bdqc.feature_req_word;
} else {
diff --git a/net/packet/internal.h b/net/packet/internal.h
index 1e743d0316fd..b76e645cd78d 100644
--- a/net/packet/internal.h
+++ b/net/packet/internal.h
@@ -20,15 +20,10 @@ struct tpacket_kbdq_core {
unsigned int feature_req_word;
unsigned int hdrlen;
unsigned char reset_pending_on_curr_blk;
- unsigned char delete_blk_timer;
unsigned short kactive_blk_num;
unsigned short blk_sizeof_priv;
- /* last_kactive_blk_num:
- * trick to see if user-space has caught up
- * in order to avoid refreshing timer when every single pkt arrives.
- */
- unsigned short last_kactive_blk_num;
+ unsigned short version;
char *pkblk_start;
char *pkblk_end;
@@ -38,6 +33,7 @@ struct tpacket_kbdq_core {
uint64_t knxt_seq_num;
char *prev;
char *nxt_offset;
+
struct sk_buff *skb;
rwlock_t blk_fill_in_prog_lock;
@@ -45,12 +41,10 @@ struct tpacket_kbdq_core {
/* Default is set to 8ms */
#define DEFAULT_PRB_RETIRE_TOV (8)
- unsigned short retire_blk_tov;
- unsigned short version;
- unsigned long tov_in_jiffies;
+ ktime_t interval_ktime;
/* timer to retire an outstanding block */
- struct timer_list retire_blk_timer;
+ struct hrtimer retire_blk_timer;
};
struct pgv {
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 73bc39281ef5..9b45fbdc90ca 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -276,8 +276,6 @@ EXPORT_SYMBOL_GPL(rpc_destroy_wait_queue);
static int rpc_wait_bit_killable(struct wait_bit_key *key, int mode)
{
- if (unlikely(current->flags & PF_EXITING))
- return -EINTR;
schedule();
if (signal_pending_state(mode, current))
return -ERESTARTSYS;
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index c5f7bbf5775f..3aa987e7f072 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -407,9 +407,9 @@ xs_sock_recv_cmsg(struct socket *sock, unsigned int *msg_flags, int flags)
iov_iter_kvec(&msg.msg_iter, ITER_DEST, &alert_kvec, 1,
alert_kvec.iov_len);
ret = sock_recvmsg(sock, &msg, flags);
- if (ret > 0 &&
- tls_get_record_type(sock->sk, &u.cmsg) == TLS_RECORD_TYPE_ALERT) {
- iov_iter_revert(&msg.msg_iter, ret);
+ if (ret > 0) {
+ if (tls_get_record_type(sock->sk, &u.cmsg) == TLS_RECORD_TYPE_ALERT)
+ iov_iter_revert(&msg.msg_iter, ret);
ret = xs_sock_process_cmsg(sock, &msg, msg_flags, &u.cmsg,
-EAGAIN);
}
diff --git a/net/wireless/core.c b/net/wireless/core.c
index a7e2931ffb2e..797f9f2004a6 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -1018,6 +1018,15 @@ int wiphy_register(struct wiphy *wiphy)
rdev->wiphy.features |= NL80211_FEATURE_SCAN_FLUSH;
+ if (rdev->wiphy.bss_param_support & WIPHY_BSS_PARAM_P2P_CTWINDOW)
+ rdev->wiphy.features |= NL80211_FEATURE_P2P_GO_CTWIN;
+ else if (rdev->wiphy.features & NL80211_FEATURE_P2P_GO_CTWIN)
+ rdev->wiphy.bss_param_support |= WIPHY_BSS_PARAM_P2P_CTWINDOW;
+ if (rdev->wiphy.bss_param_support & WIPHY_BSS_PARAM_P2P_OPPPS)
+ rdev->wiphy.features |= NL80211_FEATURE_P2P_GO_OPPPS;
+ else if (rdev->wiphy.features & NL80211_FEATURE_P2P_GO_OPPPS)
+ rdev->wiphy.bss_param_support |= WIPHY_BSS_PARAM_P2P_OPPPS;
+
rtnl_lock();
wiphy_lock(&rdev->wiphy);
res = device_add(&rdev->wiphy.dev);
diff --git a/net/wireless/ethtool.c b/net/wireless/ethtool.c
index 2613d6ac0fda..46e4317cbd7e 100644
--- a/net/wireless/ethtool.c
+++ b/net/wireless/ethtool.c
@@ -23,7 +23,7 @@ void cfg80211_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
else
strscpy(info->fw_version, "N/A", sizeof(info->fw_version));
- strscpy(info->bus_info, dev_name(wiphy_dev(wdev->wiphy)),
+ strscpy(info->bus_info, dev_name(pdev),
sizeof(info->bus_info));
}
EXPORT_SYMBOL(cfg80211_get_drvinfo);
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 89519aa52893..b7bc7e5e81dd 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -411,6 +411,14 @@ static const struct nla_policy nl80211_txattr_policy[NL80211_TXRATE_MAX + 1] = {
[NL80211_TXRATE_HE_LTF] = NLA_POLICY_RANGE(NLA_U8,
NL80211_RATE_INFO_HE_1XLTF,
NL80211_RATE_INFO_HE_4XLTF),
+ [NL80211_TXRATE_EHT] = NLA_POLICY_EXACT_LEN(sizeof(struct nl80211_txrate_eht)),
+ [NL80211_TXRATE_EHT_GI] = NLA_POLICY_RANGE(NLA_U8,
+ NL80211_RATE_INFO_EHT_GI_0_8,
+ NL80211_RATE_INFO_EHT_GI_3_2),
+ [NL80211_TXRATE_EHT_LTF] = NLA_POLICY_RANGE(NLA_U8,
+ NL80211_RATE_INFO_EHT_1XLTF,
+ NL80211_RATE_INFO_EHT_8XLTF),
+
};
static const struct nla_policy
@@ -871,6 +879,7 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
[NL80211_ATTR_S1G_LONG_BEACON_PERIOD] = NLA_POLICY_MIN(NLA_U8, 2),
[NL80211_ATTR_S1G_SHORT_BEACON] =
NLA_POLICY_NESTED(nl80211_s1g_short_beacon),
+ [NL80211_ATTR_BSS_PARAM] = { .type = NLA_FLAG },
};
/* policy for the key attributes */
@@ -3019,6 +3028,40 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev,
rdev->wiphy.ext_features))
goto nla_put_failure;
+ if (rdev->wiphy.bss_param_support) {
+ struct nlattr *nested;
+ u32 parsup = rdev->wiphy.bss_param_support;
+
+ nested = nla_nest_start(msg, NL80211_ATTR_BSS_PARAM);
+ if (!nested)
+ goto nla_put_failure;
+
+ if ((parsup & WIPHY_BSS_PARAM_CTS_PROT) &&
+ nla_put_flag(msg, NL80211_ATTR_BSS_CTS_PROT))
+ goto nla_put_failure;
+ if ((parsup & WIPHY_BSS_PARAM_SHORT_PREAMBLE) &&
+ nla_put_flag(msg, NL80211_ATTR_BSS_SHORT_PREAMBLE))
+ goto nla_put_failure;
+ if ((parsup & WIPHY_BSS_PARAM_SHORT_SLOT_TIME) &&
+ nla_put_flag(msg, NL80211_ATTR_BSS_SHORT_SLOT_TIME))
+ goto nla_put_failure;
+ if ((parsup & WIPHY_BSS_PARAM_BASIC_RATES) &&
+ nla_put_flag(msg, NL80211_ATTR_BSS_BASIC_RATES))
+ goto nla_put_failure;
+ if ((parsup & WIPHY_BSS_PARAM_AP_ISOLATE) &&
+ nla_put_flag(msg, NL80211_ATTR_AP_ISOLATE))
+ goto nla_put_failure;
+ if ((parsup & WIPHY_BSS_PARAM_HT_OPMODE) &&
+ nla_put_flag(msg, NL80211_ATTR_BSS_HT_OPMODE))
+ goto nla_put_failure;
+ if ((parsup & WIPHY_BSS_PARAM_P2P_CTWINDOW) &&
+ nla_put_flag(msg, NL80211_ATTR_P2P_CTWINDOW))
+ goto nla_put_failure;
+ if ((parsup & WIPHY_BSS_PARAM_P2P_OPPPS) &&
+ nla_put_flag(msg, NL80211_ATTR_P2P_OPPPS))
+ goto nla_put_failure;
+ nla_nest_end(msg, nested);
+ }
if (rdev->wiphy.bss_select_support) {
struct nlattr *nested;
u32 bss_select_support = rdev->wiphy.bss_select_support;
@@ -5393,6 +5436,164 @@ static bool he_set_mcs_mask(struct genl_info *info,
return true;
}
+static void eht_build_mcs_mask(struct genl_info *info,
+ const struct ieee80211_sta_eht_cap *eht_cap,
+ u8 mcs_nss_len, u16 *mcs_mask)
+{
+ struct net_device *dev = info->user_ptr[1];
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ u8 nss, mcs_7 = 0, mcs_9 = 0, mcs_11 = 0, mcs_13 = 0;
+ unsigned int link_id = nl80211_link_id(info->attrs);
+
+ if (mcs_nss_len == 4) {
+ const struct ieee80211_eht_mcs_nss_supp_20mhz_only *mcs =
+ &eht_cap->eht_mcs_nss_supp.only_20mhz;
+
+ mcs_7 = u8_get_bits(mcs->rx_tx_mcs7_max_nss,
+ IEEE80211_EHT_MCS_NSS_TX);
+ mcs_9 = u8_get_bits(mcs->rx_tx_mcs9_max_nss,
+ IEEE80211_EHT_MCS_NSS_TX);
+ mcs_11 = u8_get_bits(mcs->rx_tx_mcs11_max_nss,
+ IEEE80211_EHT_MCS_NSS_TX);
+ mcs_13 = u8_get_bits(mcs->rx_tx_mcs13_max_nss,
+ IEEE80211_EHT_MCS_NSS_TX);
+
+ } else {
+ const struct ieee80211_eht_mcs_nss_supp_bw *mcs;
+ enum nl80211_chan_width width;
+
+ switch (wdev->iftype) {
+ case NL80211_IFTYPE_ADHOC:
+ width = wdev->u.ibss.chandef.width;
+ break;
+ case NL80211_IFTYPE_MESH_POINT:
+ width = wdev->u.mesh.chandef.width;
+ break;
+ case NL80211_IFTYPE_OCB:
+ width = wdev->u.ocb.chandef.width;
+ break;
+ default:
+ if (wdev->valid_links)
+ width = wdev->links[link_id].ap.chandef.width;
+ else
+ width = wdev->u.ap.preset_chandef.width;
+ break;
+ }
+
+ switch (width) {
+ case NL80211_CHAN_WIDTH_320:
+ mcs = &eht_cap->eht_mcs_nss_supp.bw._320;
+ break;
+ case NL80211_CHAN_WIDTH_160:
+ mcs = &eht_cap->eht_mcs_nss_supp.bw._160;
+ break;
+ default:
+ mcs = &eht_cap->eht_mcs_nss_supp.bw._80;
+ break;
+ }
+
+ mcs_7 = u8_get_bits(mcs->rx_tx_mcs9_max_nss,
+ IEEE80211_EHT_MCS_NSS_TX);
+ mcs_9 = u8_get_bits(mcs->rx_tx_mcs9_max_nss,
+ IEEE80211_EHT_MCS_NSS_TX);
+ mcs_11 = u8_get_bits(mcs->rx_tx_mcs11_max_nss,
+ IEEE80211_EHT_MCS_NSS_TX);
+ mcs_13 = u8_get_bits(mcs->rx_tx_mcs13_max_nss,
+ IEEE80211_EHT_MCS_NSS_TX);
+ }
+
+ /* Enable MCS 14 for NSS 0 */
+ if (eht_cap->eht_cap_elem.phy_cap_info[6] &
+ IEEE80211_EHT_PHY_CAP6_EHT_DUP_6GHZ_SUPP)
+ mcs_mask[0] |= 0x4000;
+
+ /* Enable MCS 15 for NSS 0 */
+ mcs_mask[0] |= 0x8000;
+
+ for (nss = 0; nss < NL80211_EHT_NSS_MAX; nss++) {
+ if (!mcs_7)
+ continue;
+ mcs_mask[nss] |= 0x00FF;
+ mcs_7--;
+
+ if (!mcs_9)
+ continue;
+ mcs_mask[nss] |= 0x0300;
+ mcs_9--;
+
+ if (!mcs_11)
+ continue;
+ mcs_mask[nss] |= 0x0C00;
+ mcs_11--;
+
+ if (!mcs_13)
+ continue;
+ mcs_mask[nss] |= 0x3000;
+ mcs_13--;
+ }
+}
+
+static bool eht_set_mcs_mask(struct genl_info *info, struct wireless_dev *wdev,
+ struct ieee80211_supported_band *sband,
+ struct nl80211_txrate_eht *txrate,
+ u16 mcs[NL80211_EHT_NSS_MAX])
+{
+ const struct ieee80211_sta_he_cap *he_cap;
+ const struct ieee80211_sta_eht_cap *eht_cap;
+ u16 tx_mcs_mask[NL80211_EHT_NSS_MAX] = { 0 };
+ u8 i, mcs_nss_len;
+
+ he_cap = ieee80211_get_he_iftype_cap(sband, wdev->iftype);
+ if (!he_cap)
+ return false;
+
+ eht_cap = ieee80211_get_eht_iftype_cap(sband, wdev->iftype);
+ if (!eht_cap)
+ return false;
+
+ /* Checks for MCS 14 */
+ if (txrate->mcs[0] & 0x4000) {
+ if (sband->band != NL80211_BAND_6GHZ)
+ return false;
+
+ if (!(eht_cap->eht_cap_elem.phy_cap_info[6] &
+ IEEE80211_EHT_PHY_CAP6_EHT_DUP_6GHZ_SUPP))
+ return false;
+ }
+
+ mcs_nss_len = ieee80211_eht_mcs_nss_size(&he_cap->he_cap_elem,
+ &eht_cap->eht_cap_elem,
+ wdev->iftype ==
+ NL80211_IFTYPE_STATION);
+
+ if (mcs_nss_len == 3) {
+ /* Supported iftypes for setting non-20 MHz-only EHT MCS */
+ switch (wdev->iftype) {
+ case NL80211_IFTYPE_ADHOC:
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_P2P_GO:
+ case NL80211_IFTYPE_MESH_POINT:
+ case NL80211_IFTYPE_OCB:
+ break;
+ default:
+ return false;
+ }
+ }
+
+ /* Build eht_mcs_mask from EHT and HE capabilities */
+ eht_build_mcs_mask(info, eht_cap, mcs_nss_len, tx_mcs_mask);
+
+ memset(mcs, 0, sizeof(u16) * NL80211_EHT_NSS_MAX);
+ for (i = 0; i < NL80211_EHT_NSS_MAX; i++) {
+ if ((tx_mcs_mask[i] & txrate->mcs[i]) == txrate->mcs[i])
+ mcs[i] = txrate->mcs[i];
+ else
+ return false;
+ }
+
+ return true;
+}
+
static int nl80211_parse_tx_bitrate_mask(struct genl_info *info,
struct nlattr *attrs[],
enum nl80211_attrs attr,
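
For reference, the per-NSS mask built by eht_build_mcs_mask() packs one bit per EHT MCS into a u16: 0x00FF covers MCS 0-7, 0x0300 MCS 8-9, 0x0C00 MCS 10-11, 0x3000 MCS 12-13, 0x4000 MCS 14 (6 GHz duplicate) and 0x8000 MCS 15. A minimal userspace sketch of the same bit layout (not part of the patch; it keys off a single per-NSS maximum MCS rather than the max-NSS counts the kernel helper walks, and the helper name is made up):

#include <stdint.h>
#include <stdio.h>

/* Build a 16-bit EHT MCS mask for one NSS from the highest MCS supported
 * at that NSS (7, 9, 11 or 13), mirroring eht_build_mcs_mask()'s bit layout.
 */
static uint16_t eht_mcs_mask_for_nss(int max_mcs)
{
	uint16_t mask = 0;

	if (max_mcs >= 7)
		mask |= 0x00FF;	/* MCS 0-7   */
	if (max_mcs >= 9)
		mask |= 0x0300;	/* MCS 8-9   */
	if (max_mcs >= 11)
		mask |= 0x0C00;	/* MCS 10-11 */
	if (max_mcs >= 13)
		mask |= 0x3000;	/* MCS 12-13 */
	return mask;
}

int main(void)
{
	printf("max MCS 9  -> 0x%04x\n", eht_mcs_mask_for_nss(9));
	printf("max MCS 13 -> 0x%04x\n", eht_mcs_mask_for_nss(13));
	return 0;
}
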
@@ -5413,6 +5614,8 @@ static int nl80211_parse_tx_bitrate_mask(struct genl_info *info,
/* Default to all rates enabled */
for (i = 0; i < NUM_NL80211_BANDS; i++) {
const struct ieee80211_sta_he_cap *he_cap;
+ const struct ieee80211_sta_eht_cap *eht_cap;
+ u8 mcs_nss_len;
if (!default_all_enabled)
break;
@@ -5441,6 +5644,21 @@ static int nl80211_parse_tx_bitrate_mask(struct genl_info *info,
mask->control[i].he_gi = 0xFF;
mask->control[i].he_ltf = 0xFF;
+
+ eht_cap = ieee80211_get_eht_iftype_cap(sband, wdev->iftype);
+ if (!eht_cap)
+ continue;
+
+ mcs_nss_len = ieee80211_eht_mcs_nss_size(&he_cap->he_cap_elem,
+ &eht_cap->eht_cap_elem,
+ wdev->iftype ==
+ NL80211_IFTYPE_STATION);
+
+ eht_build_mcs_mask(info, eht_cap, mcs_nss_len,
+ mask->control[i].eht_mcs);
+
+ mask->control[i].eht_gi = 0xFF;
+ mask->control[i].eht_ltf = 0xFF;
}
/* if no rates are given set it back to the defaults */
@@ -5512,13 +5730,27 @@ static int nl80211_parse_tx_bitrate_mask(struct genl_info *info,
mask->control[band].he_ltf =
nla_get_u8(tb[NL80211_TXRATE_HE_LTF]);
+ if (tb[NL80211_TXRATE_EHT] &&
+ !eht_set_mcs_mask(info, wdev, sband,
+ nla_data(tb[NL80211_TXRATE_EHT]),
+ mask->control[band].eht_mcs))
+ return -EINVAL;
+
+ if (tb[NL80211_TXRATE_EHT_GI])
+ mask->control[band].eht_gi =
+ nla_get_u8(tb[NL80211_TXRATE_EHT_GI]);
+ if (tb[NL80211_TXRATE_EHT_LTF])
+ mask->control[band].eht_ltf =
+ nla_get_u8(tb[NL80211_TXRATE_EHT_LTF]);
+
if (mask->control[band].legacy == 0) {
- /* don't allow empty legacy rates if HT, VHT or HE
+ /* don't allow empty legacy rates if HT, VHT, HE or EHT
* are not even supported.
*/
if (!(rdev->wiphy.bands[band]->ht_cap.ht_supported ||
rdev->wiphy.bands[band]->vht_cap.vht_supported ||
- ieee80211_get_he_iftype_cap(sband, wdev->iftype)))
+ ieee80211_get_he_iftype_cap(sband, wdev->iftype) ||
+ ieee80211_get_eht_iftype_cap(sband, wdev->iftype)))
return -EINVAL;
for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++)
@@ -5533,6 +5765,10 @@ static int nl80211_parse_tx_bitrate_mask(struct genl_info *info,
if (mask->control[band].he_mcs[i])
goto out;
+ for (i = 0; i < NL80211_EHT_NSS_MAX; i++)
+ if (mask->control[band].eht_mcs[i])
+ goto out;
+
/* legacy and mcs rates may not be both empty */
return -EINVAL;
}
@@ -5546,7 +5782,7 @@ static int validate_beacon_tx_rate(struct cfg80211_registered_device *rdev,
enum nl80211_band band,
struct cfg80211_bitrate_mask *beacon_rate)
{
- u32 count_ht, count_vht, count_he, i;
+ u32 count_ht, count_vht, count_he, count_eht, i;
u32 rate = beacon_rate->control[band].legacy;
/* Allow only one rate */
@@ -5592,8 +5828,21 @@ static int validate_beacon_tx_rate(struct cfg80211_registered_device *rdev,
return -EINVAL;
}
- if ((count_ht && count_vht && count_he) ||
- (!rate && !count_ht && !count_vht && !count_he))
+ count_eht = 0;
+ for (i = 0; i < NL80211_EHT_NSS_MAX; i++) {
+ if (hweight16(beacon_rate->control[band].eht_mcs[i]) > 1) {
+ return -EINVAL;
+ } else if (beacon_rate->control[band].eht_mcs[i]) {
+ count_eht++;
+ if (count_eht > 1)
+ return -EINVAL;
+ }
+ if (count_eht && rate)
+ return -EINVAL;
+ }
+
+ if ((count_ht && count_vht && count_he && count_eht) ||
+ (!rate && !count_ht && !count_vht && !count_he && !count_eht))
return -EINVAL;
if (rate &&
@@ -5613,6 +5862,11 @@ static int validate_beacon_tx_rate(struct cfg80211_registered_device *rdev,
NL80211_EXT_FEATURE_BEACON_RATE_HE))
return -EINVAL;
+ if (count_eht &&
+ !wiphy_ext_feature_isset(&rdev->wiphy,
+ NL80211_EXT_FEATURE_BEACON_RATE_EHT))
+ return -EINVAL;
+
return 0;
}
@@ -7062,7 +7316,8 @@ static int nl80211_send_station(struct sk_buff *msg, u32 cmd, u32 portid,
u32 seq, int flags,
struct cfg80211_registered_device *rdev,
struct net_device *dev,
- const u8 *mac_addr, struct station_info *sinfo)
+ const u8 *mac_addr, struct station_info *sinfo,
+ bool link_stats)
{
void *hdr;
struct nlattr *sinfoattr, *bss_param;
@@ -7283,7 +7538,7 @@ static int nl80211_send_station(struct sk_buff *msg, u32 cmd, u32 portid,
goto nla_put_failure;
}
- if (sinfo->valid_links) {
+ if (link_stats && sinfo->valid_links) {
links = nla_nest_start(msg, NL80211_ATTR_MLO_LINKS);
if (!links)
goto nla_put_failure;
@@ -7574,7 +7829,7 @@ static int nl80211_dump_station(struct sk_buff *skb,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq, NLM_F_MULTI,
rdev, wdev->netdev, mac_addr,
- &sinfo) < 0)
+ &sinfo, false) < 0)
goto out;
sta_idx++;
@@ -7635,7 +7890,7 @@ static int nl80211_get_station(struct sk_buff *skb, struct genl_info *info)
if (nl80211_send_station(msg, NL80211_CMD_NEW_STATION,
info->snd_portid, info->snd_seq, 0,
- rdev, dev, mac_addr, &sinfo) < 0) {
+ rdev, dev, mac_addr, &sinfo, false) < 0) {
nlmsg_free(msg);
return -ENOBUFS;
}
@@ -8829,6 +9084,9 @@ static int nl80211_set_bss(struct sk_buff *skb, struct genl_info *info)
struct cfg80211_registered_device *rdev = info->user_ptr[0];
struct net_device *dev = info->user_ptr[1];
struct bss_parameters params;
+ u32 bss_param_support = rdev->wiphy.bss_param_support;
+ u32 changed = 0;
+ bool strict;
memset(&params, 0, sizeof(params));
params.link_id = nl80211_link_id_or_invalid(info->attrs);
@@ -8841,26 +9099,54 @@ static int nl80211_set_bss(struct sk_buff *skb, struct genl_info *info)
params.p2p_ctwindow = -1;
params.p2p_opp_ps = -1;
- if (info->attrs[NL80211_ATTR_BSS_CTS_PROT])
+ strict = nla_get_flag(info->attrs[NL80211_ATTR_BSS_PARAM]);
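+ /* With the strict flag set, attributes that the wiphy does not
+ * advertise in bss_param_support are rejected below with -EINVAL.
+ */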
+ if (info->attrs[NL80211_ATTR_BSS_CTS_PROT]) {
+ if (strict && !(bss_param_support & WIPHY_BSS_PARAM_CTS_PROT))
+ return -EINVAL;
params.use_cts_prot =
nla_get_u8(info->attrs[NL80211_ATTR_BSS_CTS_PROT]);
- if (info->attrs[NL80211_ATTR_BSS_SHORT_PREAMBLE])
+ changed |= WIPHY_BSS_PARAM_CTS_PROT;
+ }
+ if (info->attrs[NL80211_ATTR_BSS_SHORT_PREAMBLE]) {
+ if (strict &&
+ !(bss_param_support & WIPHY_BSS_PARAM_SHORT_PREAMBLE))
+ return -EINVAL;
params.use_short_preamble =
nla_get_u8(info->attrs[NL80211_ATTR_BSS_SHORT_PREAMBLE]);
- if (info->attrs[NL80211_ATTR_BSS_SHORT_SLOT_TIME])
+ changed |= WIPHY_BSS_PARAM_SHORT_PREAMBLE;
+ }
+ if (info->attrs[NL80211_ATTR_BSS_SHORT_SLOT_TIME]) {
+ if (strict &&
+ !(bss_param_support & WIPHY_BSS_PARAM_SHORT_SLOT_TIME))
+ return -EINVAL;
params.use_short_slot_time =
nla_get_u8(info->attrs[NL80211_ATTR_BSS_SHORT_SLOT_TIME]);
+ changed |= WIPHY_BSS_PARAM_SHORT_SLOT_TIME;
+ }
if (info->attrs[NL80211_ATTR_BSS_BASIC_RATES]) {
+ if (strict &&
+ !(bss_param_support & WIPHY_BSS_PARAM_BASIC_RATES))
+ return -EINVAL;
params.basic_rates =
nla_data(info->attrs[NL80211_ATTR_BSS_BASIC_RATES]);
params.basic_rates_len =
nla_len(info->attrs[NL80211_ATTR_BSS_BASIC_RATES]);
+ changed |= WIPHY_BSS_PARAM_BASIC_RATES;
+ }
+ if (info->attrs[NL80211_ATTR_AP_ISOLATE]) {
+ if (strict && !(bss_param_support & WIPHY_BSS_PARAM_AP_ISOLATE))
+ return -EINVAL;
+ params.ap_isolate =
+ !!nla_get_u8(info->attrs[NL80211_ATTR_AP_ISOLATE]);
+ changed |= WIPHY_BSS_PARAM_AP_ISOLATE;
}
- if (info->attrs[NL80211_ATTR_AP_ISOLATE])
- params.ap_isolate = !!nla_get_u8(info->attrs[NL80211_ATTR_AP_ISOLATE]);
- if (info->attrs[NL80211_ATTR_BSS_HT_OPMODE])
+ if (info->attrs[NL80211_ATTR_BSS_HT_OPMODE]) {
+ if (strict && !(bss_param_support & WIPHY_BSS_PARAM_HT_OPMODE))
+ return -EINVAL;
params.ht_opmode =
nla_get_u16(info->attrs[NL80211_ATTR_BSS_HT_OPMODE]);
+ changed |= WIPHY_BSS_PARAM_HT_OPMODE;
+ }
if (info->attrs[NL80211_ATTR_P2P_CTWINDOW]) {
if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO)
@@ -8868,8 +9154,9 @@ static int nl80211_set_bss(struct sk_buff *skb, struct genl_info *info)
params.p2p_ctwindow =
nla_get_u8(info->attrs[NL80211_ATTR_P2P_CTWINDOW]);
if (params.p2p_ctwindow != 0 &&
- !(rdev->wiphy.features & NL80211_FEATURE_P2P_GO_CTWIN))
+ !(bss_param_support & WIPHY_BSS_PARAM_P2P_CTWINDOW))
return -EINVAL;
+ changed |= WIPHY_BSS_PARAM_P2P_CTWINDOW;
}
if (info->attrs[NL80211_ATTR_P2P_OPPPS]) {
@@ -8878,9 +9165,11 @@ static int nl80211_set_bss(struct sk_buff *skb, struct genl_info *info)
if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO)
return -EINVAL;
tmp = nla_get_u8(info->attrs[NL80211_ATTR_P2P_OPPPS]);
+ if (tmp && !(bss_param_support & WIPHY_BSS_PARAM_P2P_OPPPS))
+ return -EINVAL;
params.p2p_opp_ps = tmp;
if (params.p2p_opp_ps &&
- !(rdev->wiphy.features & NL80211_FEATURE_P2P_GO_OPPPS))
+ !(rdev->wiphy.bss_param_support & WIPHY_BSS_PARAM_P2P_OPPPS))
return -EINVAL;
}
@@ -8891,6 +9180,10 @@ static int nl80211_set_bss(struct sk_buff *skb, struct genl_info *info)
dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO)
return -EOPNOTSUPP;
+ changed &= rdev->wiphy.bss_param_support;
+ if (!changed)
+ return 0;
+
return rdev_change_bss(rdev, dev, &params);
}
@@ -19680,7 +19973,7 @@ void cfg80211_new_sta(struct net_device *dev, const u8 *mac_addr,
return;
if (nl80211_send_station(msg, NL80211_CMD_NEW_STATION, 0, 0, 0,
- rdev, dev, mac_addr, sinfo) < 0) {
+ rdev, dev, mac_addr, sinfo, false) < 0) {
nlmsg_free(msg);
return;
}
@@ -19710,7 +20003,7 @@ void cfg80211_del_sta_sinfo(struct net_device *dev, const u8 *mac_addr,
}
if (nl80211_send_station(msg, NL80211_CMD_DEL_STATION, 0, 0, 0,
- rdev, dev, mac_addr, sinfo) < 0) {
+ rdev, dev, mac_addr, sinfo, false) < 0) {
nlmsg_free(msg);
return;
}
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 6c7b7c3828a4..90a9187a6b13 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -1816,6 +1816,9 @@ static void cfg80211_update_hidden_bsses(struct cfg80211_internal_bss *known,
WARN_ON(ies != old_ies);
rcu_assign_pointer(bss->pub.beacon_ies, new_ies);
+
+ bss->ts = known->ts;
+ bss->pub.ts_boottime = known->pub.ts_boottime;
}
}
@@ -1882,6 +1885,10 @@ cfg80211_update_known_bss(struct cfg80211_registered_device *rdev,
{
lockdep_assert_held(&rdev->bss_lock);
+ /* Update time stamps */
+ known->ts = new->ts;
+ known->pub.ts_boottime = new->pub.ts_boottime;
+
/* Update IEs */
if (rcu_access_pointer(new->pub.proberesp_ies)) {
const struct cfg80211_bss_ies *old;
@@ -1945,8 +1952,6 @@ cfg80211_update_known_bss(struct cfg80211_registered_device *rdev,
if (signal_valid)
known->pub.signal = new->pub.signal;
known->pub.capability = new->pub.capability;
- known->ts = new->ts;
- known->pub.ts_boottime = new->pub.ts_boottime;
known->parent_tsf = new->parent_tsf;
known->pub.chains = new->pub.chains;
memcpy(known->pub.chain_signal, new->pub.chain_signal,
diff --git a/net/wireless/trace.h b/net/wireless/trace.h
index 34c584a215e5..9b6074155d59 100644
--- a/net/wireless/trace.h
+++ b/net/wireless/trace.h
@@ -3137,23 +3137,6 @@ DEFINE_EVENT(cfg80211_netdev_mac_evt, cfg80211_notify_new_peer_candidate,
TP_ARGS(netdev, macaddr)
);
-DECLARE_EVENT_CLASS(netdev_evt_only,
- TP_PROTO(struct net_device *netdev),
- TP_ARGS(netdev),
- TP_STRUCT__entry(
- NETDEV_ENTRY
- ),
- TP_fast_assign(
- NETDEV_ASSIGN;
- ),
- TP_printk(NETDEV_PR_FMT , NETDEV_PR_ARG)
-);
-
-DEFINE_EVENT(netdev_evt_only, cfg80211_send_rx_auth,
- TP_PROTO(struct net_device *netdev),
- TP_ARGS(netdev)
-);
-
TRACE_EVENT(cfg80211_send_rx_assoc,
TP_PROTO(struct net_device *netdev,
const struct cfg80211_rx_assoc_resp_data *data),
@@ -3480,21 +3463,6 @@ TRACE_EVENT(cfg80211_reg_can_beacon,
__entry->prohibited_flags, __entry->permitting_flags)
);
-TRACE_EVENT(cfg80211_chandef_dfs_required,
- TP_PROTO(struct wiphy *wiphy, struct cfg80211_chan_def *chandef),
- TP_ARGS(wiphy, chandef),
- TP_STRUCT__entry(
- WIPHY_ENTRY
- CHAN_DEF_ENTRY
- ),
- TP_fast_assign(
- WIPHY_ASSIGN;
- CHAN_DEF_ASSIGN(chandef);
- ),
- TP_printk(WIPHY_PR_FMT ", " CHAN_DEF_PR_FMT,
- WIPHY_PR_ARG, CHAN_DEF_PR_ARG)
-);
-
TRACE_EVENT(cfg80211_ch_switch_notify,
TP_PROTO(struct net_device *netdev,
struct cfg80211_chan_def *chandef,
@@ -3862,30 +3830,6 @@ DEFINE_EVENT(cfg80211_bss_evt, cfg80211_return_bss,
TP_ARGS(pub)
);
-TRACE_EVENT(cfg80211_return_uint,
- TP_PROTO(unsigned int ret),
- TP_ARGS(ret),
- TP_STRUCT__entry(
- __field(unsigned int, ret)
- ),
- TP_fast_assign(
- __entry->ret = ret;
- ),
- TP_printk("ret: %d", __entry->ret)
-);
-
-TRACE_EVENT(cfg80211_return_u32,
- TP_PROTO(u32 ret),
- TP_ARGS(ret),
- TP_STRUCT__entry(
- __field(u32, ret)
- ),
- TP_fast_assign(
- __entry->ret = ret;
- ),
- TP_printk("ret: %u", __entry->ret)
-);
-
TRACE_EVENT(cfg80211_report_wowlan_wakeup,
TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev,
struct cfg80211_wowlan_wakeup *wakeup),
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 240c68baa3d1..d12d49134c88 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -2584,7 +2584,7 @@ int cfg80211_get_radio_idx_by_chan(struct wiphy *wiphy,
}
}
- return -ENOENT;
+ return -EINVAL;
}
EXPORT_SYMBOL(cfg80211_get_radio_idx_by_chan);
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index 9c3acecc14b1..72e34bd2d925 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -36,6 +36,20 @@
#define TX_BATCH_SIZE 32
#define MAX_PER_SOCKET_BUDGET 32
+struct xsk_addr_node {
+ u64 addr;
+ struct list_head addr_node;
+};
+
+struct xsk_addr_head {
+ u32 num_descs;
+ struct list_head addrs_list;
+};
+
+static struct kmem_cache *xsk_tx_generic_cache;
+
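+/* Per-skb state kept in skb->cb: the number of descriptors backing the skb
+ * and the umem addresses of every descriptor past the first one (the first
+ * address is stashed in skb_shinfo(skb)->destructor_arg).
+ */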
+#define XSKCB(skb) ((struct xsk_addr_head *)((skb)->cb))
+
void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool)
{
if (pool->cached_need_wakeup & XDP_WAKEUP_RX)
@@ -532,24 +546,43 @@ static int xsk_wakeup(struct xdp_sock *xs, u8 flags)
return dev->netdev_ops->ndo_xsk_wakeup(dev, xs->queue_id, flags);
}
-static int xsk_cq_reserve_addr_locked(struct xsk_buff_pool *pool, u64 addr)
+static int xsk_cq_reserve_locked(struct xsk_buff_pool *pool)
{
unsigned long flags;
int ret;
spin_lock_irqsave(&pool->cq_lock, flags);
- ret = xskq_prod_reserve_addr(pool->cq, addr);
+ ret = xskq_prod_reserve(pool->cq);
spin_unlock_irqrestore(&pool->cq_lock, flags);
return ret;
}
-static void xsk_cq_submit_locked(struct xsk_buff_pool *pool, u32 n)
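+/* Write the umem addresses of all descriptors backing @skb into the
+ * completion ring and submit them in one go: the first address comes from
+ * destructor_arg, the rest from the list kept in skb->cb.
+ */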
+static void xsk_cq_submit_addr_locked(struct xsk_buff_pool *pool,
+ struct sk_buff *skb)
{
+ struct xsk_addr_node *pos, *tmp;
+ u32 descs_processed = 0;
unsigned long flags;
+ u32 idx;
spin_lock_irqsave(&pool->cq_lock, flags);
- xskq_prod_submit_n(pool->cq, n);
+ idx = xskq_get_prod(pool->cq);
+
+ xskq_prod_write_addr(pool->cq, idx,
+ (u64)(uintptr_t)skb_shinfo(skb)->destructor_arg);
+ descs_processed++;
+
+ if (unlikely(XSKCB(skb)->num_descs > 1)) {
+ list_for_each_entry_safe(pos, tmp, &XSKCB(skb)->addrs_list, addr_node) {
+ xskq_prod_write_addr(pool->cq, idx + descs_processed,
+ pos->addr);
+ descs_processed++;
+ list_del(&pos->addr_node);
+ kmem_cache_free(xsk_tx_generic_cache, pos);
+ }
+ }
+ xskq_prod_submit_n(pool->cq, descs_processed);
spin_unlock_irqrestore(&pool->cq_lock, flags);
}
@@ -562,9 +595,14 @@ static void xsk_cq_cancel_locked(struct xsk_buff_pool *pool, u32 n)
spin_unlock_irqrestore(&pool->cq_lock, flags);
}
+static void xsk_inc_num_desc(struct sk_buff *skb)
+{
+ XSKCB(skb)->num_descs++;
+}
+
static u32 xsk_get_num_desc(struct sk_buff *skb)
{
- return skb ? (long)skb_shinfo(skb)->destructor_arg : 0;
+ return XSKCB(skb)->num_descs;
}
static void xsk_destruct_skb(struct sk_buff *skb)
@@ -576,23 +614,33 @@ static void xsk_destruct_skb(struct sk_buff *skb)
*compl->tx_timestamp = ktime_get_tai_fast_ns();
}
- xsk_cq_submit_locked(xdp_sk(skb->sk)->pool, xsk_get_num_desc(skb));
+ xsk_cq_submit_addr_locked(xdp_sk(skb->sk)->pool, skb);
sock_wfree(skb);
}
-static void xsk_set_destructor_arg(struct sk_buff *skb)
+static void xsk_set_destructor_arg(struct sk_buff *skb, u64 addr)
{
- long num = xsk_get_num_desc(xdp_sk(skb->sk)->skb) + 1;
-
- skb_shinfo(skb)->destructor_arg = (void *)num;
+ BUILD_BUG_ON(sizeof(struct xsk_addr_head) > sizeof(skb->cb));
+ INIT_LIST_HEAD(&XSKCB(skb)->addrs_list);
+ XSKCB(skb)->num_descs = 0;
+ skb_shinfo(skb)->destructor_arg = (void *)(uintptr_t)addr;
}
static void xsk_consume_skb(struct sk_buff *skb)
{
struct xdp_sock *xs = xdp_sk(skb->sk);
+ u32 num_descs = xsk_get_num_desc(skb);
+ struct xsk_addr_node *pos, *tmp;
+
+ if (unlikely(num_descs > 1)) {
+ list_for_each_entry_safe(pos, tmp, &XSKCB(skb)->addrs_list, addr_node) {
+ list_del(&pos->addr_node);
+ kmem_cache_free(xsk_tx_generic_cache, pos);
+ }
+ }
skb->destructor = sock_wfree;
- xsk_cq_cancel_locked(xs->pool, xsk_get_num_desc(skb));
+ xsk_cq_cancel_locked(xs->pool, num_descs);
/* Free skb without triggering the perf drop trace */
consume_skb(skb);
xs->skb = NULL;
@@ -609,6 +657,7 @@ static struct sk_buff *xsk_build_skb_zerocopy(struct xdp_sock *xs,
{
struct xsk_buff_pool *pool = xs->pool;
u32 hr, len, ts, offset, copy, copied;
+ struct xsk_addr_node *xsk_addr;
struct sk_buff *skb = xs->skb;
struct page *page;
void *buffer;
@@ -623,6 +672,19 @@ static struct sk_buff *xsk_build_skb_zerocopy(struct xdp_sock *xs,
return ERR_PTR(err);
skb_reserve(skb, hr);
+
+ xsk_set_destructor_arg(skb, desc->addr);
+ } else {
+ xsk_addr = kmem_cache_zalloc(xsk_tx_generic_cache, GFP_KERNEL);
+ if (!xsk_addr)
+ return ERR_PTR(-ENOMEM);
+
+ /* In case of the -EOVERFLOW that could happen below,
+ * xsk_consume_skb() will release this node, as the whole skb
+ * would be dropped, which implies freeing all list elements
+ */
+ xsk_addr->addr = desc->addr;
+ list_add_tail(&xsk_addr->addr_node, &XSKCB(skb)->addrs_list);
}
addr = desc->addr;
@@ -694,8 +756,11 @@ static struct sk_buff *xsk_build_skb(struct xdp_sock *xs,
err = skb_store_bits(skb, 0, buffer, len);
if (unlikely(err))
goto free_err;
+
+ xsk_set_destructor_arg(skb, desc->addr);
} else {
int nr_frags = skb_shinfo(skb)->nr_frags;
+ struct xsk_addr_node *xsk_addr;
struct page *page;
u8 *vaddr;
@@ -710,12 +775,22 @@ static struct sk_buff *xsk_build_skb(struct xdp_sock *xs,
goto free_err;
}
+ xsk_addr = kmem_cache_zalloc(xsk_tx_generic_cache, GFP_KERNEL);
+ if (!xsk_addr) {
+ __free_page(page);
+ err = -ENOMEM;
+ goto free_err;
+ }
+
vaddr = kmap_local_page(page);
memcpy(vaddr, buffer, len);
kunmap_local(vaddr);
skb_add_rx_frag(skb, nr_frags, page, 0, len, PAGE_SIZE);
refcount_add(PAGE_SIZE, &xs->sk.sk_wmem_alloc);
+
+ xsk_addr->addr = desc->addr;
+ list_add_tail(&xsk_addr->addr_node, &XSKCB(skb)->addrs_list);
}
if (first_frag && desc->options & XDP_TX_METADATA) {
@@ -759,7 +834,7 @@ static struct sk_buff *xsk_build_skb(struct xdp_sock *xs,
skb->mark = READ_ONCE(xs->sk.sk_mark);
skb->destructor = xsk_destruct_skb;
xsk_tx_metadata_to_compl(meta, &skb_shinfo(skb)->xsk_meta);
- xsk_set_destructor_arg(skb);
+ xsk_inc_num_desc(skb);
return skb;
@@ -769,7 +844,7 @@ free_err:
if (err == -EOVERFLOW) {
/* Drop the packet */
- xsk_set_destructor_arg(xs->skb);
+ xsk_inc_num_desc(xs->skb);
xsk_drop_skb(xs->skb);
xskq_cons_release(xs->tx);
} else {
@@ -812,7 +887,7 @@ static int __xsk_generic_xmit(struct sock *sk)
* if there is space in it. This avoids having to implement
* any buffering in the Tx path.
*/
- err = xsk_cq_reserve_addr_locked(xs->pool, desc.addr);
+ err = xsk_cq_reserve_locked(xs->pool);
if (err) {
err = -EAGAIN;
goto out;
@@ -1815,8 +1890,18 @@ static int __init xsk_init(void)
if (err)
goto out_pernet;
+ xsk_tx_generic_cache = kmem_cache_create("xsk_generic_xmit_cache",
+ sizeof(struct xsk_addr_node),
+ 0, SLAB_HWCACHE_ALIGN, NULL);
+ if (!xsk_tx_generic_cache) {
+ err = -ENOMEM;
+ goto out_unreg_notif;
+ }
+
return 0;
+out_unreg_notif:
+ unregister_netdevice_notifier(&xsk_netdev_notifier);
out_pernet:
unregister_pernet_subsys(&xsk_net_ops);
out_sk:
diff --git a/net/xdp/xsk_queue.h b/net/xdp/xsk_queue.h
index 46d87e961ad6..f16f390370dc 100644
--- a/net/xdp/xsk_queue.h
+++ b/net/xdp/xsk_queue.h
@@ -344,6 +344,11 @@ static inline u32 xskq_cons_present_entries(struct xsk_queue *q)
/* Functions for producers */
+static inline u32 xskq_get_prod(struct xsk_queue *q)
+{
+ return READ_ONCE(q->ring->producer);
+}
+
static inline u32 xskq_prod_nb_free(struct xsk_queue *q, u32 max)
{
u32 free_entries = q->nentries - (q->cached_prod - q->cached_cons);
@@ -390,6 +395,13 @@ static inline int xskq_prod_reserve_addr(struct xsk_queue *q, u64 addr)
return 0;
}
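+/* Fill slot @idx with @addr without moving the producer index; the caller
+ * makes the entries visible later with xskq_prod_submit_n().
+ */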
+static inline void xskq_prod_write_addr(struct xsk_queue *q, u32 idx, u64 addr)
+{
+ struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
+
+ ring->desc[idx & q->ring_mask] = addr;
+}
+
static inline void xskq_prod_write_addr_batch(struct xsk_queue *q, struct xdp_desc *descs,
u32 nb_entries)
{
diff --git a/rust/kernel/lib.rs b/rust/kernel/lib.rs
index ed53169e795c..fef97f2a5098 100644
--- a/rust/kernel/lib.rs
+++ b/rust/kernel/lib.rs
@@ -296,7 +296,7 @@ macro_rules! asm {
/// Gets the C string file name of a [`Location`].
///
-/// If `file_with_nul()` is not available, returns a string that warns about it.
+/// If `Location::file_as_c_str()` is not available, returns a string that warns about it.
///
/// [`Location`]: core::panic::Location
///
@@ -310,8 +310,8 @@ macro_rules! asm {
/// let caller = core::panic::Location::caller();
///
/// // Output:
-/// // - A path like "rust/kernel/example.rs" if file_with_nul() is available.
-/// // - "<Location::file_with_nul() not supported>" otherwise.
+/// // - A path like "rust/kernel/example.rs" if `file_as_c_str()` is available.
+/// // - "<Location::file_as_c_str() not supported>" otherwise.
/// let caller_file = file_from_location(caller);
///
/// // Prints out the message with caller's file name.
@@ -326,7 +326,12 @@ macro_rules! asm {
/// ```
#[inline]
pub fn file_from_location<'a>(loc: &'a core::panic::Location<'a>) -> &'a core::ffi::CStr {
- #[cfg(CONFIG_RUSTC_HAS_FILE_WITH_NUL)]
+ #[cfg(CONFIG_RUSTC_HAS_FILE_AS_C_STR)]
+ {
+ loc.file_as_c_str()
+ }
+
+ #[cfg(all(CONFIG_RUSTC_HAS_FILE_WITH_NUL, not(CONFIG_RUSTC_HAS_FILE_AS_C_STR)))]
{
loc.file_with_nul()
}
@@ -334,6 +339,6 @@ pub fn file_from_location<'a>(loc: &'a core::panic::Location<'a>) -> &'a core::f
#[cfg(not(CONFIG_RUSTC_HAS_FILE_WITH_NUL))]
{
let _ = loc;
- c"<Location::file_with_nul() not supported>"
+ c"<Location::file_as_c_str() not supported>"
}
}
diff --git a/samples/ftrace/ftrace-direct-modify.c b/samples/ftrace/ftrace-direct-modify.c
index cfea7a38befb..da3a9f2091f5 100644
--- a/samples/ftrace/ftrace-direct-modify.c
+++ b/samples/ftrace/ftrace-direct-modify.c
@@ -75,8 +75,8 @@ asm (
CALL_DEPTH_ACCOUNT
" call my_direct_func1\n"
" leave\n"
-" .size my_tramp1, .-my_tramp1\n"
ASM_RET
+" .size my_tramp1, .-my_tramp1\n"
" .type my_tramp2, @function\n"
" .globl my_tramp2\n"
diff --git a/scripts/generate_rust_target.rs b/scripts/generate_rust_target.rs
index 39c82908ff3a..38b3416bb979 100644
--- a/scripts/generate_rust_target.rs
+++ b/scripts/generate_rust_target.rs
@@ -225,7 +225,11 @@ fn main() {
ts.push("features", features);
ts.push("llvm-target", "x86_64-linux-gnu");
ts.push("supported-sanitizers", ["kcfi", "kernel-address"]);
- ts.push("target-pointer-width", "64");
+ if cfg.rustc_version_atleast(1, 91, 0) {
+ ts.push("target-pointer-width", 64);
+ } else {
+ ts.push("target-pointer-width", "64");
+ }
} else if cfg.has("X86_32") {
// This only works on UML, as i386 otherwise needs regparm support in rustc
if !cfg.has("UML") {
@@ -245,7 +249,11 @@ fn main() {
}
ts.push("features", features);
ts.push("llvm-target", "i386-unknown-linux-gnu");
- ts.push("target-pointer-width", "32");
+ if cfg.rustc_version_atleast(1, 91, 0) {
+ ts.push("target-pointer-width", 32);
+ } else {
+ ts.push("target-pointer-width", "32");
+ }
} else if cfg.has("LOONGARCH") {
panic!("loongarch uses the builtin rustc loongarch64-unknown-none-softfloat target");
} else {
diff --git a/tools/gpio/Makefile b/tools/gpio/Makefile
index ed565eb52275..342e056c8c66 100644
--- a/tools/gpio/Makefile
+++ b/tools/gpio/Makefile
@@ -77,7 +77,7 @@ $(OUTPUT)gpio-watch: $(GPIO_WATCH_IN)
clean:
rm -f $(ALL_PROGRAMS)
- rm -f $(OUTPUT)include/linux/gpio.h
+ rm -rf $(OUTPUT)include
find $(or $(OUTPUT),.) -name '*.o' -delete -o -name '\.*.d' -delete -o -name '\.*.cmd' -delete
install: $(ALL_PROGRAMS)
diff --git a/tools/perf/tests/pe-file-parsing.c b/tools/perf/tests/pe-file-parsing.c
index 30c7da79e109..8b31d1d05f90 100644
--- a/tools/perf/tests/pe-file-parsing.c
+++ b/tools/perf/tests/pe-file-parsing.c
@@ -37,7 +37,7 @@ static int run_dir(const char *d)
size_t idx;
scnprintf(filename, PATH_MAX, "%s/pe-file.exe", d);
- ret = filename__read_build_id(filename, &bid);
+ ret = filename__read_build_id(filename, &bid, /*block=*/true);
TEST_ASSERT_VAL("Failed to read build_id",
ret == sizeof(expect_build_id));
TEST_ASSERT_VAL("Wrong build_id", !memcmp(bid.data, expect_build_id,
@@ -49,7 +49,7 @@ static int run_dir(const char *d)
!strcmp(debuglink, expect_debuglink));
scnprintf(debugfile, PATH_MAX, "%s/%s", d, debuglink);
- ret = filename__read_build_id(debugfile, &bid);
+ ret = filename__read_build_id(debugfile, &bid, /*block=*/true);
TEST_ASSERT_VAL("Failed to read debug file build_id",
ret == sizeof(expect_build_id));
TEST_ASSERT_VAL("Wrong build_id", !memcmp(bid.data, expect_build_id,
diff --git a/tools/perf/tests/shell/test_bpf_metadata.sh b/tools/perf/tests/shell/test_bpf_metadata.sh
index 69e3c2055134..be67d56e0f09 100755
--- a/tools/perf/tests/shell/test_bpf_metadata.sh
+++ b/tools/perf/tests/shell/test_bpf_metadata.sh
@@ -61,7 +61,7 @@ test_bpf_metadata() {
/perf_version/ {
if (entry) print $NF;
}
- ' | egrep "$VERS" > /dev/null
+ ' | grep -qF "$VERS"
then
echo "Basic BPF metadata test [Failed invalid output]"
err=1
diff --git a/tools/perf/util/bpf-event.c b/tools/perf/util/bpf-event.c
index 5b6d3e899e11..2298cd396c42 100644
--- a/tools/perf/util/bpf-event.c
+++ b/tools/perf/util/bpf-event.c
@@ -657,9 +657,15 @@ static int perf_event__synthesize_one_bpf_prog(struct perf_session *session,
info_node->info_linear = info_linear;
info_node->metadata = NULL;
if (!perf_env__insert_bpf_prog_info(env, info_node)) {
- free(info_linear);
+ /*
+ * Insert failed, likely because of a duplicate event
+ * generated by the sideband thread. Skip synthesizing the
+ * metadata.
+ */
free(info_node);
+ goto out;
}
+ /* info_linear is now owned by info_node and shouldn't be freed below. */
info_linear = NULL;
/*
@@ -827,18 +833,18 @@ int perf_event__synthesize_bpf_events(struct perf_session *session,
return err;
}
-static void perf_env__add_bpf_info(struct perf_env *env, u32 id)
+static int perf_env__add_bpf_info(struct perf_env *env, u32 id)
{
struct bpf_prog_info_node *info_node;
struct perf_bpil *info_linear;
struct btf *btf = NULL;
u64 arrays;
u32 btf_id;
- int fd;
+ int fd, err = 0;
fd = bpf_prog_get_fd_by_id(id);
if (fd < 0)
- return;
+ return -EINVAL;
arrays = 1UL << PERF_BPIL_JITED_KSYMS;
arrays |= 1UL << PERF_BPIL_JITED_FUNC_LENS;
@@ -852,6 +858,7 @@ static void perf_env__add_bpf_info(struct perf_env *env, u32 id)
info_linear = get_bpf_prog_info_linear(fd, arrays);
if (IS_ERR_OR_NULL(info_linear)) {
pr_debug("%s: failed to get BPF program info. aborting\n", __func__);
+ err = PTR_ERR(info_linear);
goto out;
}
@@ -862,38 +869,46 @@ static void perf_env__add_bpf_info(struct perf_env *env, u32 id)
info_node->info_linear = info_linear;
info_node->metadata = bpf_metadata_create(&info_linear->info);
if (!perf_env__insert_bpf_prog_info(env, info_node)) {
+ pr_debug("%s: duplicate add bpf info request for id %u\n",
+ __func__, btf_id);
free(info_linear);
free(info_node);
+ goto out;
}
- } else
+ } else {
free(info_linear);
+ err = -ENOMEM;
+ goto out;
+ }
if (btf_id == 0)
goto out;
btf = btf__load_from_kernel_by_id(btf_id);
- if (libbpf_get_error(btf)) {
- pr_debug("%s: failed to get BTF of id %u, aborting\n",
- __func__, btf_id);
- goto out;
+ if (!btf) {
+ err = -errno;
+ pr_debug("%s: failed to get BTF of id %u %d\n", __func__, btf_id, err);
+ } else {
+ perf_env__fetch_btf(env, btf_id, btf);
}
- perf_env__fetch_btf(env, btf_id, btf);
out:
btf__free(btf);
close(fd);
+ return err;
}
static int bpf_event__sb_cb(union perf_event *event, void *data)
{
struct perf_env *env = data;
+ int ret = 0;
if (event->header.type != PERF_RECORD_BPF_EVENT)
return -1;
switch (event->bpf.type) {
case PERF_BPF_EVENT_PROG_LOAD:
- perf_env__add_bpf_info(env, event->bpf.id);
+ ret = perf_env__add_bpf_info(env, event->bpf.id);
case PERF_BPF_EVENT_PROG_UNLOAD:
/*
@@ -907,7 +922,7 @@ static int bpf_event__sb_cb(union perf_event *event, void *data)
break;
}
- return 0;
+ return ret;
}
int evlist__add_bpf_sb_event(struct evlist *evlist, struct perf_env *env)
diff --git a/tools/perf/util/bpf-utils.c b/tools/perf/util/bpf-utils.c
index 80b1d2b3729b..5a66dc8594aa 100644
--- a/tools/perf/util/bpf-utils.c
+++ b/tools/perf/util/bpf-utils.c
@@ -20,7 +20,7 @@ struct bpil_array_desc {
*/
};
-static struct bpil_array_desc bpil_array_desc[] = {
+static const struct bpil_array_desc bpil_array_desc[] = {
[PERF_BPIL_JITED_INSNS] = {
offsetof(struct bpf_prog_info, jited_prog_insns),
offsetof(struct bpf_prog_info, jited_prog_len),
@@ -115,7 +115,7 @@ get_bpf_prog_info_linear(int fd, __u64 arrays)
__u32 info_len = sizeof(info);
__u32 data_len = 0;
int i, err;
- void *ptr;
+ __u8 *ptr;
if (arrays >> PERF_BPIL_LAST_ARRAY)
return ERR_PTR(-EINVAL);
@@ -126,15 +126,15 @@ get_bpf_prog_info_linear(int fd, __u64 arrays)
pr_debug("can't get prog info: %s", strerror(errno));
return ERR_PTR(-EFAULT);
}
+ if (info.type >= __MAX_BPF_PROG_TYPE)
+ pr_debug("%s:%d: unexpected program type %u\n", __func__, __LINE__, info.type);
/* step 2: calculate total size of all arrays */
for (i = PERF_BPIL_FIRST_ARRAY; i < PERF_BPIL_LAST_ARRAY; ++i) {
+ const struct bpil_array_desc *desc = &bpil_array_desc[i];
bool include_array = (arrays & (1UL << i)) > 0;
- struct bpil_array_desc *desc;
__u32 count, size;
- desc = bpil_array_desc + i;
-
/* kernel is too old to support this field */
if (info_len < desc->array_offset + sizeof(__u32) ||
info_len < desc->count_offset + sizeof(__u32) ||
@@ -163,19 +163,20 @@ get_bpf_prog_info_linear(int fd, __u64 arrays)
ptr = info_linear->data;
for (i = PERF_BPIL_FIRST_ARRAY; i < PERF_BPIL_LAST_ARRAY; ++i) {
- struct bpil_array_desc *desc;
+ const struct bpil_array_desc *desc = &bpil_array_desc[i];
__u32 count, size;
if ((arrays & (1UL << i)) == 0)
continue;
- desc = bpil_array_desc + i;
count = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
size = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
bpf_prog_info_set_offset_u32(&info_linear->info,
desc->count_offset, count);
bpf_prog_info_set_offset_u32(&info_linear->info,
desc->size_offset, size);
+ assert(ptr >= info_linear->data);
+ assert(ptr < &info_linear->data[data_len]);
bpf_prog_info_set_offset_u64(&info_linear->info,
desc->array_offset,
ptr_to_u64(ptr));
@@ -189,27 +190,45 @@ get_bpf_prog_info_linear(int fd, __u64 arrays)
free(info_linear);
return ERR_PTR(-EFAULT);
}
+ if (info_linear->info.type >= __MAX_BPF_PROG_TYPE) {
+ pr_debug("%s:%d: unexpected program type %u\n",
+ __func__, __LINE__, info_linear->info.type);
+ }
/* step 6: verify the data */
+ ptr = info_linear->data;
for (i = PERF_BPIL_FIRST_ARRAY; i < PERF_BPIL_LAST_ARRAY; ++i) {
- struct bpil_array_desc *desc;
- __u32 v1, v2;
+ const struct bpil_array_desc *desc = &bpil_array_desc[i];
+ __u32 count1, count2, size1, size2;
+ __u64 ptr2;
if ((arrays & (1UL << i)) == 0)
continue;
- desc = bpil_array_desc + i;
- v1 = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
- v2 = bpf_prog_info_read_offset_u32(&info_linear->info,
+ count1 = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
+ count2 = bpf_prog_info_read_offset_u32(&info_linear->info,
desc->count_offset);
- if (v1 != v2)
- pr_warning("%s: mismatch in element count\n", __func__);
+ if (count1 != count2) {
+ pr_warning("%s: mismatch in element count %u vs %u\n", __func__, count1, count2);
+ free(info_linear);
+ return ERR_PTR(-ERANGE);
+ }
- v1 = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
- v2 = bpf_prog_info_read_offset_u32(&info_linear->info,
+ size1 = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
+ size2 = bpf_prog_info_read_offset_u32(&info_linear->info,
desc->size_offset);
- if (v1 != v2)
- pr_warning("%s: mismatch in rec size\n", __func__);
+ if (size1 != size2) {
+ pr_warning("%s: mismatch in rec size %u vs %u\n", __func__, size1, size2);
+ free(info_linear);
+ return ERR_PTR(-ERANGE);
+ }
+ ptr2 = bpf_prog_info_read_offset_u64(&info_linear->info, desc->array_offset);
+ if (ptr_to_u64(ptr) != ptr2) {
+ pr_warning("%s: mismatch in array %p vs %llx\n", __func__, ptr, ptr2);
+ free(info_linear);
+ return ERR_PTR(-ERANGE);
+ }
+ ptr += roundup(count1 * size1, sizeof(__u64));
}
/* step 7: update info_len and data_len */
@@ -224,13 +243,12 @@ void bpil_addr_to_offs(struct perf_bpil *info_linear)
int i;
for (i = PERF_BPIL_FIRST_ARRAY; i < PERF_BPIL_LAST_ARRAY; ++i) {
- struct bpil_array_desc *desc;
+ const struct bpil_array_desc *desc = &bpil_array_desc[i];
__u64 addr, offs;
if ((info_linear->arrays & (1UL << i)) == 0)
continue;
- desc = bpil_array_desc + i;
addr = bpf_prog_info_read_offset_u64(&info_linear->info,
desc->array_offset);
offs = addr - ptr_to_u64(info_linear->data);
@@ -244,13 +262,12 @@ void bpil_offs_to_addr(struct perf_bpil *info_linear)
int i;
for (i = PERF_BPIL_FIRST_ARRAY; i < PERF_BPIL_LAST_ARRAY; ++i) {
- struct bpil_array_desc *desc;
+ const struct bpil_array_desc *desc = &bpil_array_desc[i];
__u64 addr, offs;
if ((info_linear->arrays & (1UL << i)) == 0)
continue;
- desc = bpil_array_desc + i;
offs = bpf_prog_info_read_offset_u64(&info_linear->info,
desc->array_offset);
addr = offs + ptr_to_u64(info_linear->data);
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
index 033c79231a54..1346fd180653 100644
--- a/tools/perf/util/symbol-elf.c
+++ b/tools/perf/util/symbol-elf.c
@@ -873,13 +873,17 @@ out:
#ifdef HAVE_LIBBFD_BUILDID_SUPPORT
-static int read_build_id(const char *filename, struct build_id *bid)
+static int read_build_id(const char *filename, struct build_id *bid, bool block)
{
size_t size = sizeof(bid->data);
- int err = -1;
+ int err = -1, fd;
bfd *abfd;
- abfd = bfd_openr(filename, NULL);
+ fd = open(filename, block ? O_RDONLY : (O_RDONLY | O_NONBLOCK));
+ if (fd < 0)
+ return -1;
+
+ abfd = bfd_fdopenr(filename, /*target=*/NULL, fd);
if (!abfd)
return -1;
diff --git a/tools/testing/selftests/bpf/prog_tests/free_timer.c b/tools/testing/selftests/bpf/prog_tests/free_timer.c
index b7b77a6b2979..0de8facca4c5 100644
--- a/tools/testing/selftests/bpf/prog_tests/free_timer.c
+++ b/tools/testing/selftests/bpf/prog_tests/free_timer.c
@@ -124,6 +124,10 @@ void test_free_timer(void)
int err;
skel = free_timer__open_and_load();
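+ /* bpf_timer may be unsupported by this kernel; skip instead of failing */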
+ if (!skel && errno == EOPNOTSUPP) {
+ test__skip();
+ return;
+ }
if (!ASSERT_OK_PTR(skel, "open_load"))
return;
diff --git a/tools/testing/selftests/bpf/prog_tests/timer.c b/tools/testing/selftests/bpf/prog_tests/timer.c
index d66687f1ee6a..56f660ca567b 100644
--- a/tools/testing/selftests/bpf/prog_tests/timer.c
+++ b/tools/testing/selftests/bpf/prog_tests/timer.c
@@ -86,6 +86,10 @@ void serial_test_timer(void)
int err;
timer_skel = timer__open_and_load();
+ if (!timer_skel && errno == EOPNOTSUPP) {
+ test__skip();
+ return;
+ }
if (!ASSERT_OK_PTR(timer_skel, "timer_skel_load"))
return;
diff --git a/tools/testing/selftests/bpf/prog_tests/timer_crash.c b/tools/testing/selftests/bpf/prog_tests/timer_crash.c
index f74b82305da8..b841597c8a3a 100644
--- a/tools/testing/selftests/bpf/prog_tests/timer_crash.c
+++ b/tools/testing/selftests/bpf/prog_tests/timer_crash.c
@@ -12,6 +12,10 @@ static void test_timer_crash_mode(int mode)
struct timer_crash *skel;
skel = timer_crash__open_and_load();
+ if (!skel && errno == EOPNOTSUPP) {
+ test__skip();
+ return;
+ }
if (!ASSERT_OK_PTR(skel, "timer_crash__open_and_load"))
return;
skel->bss->pid = getpid();
diff --git a/tools/testing/selftests/bpf/prog_tests/timer_lockup.c b/tools/testing/selftests/bpf/prog_tests/timer_lockup.c
index 1a2f99596916..eb303fa1e09a 100644
--- a/tools/testing/selftests/bpf/prog_tests/timer_lockup.c
+++ b/tools/testing/selftests/bpf/prog_tests/timer_lockup.c
@@ -59,6 +59,10 @@ void test_timer_lockup(void)
}
skel = timer_lockup__open_and_load();
+ if (!skel && errno == EOPNOTSUPP) {
+ test__skip();
+ return;
+ }
if (!ASSERT_OK_PTR(skel, "timer_lockup__open_and_load"))
return;
diff --git a/tools/testing/selftests/bpf/prog_tests/timer_mim.c b/tools/testing/selftests/bpf/prog_tests/timer_mim.c
index 9ff7843909e7..c930c7d7105b 100644
--- a/tools/testing/selftests/bpf/prog_tests/timer_mim.c
+++ b/tools/testing/selftests/bpf/prog_tests/timer_mim.c
@@ -65,6 +65,10 @@ void serial_test_timer_mim(void)
goto cleanup;
timer_skel = timer_mim__open_and_load();
+ if (!timer_skel && errno == EOPNOTSUPP) {
+ test__skip();
+ return;
+ }
if (!ASSERT_OK_PTR(timer_skel, "timer_skel_load"))
goto cleanup;
diff --git a/tools/testing/selftests/bpf/progs/bpf_arena_spin_lock.h b/tools/testing/selftests/bpf/progs/bpf_arena_spin_lock.h
index d67466c1ff77..f90531cf3ee5 100644
--- a/tools/testing/selftests/bpf/progs/bpf_arena_spin_lock.h
+++ b/tools/testing/selftests/bpf/progs/bpf_arena_spin_lock.h
@@ -302,7 +302,7 @@ int arena_spin_lock_slowpath(arena_spinlock_t __arena __arg_arena *lock, u32 val
* barriers.
*/
if (val & _Q_LOCKED_MASK)
- smp_cond_load_acquire_label(&lock->locked, !VAL, release_err);
+ (void)smp_cond_load_acquire_label(&lock->locked, !VAL, release_err);
/*
* take ownership and clear the pending bit.
@@ -380,7 +380,7 @@ queue:
/* Link @node into the waitqueue. */
WRITE_ONCE(prev->next, node);
- arch_mcs_spin_lock_contended_label(&node->locked, release_node_err);
+ (void)arch_mcs_spin_lock_contended_label(&node->locked, release_node_err);
/*
* While waiting for the MCS lock, the next pointer may have
diff --git a/tools/testing/selftests/bpf/progs/crypto_sanity.c b/tools/testing/selftests/bpf/progs/crypto_sanity.c
index 645be6cddf36..dfd8a258f14a 100644
--- a/tools/testing/selftests/bpf/progs/crypto_sanity.c
+++ b/tools/testing/selftests/bpf/progs/crypto_sanity.c
@@ -14,7 +14,7 @@ unsigned char key[256] = {};
u16 udp_test_port = 7777;
u32 authsize, key_len;
char algo[128] = {};
-char dst[16] = {};
+char dst[16] = {}, dst_bad[8] = {};
int status;
static int skb_dynptr_validate(struct __sk_buff *skb, struct bpf_dynptr *psrc)
@@ -59,10 +59,9 @@ int skb_crypto_setup(void *ctx)
.authsize = authsize,
};
struct bpf_crypto_ctx *cctx;
- int err = 0;
+ int err;
status = 0;
-
if (key_len > 256) {
status = -EINVAL;
return 0;
@@ -70,8 +69,8 @@ int skb_crypto_setup(void *ctx)
__builtin_memcpy(&params.algo, algo, sizeof(algo));
__builtin_memcpy(&params.key, key, sizeof(key));
- cctx = bpf_crypto_ctx_create(&params, sizeof(params), &err);
+ cctx = bpf_crypto_ctx_create(&params, sizeof(params), &err);
if (!cctx) {
status = err;
return 0;
@@ -80,7 +79,6 @@ int skb_crypto_setup(void *ctx)
err = crypto_ctx_insert(cctx);
if (err && err != -EEXIST)
status = err;
-
return 0;
}
@@ -92,6 +90,7 @@ int decrypt_sanity(struct __sk_buff *skb)
struct bpf_dynptr psrc, pdst;
int err;
+ status = 0;
err = skb_dynptr_validate(skb, &psrc);
if (err < 0) {
status = err;
@@ -110,13 +109,23 @@ int decrypt_sanity(struct __sk_buff *skb)
return TC_ACT_SHOT;
}
- /* dst is a global variable to make testing part easier to check. In real
- * production code, a percpu map should be used to store the result.
+ /* Also check the bad case where the dst buffer is smaller than the
+ * skb's linear section.
+ */
+ bpf_dynptr_from_mem(dst_bad, sizeof(dst_bad), 0, &pdst);
+ status = bpf_crypto_decrypt(ctx, &psrc, &pdst, NULL);
+ if (!status)
+ status = -EIO;
+ if (status != -EINVAL)
+ goto err;
+
+ /* dst is a global variable to make testing part easier to check.
+ * In real production code, a percpu map should be used to store
+ * the result.
*/
bpf_dynptr_from_mem(dst, sizeof(dst), 0, &pdst);
-
status = bpf_crypto_decrypt(ctx, &psrc, &pdst, NULL);
-
+err:
return TC_ACT_SHOT;
}
@@ -129,7 +138,6 @@ int encrypt_sanity(struct __sk_buff *skb)
int err;
status = 0;
-
err = skb_dynptr_validate(skb, &psrc);
if (err < 0) {
status = err;
@@ -148,13 +156,23 @@ int encrypt_sanity(struct __sk_buff *skb)
return TC_ACT_SHOT;
}
- /* dst is a global variable to make testing part easier to check. In real
- * production code, a percpu map should be used to store the result.
+ /* Also check the bad case where the dst buffer is smaller than the
+ * skb's linear section.
+ */
+ bpf_dynptr_from_mem(dst_bad, sizeof(dst_bad), 0, &pdst);
+ status = bpf_crypto_encrypt(ctx, &psrc, &pdst, NULL);
+ if (!status)
+ status = -EIO;
+ if (status != -EINVAL)
+ goto err;
+
+ /* dst is a global variable to make testing part easier to check.
+ * In real production code, a percpu map should be used to store
+ * the result.
*/
bpf_dynptr_from_mem(dst, sizeof(dst), 0, &pdst);
-
status = bpf_crypto_encrypt(ctx, &psrc, &pdst, NULL);
-
+err:
return TC_ACT_SHOT;
}
diff --git a/tools/testing/selftests/bpf/progs/linked_list_fail.c b/tools/testing/selftests/bpf/progs/linked_list_fail.c
index 6438982b928b..ddd26d1a083f 100644
--- a/tools/testing/selftests/bpf/progs/linked_list_fail.c
+++ b/tools/testing/selftests/bpf/progs/linked_list_fail.c
@@ -226,8 +226,7 @@ int obj_new_no_composite(void *ctx)
SEC("?tc")
int obj_new_no_struct(void *ctx)
{
-
- bpf_obj_new(union { int data; unsigned udata; });
+ (void)bpf_obj_new(union { int data; unsigned udata; });
return 0;
}
@@ -252,7 +251,7 @@ int new_null_ret(void *ctx)
SEC("?tc")
int obj_new_acq(void *ctx)
{
- bpf_obj_new(struct foo);
+ (void)bpf_obj_new(struct foo);
return 0;
}
diff --git a/tools/testing/selftests/bpf/progs/string_kfuncs_success.c b/tools/testing/selftests/bpf/progs/string_kfuncs_success.c
index 46697f381878..a47690174e0e 100644
--- a/tools/testing/selftests/bpf/progs/string_kfuncs_success.c
+++ b/tools/testing/selftests/bpf/progs/string_kfuncs_success.c
@@ -30,8 +30,12 @@ __test(2) int test_strcspn(void *ctx) { return bpf_strcspn(str, "lo"); }
__test(6) int test_strstr_found(void *ctx) { return bpf_strstr(str, "world"); }
__test(-ENOENT) int test_strstr_notfound(void *ctx) { return bpf_strstr(str, "hi"); }
__test(0) int test_strstr_empty(void *ctx) { return bpf_strstr(str, ""); }
-__test(0) int test_strnstr_found(void *ctx) { return bpf_strnstr(str, "hello", 6); }
-__test(-ENOENT) int test_strnstr_notfound(void *ctx) { return bpf_strnstr(str, "hi", 10); }
+__test(0) int test_strnstr_found1(void *ctx) { return bpf_strnstr("", "", 0); }
+__test(0) int test_strnstr_found2(void *ctx) { return bpf_strnstr(str, "hello", 5); }
+__test(0) int test_strnstr_found3(void *ctx) { return bpf_strnstr(str, "hello", 6); }
+__test(-ENOENT) int test_strnstr_notfound1(void *ctx) { return bpf_strnstr(str, "hi", 10); }
+__test(-ENOENT) int test_strnstr_notfound2(void *ctx) { return bpf_strnstr(str, "hello", 4); }
+__test(-ENOENT) int test_strnstr_notfound3(void *ctx) { return bpf_strnstr("", "a", 0); }
__test(0) int test_strnstr_empty(void *ctx) { return bpf_strnstr(str, "", 1); }
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/filesystems/mount-notify/mount-notify_test.c b/tools/testing/selftests/filesystems/mount-notify/mount-notify_test.c
index 63ce708d93ed..e4b7c2b457ee 100644
--- a/tools/testing/selftests/filesystems/mount-notify/mount-notify_test.c
+++ b/tools/testing/selftests/filesystems/mount-notify/mount-notify_test.c
@@ -2,6 +2,13 @@
// Copyright (c) 2025 Miklos Szeredi <miklos@szeredi.hu>
#define _GNU_SOURCE
+
+// Needed for linux/fanotify.h
+typedef struct {
+ int val[2];
+} __kernel_fsid_t;
+#define __kernel_fsid_t __kernel_fsid_t
+
#include <fcntl.h>
#include <sched.h>
#include <stdio.h>
@@ -10,20 +17,12 @@
#include <sys/mount.h>
#include <unistd.h>
#include <sys/syscall.h>
+#include <sys/fanotify.h>
#include "../../kselftest_harness.h"
#include "../statmount/statmount.h"
#include "../utils.h"
-// Needed for linux/fanotify.h
-#ifndef __kernel_fsid_t
-typedef struct {
- int val[2];
-} __kernel_fsid_t;
-#endif
-
-#include <sys/fanotify.h>
-
static const char root_mntpoint_templ[] = "/tmp/mount-notify_test_root.XXXXXX";
static const int mark_cmds[] = {
diff --git a/tools/testing/selftests/filesystems/mount-notify/mount-notify_test_ns.c b/tools/testing/selftests/filesystems/mount-notify/mount-notify_test_ns.c
index 090a5ca65004..9f57ca46e3af 100644
--- a/tools/testing/selftests/filesystems/mount-notify/mount-notify_test_ns.c
+++ b/tools/testing/selftests/filesystems/mount-notify/mount-notify_test_ns.c
@@ -2,6 +2,13 @@
// Copyright (c) 2025 Miklos Szeredi <miklos@szeredi.hu>
#define _GNU_SOURCE
+
+// Needed for linux/fanotify.h
+typedef struct {
+ int val[2];
+} __kernel_fsid_t;
+#define __kernel_fsid_t __kernel_fsid_t
+
#include <fcntl.h>
#include <sched.h>
#include <stdio.h>
@@ -10,21 +17,12 @@
#include <sys/mount.h>
#include <unistd.h>
#include <sys/syscall.h>
+#include <sys/fanotify.h>
#include "../../kselftest_harness.h"
-#include "../../pidfd/pidfd.h"
#include "../statmount/statmount.h"
#include "../utils.h"
-// Needed for linux/fanotify.h
-#ifndef __kernel_fsid_t
-typedef struct {
- int val[2];
-} __kernel_fsid_t;
-#endif
-
-#include <sys/fanotify.h>
-
static const char root_mntpoint_templ[] = "/tmp/mount-notify_test_root.XXXXXX";
static const int mark_types[] = {
diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile
index 08db9f0e5810..ae1afe75bc86 100644
--- a/tools/testing/selftests/net/Makefile
+++ b/tools/testing/selftests/net/Makefile
@@ -117,6 +117,7 @@ TEST_PROGS += skf_net_off.sh
TEST_GEN_FILES += skf_net_off
TEST_GEN_FILES += tfo
TEST_PROGS += tfo_passive.sh
+TEST_PROGS += broadcast_ether_dst.sh
TEST_PROGS += broadcast_pmtu.sh
TEST_PROGS += ipv6_force_forwarding.sh
TEST_GEN_PROGS += ipv6_fragmentation
diff --git a/tools/testing/selftests/net/broadcast_ether_dst.sh b/tools/testing/selftests/net/broadcast_ether_dst.sh
new file mode 100755
index 000000000000..334a7eca8a80
--- /dev/null
+++ b/tools/testing/selftests/net/broadcast_ether_dst.sh
@@ -0,0 +1,83 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Author: Brett A C Sheffield <bacs@librecast.net>
+# Author: Oscar Maes <oscmaes92@gmail.com>
+#
+# Ensure destination ethernet field is correctly set for
+# broadcast packets
+
+source lib.sh
+
+CLIENT_IP4="192.168.0.1"
+GW_IP4="192.168.0.2"
+
+setup() {
+ setup_ns CLIENT_NS SERVER_NS
+
+ ip -net "${SERVER_NS}" link add link1 type veth \
+ peer name link0 netns "${CLIENT_NS}"
+
+ ip -net "${CLIENT_NS}" link set link0 up
+ ip -net "${CLIENT_NS}" addr add "${CLIENT_IP4}"/24 dev link0
+
+ ip -net "${SERVER_NS}" link set link1 up
+
+ ip -net "${CLIENT_NS}" route add default via "${GW_IP4}"
+ ip netns exec "${CLIENT_NS}" arp -s "${GW_IP4}" 00:11:22:33:44:55
+}
+
+cleanup() {
+ rm -f "${CAPFILE}" "${OUTPUT}"
+ ip -net "${SERVER_NS}" link del link1
+ cleanup_ns "${CLIENT_NS}" "${SERVER_NS}"
+}
+
+test_broadcast_ether_dst() {
+ local rc=0
+ CAPFILE=$(mktemp -u cap.XXXXXXXXXX)
+ OUTPUT=$(mktemp -u out.XXXXXXXXXX)
+
+ echo "Testing ethernet broadcast destination"
+
+ # start tcpdump listening for icmp
+ # tcpdump will exit after receiving a single packet
+ # timeout will kill tcpdump if it is still running after 2s
+ timeout 2s ip netns exec "${CLIENT_NS}" \
+ tcpdump -i link0 -c 1 -w "${CAPFILE}" icmp &> "${OUTPUT}" &
+ pid=$!
+ slowwait 1 grep -qs "listening" "${OUTPUT}"
+
+ # send broadcast ping
+ ip netns exec "${CLIENT_NS}" \
+ ping -W0.01 -c1 -b 255.255.255.255 &> /dev/null
+
+ # wait for tcpdump to exit after receiving the packet
+ wait "${pid}"
+
+ # compare ethernet destination field to ff:ff:ff:ff:ff:ff
+ ether_dst=$(tcpdump -r "${CAPFILE}" -tnne 2>/dev/null | \
+ awk '{sub(/,/,"",$3); print $3}')
+ if [[ "${ether_dst}" == "ff:ff:ff:ff:ff:ff" ]]; then
+ echo "[ OK ]"
+ rc="${ksft_pass}"
+ else
+ echo "[FAIL] expected dst ether addr to be ff:ff:ff:ff:ff:ff," \
+ "got ${ether_dst}"
+ rc="${ksft_fail}"
+ fi
+
+ return "${rc}"
+}
+
+if [ ! -x "$(command -v tcpdump)" ]; then
+ echo "SKIP: Could not run test without tcpdump tool"
+ exit "${ksft_skip}"
+fi
+
+trap cleanup EXIT
+
+setup
+test_broadcast_ether_dst
+
+exit $?
diff --git a/tools/testing/selftests/net/can/config b/tools/testing/selftests/net/can/config
new file mode 100644
index 000000000000..188f79796670
--- /dev/null
+++ b/tools/testing/selftests/net/can/config
@@ -0,0 +1,3 @@
+CONFIG_CAN=m
+CONFIG_CAN_DEV=m
+CONFIG_CAN_VCAN=m
diff --git a/tools/testing/selftests/net/fcnal-test.sh b/tools/testing/selftests/net/fcnal-test.sh
index 0883da74ab7b..844a580ae74e 100755
--- a/tools/testing/selftests/net/fcnal-test.sh
+++ b/tools/testing/selftests/net/fcnal-test.sh
@@ -189,7 +189,7 @@ show_hint()
kill_procs()
{
killall nettest ping ping6 >/dev/null 2>&1
- sleep 1
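+ # wait up to 2 seconds for the killed processes to actually exit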
+ slowwait 2 sh -c 'test -z "$(pgrep '"'^(nettest|ping|ping6)$'"')"'
}
set_ping_group()
@@ -424,6 +424,8 @@ create_ns()
ip netns exec ${ns} sysctl -qw net.ipv6.conf.all.keep_addr_on_down=1
ip netns exec ${ns} sysctl -qw net.ipv6.conf.all.forwarding=1
ip netns exec ${ns} sysctl -qw net.ipv6.conf.default.forwarding=1
+ ip netns exec ${ns} sysctl -qw net.ipv6.conf.default.accept_dad=0
+ ip netns exec ${ns} sysctl -qw net.ipv6.conf.all.accept_dad=0
}
# create veth pair to connect namespaces and apply addresses.
diff --git a/tools/testing/selftests/net/forwarding/Makefile b/tools/testing/selftests/net/forwarding/Makefile
index 0a0d4c2a85f7..e6f482a600da 100644
--- a/tools/testing/selftests/net/forwarding/Makefile
+++ b/tools/testing/selftests/net/forwarding/Makefile
@@ -5,6 +5,7 @@ TEST_PROGS = \
bridge_fdb_learning_limit.sh \
bridge_igmp.sh \
bridge_locked_port.sh \
+ bridge_fdb_local_vlan_0.sh \
bridge_mdb.sh \
bridge_mdb_host.sh \
bridge_mdb_max.sh \
diff --git a/tools/testing/selftests/net/forwarding/bridge_fdb_local_vlan_0.sh b/tools/testing/selftests/net/forwarding/bridge_fdb_local_vlan_0.sh
new file mode 100755
index 000000000000..5a0b43aff5aa
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/bridge_fdb_local_vlan_0.sh
@@ -0,0 +1,374 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# +-----------------------+ +-----------------------+ +-----------------------+
+# | H1 (vrf) | | H2 (vrf) | | H3 (vrf) |
+# | + $h1 | | + $h2 | | + $h3 |
+# | | 192.0.2.1/28 | | | 192.0.2.2/28 | | | 192.0.2.18/28 |
+# | | 2001:db8:1::1/64 | | | 2001:db8:1::2/64 | | | 2001:db8:2::2/64 |
+# | | | | | | | | |
+# +----|------------------+ +----|------------------+ +----|------------------+
+# | | |
+# +----|-------------------------|-------------------------|------------------+
+# | +--|-------------------------|------------------+ | |
+# | | + $swp1 + $swp2 | + $swp3 |
+# | | | 192.0.2.17/28 |
+# | | BR1 (802.1q) | 2001:db8:2::1/64 |
+# | | 192.0.2.3/28 | |
+# | | 2001:db8:1::3/64 | |
+# | +-----------------------------------------------+ SW |
+# +---------------------------------------------------------------------------+
+#
+#shellcheck disable=SC2317 # SC doesn't see our uses of functions.
+#shellcheck disable=SC2034 # ... and global variables
+
+ALL_TESTS="
+ test_d_no_sharing
+ test_d_sharing
+ test_q_no_sharing
+ test_q_sharing
+"
+
+NUM_NETIFS=6
+source lib.sh
+
+pMAC=00:11:22:33:44:55
+bMAC=00:11:22:33:44:66
+mMAC=00:11:22:33:44:77
+xMAC=00:11:22:33:44:88
+
+host_create()
+{
+ local h=$1; shift
+ local ipv4=$1; shift
+ local ipv6=$1; shift
+
+ simple_if_init "$h" "$ipv4" "$ipv6"
+ defer simple_if_fini "$h" "$ipv4" "$ipv6"
+
+ ip_route_add vrf "v$h" 192.0.2.16/28 nexthop via 192.0.2.3
+ ip_route_add vrf "v$h" 2001:db8:2::/64 nexthop via 2001:db8:1::3
+}
+
+h3_create()
+{
+ simple_if_init "$h3" 192.0.2.18/28 2001:db8:2::2/64
+ defer simple_if_fini "$h3" 192.0.2.18/28 2001:db8:2::2/64
+
+ ip_route_add vrf "v$h3" 192.0.2.0/28 nexthop via 192.0.2.17
+ ip_route_add vrf "v$h3" 2001:db8:1::/64 nexthop via 2001:db8:2::1
+
+ tc qdisc add dev "$h3" clsact
+ defer tc qdisc del dev "$h3" clsact
+
+ tc filter add dev "$h3" ingress proto ip pref 104 \
+ flower skip_hw ip_proto udp dst_port 4096 \
+ action pass
+ defer tc filter del dev "$h3" ingress proto ip pref 104
+
+ tc qdisc add dev "$h2" clsact
+ defer tc qdisc del dev "$h2" clsact
+
+ tc filter add dev "$h2" ingress proto ip pref 104 \
+ flower skip_hw ip_proto udp dst_port 4096 \
+ action pass
+ defer tc filter del dev "$h2" ingress proto ip pref 104
+}
+
+switch_create()
+{
+ ip_link_set_up "$swp1"
+
+ ip_link_set_up "$swp2"
+
+ ip_addr_add "$swp3" 192.0.2.17/28
+ ip_addr_add "$swp3" 2001:db8:2::1/64
+ ip_link_set_up "$swp3"
+}
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ swp1=${NETIFS[p2]}
+
+ swp2=${NETIFS[p3]}
+ h2=${NETIFS[p4]}
+
+ swp3=${NETIFS[p5]}
+ h3=${NETIFS[p6]}
+
+ vrf_prepare
+ defer vrf_cleanup
+
+ forwarding_enable
+ defer forwarding_restore
+
+ host_create "$h1" 192.0.2.1/28 2001:db8:1::1/64
+ host_create "$h2" 192.0.2.2/28 2001:db8:1::2/64
+ h3_create
+
+ switch_create
+}
+
+adf_bridge_create()
+{
+ local dev
+ local mac
+
+ ip_link_add br up type bridge vlan_default_pvid 0 "$@"
+ mac=$(mac_get br)
+ ip_addr_add br 192.0.2.3/28
+ ip_addr_add br 2001:db8:1::3/64
+
+ bridge_vlan_add dev br vid 1 pvid untagged self
+ bridge_vlan_add dev br vid 2 self
+ bridge_vlan_add dev br vid 3 self
+
+ for dev in "$swp1" "$swp2"; do
+ ip_link_set_master "$dev" br
+ bridge_vlan_add dev "$dev" vid 1 pvid untagged
+ bridge_vlan_add dev "$dev" vid 2
+ bridge_vlan_add dev "$dev" vid 3
+ done
+
+ ip_link_set_addr br "$mac"
+}
+
+check_fdb_local_vlan_0_support()
+{
+ if ip_link_add XXbr up type bridge vlan_filtering 1 fdb_local_vlan_0 1 \
+ &>/dev/null; then
+ return 0
+ fi
+
+ log_test_skip "FDB sharing" \
+ "iproute 2 or the kernel do not support fdb_local_vlan_0"
+}
+
+check_mac_presence()
+{
+ local should_fail=$1; shift
+ local dev=$1; shift
+ local vlan=$1; shift
+ local mac
+
+ mac=$(mac_get "$dev")
+
+ if ((vlan == 0)); then
+ vlan=null
+ fi
+
+ bridge -j fdb show dev "$dev" |
+ jq -e --arg mac "$mac" --argjson vlan "$vlan" \
+ '.[] | select(.mac == $mac) | select(.vlan == $vlan)' > /dev/null
+ check_err_fail "$should_fail" $? "FDB dev $dev vid $vlan addr $mac exists"
+}
+
+do_sharing_test()
+{
+ local should_fail=$1; shift
+ local what=$1; shift
+ local dev
+
+ RET=0
+
+ for dev in "$swp1" "$swp2" br; do
+ check_mac_presence 0 "$dev" 0
+ check_mac_presence "$should_fail" "$dev" 1
+ check_mac_presence "$should_fail" "$dev" 2
+ check_mac_presence "$should_fail" "$dev" 3
+ done
+
+ log_test "$what"
+}
+
+do_end_to_end_test()
+{
+ local mac=$1; shift
+ local what=$1; shift
+ local probe_dev=${1-$h3}; shift
+ local expect=${1-10}; shift
+
+ local t0
+ local t1
+ local dd
+
+ RET=0
+
+ # Use $mac (a local port or bridge MAC) as the destination MAC. In the MAC sharing
+ # context, that will cause an FDB miss on VLAN 1 and prompt a second
+ # lookup in VLAN 0.
+
+ t0=$(tc_rule_stats_get "$probe_dev" 104 ingress)
+
+ $MZ "$h1" -c 10 -p 64 -a own -b "$mac" \
+ -A 192.0.2.1 -B 192.0.2.18 -t udp "dp=4096,sp=2048" -q
+ sleep 1
+
+ t1=$(tc_rule_stats_get "$probe_dev" 104 ingress)
+ dd=$((t1 - t0))
+
+ ((dd == expect))
+ check_err $? "Expected $expect packets on $probe_dev got $dd"
+
+ log_test "$what"
+}
+
+do_tests()
+{
+ local should_fail=$1; shift
+ local what=$1; shift
+ local swp1_mac
+ local br_mac
+
+ swp1_mac=$(mac_get "$swp1")
+ br_mac=$(mac_get br)
+
+ do_sharing_test "$should_fail" "$what"
+ do_end_to_end_test "$swp1_mac" "$what: end to end, $swp1 MAC"
+ do_end_to_end_test "$br_mac" "$what: end to end, br MAC"
+}
+
+bridge_standard()
+{
+ local vlan_filtering=$1; shift
+
+ if ((vlan_filtering)); then
+ echo 802.1q
+ else
+ echo 802.1d
+ fi
+}
+
+nonexistent_fdb_test()
+{
+ local vlan_filtering=$1; shift
+ local standard
+
+ standard=$(bridge_standard "$vlan_filtering")
+
+ # We expect flooding, so $h2 should get the traffic.
+ do_end_to_end_test "$xMAC" "$standard: Nonexistent FDB" "$h2"
+}
+
+misleading_fdb_test()
+{
+ local vlan_filtering=$1; shift
+ local standard
+
+ standard=$(bridge_standard "$vlan_filtering")
+
+ defer_scope_push
+ # Add an FDB entry on VLAN 0. The lookup on a VLAN-aware bridge
+ # shouldn't pick this up even with fdb_local_vlan_0 enabled, so
+ # the traffic should be flooded. This all holds on a
+ # vlan_filtering bridge; on a non-vlan_filtering one the FDB
+ # entry is expected to be found as usual, and no flooding takes
+ # place.
+ #
+ # Adding only on VLAN 0 is a bit tricky, because the bridge is
+ # trying to be nice and interprets the request as if the FDB
+ # entry should be added on each VLAN.
+
+ bridge fdb add "$mMAC" dev "$swp1" master
+ bridge fdb del "$mMAC" dev "$swp1" vlan 1 master
+ bridge fdb del "$mMAC" dev "$swp1" vlan 2 master
+ bridge fdb del "$mMAC" dev "$swp1" vlan 3 master
+
+ local expect=$((vlan_filtering ? 10 : 0))
+ do_end_to_end_test "$mMAC" \
+ "$standard: Lookup of non-local MAC on VLAN 0" \
+ "$h2" "$expect"
+ defer_scope_pop
+}
+
+change_mac()
+{
+ local dev=$1; shift
+ local mac=$1; shift
+ local cur_mac
+
+ cur_mac=$(mac_get "$dev")
+
+ log_info "Change $dev MAC $cur_mac -> $mac"
+ ip_link_set_addr "$dev" "$mac"
+ defer log_info "Change $dev MAC back"
+}
+
+do_test_no_sharing()
+{
+ local vlan_filtering=$1; shift
+ local standard
+
+ standard=$(bridge_standard "$vlan_filtering")
+
+ adf_bridge_create vlan_filtering "$vlan_filtering"
+ setup_wait
+
+ do_tests 0 "$standard, no FDB sharing"
+
+ change_mac "$swp1" "$pMAC"
+ change_mac br "$bMAC"
+
+ do_tests 0 "$standard, no FDB sharing after MAC change"
+
+ in_defer_scope check_fdb_local_vlan_0_support || return
+
+ log_info "Set fdb_local_vlan_0=1"
+ ip link set dev br type bridge fdb_local_vlan_0 1
+
+ do_tests 1 "$standard, fdb sharing after toggle"
+}
+
+do_test_sharing()
+{
+ local vlan_filtering=$1; shift
+ local standard
+
+ standard=$(bridge_standard "$vlan_filtering")
+
+ in_defer_scope check_fdb_local_vlan_0_support || return
+
+ adf_bridge_create vlan_filtering "$vlan_filtering" fdb_local_vlan_0 1
+ setup_wait
+
+ do_tests 1 "$standard, FDB sharing"
+
+ nonexistent_fdb_test "$vlan_filtering"
+ misleading_fdb_test "$vlan_filtering"
+
+ change_mac "$swp1" "$pMAC"
+ change_mac br "$bMAC"
+
+ do_tests 1 "$standard, FDB sharing after MAC change"
+
+ log_info "Set fdb_local_vlan_0=0"
+ ip link set dev br type bridge fdb_local_vlan_0 0
+
+ do_tests 0 "$standard, No FDB sharing after toggle"
+}
+
+test_d_no_sharing()
+{
+ do_test_no_sharing 0
+}
+
+test_d_sharing()
+{
+ do_test_sharing 0
+}
+
+test_q_no_sharing()
+{
+ do_test_no_sharing 1
+}
+
+test_q_sharing()
+{
+ do_test_sharing 1
+}
+
+trap cleanup EXIT
+
+setup_prepare
+tests_run
diff --git a/tools/testing/selftests/net/lib.sh b/tools/testing/selftests/net/lib.sh
index 4dca6893aa8a..fea9c37fe338 100644
--- a/tools/testing/selftests/net/lib.sh
+++ b/tools/testing/selftests/net/lib.sh
@@ -547,8 +547,8 @@ ip_link_add()
{
local name=$1; shift
- ip link add name "$name" "$@"
- defer ip link del dev "$name"
+ ip link add name "$name" "$@" && \
+ defer ip link del dev "$name"
}
ip_link_set_master()
@@ -556,8 +556,8 @@ ip_link_set_master()
local member=$1; shift
local master=$1; shift
- ip link set dev "$member" master "$master"
- defer ip link set dev "$member" nomaster
+ ip link set dev "$member" master "$master" && \
+ defer ip link set dev "$member" nomaster
}
ip_link_set_addr()
@@ -566,8 +566,8 @@ ip_link_set_addr()
local addr=$1; shift
local old_addr=$(mac_get "$name")
- ip link set dev "$name" address "$addr"
- defer ip link set dev "$name" address "$old_addr"
+ ip link set dev "$name" address "$addr" && \
+ defer ip link set dev "$name" address "$old_addr"
}
ip_link_has_flag()
@@ -590,8 +590,8 @@ ip_link_set_up()
local name=$1; shift
if ! ip_link_is_up "$name"; then
- ip link set dev "$name" up
- defer ip link set dev "$name" down
+ ip link set dev "$name" up && \
+ defer ip link set dev "$name" down
fi
}
@@ -600,8 +600,8 @@ ip_link_set_down()
local name=$1; shift
if ip_link_is_up "$name"; then
- ip link set dev "$name" down
- defer ip link set dev "$name" up
+ ip link set dev "$name" down && \
+ defer ip link set dev "$name" up
fi
}
@@ -609,20 +609,20 @@ ip_addr_add()
{
local name=$1; shift
- ip addr add dev "$name" "$@"
- defer ip addr del dev "$name" "$@"
+ ip addr add dev "$name" "$@" && \
+ defer ip addr del dev "$name" "$@"
}
ip_route_add()
{
- ip route add "$@"
- defer ip route del "$@"
+ ip route add "$@" && \
+ defer ip route del "$@"
}
bridge_vlan_add()
{
- bridge vlan add "$@"
- defer bridge vlan del "$@"
+ bridge vlan add "$@" && \
+ defer bridge vlan del "$@"
}
wait_local_port_listen()
diff --git a/tools/testing/selftests/net/lib/sh/defer.sh b/tools/testing/selftests/net/lib/sh/defer.sh
index 082f5d38321b..47ab78c4d465 100644
--- a/tools/testing/selftests/net/lib/sh/defer.sh
+++ b/tools/testing/selftests/net/lib/sh/defer.sh
@@ -1,6 +1,10 @@
#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
+# Whether to pause and allow debugging when a deferred command exits with a
+# non-zero status.
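+# For example (the script name below is illustrative only):
+#   DEFER_PAUSE_ON_FAIL=yes ./some_selftest.sh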
+: "${DEFER_PAUSE_ON_FAIL:=no}"
+
# map[(scope_id,track,cleanup_id) -> cleanup_command]
# track={d=default | p=priority}
declare -A __DEFER__JOBS
@@ -38,8 +42,20 @@ __defer__run()
local track=$1; shift
local defer_ix=$1; shift
local defer_key=$(__defer__defer_key $track $defer_ix)
+ local ret
+
+ eval ${__DEFER__JOBS[$defer_key]}
+ ret=$?
+
+ if [[ "$DEFER_PAUSE_ON_FAIL" == yes && "$ret" -ne 0 ]]; then
+ echo "Deferred command (track $track index $defer_ix):"
+ echo " ${__DEFER__JOBS[$defer_key]}"
+ echo "... ended with an exit status of $ret"
+ echo "Hit enter to continue, 'q' to quit"
+ read a
+ [[ "$a" == q ]] && exit 1
+ fi
- ${__DEFER__JOBS[$defer_key]}
unset __DEFER__JOBS[$defer_key]
}
@@ -49,7 +65,7 @@ __defer__schedule()
local ndefers=$(__defer__ndefers $track)
local ndefers_key=$(__defer__ndefer_key $track)
local defer_key=$(__defer__defer_key $track $ndefers)
- local defer="$@"
+ local defer="${@@Q}"
__DEFER__JOBS[$defer_key]="$defer"
__DEFER__NJOBS[$ndefers_key]=$((ndefers + 1))
diff --git a/tools/testing/selftests/net/mptcp/diag.sh b/tools/testing/selftests/net/mptcp/diag.sh
index 7a3cb4c09e45..d847ff1737c3 100755
--- a/tools/testing/selftests/net/mptcp/diag.sh
+++ b/tools/testing/selftests/net/mptcp/diag.sh
@@ -28,7 +28,7 @@ flush_pids()
}
# This function is used in the cleanup trap
-#shellcheck disable=SC2317
+#shellcheck disable=SC2317,SC2329
cleanup()
{
ip netns pids "${ns}" | xargs --no-run-if-empty kill -SIGKILL &>/dev/null
diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect.sh b/tools/testing/selftests/net/mptcp/mptcp_connect.sh
index 5e3c56253274..c2ab9f7f0d21 100755
--- a/tools/testing/selftests/net/mptcp/mptcp_connect.sh
+++ b/tools/testing/selftests/net/mptcp/mptcp_connect.sh
@@ -134,7 +134,7 @@ ns4=""
TEST_GROUP=""
# This function is used in the cleanup trap
-#shellcheck disable=SC2317
+#shellcheck disable=SC2317,SC2329
cleanup()
{
rm -f "$cin_disconnect"
diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh
index b41cebfa1f92..6055ee5762e1 100755
--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
+++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
@@ -8,7 +8,7 @@
# ShellCheck incorrectly believes that most of the code here is unreachable
# because it's invoked by variable name, see how the "tests" array is used
-#shellcheck disable=SC2317
+#shellcheck disable=SC2317,SC2329
. "$(dirname "${0}")/mptcp_lib.sh"
diff --git a/tools/testing/selftests/net/mptcp/mptcp_sockopt.sh b/tools/testing/selftests/net/mptcp/mptcp_sockopt.sh
index 418a903c3a4d..f01989be6e9b 100755
--- a/tools/testing/selftests/net/mptcp/mptcp_sockopt.sh
+++ b/tools/testing/selftests/net/mptcp/mptcp_sockopt.sh
@@ -95,7 +95,7 @@ init()
}
# This function is used in the cleanup trap
-#shellcheck disable=SC2317
+#shellcheck disable=SC2317,SC2329
cleanup()
{
mptcp_lib_ns_exit "${ns1}" "${ns2}" "${ns_sbox}"
diff --git a/tools/testing/selftests/net/mptcp/pm_netlink.sh b/tools/testing/selftests/net/mptcp/pm_netlink.sh
index ac7ec6f94023..ec6a87588191 100755
--- a/tools/testing/selftests/net/mptcp/pm_netlink.sh
+++ b/tools/testing/selftests/net/mptcp/pm_netlink.sh
@@ -32,7 +32,7 @@ ns1=""
err=$(mktemp)
# This function is used in the cleanup trap
-#shellcheck disable=SC2317
+#shellcheck disable=SC2317,SC2329
cleanup()
{
rm -f "${err}"
@@ -70,8 +70,9 @@ format_endpoints() {
mptcp_lib_pm_nl_format_endpoints "${@}"
}
+# This function is invoked indirectly
+#shellcheck disable=SC2317,SC2329
get_endpoint() {
- # shellcheck disable=SC2317 # invoked indirectly
mptcp_lib_pm_nl_get_endpoint "${ns1}" "${@}"
}
diff --git a/tools/testing/selftests/net/mptcp/simult_flows.sh b/tools/testing/selftests/net/mptcp/simult_flows.sh
index 2329c2f8519b..1903e8e84a31 100755
--- a/tools/testing/selftests/net/mptcp/simult_flows.sh
+++ b/tools/testing/selftests/net/mptcp/simult_flows.sh
@@ -35,7 +35,7 @@ usage() {
}
# This function is used in the cleanup trap
-#shellcheck disable=SC2317
+#shellcheck disable=SC2317,SC2329
cleanup()
{
rm -f "$cout" "$sout"
diff --git a/tools/testing/selftests/net/mptcp/userspace_pm.sh b/tools/testing/selftests/net/mptcp/userspace_pm.sh
index 333064b0b5ac..970c329735ff 100755
--- a/tools/testing/selftests/net/mptcp/userspace_pm.sh
+++ b/tools/testing/selftests/net/mptcp/userspace_pm.sh
@@ -94,7 +94,7 @@ test_fail()
}
# This function is used in the cleanup trap
-#shellcheck disable=SC2317
+#shellcheck disable=SC2317,SC2329
cleanup()
{
print_title "Cleanup"
diff --git a/tools/testing/selftests/wireguard/qemu/kernel.config b/tools/testing/selftests/wireguard/qemu/kernel.config
index 0a5381717e9f..936b18be07cf 100644
--- a/tools/testing/selftests/wireguard/qemu/kernel.config
+++ b/tools/testing/selftests/wireguard/qemu/kernel.config
@@ -20,9 +20,10 @@ CONFIG_NETFILTER_XTABLES_LEGACY=y
CONFIG_NETFILTER_XT_NAT=y
CONFIG_NETFILTER_XT_MATCH_LENGTH=y
CONFIG_NETFILTER_XT_MARK=y
-CONFIG_NETFILTER_XT_TARGET_MASQUERADE=m
-CONFIG_IP_NF_TARGET_REJECT=m
-CONFIG_IP6_NF_TARGET_REJECT=m
+CONFIG_NETFILTER_XT_TARGET_MASQUERADE=y
+CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_IP6_NF_TARGET_REJECT=y
+CONFIG_IP_NF_IPTABLES_LEGACY=y
CONFIG_IP_NF_IPTABLES=y
CONFIG_IP_NF_FILTER=y
CONFIG_IP_NF_MANGLE=y
@@ -48,7 +49,6 @@ CONFIG_JUMP_LABEL=y
CONFIG_FUTEX=y
CONFIG_SHMEM=y
CONFIG_SLUB=y
-CONFIG_SPARSEMEM_VMEMMAP=y
CONFIG_SMP=y
CONFIG_SCHED_SMT=y
CONFIG_SCHED_MC=y